input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
refreshed (and thus marks
the end of the menu).
:param str menu_name: (optional; default same as menu_title)
string that is printed in progress messages to describe the
menu.
"""
assert isinstance(target, tcfl.tc.target_c)
assert isinstance(menu_title, str)
assert canary_end_menu_redrawn == None \
or isinstance(canary_end_menu_redrawn, str)
if menu_name == None:
menu_name = menu_title
else:
assert isinstance(menu_title, str)
start_of_menu = re.compile(br"/-+\\")
end_of_menu = re.compile(br"\-+/")
target.expect(start_of_menu,
name = menu_name + ":menu-box-start")
target.expect(menu_title,
name = menu_name + ":menu-title")
target.expect(end_of_menu,
name = menu_name + ":menu-box-end" )
if canary_end_menu_redrawn:
target.expect(canary_end_menu_redrawn,
name = menu_name + ":end-of-menu")
target.report_info("BIOS:%s: found menu header" % menu_name)
def multiple_entry_select_one(
        target,
        select_entry,
        max_scrolls = 30,
        wait = 0.5, timeout = 10,
        # regex fragment (brackets pre-escaped) matching the ANSI
        # sequence that marks the highlighted/selected entry
        highlight_string = "\x1b\\[1m\x1b\\[37m\x1b\\[46m",
        level = ""):
    r"""
    In a simple menu, wait for it to be drawn and select a given entry

    :param tcfl.tc.target_c target: target on which to operate (uses
      the default console)
    :param str select_entry: name of the entry to select; can use
      Python regex format.
    :param int max_scrolls: (optional) how many times to scroll maximum
    :param float wait: (optional) seconds to wait between attempts
      (NOTE: currently only validated, not used by the implementation)
    :param float timeout: (optional) seconds to wait for the menu to
      redraw before retrying
    :param str highlight_string: (optional) ANSI sequence (in regex
      form) for highlighting an entry (defaults to blue BG, yellow FG,
      bold)
    :param str level: (optional; default *top*) name of the top level menu

    The menu is usually printed like (blue background, white foreground,
    yellow highlight)::

        /------------\
        |   Enable   |
        |   Disable  |
        \------------/

    In ANSI terms the selection highlight is ``^[[1m^[[37m^[[46m``, a
    normal (not selected) entry is preceded by ``^[[0m^[[37m^[[44m``
    and the menu box ends at ``----/``; this function waits for those
    markers and scrolls until the entry we want is selected.
    """
    assert isinstance(target, tcfl.tc.target_c)
    assert isinstance(select_entry, str)
    assert isinstance(max_scrolls, int) and max_scrolls > 0
    assert isinstance(wait, numbers.Real) and wait > 0
    assert isinstance(timeout, numbers.Real) and timeout > 0
    assert isinstance(level, str)
    # this assumes the whole box is redrawn every time we move the
    # cursor -- hence why we look for /---- ANSICRUFT+KEYSANSI+CRUFT
    # HIGHLIGHT KEY ANSICRUFT ----/
    _direction = False
    entry_highlighted_regex = re.compile(
        b"/-+"
        + b".*"
        + highlight_string.encode('utf-8')
        # cursor-positioning sequence ESC[row;colH printed right before
        # the entry text; '[' is escaped ('\\[') so the regex engine
        # takes it literally (a bare '\[' in a bytes literal is an
        # invalid escape sequence)
        + b"\x1b\\[[0-9]+;[0-9]+H"
        + b"(?P<key>[^\x1b]+)"
        + b".*"
        + rb"-+/")
    # compile once; the target pattern does not change while scrolling
    select_entry_regex = re.compile(select_entry.encode('utf-8'))
    target.report_info("BIOS: %s: scrolling for '%s'"
                       % (level, select_entry))
    last_seen_entry = None
    last_seen_entry_count = 0
    for _toggle in range(max_scrolls):
        # move the highlight one entry in the current scroll direction
        if _direction:
            target.console_tx("\x1b[A")  # press arrow up
        else:
            target.console_tx("\x1b[B")  # press arrow down
        target.report_info("BIOS: %s: waiting for highlighted entry" % level)
        # wait for highlighted then give it a breather to send the rest
        retry_top = 5
        retry_cnt = 0
        while retry_cnt < retry_top:
            r = target.expect(entry_highlighted_regex,
                              timeout = timeout, name = "highlight")
            if 'highlight' in r:
                break
            retry_cnt += 1
            target.report_info(
                "BIOS: %s: %s: didn't find a highlighted entry, retrying"
                % (level, select_entry))
            # tickle the menu so it redraws
            target.console_tx("\x1b[A")  # press arrow up
            target.console_tx("\x1b[B")  # press arrow down
        else:
            # retries exhausted without seeing a highlight, raise it
            raise tcfl.tc.error_e(
                "BIOS: %s: can't find highlighted entries after %d tries"
                % (level, retry_top))
        key = r['highlight']['groupdict']['key']
        if select_entry_regex.search(key):
            target.report_info("BIOS: %s: entry '%s' found"
                               % (level, select_entry))
            return key, r['highlight']['groupdict']
        # track how often we land on the same highlighted entry; if we
        # keep hitting it, the menu probably does not wrap around, so
        # flip the scroll direction. Note we might not have seen all
        # the entries yet, hence the small threshold.
        if last_seen_entry == key:
            last_seen_entry_count += 1
        else:
            last_seen_entry = key
            last_seen_entry_count = 1
        if last_seen_entry_count > 2:
            _direction = not _direction
        # 'key' is the currently highlighted entry, not the target one
        target.report_info("BIOS: %s: entry '%s' found, scrolling"
                           % (level, key))
    # nothing found, raise it
    raise tcfl.tc.error_e("%s: can't find entry option after %d entries"
                          % (select_entry, max_scrolls))
def menu_escape_to_main(target, esc_first = True):
    """
    At any submenu, press ESC repeatedly until we go back to the main
    menu

    :param tcfl.tc.target_c target: target on which to operate (uses
      the default console)
    :param bool esc_first: (optional; default *True*) send an ESCAPE
      key press before starting.
    """
    assert isinstance(target, tcfl.tc.target_c)
    assert isinstance(esc_first, bool)
    target.report_info("BIOS: going to main menu")
    max_levels = 10  # FIXME: BIOS profile?
    if esc_first:
        target.console_tx("\x1b")
    #
    # We look for the Move Highlight marker that is printed at the end
    # of each menu; when we find that, then we look to see if what
    # came before it contained all the main menu entries; if it does,
    # we are good; if it doesn't, we press ESC again.
    #
    # We are going to wrap each main menu entry into how it looks in
    # the output
    #
    #   ^[[XX;YYHENTRY[SPACES]
    #
    # to make sure we are not catching false positives, because there
    # will be a lot of ANSI fluff in between; not being this paranoid
    # makes the main menu detection go out of sync
    regexl = []
    main_level_entries = main_level_entries_get(target)
    for entry in main_level_entries:
        # raw string: we need the literal regex token \[ (escaped
        # bracket); a bare "\[" in a normal string is an invalid
        # escape sequence
        regexl.append(r"\[[0-9]+;[0-9]+H" + entry)
    main_menu_regex = re.compile(".*".join(regexl))
    for level in range(max_levels):
        # remember how much console output existed before the redraw,
        # so we only scan what is printed from here on
        offset = target.console.size()
        # All menus print this after printing, so this is how we know
        # the menu has redrawn
        try:
            # FIXME: move this to BIOS profile canary/end/menu/redrawn
            target.expect("^v=Move Highlight")
        except tcfl.tc.error_e:
            target.report_info(
                "BIOS: escaping to main, pressing ESC after timeout %d/%d"
                % (level, max_levels))
            target.console_tx("\x1b")
            continue
        read = target.console.read(offset = offset)
        # then let's see if all the main menu entries are there
        m = main_menu_regex.search(read)
        if m:
            target.report_info("BIOS: escaped to main")
            # FIXME: this is a sync hack--we are still not sure why, but if
            # we don't do this, things don't sync up properly
            time.sleep(5)
            return
        target.report_info("BIOS: escaping to main, pressing ESC %d/%d"
                           % (level, max_levels))
        target.console_tx("\x1b")
    # nothing found, raise it
    raise tcfl.tc.error_e(
        "BIOS: escaping to main: pressed ESC %d and didn't find"
        " all the main menu entries (%s)"
        % (max_levels, ",".join(main_level_entries)))
def dialog_changes_not_saved_expect(target, action):
    r"""
    Expect a "changes have not saved" dialog and respond to it
    (send *Y*, *N* or ESC to cancel)

    :param tcfl.tc.target_c target: target on which to operate (uses
      the default console)
    :param str action: key to send: *Y*/*y* (save and exit), *N*/*n*
      (discard and exit) or the escape character *\x1b* (cancel)

    ::

      /---------------------------------------------------------------------\
      |                                                                     |
      |            Changes have not saved. Save Changes and exit?           |
      |Press 'Y' to save and exit, 'N' to discard and exit, 'ESC' to cancel.|
      |                                                                     |
      \---------------------------------------------------------------------/
    """
    assert isinstance(target, tcfl.tc.target_c)
    # the four-character string "0x1b" was accepted by mistake in
    # earlier versions; keep taking it for backwards compatibility but
    # translate it to the real ESC character so the BIOS actually
    # understands the key press
    if action == "0x1b":
        action = "\x1b"
    assert action in [ "y", "Y", "n", "N", "\x1b" ]
    submenu_header_expect(
        target, "Changes have not saved. Save Changes and exit",
        canary_end_menu_redrawn = None)
    # if we send the action too soon, sometimes it gets hung...so
    # let's be patient
    time.sleep(0.5)
    target.console_tx(action)
def menu_config_network_enable(target):
    """
    With the BIOS menu at the top level, enable the configuration option
    *EDKII Menu > Platform Configuration > Network Configuration > EFI Network*

    :param tcfl.tc.target_c target: target on which to operate (uses
      the default console)
    :returns: *True* if enabled, *False* if it was already enabled.
    """
    assert isinstance(target, tcfl.tc.target_c)
    # dig down the submenu chain to the *EFI Network* entry; the tuple
    # form presumably asks menu_dig_to to report the entry's current
    # value, and dig_last = False not to enter it -- TODO confirm
    # against menu_dig_to's documentation
    r = menu_dig_to(
        target,
        [
            "EDKII Menu",
            "Platform Configuration",
            "Network Configuration",
            ( "EFI Network", None, True ),
        ],
        dig_last = False,
        # FIXME: make this default
        canary_end_menu_redrawn = "Esc=Exit")
    entry = 'EFI Network'
    value = r['EFI Network']['value']
    # sic, different versions have different values, Disable vs Disabled vs ...
    if b"Disable" not in value:
        target.report_info("BIOS: %s: already enabled (%s)" % (entry, value))
        target.console_tx("\x1b")  # ESC one menu up
        return False
    target.report_info("BIOS: %s: enabling (was: %s)" % (entry, value))
    # it's disabled, let's enable
    entry_select(target)  # select it
    # geee ... some do Enable, some Enabled (see the missing d), so the
    # regex matches both
    multiple_entry_select_one(target, "Enabled?")
    entry_select(target)  # select it
    # hit F10 to save -- this way we don't have to deal with the
    # "changes not saved" dialog, which is very error prone
    # NOTE(review): assumes the terminal emulation name is published in
    # the target's keywords; defaults to vt100 -- confirm
    bios_terminal = target.kws.get("bios.terminal_emulation", "vt100")
    target.console.write(ansi_key_code("F10", bios_terminal))
    dialog_changes_not_saved_expect(target, "Y")
    # when saving is successful, we end up back in this menu
    submenu_header_expect(
        target, "Platform Configuration",
        canary_end_menu_redrawn = None)
    return True
def menu_config_network_disable(target):
"""
With the BIOS menu at the top level, disable the configuration | |
<reponame>msg-byu/gblearn
"""Tests collection.py
"""
from pyrelate.collection import AtomsCollection
import numpy as np
import sys
import shutil
import io
import re
import unittest
import os
'''Functions to help in writing and designing clear, functional unit tests'''
def _delete_store(my_col):
    '''Delete the on-disk store created for *my_col* during a test.

    Removes the whole directory tree at the collection store's root.
    '''
    shutil.rmtree(my_col.store.root)
def _initialize_collection_and_read(aids):
    '''Initialize collection and read specified atoms files

    Parameters:
        aids (list of str): list of aid's for all ASE Atoms objects to be read into collection from test_data

    Returns:
        AtomsCollection: collection named "Test" backed by "tests/store"
        with one Atoms object per aid read from tests/test_data.
    '''
    my_col = AtomsCollection("Test", store="tests/store")
    # fixture files are named ni.p<aid>.out; 28 is the atomic number (Ni)
    data_path = 'tests/test_data/ni.p{0:s}.out'
    for aid in aids:
        my_col.read(data_path.format(aid), 28, 'lammps-dump-text',
                    rxid=r'ni.p(?P<aid>\d+).out')
    return my_col
def _initialize_collection_and_describe(desc, aids, **kwargs):
    '''Initialize and describe collection with given aid's and descriptors

    Parameters:
        aids (list of str): list of aid's for all ASE Atoms objects to be read into collection from test_data
        desc(list of str): list of all descriptors to be applied to collection
        kwargs(dict): "arguments" to be used in descriptor, only used in name of the results file

    Returns:
        AtomsCollection: the described collection; every (aid, descriptor)
        pair is verified to have a stored result.
    '''
    my_col = _initialize_collection_and_read(aids)
    for d in desc:
        # the toy _test_descriptor stands in for a real descriptor function
        my_col.describe(d, fcn=_test_descriptor, **kwargs)
        for aid in aids:
            # sanity check: a result must have been stored for each aid
            assert my_col.get_description(aid, d, **kwargs) is not None
    return my_col
def _swap_x_y(positions):
"""Function to swap x and y coordinates of position"""
transposed = positions.T.copy()
temp = transposed[0].copy()
transposed[0] = transposed[1].copy()
transposed[1] = temp
swapped_positions = transposed.T
return swapped_positions
'''Toy descriptor functions to help in funtionality testing'''
def _test_descriptor(atoms, num=0, **kwargs):
if num == 0:
return 'test result 1', {}
else:
return 'test result 2', {}
def _processing_method(collection, based_on, method_name, **kwargs):
# process collection of results
new_string = method_name + "__"
for aid in collection.aids():
res = collection.get_description(aid, based_on[0], **based_on[1]) # str 'test result 1'
new_string += res
new_string += "_"
info = {}
return new_string, info
'''Unit tests'''
class TestCollection(unittest.TestCase):
def test_subset_defaults(self):
    '''Test subset: by default the subset keeps the parent's store and name'''
    aids = ['454', '455']
    my_col = _initialize_collection_and_read(aids)
    data_loc = "tests/test_data/sub1"
    # read a third file so the subset is a strict subset of the collection
    my_col.read(data_loc, 28, 'lammps-dump-text', rxid=r'ni.p(?P<aid>\d+).out')
    aids.append('456')
    new_col = my_col.subset(aids[:2])
    assert new_col.aids() == aids[:2]
    # no overrides given, so store location and name are shared
    assert new_col.store.root == my_col.store.root
    assert new_col.name == my_col.name
    _delete_store(my_col)
def test_subset_new_name(self):
    '''Test subset: a name override is applied to the subset only'''
    aids = ['454', '455']
    my_col = _initialize_collection_and_read(aids)
    new_col = my_col.subset(aids[:1], name="Test_sub")
    # NOTE(review): names appear to be lower-cased by AtomsCollection --
    # confirm against its constructor
    assert new_col.name == "test_sub"
    assert my_col.name == "test"
    assert new_col.aids() == aids[:1]
    _delete_store(my_col)
def test_subset_new_store(self):
    '''Test subset: a store override gives the subset its own store'''
    aids = ['454', '455']
    my_col = _initialize_collection_and_read(aids)
    new_col = my_col.subset(aids[:1], store="tests/store_2")
    assert my_col.store.root != new_col.store.root
    assert new_col.aids() == aids[:1]
    # two distinct stores were created, so both need cleaning up
    _delete_store(my_col)
    _delete_store(new_col)
def test_read_aid(self):
    '''Test _read_aid function'''
    my_col = AtomsCollection("Test", store="./tests/store")
    rxid = r'ni.p(?P<gbid>\d+).out'
    c_rxid = re.compile(rxid)
    filename = "./tests/test_data/ni.p454.out"
    # the aid is the digits captured from the filename by the regex
    aid = my_col._read_aid(filename, c_rxid)
    assert aid == "454"
    _delete_store(my_col)
def test_read_aid_with_prefix(self):
    '''Test _read_aid, with prefix'''
    my_col = AtomsCollection("Test", store="./tests/store")
    rxid = r'ni.p(?P<gbid>\d+).out'
    c_rxid = re.compile(rxid)
    filename = "./tests/test_data/ni.p454.out"
    prefix = "Pre"
    aid = my_col._read_aid(filename, c_rxid, prefix)
    # prefix is lower-cased and joined to the captured id with '_'
    assert aid == "pre_454"
    _delete_store(my_col)
def test_read_aid_no_regex(self):
    '''Test _read_aid, no regex: falls back to the bare filename as aid'''
    my_col = AtomsCollection("Test", store="./tests/store")
    filename = "./tests/test_data/ni.p454.out"
    aid = my_col._read_aid(filename, None)
    assert aid == "ni.p454.out"
    _delete_store(my_col)
def test_read_aid_no_regex_with_prefix(self):
    '''Test _read_aid, no regex but with prefix'''
    my_col = AtomsCollection("Test", store="./tests/store")
    filename = "./tests/test_data/ni.p454.out"
    prefix = "Test"
    # prefix is lower-cased and prepended to the filename-derived aid
    aid = my_col._read_aid(filename, None, prefix)
    assert aid == "test_ni.p454.out"
    _delete_store(my_col)
def test_read_aid_invalid_regex(self):
    '''Test _read_aid, invalid regex prints error and sets aid as filename'''
    my_col = AtomsCollection("Test", store="./tests/store")
    filename = "./tests/test_data/ni.p454.out"
    prefix = "Test"
    output = io.StringIO()
    saved_stdout = sys.stdout
    sys.stdout = output
    try:
        # pattern lacks the '?' of '(?P<gbid>...)', so the named group
        # never matches and no aid can be extracted
        invalid_rxid = r'ni.p(P<gbid>\d+).out'
        c_rxid = re.compile(invalid_rxid)
        aid = my_col._read_aid(filename, c_rxid, prefix)
    finally:
        # always restore stdout, otherwise every later test (and
        # pytest's own output) keeps writing into this StringIO
        sys.stdout = saved_stdout
    assert "Regex found no pattern. Resolving to filename as aid.\n" == output.getvalue()
    assert aid == "test_ni.p454.out"
    _delete_store(my_col)
def test_read_list(self):
    '''Test read function, read list of input files'''
    my_col = AtomsCollection("Test", store="./tests/store")
    my_col.read(["./tests/test_data/ni.p454.out", "./tests/test_data/ni.p455.out"], 28, rxid=r'ni.p(?P<gbid>\d+).out', prefix="TEST")
    assert 2 == len(my_col)
    # NOTE(review): assumes iteration yields aids in read order -- confirm
    assert "test_454" == list(my_col)[0]
    _delete_store(my_col)
def test_read_list_with_atomic_num_list(self):
    '''Test read, list of input files with atomic number list'''
    my_col = AtomsCollection("Test", store="./tests/store")
    # one atomic number per input file instead of a single shared value
    my_col.read(["./tests/test_data/ni.p454.out", "./tests/test_data/ni.p455.out"], [28, 28], rxid=r'ni.p(?P<gbid>\d+).out', prefix="TEST")
    assert 2 == len(my_col)
    assert "test_454" == list(my_col)[0]
    _delete_store(my_col)
def test_read_single_file(self):
    '''Test read function, read single file'''
    my_col = AtomsCollection("Test", store="./tests/store")
    # a plain string (not a list) is also accepted as input
    my_col.read("./tests/test_data/ni.p455.out", 28, rxid=r'ni.p(?P<gbid>\d+).out', prefix="TEST")
    assert 1 == len(my_col)
    _delete_store(my_col)
def test_read_directory(self):
    '''Test read, read all input files in directory'''
    my_col = AtomsCollection("Test", store="./tests/store")
    # sub1 contains exactly one matching fixture file
    my_col.read("./tests/test_data/sub1/", 28, rxid=r'ni.p(?P<gbid>\d+).out', prefix="TEST")
    assert 1 == len(my_col)
    _delete_store(my_col)
def test_read_empty_dir_with_file(self):
    '''Test read, read empty directory + single file'''
    my_col = AtomsCollection("Test", store="./tests/store")
    # the empty directory contributes nothing; only the file is read
    my_col.read(["./tests/test_data/ni.p455.out", "./tests/test_data/empty"], 28, rxid=r'ni.p(?P<gbid>\d+).out', prefix="TEST")
    assert 1 == len(my_col)
    _delete_store(my_col)
def test_read_empty_list(self):
    '''Test read, empty list: no files read, no error raised'''
    my_col = AtomsCollection("Test", store="./tests/store")
    my_col.read([], 28, rxid=r'ni.p(?P<gbid>\d+).out', prefix="TEST")
    assert 0 == len(my_col)
    _delete_store(my_col)
def test_read_repeat_file(self):
    '''Test read, repeat file, will not read previously read file'''
    # 454 is already in the collection from the helper's read
    my_col = _initialize_collection_and_read(['454'])
    my_col.read("./tests/test_data/ni.p454.out", 28, rxid=r'ni.p(?P<gbid>\d+).out')
    # still one entry: the duplicate read is skipped
    assert 1 == len(my_col)
    _delete_store(my_col)
def test_read_nonexistent_directory(self):
    '''Test read, a nonexistent path prints an error (does not raise)'''
    my_col = AtomsCollection("Test", store="./tests/store")
    output = io.StringIO()
    saved_stdout = sys.stdout
    sys.stdout = output
    try:
        my_col.read("definitely_wrong", 28, rxid=r'ni.p(?P<gbid>\d+).out', prefix="TEST")
    finally:
        # always restore stdout so later tests print normally
        sys.stdout = saved_stdout
    assert "Invalid file path, definitely_wrong was not read.\n" == output.getvalue()
    _delete_store(my_col)
def test_trim_correct_trim(self):
    '''Test trim function, check that a) some atoms are trimmed, and b) no
    atom outside the trim+pad region is kept in the Atoms object'''
    aid = '454'
    xdim = 0
    trim_val = 3
    pad_val = 3
    my_col = _initialize_collection_and_read([aid])
    pre_trim_size = len(my_col[aid])
    my_col.trim(trim=trim_val, dim=xdim, pad=pad_val)
    post_trim_size = len(my_col[aid])
    assert pre_trim_size > post_trim_size, "No atoms were trimmed"
    positions = my_col[aid].get_positions()[:, xdim]
    # every surviving coordinate must lie within +/-(trim + pad)
    bound = trim_val + pad_val
    for pos in positions:
        assert -bound <= pos <= bound, "Atoms object not trimmed correctly"
    _delete_store(my_col)
def test_trim_correct_pad(self):
    """Test that any atoms with a '0' value in the mask are supposed to be in the pad"""
    aid = '454'
    xdim = 0
    trim_val = 3
    pad_val = 3
    my_col = _initialize_collection_and_read([aid])
    my_col.trim(trim=trim_val, dim=xdim, pad=pad_val)
    positions = my_col[aid].get_positions()[:, xdim]
    mask = my_col[aid].get_array("mask")
    for idx, atom in enumerate(positions):
        if(mask[idx] == 0):
            # a masked-out (pad) atom must lie outside the trim region
            if(positions[idx] < trim_val and positions[idx] > (trim_val * -1)):
                assert False, "Mask was applied to atoms supposed to be included in final values"
    _delete_store(my_col)
def test_trim_pad_True(self):
    '''Test that when pad=True, the pad defaults to the trim value'''
    aid = '454'
    xdim = 0
    trim_val = 3
    expected_pad_val = 3
    my_col = _initialize_collection_and_read([aid])
    my_col.trim(trim=trim_val, dim=xdim, pad=True)
    positions = my_col[aid].get_positions()[:, xdim]
    # with pad == trim, all kept atoms lie within +/-(trim + trim)
    bound = trim_val + expected_pad_val
    for pos in positions:
        assert -bound <= pos <= bound, "Pad not set to same as trim value"
    _delete_store(my_col)
def test_trim_pad_False(self):
    '''Test that when pad=False, there is no pad'''
    aid = '454'
    xdim = 0
    trim_val = 3
    my_col = _initialize_collection_and_read([aid])
    my_col.trim(trim=trim_val, dim=xdim, pad=False)
    mask = my_col[aid].get_array("mask")
    # no pad means every remaining atom is unmasked (mask value nonzero)
    assert np.count_nonzero(mask) == len(my_col[aid]), "Padding atoms included in mask when not expected"
    _delete_store(my_col)
def test_trim_fail_invalid_trim(self):
    '''Test trim: a non-numeric trim value raises TypeError with a clear message'''
    my_col = AtomsCollection("Test", "tests/store")
    # self.assertRaises(TypeError, AtomsCollection.trim, trim="string", dim=0)
    try:
        my_col.trim(trim="string", dim=0)
    except TypeError as e:
        assert e.__str__() == "Trim should be int or float type"
    else:
        assert False, "Expected type error not thrown"
    _delete_store(my_col)
def test_trim_fail_invalid_pad(self):
    '''Test trim: an invalid pad value raises TypeError with a clear message'''
    my_col = AtomsCollection("Test", store="tests/store")
    try:
        my_col.trim(trim=4, dim=0, pad="string")
    except TypeError as e:
        assert e.__str__() == "Pad should be int, float, or boolean type"
    else:
        assert False, "Expected type error not thrown"
    _delete_store(my_col)
def test_trim_fail_invalid_dimension(self):
    '''Test trim: a dimension outside 0..2 raises TypeError with a clear message'''
    aid = '454'
    my_col = _initialize_collection_and_read([aid])
    # only x (0), y (1) and z (2) are valid dimensions
    invalid_dim = 3
    try:
        my_col.trim(trim=4, dim=invalid_dim)
    except TypeError as e:
        assert e.__str__() == "Dimension should equal 0, 1, or 2"
    else:
        assert False, "Expected error not thrown"
    _delete_store(my_col)
def test_trim_specify_diff_dimensions(self):
    """Test that specifying the dimension correctly trims different dimensions"""
    aid = '454'
    xdim = 0
    ydim = 1
    trim_val = 3
    my_col_A = _initialize_collection_and_read([aid])
    my_col_B = _initialize_collection_and_read([aid])
    # B gets x and y swapped, so trimming B on y must equal trimming A on x
    new_positions = _swap_x_y(my_col_B[aid].get_positions())
    my_col_B[aid].set_positions(new_positions)
    my_col_A.trim(trim=trim_val, dim=xdim)
    my_col_B.trim(trim=trim_val, dim=ydim)
    mask_A = my_col_A[aid].get_array("mask")
    mask_B = my_col_B[aid].get_array("mask")
    assert np.array_equal(mask_A, mask_B), "Masks not equal, so atoms were trimmed differently for different dimensions"
    _delete_store(my_col_A)
    # both collections share the same store root ("tests/store"), so
    # deleting A's store removes B's as well
def test_describe_own_function(self):
    '''Test using descriptor function not built into descriptors.py'''
    my_col = _initialize_collection_and_read(['455'])
    kwargs = {'num': 0, 'arg1': 1, 'arg2': 2, 'arg3': 3}
    my_col.describe('desc', fcn=_test_descriptor, **kwargs)
    res, info = my_col.get_description('455', 'desc', metadata=True, **kwargs)
    assert res == 'test result 1'
    # the kwargs used for the description are recorded in its metadata
    assert info['desc_args'] == kwargs
    _delete_store(my_col)
def test_describe_override(self):
    '''Put result in store, and check to make sure 'override' parameter overrides previous result'''
    kwargs = {'arg1': 1, 'arg2': 2, 'arg3': 3}
    my_col = _initialize_collection_and_read(['455'])
    # seed the store with a placeholder result that describe() must replace
    my_col.store.store_description("fake result", {}, '455', "test", **kwargs)  # store result, can be overridden
    try:
        my_col.describe('test', fcn=_test_descriptor, override=True, **kwargs)
        res = my_col.get_description('455', 'test', **kwargs)
        assert res != "fake result"
        assert res == "test result 1"
    finally:
        _delete_store(my_col)
def test_describe_trim_post_descriptor(self):
aid = '455'
my_col = _initialize_collection_and_read([aid])
my_col.trim(trim=2, dim=0, pad=1)
num_atoms_with_mask = | |
<reponame>pauliacomi/CoolProp<filename>dev/inject_melting_curves.py
import os, json
# Melting lines in the Simon form  p(T) = p_0 + a*((T/T_0)^c - 1).
# Keyed by CoolProp fluid name; "BibTeX" is the literature reference key,
# "T_m" the normal melting temperature [K] and "parts" the piecewise
# segments, each valid between T_min (or T_0) and T_max.
Simon_curves = {
    "n-Propane": {
        "BibTeX": "Reeves-JCP-1964", "T_m": -187.75 + 273.15, "parts": [{"T_0": 85.3, "a": 7.180e8, "c": 1.283, "p_0": 0.0, "T_max": 168.63}]
    },
    "n-Pentane": {
        "BibTeX": "Reeves-JCP-1964", "T_m": -129.89 + 273.15, "parts": [{"T_0": 143.5, "a": 6.600e8, "c": 1.649, "p_0": 0.0, "T_max": 156.2}]
    },
    "Isopentane": {
        "BibTeX": "Reeves-JCP-1964", "T_m": -159.92 + 273.15, "parts": [{"T_0": 112.5, "a": 5.916e8, "c": 1.563, "p_0": 0, "T_max": 212.16}]
    },
    "Propylene": {
        "BibTeX": "Reeves-JCP-1964", "T_m": -185.09 + 273.15, "parts": [{"T_0": 86.0, "a": 3.196e8, "c": 2.821, "p_0": 0, "T_min": 86.0, "T_max": 129},
                                                                        {"T_0": 109.6, "a": 3.064e8, "c": 3.871, "p_0": 4.450e8, "T_min": 129, "T_max": 145.3}]
    },
    "Cyclohexane": {
        "BibTeX": "Penoncello-IJT-1995", "T_m": 6.81 + 273.15, "parts": [{"T_0": 279.7, "a": 383.4e6, "c": 1.41, "p_0": 0, "T_max": 401.7}]
    },
    "Krypton": {
        "BibTeX": "Michels-PHYSICA-1962", "T_m": 115.95, "parts": [{"T_0": 1, "a": 109479.2307, "c": 1.6169841, "p_0": -237497645.7, "T_max": 168.7}]
    },
    "Xenon": {
        "BibTeX": "Michels-PHYSICA-1962", "T_m": 165.02, "parts": [{"T_0": 1, "a": 80890.5544859, "c": 1.5891650, "p_0": -260932309.446, "T_max": 366.4}]
    },
    "CarbonMonoxide": {
        "BibTeX": "Barreiros-JCT-1982", "T_m": 68.3, "parts": [{"T_0": 1, "a": 19560.8, "c": 2.10747, "p_0": -142921439.2, "T_max": 87.5}]
    },
    "Oxygen": {
        "BibTeX": "Younglove-NIST-1982", "T_m": 54.75, "parts": [{"T_0": 1, "a": 227606.348, "c": 1.769, "p_0": -266999247.652, "T_max": 63.1}]
    },
    "ParaHydrogen": {
        "BibTeX": "Younglove-NIST-1982", "T_m": 18.9, "parts": [{"T_0": 1, "a": 125746.643, "c": 1.955, "p_0": -21155737.752, "T_min": 13.8033, "T_max": 22},
                                                                {"T_0": 1, "a": 248578.596, "c": 1.764739, "p_0": -26280332.904, "T_min": 22, "T_max": 164.5}]
    },
    "Methane": {
        "BibTeX": "Abramson-HPR-2011", "T_m": 90.7, "parts": [{"T_0": 90.6941, "a": 0.208e9, "c": 1.698, "p_0": 1.17e4, "T_max": 600}]
    },
    "Helium": {
        "BibTeX": "Datchi-PRB-2000", "T_m": 1.15, "parts": [{"T_0": 1, "a": 1.6067e6, "c": 1.565, "p_0": -1.6067e6, "T_max": 700}]
    },
    "Neon": {
        "BibTeX": "SantamariaPerez-PRB-2010", "T_m": -1, "parts": [{"T_0": 24.4, "a": 1.7e9, "c": 1 / 0.77, "p_0": 101325, "T_max": 700}]
    },
    "Hydrogen": {
        "BibTeX": "Datchi-PRB-2000", "T_m": 14.009985, "parts": [{"T_0": 1, "a": 2.31e5, "c": 1.7627, "p_0": -0.0052e6 - 2.31e5, "T_max": 700}]
    }
}
# Melting lines as a polynomial in reduced temperature:
# p(T) = p_0 * (1 + sum_i a_i * ((T/T_0)^t_i - 1)); same entry layout
# as Simon_curves above.
polynomial_in_Tr = {
    "Argon": {
        "BibTeX": "Tegeler-JPCRD-1999", "T_m": 87.28, "parts": [{"T_0": 83.8058, "a": [-7476.2665, 9959.0613], "t": [1.05, 1.275], "p_0": 68891, "T_max": 254.0}]
    },
    "Fluorine": {
        "BibTeX": "deReuck-BOOK-1990", "T_m": 53.15, "parts": [{"T_0": 53.4811, "a": [988043.478261], "t": [2.1845], "p_0": 252, "T_max": 55.4}]
    },
    "Nitrogen": {
        "BibTeX": "Span-JPCRD-2000", "T_m": 77.34, "parts": [{"T_0": 63.151, "a": [12798.61], "t": [1.78963], "p_0": 12523, "T_max": 283.8}]
    },
    "Ethane": {
        "BibTeX": "Buecker-JCRD-2006", "T_m": 90.4, "parts": [{"T_0": 90.368, "a": [2.23626315e8, 1.05262374e8], "t": [1.0, 2.55], "p_0": 1.14, "T_max": 110.2}]
    },
    "Isobutane": {
        "BibTeX": "Buecker-JPCRD-2006B", "T_m": 113.55, "parts": [{"T_0": 113.73, "a": [1.9536371309e9], "t": [6.12], "p_0": 0.0219, "T_max": 124.9}]
    },
    "Ethylene": {
        "BibTeX": "Smukala-JPCRD-2000", "T_m": 169, "parts": [{"T_0": 103.989, "a": [2947001.84], "t": [2.045], "p_0": 122.65, "T_min": 103.989, "T_max": 110.369},
                                                              {"T_0": 110.369, "a": [6.82693421], "t": [1.089], "p_0": 46.8e6, "T_min": 110.369, "T_max": 188}]
    },
    "n-Butane": {
        "BibTeX": "Buecker-JPCRD-2006B", "T_m": -137.92 + 273.15, "parts": [{"T_0": 134.895, "a": [5.585582364e8], "t": [2.206], "p_0": 0.653, "T_max": 163.9}]
    },
    "Water": {
        # four segments: ice Ih (decreasing), then ices III, V and VI
        "BibTeX": "IAPWS", "T_m": -1, "parts": [{"T_0": 273.16, "a": [-0.119539337e7, -0.808183159e5, -0.333826860e4], "t": [0.3000000e1, 0.257500e2, 0.103750e3], "p_0": 611.657, "T_min": 273.16, "T_max": 251.165},
                                               {"T_0": 251.165, "a": [0.299948], "t": [60], "p_0": 208.566e6, "T_min": 251.165, "T_max": 256.164},
                                               {"T_0": 256.164, "a": [1.18721], "t": [8], "p_0": 350.1e6, "T_min": 256.164, "T_max": 273.31},
                                               {"T_0": 273.31, "a": [1.07476], "t": [4.6], "p_0": 623.4e6, "T_min": 273.31, "T_max": 355}
                                               ]
    }
}
# Melting lines as a polynomial in theta = T/T_0; same entry layout as
# the tables above.
polynomial_in_theta = {
    "Methanol": {
        "BibTeX": "deReuck-BOOK-1993", "T_m": 337.8, "parts": [{"T_0": 175.61, "a": [5.330770e9, 4.524780e9, 3.888861e10], "t": [1, 1.5, 4], "p_0": 0.187, "T_max": 245.9}]
    },
    "CarbonDioxide": {
        "BibTeX": "Span-JPCRD-1996", "T_m": 216.58, "parts": [{"T_0": 216.592, "a": [1955.5390, 2055.4593], "t": [1, 2], "p_0": 517950, "T_max": 327.6}]
    }
}
import CoolProp

# Report melting-line coverage: print fluids that are in none of the
# three correlation tables above (covered fluids are printed indented),
# then the count of uncovered fluids.
num_uncovered = 0
for fluid in CoolProp.__fluids__:
    if fluid not in Simon_curves and fluid not in polynomial_in_Tr and fluid not in polynomial_in_theta:
        print(fluid)
        num_uncovered += 1
    else:
        print(' ' * 30, fluid)
print(num_uncovered)
import CoolProp.CoolProp as CP
import json, numpy as np, matplotlib.pyplot as plt, pandas
# Running subplot indices for the pressure (figp) and density (figrho)
# figures; advanced by the plotting functions below.
ip = 1
irho = 1
# a 5x5 grid of subplots per figure
Nrow, Ncol = 5, 5
figp = plt.figure(figsize=(20, 20))
figrho = plt.figure(figsize=(20, 20))
def plot_rho(T, rho, fit=False):
    """Helper for density-along-melting-line plots.

    With fit=False, return (x, y): T and rho each normalized to [0, 1]
    over their span.  With fit=True, fit a cubic to the normalized data
    and return (T, percent deviation of the fitted density from rho).
    """
    # work on plain float arrays so integer/list/Series inputs behave
    T = np.asarray(T, dtype=float)
    rho = np.asarray(rho, dtype=float)
    span_T = T[-1] - T[0]
    span_rho = rho[-1] - rho[0]
    x = (T - T[0]) / span_T
    y = (rho - rho[0]) / span_rho
    if fit:
        coeffs = np.polyfit(x, y, 3)
        yfit = np.polyval(coeffs, x)
        # un-normalize the fitted curve back to density units
        rhofit = yfit * span_rho + rho[0]
        return T, (rhofit / rho - 1) * 100
    return x, y
def simon():
    """Inject the Simon-form melting lines into the per-fluid JSON files
    and plot each curve (plus a crude generic estimate) for inspection."""
    global ip, irho
    # deferred project import, as in the rest of this script
    from package_json import json_options
    for fluid, values in Simon_curves.items():  # .iteritems() is Python-2 only
        axp = figp.add_subplot(Nrow, Ncol, ip); ip += 1
        axrho = figrho.add_subplot(Nrow, Ncol, irho); irho += 1
        axp.set_xlabel('T [K]')
        axp.set_ylabel('p [Pa]')
        axrho.set_xlabel('T [K]')
        axrho.set_ylabel('rho [mol/m$^3$]')
        axp.set_title(fluid + ' - ' + str(round(CP.Props(fluid, "molemass"), 2)))
        axrho.set_title(fluid)
        # read the fluid's JSON, splice in the melting-line ancillary
        # and write it back
        fname = os.path.join('fluids', fluid + '.json')
        with open(fname, 'r') as fh:
            j = json.load(fh)
        for part in values['parts']:
            if 'T_min' not in part:
                part['T_min'] = round(CP.Props(fluid, "Tmin"), 4)
        values['type'] = 'Simon'
        j['ANCILLARIES']['melting_line'] = values
        with open(fname, 'w') as fh:
            fh.write(json.dumps(j, **json_options))
        T_m = values['T_m']
        for value in values['parts']:
            Tmin = value.get('T_min', CP.Props(fluid, "Tmin"))
            Tmax = value['T_max']
            T = np.linspace(Tmin, Tmax, 200)
            T_0 = value['T_0']
            p_0 = value['p_0']
            a = value['a']
            c = value['c']
            # Simon equation for this segment
            p = p_0 + a * ((T / T_0)**c - 1)
            axp.plot(T, p)
            # crude generic curve anchored at the triple point, plotted
            # dashed to eyeball how far off a one-size-fits-all fit is
            cc = 1.75
            aa = 3e8  # (101325-p_0)/((T_m/T_0)**cc-1)
            pt = CP.Props(fluid, 'ptriple')
            pp = pt + aa * ((T / Tmin)**cc - 1)
            axp.plot(T_m, 101325, '*')
            axp.plot(T, pp, '--')
            print("%s %s %s %s" % (fluid, CP.Props(fluid, "molemass"), CP.Props(fluid, 'accentric'), pp[-1] / p[-1] - 1))
def Tr():
    """Inject the polynomial-in-Tr melting lines into the per-fluid JSON
    files and plot each curve (plus a crude generic estimate)."""
    global ip, irho
    # deferred project import, as in the rest of this script
    from package_json import json_options
    for fluid, values in polynomial_in_Tr.items():  # .iteritems() is Python-2 only
        axp = figp.add_subplot(Nrow, Ncol, ip); ip += 1
        axrho = figrho.add_subplot(Nrow, Ncol, irho); irho += 1
        axp.set_xlabel('T [K]')
        axp.set_ylabel('p [Pa]')
        axrho.set_xlabel('T [K]')
        axrho.set_ylabel('rho [mol/m$^3$]')
        axp.set_title(fluid + ' - ' + str(round(CP.Props(fluid, "molemass"), 2)))
        axrho.set_title(fluid)
        # read the fluid's JSON, splice in the melting-line ancillary
        # and write it back
        fname = os.path.join('fluids', fluid + '.json')
        with open(fname, 'r') as fh:
            j = json.load(fh)
        for part in values['parts']:
            if 'T_min' not in part:
                part['T_min'] = round(CP.Props(fluid, "Tmin"), 4)
        values['type'] = 'polynomial_in_Tr'
        j['ANCILLARIES']['melting_line'] = values
        with open(fname, 'w') as fh:
            fh.write(json.dumps(j, **json_options))
        if fluid == 'Ethylene':
            # experimental data points overlaid for visual validation
            T = [104.003, 104.059, 104.13, 104.2, 104.27, 104.41, 104.55, 104.69, 104.83, 104.969, 105.108, 105.386, 106.077, 106.764, 107.446, 111.384, 119.283, 127.136, 158.146, 188.621]
            p = np.array([0.1, 0.5, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 10, 15, 20, 25, 50, 75, 100, 200, 300]) * 1e6
            axp.plot(T, p, '*')
        T_m = values['T_m']
        for value in values['parts']:
            Tmin = value.get('T_min', CP.Props(fluid, "Tmin"))
            Tmax = value['T_max']
            T = np.linspace(Tmin, Tmax, 200)
            a = value['a']
            t = value['t']
            T_t = value['T_0']
            p_t = value['p_0']
            # p = p_t * (1 + sum_i a_i*((T/T_t)^t_i - 1))
            RHS = 0
            for i in range(len(a)):
                RHS += a[i] * ((T / T_t)**t[i] - 1)
            p = p_t * (RHS + 1)
            axp.plot(T, p)
            # crude generic curve anchored at the triple point, plotted
            # dashed to eyeball how far off a one-size-fits-all fit is
            cc = 1.75
            aa = 3e8  # (101325-p_0)/((T_m/T_0)**cc-1)
            pt = CP.Props(fluid, 'ptriple')
            pp = pt + aa * ((T / Tmin)**cc - 1)
            axp.plot(T_m, 101325, '*')
            axp.plot(T, pp, '--')
            print("%s %s %s %s" % (fluid, CP.Props(fluid, "molemass"), CP.Props(fluid, 'accentric'), pp[-1] / p[-1] - 1))
def theta():
global ip, irho
for fluid, values in polynomial_in_theta.iteritems():
axp = figp.add_subplot(Nrow, Ncol, ip); ip += 1
axrho = figrho.add_subplot(Nrow, Ncol, irho); irho += 1
axp.set_xlabel('T [K]')
axp.set_ylabel('p [Pa]')
axrho.set_xlabel('T [K]')
axrho.set_ylabel('rho [mol/m$^3$]')
axp.set_title(fluid + ' - ' + str(round(CP.Props(fluid, "molemass"), 2)))
axrho.set_title(fluid)
fname = os.path.join('fluids', fluid + '.json')
j = json.load(open(fname, 'r'))
for part in values['parts']:
if 'T_min' not in part:
part['T_min'] = round(CP.Props(fluid, "Tmin"), 4)
values['type'] = 'polynomial_in_Theta'
j['ANCILLARIES']['melting_line'] = values
fp = open(fname, 'w')
from package_json import json_options
fp.write(json.dumps(j, **json_options))
fp.close()
T_m = values['T_m']
for value in values['parts']:
| |
# Source repository: DalavanCloud/tmppy
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import textwrap
from _py2tmp import ir3
import typed_ast.ast3 as ast
from typing import List, Tuple, Dict, Optional, Union, Callable
from _py2tmp.utils import ast_to_string
class Symbol:
    """A named entry in a symbol table together with its TMPPy type.

    ``is_function_that_may_throw`` only makes sense for callables, so when it
    is set the stored type is required to be an ``ir3.FunctionType``.
    """
    def __init__(self, name: str, type: ir3.ExprType, is_function_that_may_throw: bool):
        if is_function_that_may_throw:
            assert isinstance(type, ir3.FunctionType)
        self.name = name
        self.type = type
        self.is_function_that_may_throw = is_function_that_may_throw
class SymbolLookupResult:
    """Value object produced by symbol lookups.

    Bundles the symbol with the AST node that defined it, a flag saying
    whether the definition is only partial (e.g. assigned in just one branch),
    and the symbol table in which the name was found.
    """
    def __init__(self, symbol: Symbol, ast_node: ast.AST, is_only_partially_defined: bool, symbol_table: 'SymbolTable'):
        self.symbol = symbol
        self.ast_node = ast_node
        self.is_only_partially_defined = is_only_partially_defined
        self.symbol_table = symbol_table
class SymbolTable:
    """One lexical scope in a chain of scopes mapping names to symbols."""

    def __init__(self, parent=None):
        # name -> (Symbol, defining AST node, is_only_partially_defined)
        self.symbols_by_name = dict()  # type: Dict[str, Tuple[Symbol, ast.AST, bool]]
        self.parent = parent

    def get_symbol_definition(self, name: str):
        """Look up *name* in this scope, then in each enclosing scope.

        Returns a SymbolLookupResult bound to the table where the name was
        found, or None when the name is nowhere defined.
        """
        table = self
        while table is not None:
            entry = table.symbols_by_name.get(name)
            if entry:
                symbol, ast_node, is_only_partially_defined = entry
                return SymbolLookupResult(symbol, ast_node, is_only_partially_defined, table)
            table = table.parent
        return None

    def add_symbol(self,
                   name: str,
                   type: ir3.ExprType,
                   definition_ast_node: ast.AST,
                   is_only_partially_defined: bool,
                   is_function_that_may_throw: bool):
        """Register *name* in this scope, replacing any previous entry."""
        if is_function_that_may_throw:
            assert isinstance(type, ir3.FunctionType)
        entry = (Symbol(name, type, is_function_that_may_throw),
                 definition_ast_node,
                 is_only_partially_defined)
        self.symbols_by_name[name] = entry
class CompilationContext:
    """Mutable state threaded through the AST -> IR3 conversion.

    Holds the current lexical symbol table, the table of user-defined custom
    types, source information used for diagnostics, the name of the function
    currently being compiled (if any), and the map of functions whose return
    type has not been inferred yet.
    """
    def __init__(self,
                 symbol_table: SymbolTable,
                 custom_types_symbol_table: SymbolTable,
                 filename: str,
                 source_lines: List[str],
                 function_name: Optional[str] = None,
                 partially_typechecked_function_definitions_by_name: Dict[str, ast.FunctionDef] = None):
        self.symbol_table = symbol_table
        self.custom_types_symbol_table = custom_types_symbol_table
        # None (the default) means "start with an empty map".
        self.partially_typechecked_function_definitions_by_name = partially_typechecked_function_definitions_by_name or dict()
        self.filename = filename
        self.source_lines = source_lines
        self.current_function_name = function_name
    def create_child_context(self, function_name=None):
        # The child gets a fresh nested scope; the custom-type table and the
        # partially-typechecked-function map are shared (not copied), so
        # updates made in the child are visible to the parent.
        return CompilationContext(SymbolTable(parent=self.symbol_table),
                                  self.custom_types_symbol_table,
                                  self.filename,
                                  self.source_lines,
                                  function_name=function_name or self.current_function_name,
                                  partially_typechecked_function_definitions_by_name=self.partially_typechecked_function_definitions_by_name)
    def add_symbol(self,
                   name: str,
                   type: ir3.ExprType,
                   definition_ast_node: ast.AST,
                   is_only_partially_defined: bool,
                   is_function_that_may_throw: bool):
        """
        Adds a symbol to the symbol table.

        This throws an error (created by calling `create_already_defined_error(previous_type)`) if a symbol with the
        same name and different type was already defined in this scope.
        """
        if is_function_that_may_throw:
            assert isinstance(type, ir3.FunctionType)
        self._check_not_already_defined(name, definition_ast_node)
        self.symbol_table.add_symbol(name=name,
                                     type=type,
                                     definition_ast_node=definition_ast_node,
                                     is_only_partially_defined=is_only_partially_defined,
                                     is_function_that_may_throw=is_function_that_may_throw)
    def add_custom_type_symbol(self,
                               custom_type: ir3.CustomType,
                               definition_ast_node: ast.ClassDef):
        # A custom type is registered twice: as a constructor function in the
        # regular symbol table, and as a type in the custom-types table.
        self.add_symbol(name=custom_type.name,
                        type=ir3.FunctionType(argtypes=[arg.type for arg in custom_type.arg_types],
                                              returns=custom_type),
                        definition_ast_node=definition_ast_node,
                        is_only_partially_defined=False,
                        is_function_that_may_throw=False)
        self.custom_types_symbol_table.add_symbol(name=custom_type.name,
                                                  type=custom_type,
                                                  definition_ast_node=definition_ast_node,
                                                  is_only_partially_defined=False,
                                                  is_function_that_may_throw=False)
    def add_symbol_for_function_with_unknown_return_type(self,
                                                         name: str,
                                                         definition_ast_node: ast.FunctionDef):
        # Defer full registration until the return type has been inferred;
        # see set_function_type().
        self._check_not_already_defined(name, definition_ast_node)
        self.partially_typechecked_function_definitions_by_name[name] = definition_ast_node
    def get_symbol_definition(self, name: str):
        # Returns a SymbolLookupResult or None.
        return self.symbol_table.get_symbol_definition(name)
    def get_partial_function_definition(self, name: str):
        # AST node of a function whose return type is not yet known, or None.
        return self.partially_typechecked_function_definitions_by_name.get(name)
    def get_type_symbol_definition(self, name: str):
        # Lookup restricted to user-defined custom types.
        return self.custom_types_symbol_table.get_symbol_definition(name)
    def set_function_type(self, name: str, type: ir3.FunctionType):
        # Promote a partially-typechecked function to a fully-typed symbol,
        # or verify that an already-registered function has the same type.
        if name in self.partially_typechecked_function_definitions_by_name:
            ast_node = self.partially_typechecked_function_definitions_by_name[name]
            del self.partially_typechecked_function_definitions_by_name[name]
            self.symbol_table.add_symbol(name=name,
                                         type=type,
                                         definition_ast_node=ast_node,
                                         is_only_partially_defined=False,
                                         is_function_that_may_throw=True)
        else:
            assert self.get_symbol_definition(name).symbol.type == type
    def _check_not_already_defined(self, name: str, definition_ast_node: ast.AST):
        """Raise CompilationError if *name* already has a (full or partial) definition.

        Checks, in order: the regular symbol table, the custom-types table,
        and the partially-typechecked-function map. The error message differs
        depending on whether the previous definition was only partial.
        """
        symbol_lookup_result = self.symbol_table.get_symbol_definition(name)
        if not symbol_lookup_result:
            symbol_lookup_result = self.custom_types_symbol_table.get_symbol_definition(name)
        if symbol_lookup_result:
            is_only_partially_defined = symbol_lookup_result.is_only_partially_defined
            previous_definition_ast_node = symbol_lookup_result.ast_node
        elif name in self.partially_typechecked_function_definitions_by_name:
            is_only_partially_defined = False
            previous_definition_ast_node = self.partially_typechecked_function_definitions_by_name[name]
        else:
            is_only_partially_defined = None
            previous_definition_ast_node = None
        if previous_definition_ast_node:
            if is_only_partially_defined:
                raise CompilationError(self, definition_ast_node,
                                       '%s could be already initialized at this point.' % name,
                                       notes=[(previous_definition_ast_node, 'It might have been initialized here (depending on which branch is taken).')])
            else:
                raise CompilationError(self, definition_ast_node,
                                       '%s was already defined in this scope.' % name,
                                       notes=[(previous_definition_ast_node, 'The previous declaration was here.')])
class CompilationError(Exception):
    """Compilation failure carrying source-located diagnostics.

    The exception message contains the main error plus any attached notes,
    each rendered clang-style: a ``file:line:col: message`` header, the
    offending source line, and a caret marker under the error column.
    """
    def __init__(self, compilation_context: CompilationContext, ast_node: ast.AST, error_message: str, notes: List[Tuple[ast.AST, str]] = None):
        # BUG FIX: the default was a mutable `[]` shared across all calls;
        # use None as the sentinel instead.
        if notes is None:
            notes = []
        error_message = CompilationError._diagnostic_to_string(compilation_context=compilation_context,
                                                               ast_node=ast_node,
                                                               message='error: ' + error_message)
        notes = [CompilationError._diagnostic_to_string(compilation_context=compilation_context,
                                                        ast_node=note_ast_node,
                                                        message='note: ' + note_message)
                 for note_ast_node, note_message in notes]
        super().__init__(''.join([error_message] + notes))

    @staticmethod
    def _diagnostic_to_string(compilation_context: CompilationContext, ast_node: ast.AST, message: str):
        """Render one diagnostic: location header, source line, caret marker."""
        first_line_number = ast_node.lineno
        first_column_number = ast_node.col_offset
        error_marker = ' ' * first_column_number + '^'
        # BUG FIX: the template previously hard-coded a placeholder filename
        # while the `filename=` format argument went unused; use it so
        # diagnostics actually name the file being compiled.
        return textwrap.dedent('''\
            {filename}:{first_line_number}:{first_column_number}: {message}
            {line}
            {error_marker}
            ''').format(filename=compilation_context.filename,
                        first_line_number=first_line_number,
                        first_column_number=first_column_number,
                        message=message,
                        line=compilation_context.source_lines[first_line_number - 1],
                        error_marker=error_marker)
def module_ast_to_ir3(module_ast_node: ast.Module, filename: str, source_lines: List[str]):
    """Convert a parsed TMPPy module into an ``ir3.Module``.

    Runs two passes over the top-level statements:

    1. register function signatures, validate imports, and convert class
       definitions (so that calls may refer to functions defined later);
    2. convert function bodies and top-level assertions.

    Raises CompilationError for any construct TMPPy does not support.
    """
    compilation_context = CompilationContext(SymbolTable(),
                                             SymbolTable(),
                                             filename,
                                             source_lines)
    function_defns = []
    toplevel_assertions = []
    custom_types = []
    # First pass: process everything except function bodies and toplevel assertions
    for ast_node in module_ast_node.body:
        if isinstance(ast_node, ast.FunctionDef):
            function_name, arg_types, return_type = function_def_ast_to_symbol_info(ast_node, compilation_context)
            if return_type:
                compilation_context.add_symbol(
                    name=function_name,
                    type=ir3.FunctionType(argtypes=arg_types,
                                          returns=return_type),
                    definition_ast_node=ast_node,
                    is_only_partially_defined=False,
                    is_function_that_may_throw=True)
            else:
                # No declared return type: defer full registration until the
                # 2nd pass infers it from the body.
                compilation_context.add_symbol_for_function_with_unknown_return_type(
                    name=function_name,
                    definition_ast_node=ast_node)
        elif isinstance(ast_node, ast.ImportFrom):
            # Only a whitelisted set of `from X import Y` imports is allowed,
            # and no aliasing (`as`).
            supported_imports_by_module = {
                'tmppy': ('Type', 'empty_list', 'empty_set', 'match'),
                'typing': ('List', 'Set', 'Callable')
            }
            supported_imports = supported_imports_by_module.get(ast_node.module)
            if not supported_imports:
                raise CompilationError(compilation_context, ast_node,
                                       'The only modules that can be imported in TMPPy are: ' + ', '.join(sorted(supported_imports_by_module.keys())))
            if len(ast_node.names) == 0:
                raise CompilationError(compilation_context, ast_node, 'Imports must import at least 1 symbol.') # pragma: no cover
            for imported_name in ast_node.names:
                if not isinstance(imported_name, ast.alias) or imported_name.asname:
                    raise CompilationError(compilation_context, ast_node, 'TMPPy only supports imports of the form "from some_module import some_symbol, some_other_symbol".')
                if imported_name.name not in supported_imports:
                    raise CompilationError(compilation_context, ast_node, 'The only supported imports from %s are: %s.' % (ast_node.module, ', '.join(sorted(supported_imports))))
        elif isinstance(ast_node, ast.Import):
            # Plain `import X` is never allowed.
            raise CompilationError(compilation_context, ast_node,
                                   'TMPPy only supports imports of the form "from some_module import some_symbol, some_other_symbol".')
        elif isinstance(ast_node, ast.ClassDef):
            custom_type = class_definition_ast_to_ir3(ast_node, compilation_context)
            compilation_context.add_custom_type_symbol(custom_type=custom_type,
                                                       definition_ast_node=ast_node)
            custom_types.append(custom_type)
        elif isinstance(ast_node, ast.Assert):
            # We'll process this in the 2nd pass (since we need to infer function return types first).
            pass
        else:
            # raise CompilationError(compilation_context, ast_node, 'This Python construct is not supported in TMPPy:\n%s' % ast_to_string(ast_node))
            raise CompilationError(compilation_context, ast_node, 'This Python construct is not supported in TMPPy')
    # 2nd pass: process function bodies and toplevel assertions
    for ast_node in module_ast_node.body:
        if isinstance(ast_node, ast.FunctionDef):
            new_function_defn = function_def_ast_to_ir3(ast_node, compilation_context)
            function_defns.append(new_function_defn)
            # Record the (possibly inferred) signature for later callers.
            compilation_context.set_function_type(
                name=ast_node.name,
                type=ir3.FunctionType(returns=new_function_defn.return_type,
                                      argtypes=[arg.type
                                                for arg in new_function_defn.args]))
        elif isinstance(ast_node, ast.Assert):
            toplevel_assertions.append(assert_ast_to_ir3(ast_node, compilation_context))
    # Functions whose name does not start with '_' form the public API.
    public_names = set()
    for function_defn in function_defns:
        if not function_defn.name.startswith('_'):
            public_names.add(function_defn.name)
    return ir3.Module(function_defns=function_defns,
                      assertions=toplevel_assertions,
                      custom_types=custom_types,
                      public_names=public_names)
def match_expression_ast_to_ir3(ast_node: ast.Call, compilation_context: CompilationContext, in_match_pattern: bool, check_var_reference: Callable[[ast.Name], None]):
assert isinstance(ast_node.func, ast.Call)
if ast_node.keywords:
raise CompilationError(compilation_context, ast_node.keywords[0].value, 'Keyword arguments are not allowed in match()')
if ast_node.func.keywords:
raise CompilationError(compilation_context, ast_node.func.keywords[0].value, 'Keyword arguments are not allowed in match()')
if not ast_node.func.args:
raise CompilationError(compilation_context, ast_node.func, 'Found match() with no arguments; it must have at least 1 argument.')
matched_exprs = []
for expr_ast in ast_node.func.args:
expr = expression_ast_to_ir3(expr_ast, compilation_context, in_match_pattern, check_var_reference)
if expr.type != ir3.TypeType():
raise CompilationError(compilation_context, expr_ast,
'All arguments passed to match must have type Type, but an argument with type %s was specified.' % str(expr.type))
matched_exprs.append(expr)
if len(ast_node.args) != 1 or not isinstance(ast_node.args[0], ast.Lambda):
raise CompilationError(compilation_context, ast_node, 'Malformed match()')
[lambda_expr_ast] = ast_node.args
lambda_args = lambda_expr_ast.args
if lambda_args.vararg:
raise CompilationError(compilation_context, lambda_args.vararg,
'Malformed match(): vararg lambda arguments are not supported')
assert not lambda_args.kwonlyargs
assert not lambda_args.kw_defaults
assert not lambda_args.defaults
lambda_arg_ast_node_by_name = {arg.arg: arg
for arg in lambda_args.args}
lambda_arg_index_by_name = {arg.arg: i
for i, arg in enumerate(lambda_args.args)}
lambda_arg_names = {arg.arg for arg in lambda_args.args}
unused_lambda_arg_names = {arg.arg for arg in lambda_args.args}
lambda_body_compilation_context = compilation_context.create_child_context()
lambda_arguments = []
for arg in lambda_args.args:
lambda_arguments.append(arg.arg)
lambda_body_compilation_context.add_symbol(name=arg.arg,
type=ir3.TypeType(),
definition_ast_node=arg,
is_only_partially_defined=False,
is_function_that_may_throw=False)
if not isinstance(lambda_expr_ast.body, ast.Dict):
raise CompilationError(compilation_context, ast_node, 'Malformed match()')
dict_expr_ast = lambda_expr_ast.body
if not dict_expr_ast.keys:
raise CompilationError(compilation_context, dict_expr_ast,
'An empty mapping dict was passed to match(), but at least 1 mapping is required.')
parent_function_name = compilation_context.current_function_name
assert parent_function_name
main_definition = None
main_definition_key_expr_ast = None
last_result_expr_type = None
last_result_expr_ast_node = None
match_cases = []
for key_expr_ast, value_expr_ast in zip(dict_expr_ast.keys, dict_expr_ast.values):
if isinstance(key_expr_ast, ast.Tuple):
pattern_ast_nodes = key_expr_ast.elts
else:
pattern_ast_nodes = [key_expr_ast]
if len(pattern_ast_nodes) != len(matched_exprs):
raise CompilationError(lambda_body_compilation_context, key_expr_ast,
'%s type patterns were provided, while %s were expected' % (len(pattern_ast_nodes), len(matched_exprs)),
[(ast_node.func, 'The corresponding match() was here')])
pattern_exprs = []
for pattern_ast_node in pattern_ast_nodes:
pattern_expr = expression_ast_to_ir3(pattern_ast_node, lambda_body_compilation_context, in_match_pattern=True, check_var_reference=check_var_reference)
pattern_exprs.append(pattern_expr)
if pattern_expr.type != ir3.TypeType():
raise CompilationError(lambda_body_compilation_context, pattern_ast_node,
'Type patterns must have type Type but this pattern has type %s' % str(pattern_expr.type),
[(ast_node.func, 'The corresponding match() was here')])
lambda_args_used_in_pattern = {var.name
for pattern_expr in pattern_exprs
for var in pattern_expr.get_free_variables()
if var.name in lambda_arg_names}
for var in lambda_args_used_in_pattern:
unused_lambda_arg_names.discard(var)
def check_var_reference_in_result_expr(ast_node: ast.Name):
check_var_reference(ast_node)
if ast_node.id in lambda_arg_names and not ast_node.id in lambda_args_used_in_pattern:
raise CompilationError(lambda_body_compilation_context, ast_node,
'%s was used in the result of this match branch but not in any of its patterns' % ast_node.id)
result_expr = expression_ast_to_ir3(value_expr_ast, lambda_body_compilation_context,
in_match_pattern=in_match_pattern,
check_var_reference=check_var_reference_in_result_expr)
if last_result_expr_type and result_expr.type != last_result_expr_type:
raise CompilationError(lambda_body_compilation_context, value_expr_ast,
'All branches in a match() must return the same type, but this branch returns a %s '
'while a previous branch in this match expression returns a %s' % (
str(result_expr.type), str(last_result_expr_type)),
notes=[(last_result_expr_ast_node,
'A previous branch returning a %s was here.' % str(last_result_expr_type))])
last_result_expr_type = result_expr.type
| |
# File: oops_fhir/r4/code_system/v3_hl7_standard_version_code.py
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["v3HL7StandardVersionCode"]

# Load the FHIR CodeSystem resource from the JSON file shipped alongside this
# module (same path, with a ".json" suffix instead of ".py").
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class v3HL7StandardVersionCode:
"""
v3 Code System HL7StandardVersionCode
This code system holds version codes for the Version 3 standards.
Values are to be determined by HL7 and added with each new version of
the HL7 Standard.
Status: active - Version: 2018-08-12
Copyright None
http://terminology.hl7.org/CodeSystem/v3-HL7StandardVersionCode
"""
ballot2008_jan = CodeSystemConcept(
{
"code": "Ballot2008Jan",
"definition": "The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2008.",
"display": "Ballot 2008 January",
}
)
"""
Ballot 2008 January
The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2008.
"""
ballot2008_may = CodeSystemConcept(
{
"code": "Ballot2008May",
"definition": "The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2008.",
"display": "Ballot 2008 May",
}
)
"""
Ballot 2008 May
The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2008.
"""
ballot2008_sep = CodeSystemConcept(
{
"code": "Ballot2008Sep",
"definition": "The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2008.",
"display": "Ballot 2008 September",
}
)
"""
Ballot 2008 September
The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2008.
"""
ballot2009_jan = CodeSystemConcept(
{
"code": "Ballot2009Jan",
"definition": "The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2009.",
"display": "Ballot 2009 January",
}
)
"""
Ballot 2009 January
The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2009.
"""
ballot2009_may = CodeSystemConcept(
{
"code": "Ballot2009May",
"definition": "The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2009.",
"display": "Ballot 2009 May",
}
)
"""
Ballot 2009 May
The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2009.
"""
ballot2009_sep = CodeSystemConcept(
{
"code": "Ballot2009Sep",
"definition": "The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2009.",
"display": "Ballot 2009 September",
}
)
"""
Ballot 2009 September
The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2009.
"""
ballot2010_jan = CodeSystemConcept(
{
"code": "Ballot2010Jan",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2010.",
"display": "Ballot 2010 Jan",
}
)
"""
Ballot 2010 Jan
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2010.
"""
ballot2010_may = CodeSystemConcept(
{
"code": "Ballot2010May",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2010.",
"display": "Ballot 2010 May",
}
)
"""
Ballot 2010 May
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2010.
"""
ballot2010_sep = CodeSystemConcept(
{
"code": "Ballot2010Sep",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2010.",
"display": "Ballot 2010 Sep",
}
)
"""
Ballot 2010 Sep
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2010.
"""
ballot2011_jan = CodeSystemConcept(
{
"code": "Ballot2011Jan",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2011.",
"display": "Ballot 2011 Jan",
}
)
"""
Ballot 2011 Jan
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2011.
"""
ballot2011_may = CodeSystemConcept(
{
"code": "Ballot2011May",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2011.",
"display": "Ballot 2011 May",
}
)
"""
Ballot 2011 May
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2011.
"""
ballot2011_sep = CodeSystemConcept(
{
"code": "Ballot2011Sep",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2011.",
"display": "Ballot 2011 Sep",
}
)
"""
Ballot 2011 Sep
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2011.
"""
ballot2012_jan = CodeSystemConcept(
{
"code": "Ballot2012Jan",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2012.",
"display": "Ballot 2012 Jan",
}
)
"""
Ballot 2012 Jan
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in January 2012.
"""
ballot2012_may = CodeSystemConcept(
{
"code": "Ballot2012May",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2012.",
"display": "Ballot 2012 May",
}
)
"""
Ballot 2012 May
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in May 2012.
"""
ballot2012_sep = CodeSystemConcept(
{
"code": "Ballot2012Sep",
"definition": "Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2012.",
"display": "Ballot 2012 Sep",
}
)
"""
Ballot 2012 Sep
Definition: The complete set of normative, DSTU, proposed (under ballot) and draft artifacts as published in the ballot whose ballot cycle ended in September 2012.
"""
v3_2003_12 = CodeSystemConcept(
{
"code": "V3-2003-12",
"definition": "The consistent set of messaging artefacts as published or contained in repositories in December of 2003, based on the latest version of any V3 models or artefacts (RIM, Datatypes, CMETS, Common Messages, Vocabularies) as available in December of 2003. Note: This versioncode does not cover the version of the XML ITS.",
"display": "HL7 Version V3-2003-12",
}
)
"""
HL7 Version V3-2003-12
The consistent set of messaging artefacts as published or contained in repositories in December of 2003, based on the latest version of any V3 models or artefacts (RIM, Datatypes, CMETS, Common Messages, Vocabularies) as available in December of 2003. Note: This versioncode does not cover the version of the XML ITS.
"""
v3_2005_n = CodeSystemConcept(
{
"code": "V3-2005N",
"definition": "Description:The consistent set of normative and DSTU messaging artifacts as published or contained in repositories in December of 2004, based on the latest version of any V3 models or artifacts (RIM, Datatypes, CMETS, Common Messages, Vocabularies) as published under the title of Normative Edition 2005. Note: This versioncode does not cover the version of the XML ITS.",
"display": "2005 Normative Edition",
}
)
"""
2005 Normative Edition
Description:The consistent set of normative and DSTU messaging artifacts as published or contained in repositories in December of 2004, based on the latest | |
import ast
from typing import NamedTuple
import yapypy.extended_python.extended_ast as ex_ast
from yapypy.extended_python.symbol_analyzer import SymTable, Tag
from yapypy.utils.namedlist import INamedList, as_namedlist, trait
from yapypy.utils.instrs import *
from Redy.Magic.Pattern import Pattern
from bytecode import *
from bytecode.concrete import FreeVar, CellVar, Compare
from bytecode.flags import CompilerFlags
class IndexedAnalyzedSymTable(NamedTuple):
    """Symbol-analysis result: bound, free, cell and borrowed-cell variable
    names of one scope, each materialized as a list."""
    bounds: list
    freevars: list
    cellvars: list
    borrowed_cellvars: list

    @classmethod
    def from_raw(cls, tb):
        # tb.analyzed yields the four variable groups in field order;
        # turn each group into a list before constructing the tuple.
        return cls(*map(list, tb.analyzed))
class Context(INamedList, metaclass=trait(as_namedlist)):
    """Per-scope code-generation state: the Bytecode being emitted, the
    analyzed symbol table of the scope, and the enclosing (parent) context."""
    bc: Bytecode
    sym_tb: IndexedAnalyzedSymTable
    parent: 'Context'

    def update(self, bc=None, sym_tb=None, parent=None):
        """Return a copy of this context with the given fields replaced."""
        return Context(bc if bc is not None else self.bc,
                       sym_tb if sym_tb is not None else self.sym_tb,
                       parent if parent is not None else self.parent)

    def enter_new(self, tag_table: SymTable):
        """Create a child context for a nested scope.

        Builds a fresh Bytecode with the compiler flags and free/cell
        variable lists derived from the symbol analysis of the scope.
        """
        sym_tb = IndexedAnalyzedSymTable.from_raw(tag_table)
        bc = Bytecode()
        bc.flags |= CompilerFlags.NEWLOCALS
        if tag_table.depth > 1:
            bc.flags |= CompilerFlags.NESTED
        if not sym_tb.freevars:
            bc.flags |= CompilerFlags.NOFREE
        else:
            bc.freevars.extend(sym_tb.freevars)
        bc.cellvars.extend(sym_tb.cellvars)
        # BUG FIX: the configured `bc` was previously discarded in favour of a
        # brand-new Bytecode(), losing the flags and the free/cell variable
        # lists set up above (which closures need at to_code() time).
        return self.update(parent=self, bc=bc, sym_tb=sym_tb)

    def load_name(self, name, lineno=None):
        """Emit the correct LOAD instruction for `name` in this scope:
        LOAD_DEREF for cell/free vars, LOAD_FAST for locals, else LOAD_GLOBAL."""
        sym_tb = self.sym_tb
        if name in sym_tb.cellvars:
            self.bc.append(Instr('LOAD_DEREF', CellVar(name), lineno=lineno))
        elif name in sym_tb.freevars:
            self.bc.append(Instr('LOAD_DEREF', FreeVar(name), lineno=lineno))
        elif name in sym_tb.bounds:
            self.bc.append(Instr('LOAD_FAST', name, lineno=lineno))
        else:
            self.bc.append(Instr("LOAD_GLOBAL", name, lineno=lineno))

    def del_name(self, name, lineno=None):
        """Emit the DELETE instruction matching load_name's classification."""
        sym_tb = self.sym_tb
        if name in sym_tb.cellvars:
            self.bc.append(Instr('DELETE_DEREF', CellVar(name), lineno=lineno))
        elif name in sym_tb.freevars:
            self.bc.append(Instr('DELETE_DEREF', FreeVar(name), lineno=lineno))
        elif name in sym_tb.bounds:
            self.bc.append(Instr('DELETE_FAST', name, lineno=lineno))
        else:
            self.bc.append(Instr("DELETE_GLOBAL", name, lineno=lineno))

    def store_name(self, name, lineno=None):
        """Emit the STORE instruction matching load_name's classification."""
        sym_tb = self.sym_tb
        if name in sym_tb.cellvars:
            self.bc.append(Instr('STORE_DEREF', CellVar(name), lineno=lineno))
        elif name in sym_tb.freevars:
            self.bc.append(Instr('STORE_DEREF', FreeVar(name), lineno=lineno))
        elif name in sym_tb.bounds:
            self.bc.append(Instr('STORE_FAST', name, lineno=lineno))
        else:
            self.bc.append(Instr("STORE_GLOBAL", name, lineno=lineno))

    def load_closure(self, lineno=None):
        """Push (in the parent scope) the closure tuple for this scope's
        free variables, for use with MAKE_FUNCTION flag 0x08."""
        parent = self.parent
        freevars = self.sym_tb.freevars
        if freevars:
            for each in self.sym_tb.freevars:
                if each in parent.sym_tb.cellvars:
                    parent.bc.append(
                        Instr('LOAD_CLOSURE', CellVar(each), lineno=lineno))
                elif each in parent.sym_tb.borrowed_cellvars:
                    parent.bc.append(
                        Instr('LOAD_CLOSURE', FreeVar(each), lineno=lineno))
                else:
                    raise RuntimeError
            parent.bc.append(Instr('BUILD_TUPLE', len(freevars)))
def py_compile(node: Tag):
    """Compile a tagged AST into a Python code object."""
    root_sym_tb = IndexedAnalyzedSymTable.from_raw(node.tag)
    root_ctx = Context(Bytecode(), root_sym_tb, None)
    py_emit(node.it, root_ctx)
    return root_ctx.bc.to_code()
@Pattern
def py_emit(node: ast.AST, ctx: Context):
    # Dispatch key for the Pattern decorator: select the .case() handler
    # registered for the node's concrete class.
    return node.__class__
@py_emit.case(Tag)
def py_emit(node: Tag, ctx: Context):
    # A Tag pairs a sub-AST with its symbol table: enter the corresponding
    # scope, then emit the wrapped node inside that child context.
    inner_ctx = ctx.enter_new(node.tag)
    py_emit(node.it, inner_ctx)
@py_emit.case(ast.Module)
def py_emit(node: ast.Module, ctx: Context):
    # Emit every top-level statement, then the implicit `return None` that
    # terminates a module's code object.
    for stmt in node.body:
        py_emit(stmt, ctx)
    append = ctx.bc.append
    append(Instr('LOAD_CONST', None))
    append(Instr('RETURN_VALUE'))
@py_emit.case(ast.Str)
def py_emit(node: ast.Str, ctx: Context):
    # A string literal just loads its constant value.
    value = node.s
    ctx.bc.append(LOAD_CONST(value, lineno=node.lineno))
@py_emit.case(ast.JoinedStr)
def py_emit(node: ast.JoinedStr, ctx: Context):
    # f-string: push every component, then concatenate them in one go.
    parts = node.values
    for part in parts:
        py_emit(part, ctx)
    ctx.bc.append(BUILD_STRING(len(parts), lineno=node.lineno))
@py_emit.case(ast.FormattedValue)
def py_emit(node: ast.FormattedValue, ctx: Context):
    """Emit one `{...}` component of an f-string.

    FORMAT_VALUE's low two bits select the conversion (!s/!r/!a); bit 0x4
    signals that a format spec sits on the stack above the value.
    """
    conversion_flags = {
        -1: 0,   # no conversion
        115: 1,  # ord('s') -> !s
        114: 2,  # ord('r') -> !r
        97: 3,   # ord('a') -> !a
    }
    py_emit(node.value, ctx)
    flags = conversion_flags[node.conversion]
    if node.format_spec:
        py_emit(node.format_spec, ctx)
        flags |= 4
    ctx.bc.append(Instr("FORMAT_VALUE", flags))
@py_emit.case(ast.Tuple)
def py_emit(node: ast.Tuple, ctx: Context):
    """Emit a tuple display (Load context) or tuple unpacking (Store context).

    Starred elements are not supported yet.
    """
    is_lhs = isinstance(node.ctx, ast.Store)
    if any(isinstance(each, ast.Starred) for each in node.elts):
        # BUG FIX: `raise NotImplemented` raises a TypeError (NotImplemented
        # is a value, not an exception); raise the proper exception type.
        raise NotImplementedError
    if is_lhs:
        # Unpack the value on TOS into the individual targets.
        # BUG FIX: these instructions were previously constructed but never
        # appended to ctx.bc, so no code was emitted at all.
        ctx.bc.append(UNPACK_SEQUENCE(len(node.elts), lineno=node.lineno))
        for each in node.elts:
            py_emit(each, ctx)
    else:
        # Push every element, then build the tuple.
        for each in node.elts:
            py_emit(each, ctx)
        ctx.bc.append(BUILD_TUPLE(len(node.elts), lineno=node.lineno))
@py_emit.case(ast.FunctionDef)
def py_emit(node: ast.FunctionDef, new_ctx: Context):
    """
    Emit a function definition.

    https://docs.python.org/3/library/dis.html#opcode-MAKE_FUNCTION
    MAKE_FUNCTION flags:
    0x01 a tuple of default values for positional-only and positional-or-keyword parameters in positional order
    0x02 a dictionary of keyword-only parameters' default values
    0x04 an annotation dictionary
    0x08 a tuple containing cells for free variables, making a closure
    the code associated with the function (at TOS1)
    the qualified name of the function (at TOS)
    """
    parent_ctx: Context = new_ctx.parent
    # Emit the function body into the new (function-scope) context first.
    for each in node.body:
        py_emit(each, new_ctx)
    args = node.args
    new_ctx.bc.argcount = len(args.args)
    new_ctx.bc.kwonlyargcount = len(args.kwonlyargs)
    make_function_flags = 0
    if new_ctx.sym_tb.freevars:
        make_function_flags |= 0x08
    if args.defaults:
        make_function_flags |= 0x01
    if args.kw_defaults:
        make_function_flags |= 0x02
    # Collect argument names and (name, annotation) pairs across all
    # parameter kinds: positional, keyword-only, *vararg and **kwarg.
    annotations = []
    argnames = []
    for arg in args.args:
        argnames.append(arg.arg)
        if arg.annotation:
            annotations.append((arg.arg, arg.annotation))
    for arg in args.kwonlyargs:
        argnames.append(arg.arg)
        if arg.annotation:
            annotations.append((arg.arg, arg.annotation))
    arg = args.vararg
    if arg:
        new_ctx.bc.flags |= CompilerFlags.VARARGS
        argnames.append(arg.arg)
        if arg.annotation:
            annotations.append((arg.arg, arg.annotation))
    arg = args.kwarg
    if arg:
        new_ctx.bc.flags |= CompilerFlags.VARKEYWORDS
        argnames.append(arg.arg)
        if arg.annotation:
            annotations.append((arg.arg, arg.annotation))
    if any(annotations):
        make_function_flags |= 0x04
    new_ctx.bc.argnames.extend(argnames)
    # Defaults, kw-defaults, annotations and the closure tuple are evaluated
    # in the *parent* scope and left on its stack for MAKE_FUNCTION.
    if make_function_flags & 0x01:
        for each in args.defaults:
            py_emit(each, parent_ctx)
        parent_ctx.bc.append(
            Instr('BUILD_TUPLE', len(args.defaults), lineno=node.lineno))
    if make_function_flags & 0x02:
        for each in args.kw_defaults:
            py_emit(each, parent_ctx)
        parent_ctx.bc.append(
            Instr('BUILD_TUPLE', len(args.kw_defaults), lineno=node.lineno))
    if make_function_flags & 0x04:
        # NOTE(review): MAKE_FUNCTION 0x04 expects an annotation *dict*
        # (typically keys tuple + values + BUILD_CONST_KEY_MAP); this emits a
        # keys tuple and a values tuple instead — confirm against CPython's
        # MAKE_FUNCTION contract before relying on annotations.
        keys, annotation_values = zip(*annotations)
        parent_ctx.bc.append(
            Instr('LOAD_CONST', tuple(keys), lineno=node.lineno))
        for each in annotation_values:
            py_emit(each, parent_ctx)
        parent_ctx.bc.append(
            Instr("BUILD_TUPLE", len(annotation_values), lineno=node.lineno))
    if make_function_flags & 0x08:
        new_ctx.load_closure(lineno=node.lineno)
    # Implicit `return None` terminating the function body.
    new_ctx.bc.append(Instr('LOAD_CONST', None))
    new_ctx.bc.append(Instr('RETURN_VALUE'))
    # BUG FIX: removed a leftover debug `print(new_ctx.bc)` that dumped the
    # generated bytecode to stdout on every function definition.
    inner_code = new_ctx.bc.to_code()
    parent_ctx.bc.append(Instr('LOAD_CONST', inner_code, lineno=node.lineno))
    ### when it comes to nested, the name is not generated correctly now.
    parent_ctx.bc.append(Instr('LOAD_CONST', node.name, lineno=node.lineno))
    parent_ctx.bc.append(
        Instr("MAKE_FUNCTION", make_function_flags, lineno=node.lineno))
    parent_ctx.store_name(node.name, lineno=node.lineno)
@py_emit.case(ex_ast.ExDict)
def py_emit(node: ast.Dict, ctx: Context):
    """Emit bytecode for a dict display: push each key/value pair in
    source order, then BUILD_MAP.

    Dict-unpacking entries (``{**other}``, represented by a ``None`` key)
    are not supported yet and raise :class:`NotImplementedError`.
    """
    keys = node.keys
    values = node.values
    # Bug fix (two defects in the old guard):
    #   1. ``any(each for each in keys if each is None)`` yielded only the
    #      None values themselves, which are falsy, so any() was ALWAYS
    #      False and the check never fired.
    #   2. ``raise NotImplemented`` raises a TypeError (NotImplemented is a
    #      sentinel value, not an exception class).
    if any(key is None for key in keys):
        raise NotImplementedError
    for key, value in zip(keys, values):
        py_emit(key, ctx)
        py_emit(value, ctx)
    ctx.bc.append(Instr('BUILD_MAP', len(keys), lineno=node.lineno))
@py_emit.case(ast.Assign)
def py_emit(node: ast.Assign, ctx: Context):
    """Emit an assignment statement.

    The value is evaluated once; for a chained assignment
    (``a = b = expr``) the result is duplicated with DUP_TOP so each
    target's store pops its own copy.
    """
    py_emit(node.value, ctx)
    extra_targets = len(node.targets) - 1
    for _ in range(extra_targets):
        ctx.bc.append(DUP_TOP(lineno=node.lineno))
    for target in node.targets:
        py_emit(target, ctx)
@py_emit.case(ast.Name)
def py_emit(node: ast.Name, ctx: Context):
    """Emit a name access, dispatching on the expression context to the
    matching load/store/delete handler of the compiler context."""
    handlers = {
        ast.Load: ctx.load_name,
        ast.Store: ctx.store_name,
        ast.Del: ctx.del_name,
    }
    handler = handlers[type(node.ctx)]
    handler(node.id, lineno=node.lineno)
@py_emit.case(ast.Expr)
def py_emit(node: ast.Expr, ctx: Context):
    """Emit an expression statement: evaluate the expression, then
    discard its result (the value is unused at statement level)."""
    inner = node.value
    py_emit(inner, ctx)
    ctx.bc.append(POP_TOP(lineno=node.lineno))
@py_emit.case(ast.Call)
def py_emit(node: ast.Call, ctx: Context):
    """Emit a call expression.

    Three calling conventions, matching CPython 3.6+ bytecode:

    * plain positionals only            -> CALL_FUNCTION
    * keywords, no unpacking            -> CALL_FUNCTION_KW
    * any ``*args`` / ``**kwargs``      -> pack positionals into tuple(s)
      and keywords into map(s), then CALL_FUNCTION_EX
    """
    py_emit(node.func, ctx)

    # Argument-shape flags:
    #   has_star      -- a ``*iterable`` among the positionals (ast.Starred)
    #   has_key       -- at least one explicit ``kw=value`` keyword
    #   has_star_star -- a ``**mapping`` (keyword node with arg=None)
    has_star = False
    has_key = False
    has_star_star = False
    for each in node.args:
        if isinstance(each, ast.Starred):
            has_star = True
            break
    for each in node.keywords:
        if each.arg:
            has_key = True
            break
    for each in node.keywords:
        if each.arg is None:
            has_star_star = True
            break

    # positional arguments
    if has_star or has_star_star:
        # CALL_FUNCTION_EX path: group consecutive plain positionals into
        # tuples, emit each ``*iterable`` as its own item, then merge all
        # pieces with BUILD_TUPLE_UNPACK_WITH_CALL if there is more than one.
        arg_count = 0          # plain positionals in the current run
        arg_tuple_count = 0    # tuples/iterables pushed so far
        for each in node.args:
            if not isinstance(each, ast.Starred):
                py_emit(each, ctx)
                arg_count += 1
            else:
                # Close the current run of plain positionals before the star.
                if arg_count:
                    ctx.bc.append(
                        Instr("BUILD_TUPLE", arg_count, lineno=node.lineno))
                    arg_tuple_count += 1
                    arg_count = 0
                py_emit(each.value, ctx)
                arg_tuple_count += 1
        # Flush any trailing run of plain positionals.
        if arg_count:
            ctx.bc.append(Instr("BUILD_TUPLE", arg_count, lineno=node.lineno))
            arg_tuple_count += 1
        if arg_tuple_count > 1:
            ctx.bc.append(
                Instr(
                    "BUILD_TUPLE_UNPACK_WITH_CALL",
                    arg_tuple_count,
                    lineno=node.lineno))
        elif arg_tuple_count == 1:
            # Exactly one tuple is already in the right shape for
            # CALL_FUNCTION_EX; nothing to merge.
            pass
        elif arg_tuple_count == 0:
            # CALL_FUNCTION_EX still needs a positional tuple; push ().
            ctx.bc.append(Instr("BUILD_TUPLE", 0, lineno=node.lineno))
    else:
        # Simple path: positionals stay loose on the stack for
        # CALL_FUNCTION / CALL_FUNCTION_KW.
        for each in node.args:
            py_emit(each, ctx)

    # keyword arguments
    if has_star or has_star_star:
        # Group consecutive ``kw=value`` pairs into const-key maps, emit
        # each ``**mapping`` as its own map, then merge all maps with
        # BUILD_MAP_UNPACK_WITH_CALL if there is more than one.
        karg_pack_count = 0    # maps pushed so far
        keys = []
        values = []
        karg_count = 0         # explicit keywords in the current run
        # use dummy node handle trailing keyword arguments
        # (the sentinel forces the final run of kw=value pairs to be
        # flushed by the same else-branch; node.keywords is restored below)
        dummy_node = ast.keyword(arg=None)
        node.keywords.append(dummy_node)
        for each in node.keywords:
            if each.arg:
                keys.append(each.arg)
                values.append(each.value)
                karg_count += 1
            else:
                # Hit a ** entry (or the sentinel): flush the pending run.
                if karg_count:
                    karg_pack_count += 1
                    if karg_count > 1:
                        for value in values:
                            py_emit(value, ctx)
                        ctx.bc.append(
                            Instr(
                                "LOAD_CONST", tuple(keys), lineno=node.lineno))
                        ctx.bc.append(
                            Instr(
                                "BUILD_CONST_KEY_MAP",
                                karg_count,
                                lineno=node.lineno))
                    elif karg_count == 1:
                        # Single pair: a plain BUILD_MAP is cheaper.
                        ctx.bc.append(
                            Instr("LOAD_CONST", keys[0], lineno=node.lineno))
                        py_emit(values[0], ctx)
                        ctx.bc.append(
                            Instr("BUILD_MAP", 1, lineno=node.lineno))
                    keys = []
                    values = []
                    karg_count = 0
                if each is dummy_node:
                    break
                py_emit(each.value, ctx)
                karg_pack_count += 1
        node.keywords.pop(-1)  # pop dummy node
        if karg_pack_count > 1:
            ctx.bc.append(
                Instr(
                    "BUILD_MAP_UNPACK_WITH_CALL",
                    karg_pack_count,
                    lineno=node.lineno))
    else:
        # CALL_FUNCTION_KW path: values on the stack, key names in one
        # const tuple pushed last.
        keys = []
        for each in node.keywords:
            py_emit(each.value, ctx)
            keys.append(each.arg)
        if keys:
            ctx.bc.append(Instr("LOAD_CONST", tuple(keys), lineno=node.lineno))

    if has_star or has_star_star:
        # Low bit of the CALL_FUNCTION_EX flag word signals that a kwargs
        # mapping sits on the stack above the positional tuple.
        ctx.bc.append(
            Instr(
                "CALL_FUNCTION_EX",
                has_star_star | has_key,
                lineno=node.lineno))
    elif has_key:
        # argc counts positional values plus keyword values.
        ctx.bc.append(
            Instr(
                "CALL_FUNCTION_KW",
                len(node.args) + len(node.keywords),
                lineno=node.lineno))
    else:
        ctx.bc.append(
            Instr('CALL_FUNCTION', len(node.args), lineno=node.lineno))
@py_emit.case(ast.YieldFrom)
def py_emit(node: ast.YieldFrom, ctx: Context):
    """Emit ``yield from``: obtain an iterator from the operand, push the
    initial None to send into it, then delegate with YIELD_FROM."""
    py_emit(node.value, ctx)
    lineno = node.lineno
    ctx.bc.append(Instr('GET_YIELD_FROM_ITER', lineno=lineno))
    ctx.bc.append(Instr('LOAD_CONST', None, lineno=lineno))
    ctx.bc.append(Instr("YIELD_FROM", lineno=lineno))
@py_emit.case(ast.Attribute)
def py_emit(node: ast.Attribute, ctx: Context):
    """Emit attribute access: evaluate the object, then load, store or
    delete the attribute according to the expression context."""
    py_emit(node.value, ctx)
    instr_factory = {
        ast.Store: STORE_ATTR,
        ast.Load: LOAD_ATTR,
        ast.Del: DELETE_ATTR,
    }[type(node.ctx)]
    ctx.bc.append(instr_factory(node.attr, lineno=node.lineno))
@py_emit.case(ast.Yield)
def py_emit(node: ast.Yield, ctx: Context):
    """Emit a ``yield`` expression.

    Bug fix: a bare ``yield`` has ``node.value is None``, and recursing
    into the emitter with None crashed; a bare yield yields None, so load
    the None constant instead.
    """
    if node.value is None:
        ctx.bc.append(Instr('LOAD_CONST', None, lineno=node.lineno))
    else:
        py_emit(node.value, ctx)
    ctx.bc.append(Instr('YIELD_VALUE', lineno=node.lineno))
@py_emit.case(ast.Return)
def py_emit(node: ast.Return, ctx: Context):
    """Emit a ``return`` statement.

    Bug fix: a bare ``return`` has ``node.value is None``, which
    previously crashed the recursive emitter; a bare return returns None,
    so load the None constant instead.
    """
    if node.value is None:
        ctx.bc.append(Instr('LOAD_CONST', None, lineno=node.lineno))
    else:
        py_emit(node.value, ctx)
    ctx.bc.append(Instr('RETURN_VALUE', lineno=node.lineno))
@py_emit.case(ast.Pass)
def py_emit(node: ast.Pass, ctx: Context):
    """``pass`` emits no bytecode."""
    pass
@py_emit.case(ast.Nonlocal)
def py_emit(_1, _2):
    """``nonlocal`` declarations emit no bytecode; presumably they are
    resolved earlier during symbol-table analysis (cf. ``sym_tb`` usage
    elsewhere in this module) -- TODO confirm."""
    pass
@py_emit.case(ast.Global)
def py_emit(_1, _2):
    """``global`` declarations emit no bytecode; presumably they are
    resolved earlier during symbol-table analysis -- TODO confirm."""
    pass
@py_emit.case(ast.UnaryOp)
def py_emit(node: ast.UnaryOp, ctx: Context):
    """Emit a unary operation: evaluate the operand, then apply the
    matching UNARY_* instruction.

    :raises TypeError: for an unsupported unary operator.
    """
    # Bug fix: ast.UnaryOp stores its argument in ``operand``, not
    # ``value``; the old code raised AttributeError on every unary op.
    py_emit(node.operand, ctx)
    inst = {
        ast.Not: "UNARY_NOT",
        ast.USub: "UNARY_NEGATIVE",
        ast.UAdd: "UNARY_POSITIVE",
        ast.Invert: "UNARY_INVERT"
    }.get(type(node.op))
    if inst:
        ctx.bc.append(Instr(inst, lineno=node.lineno))
    else:
        raise TypeError
@py_emit.case(ast.BinOp)
def py_emit(node: ast.BinOp, ctx: Context):
    """Emit a binary operation: left operand, right operand, then the
    BINARY_* instruction matching the operator.

    :raises TypeError: for an unsupported operator.
    """
    op_table = {
        ast.Add: "BINARY_ADD",
        ast.Sub: "BINARY_SUBTRACT",
        ast.Mult: "BINARY_MULTIPLY",
        ast.MatMult: "BINARY_MATRIX_MULTIPLY",
        ast.Div: "BINARY_TRUE_DIVIDE",
        ast.FloorDiv: "BINARY_FLOOR_DIVIDE",
        ast.Mod: "BINARY_MODULO",
        ast.Pow: "BINARY_POWER",
        ast.LShift: "BINARY_LSHIFT",
        ast.RShift: "BINARY_RSHIFT",
        ast.BitOr: "BINARY_OR",
        ast.BitXor: "BINARY_XOR",
        ast.BitAnd: "BINARY_AND",
    }
    py_emit(node.left, ctx)
    py_emit(node.right, ctx)
    inst = op_table.get(type(node.op))
    if inst is None:
        raise TypeError
    ctx.bc.append(Instr(inst, lineno=node.lineno))
@py_emit.case(ast.BoolOp)
def py_emit(node: ast.BoolOp, ctx: Context):
    """Emit short-circuit ``and``/``or``: every operand but the last
    conditionally jumps past the rest, leaving the deciding value on the
    stack; otherwise its value is popped and the next operand is tried.

    :raises TypeError: for an unsupported boolean operator.
    """
    jump_table = {
        ast.And: "JUMP_IF_FALSE_OR_POP",
        ast.Or: "JUMP_IF_TRUE_OR_POP",
    }
    inst = jump_table.get(type(node.op))
    if inst is None:
        raise TypeError
    end = Label()
    *initial, final = node.values
    for expr in initial:
        py_emit(expr, ctx)
        ctx.bc.append(Instr(inst, end, lineno=node.lineno))
    py_emit(final, ctx)
    ctx.bc.append(end)
@py_emit.case(ast.Num)
def py_emit(node: ast.Num, ctx: Context):
    """Emit a numeric literal as a LOAD_CONST."""
    constant = node.n
    ctx.bc.append(Instr("LOAD_CONST", constant, lineno=node.lineno))
@py_emit.case(ast.Import)
def py_emit(node: ast.Import, ctx: Context):
    """Emit ``import a [as b]`` for each alias in the statement.

    IMPORT_NAME stack protocol: TOS1 is the relative-import level
    (0 = absolute) and TOS is the fromlist (None for a plain import).

    Bug fix: the binding used ``name.name or name.asname``; since
    ``name.name`` is never empty, the ``as`` alias was always ignored
    (``import os as o`` bound ``os`` instead of ``o``).  The correct
    order is ``name.asname or name.name``.
    """
    for name in node.names:
        ctx.bc.append(
            Instr("LOAD_CONST", 0,
                  lineno=node.lineno))  # TOS1 for level, default to zero
        ctx.bc.append(Instr("LOAD_CONST", None,
                            lineno=node.lineno))  # TOS for fromlist()
        ctx.bc.append(Instr("IMPORT_NAME", name.name, lineno=node.lineno))
        as_name = name.asname or name.name
        # NOTE(review): dotted imports (``import a.b``) still bind the full
        # dotted string rather than the top-level package name -- TODO
        # confirm against CPython semantics.
        ctx.store_name(as_name, lineno=node.lineno)
@py_emit.case(ast.ImportFrom)
def py_emit(node: ast.ImportFrom, ctx: Context):
    """Emit ``from mod import a [as b], ...`` (including ``import *``).

    Bug fix: as in the plain-import case, the alias binding used
    ``name.name or name.asname``, which never honors the ``as`` name;
    the correct order is ``name.asname or name.name``.
    """
    lineno = node.lineno
    ctx.bc.append(Instr("LOAD_CONST", node.level, lineno=lineno))
    names = tuple(name.name for name in node.names)
    ctx.bc.append(LOAD_CONST(names, lineno=lineno))
    ctx.bc.append(Instr("IMPORT_NAME", node.module, lineno=lineno))
    if names == ('*', ):
        ctx.bc.append(Instr('IMPORT_STAR', lineno=lineno))
    else:
        for name in node.names:
            ctx.bc.append(Instr("IMPORT_FROM", name.name, lineno=lineno))
            as_name = name.asname or name.name
            ctx.store_name(as_name, lineno=lineno)
        # IMPORT_FROM leaves the module object on the stack; drop it.
        ctx.bc.append(POP_TOP(lineno=lineno))
@py_emit.case(ast.ListComp)
def py_emit(node: ast.ListComp, ctx: Context):
loop_start = Label()
loop_done = Label()
loop_exit = Label()
append = ctx.bc.append
append(Instr("BUILD_LIST", lineno=node.lineno))
append(Instr("LOAD_FAST", '.0', lineno=node.lineno))
append(loop_start)
append(Instr("FOR_ITER", loop_done))
py_emit(node.generators[0].target)
py_emit(node.generators[0].iter)
| |
D V
# + + + - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# accumulator ROR A 6A 1 2
# zeropage ROR oper 66 2 5
# zeropage,X ROR oper,X 76 2 6
# absolute ROR oper 6E 3 6
# absolute,X ROR oper,X 7E 3 7
address = None
cycles = None
if (op_code == 0x6A): # accumulator
carryOut = True if (self._a&0x01 > 0) else False
self._a = ((self._a>>1) + (0x80 if (self._carry) else 0))&0xFF
self._carry = carryOut
self._negative = self._a&0x80 > 0
self._zero = self._a == 0
cycles = 2
return
elif (op_code == 0x66): # zeropage
address = self._get_address_at_zeropage()
cycles = 5
elif (op_code == 0x76): # zeropage,X
address = self._get_address_at_zeropage_x()
cycles = 6
elif (op_code == 0x6E): # absolute
address = self._get_address_at_absolute()
cycles = 6
elif (op_code == 0x7E): # absolute,X
address = self._get_address_at_absolute_x()
cycles = 7
else:
raise RuntimeError(f"Unknown op code: {op_code}")
value = self._system.mmu.read_byte(address)
carryOut = True if (value&0x01 > 0) else False
value = ((value>>1) + (0x80 if (self._carry) else 0))&0xFF
self._carry = carryOut
self._system.mmu.write_byte(address, value)
self._negative = value&0x80 > 0
self._zero = value == 0
self._system.consume_cycles(cycles)
def RTI(self, op_code):
# Return from Interrupt
# pull SR, pull PC N Z C I D V
# from stack
# addressing assembler opc bytes cyles
# --------------------------------------------
# implied RTI 40 1 6
self._set_status_flag(self.pull())
pc_lo = self.pull()
pc_hi = self.pull()
self._pc = ((pc_hi<<8) + pc_lo)&0xFFFF
cycles = 6
def RTS(self, op_code):
# Return from Subroutine
# pull PC, PC+1 -> PC N Z C I D V
# - - - - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# implied RTS 60 1 6
pc_lo = self.pull()
pc_hi = self.pull()
self._pc = ((pc_hi<<8) + pc_lo + 1)&0xFFFF
cycles = 6
def SBC(self, op_code):
# Subtract Memory from Accumulator with Borrow
# A - M - C -> A N Z C I D V
# + + + - - +
# addressing assembler opc bytes cyles
# --------------------------------------------
# immediate SBC #oper E9 2 2
# zeropage SBC oper E5 2 3
# zeropage,X SBC oper,X F5 2 4
# absolute SBC oper ED 3 4
# absolute,X SBC oper,X FD 3 4*
# absolute,Y SBC oper,Y F9 3 4*
# (indirect,X) SBC (oper,X) E1 2 6
# (indirect),Y SBC (oper),Y F1 2 5*
value = None
cycles = None
if (op_code == 0xE9): # immediate
value = self._get_next_byte()
cycles = 2
elif (op_code == 0xE5): # zeropage
value = self._get_value_at_zeropage()
cycles = 3
elif (op_code == 0xF5): # zeropage,X
value = self._get_value_at_zeropage_x()
cycles = 4
elif (op_code == 0xED): # absolute
value = self._get_value_at_absolute()
cycles = 4
elif (op_code == 0xFD): # absolute,X
value = self._get_value_at_absolute_x()
cycles = 4
elif (op_code == 0xF9): # absolute,Y
value = self._get_value_at_absolute_y()
cycles = 4
elif (op_code == 0xE1): # (indirect,X)
value = self._get_value_at_indirect_x()
cycles = 6
elif (op_code == 0xF1): # (indirect),Y
value = self._get_value_at_indirect_y()
cycles = 5
else:
raise RuntimeError(f"Unknown op code: {op_code}")
# Invert value and run through same logic as ADC.
value ^= 0xFF
result = self._a + value + (1 if self._carry == True else 0)
self._carry = result > 0xFF
# More info on source: https://stackoverflow.com/a/29224684
self._overflow = ~(self._a ^ value) & (self._a ^ result) & 0x80
self._a = result&0xFF
self._negative = self._a&0x80 > 1
self._zero = self._a == 0
self._system.consume_cycles(cycles)
def SEC(self, op_code):
# Set Carry Flag
# 1 -> C N Z C I D V
# - - 1 - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# implied SEC 38 1 2
self._carry = True
cycles = 2
def SED(self, op_code):
# Set Decimal Flag
# 1 -> D N Z C I D V
# - - - - 1 -
# addressing assembler opc bytes cyles
# --------------------------------------------
# implied SED F8 1 2
self._decimal_mode = True
cycles = 2
def SEI(self, op_code):
# Set Interrupt Disable Status
# 1 -> I N Z C I D V
# - - - 1 - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# implied SEI 78 1 2
self._interrupt_disable = True
cycles = 2
def STA(self, op_code):
# Store Accumulator in Memory
# A -> M N Z C I D V
# - - - - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# zeropage STA oper 85 2 3
# zeropage,X STA oper,X 95 2 4
# absolute STA oper 8D 3 4
# absolute,X STA oper,X 9D 3 5
# absolute,Y STA oper,Y 99 3 5
# (indirect,X) STA (oper,X) 81 2 6
# (indirect),Y STA (oper),Y 91 2 6
address = None
cycles = None
if (op_code == 0x85): # zeropage
address = self._get_address_at_zeropage()
cycles = 3
elif (op_code == 0x95): # zeropage,X
address = self._get_address_at_zeropage_x()
cycles = 4
elif (op_code == 0x8D): # absolute
address = self._get_address_at_absolute()
cycles = 4
elif (op_code == 0x9D): # absolute,X
address = self._get_address_at_absolute_x()
cycles = 5
elif (op_code == 0x99): # absolute,Y
address = self._get_address_at_absolute_y()
cycles = 5
elif (op_code == 0x81): # (indirect,X)
address = self._get_address_at_indirect_x()
cycles = 6
elif (op_code == 0x91): # (indirect),Y
address = self._get_address_at_indirect_y()
cycles = 6
else:
raise RuntimeError(f"Unknown op code: {op_code}")
self._system.mmu.write_byte(address, self._a)
self._system.consume_cycles(cycles)
def STX(self, op_code):
# Store Index X in Memory
# X -> M N Z C I D V
# - - - - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# zeropage STX oper 86 2 3
# zeropage,Y STX oper,Y 96 2 4
# absolute STX oper 8E 3 4
address = None
cycles = None
if (op_code == 0x86): # zeropage
address = self._get_address_at_zeropage()
cycles = 3
elif (op_code == 0x96): # zeropage,Y
address = self._get_address_at_zeropage_y()
cycles = 4
elif (op_code == 0x8E): # absolute
address = self._get_address_at_absolute()
cycles = 4
else:
raise RuntimeError(f"Unknown op code: {op_code}")
self._system.mmu.write_byte(address, self._x)
self._system.consume_cycles(cycles)
def STY(self, op_code):
# Sore Index Y in Memory
# Y -> M N Z C I D V
# - - - - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# zeropage STY oper 84 2 3
# zeropage,X STY oper,X 94 2 4
# absolute STY oper 8C 3 4
address = None
cycles = None
if (op_code == 0x84): # zeropage
address = self._get_address_at_zeropage()
cycles = 3
elif (op_code == 0x94): # zeropage,X
address = self._get_address_at_zeropage_x()
cycles = 4
elif (op_code == 0x8C): # absolute
address = self._get_address_at_absolute()
cycles = 4
else:
raise RuntimeError(f"Unknown op code: {op_code}")
self._system.mmu.write_byte(address, self._y)
self._system.consume_cycles(cycles)
def TAX(self, op_code):
# Transfer Accumulator to Index X
# A -> X N Z C I D V
# + + - - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# implied TAX AA 1 2
self._x = self._a
self._negative = (self._x>>7) > 0
self._zero = self._x == 0
cycles = 2
def TAY(self, op_code):
# Transfer Accumulator to Index Y
# A -> Y N Z C I D V
# + + - - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# implied TAY A8 1 2
self._y = self._a
self._negative = (self._y>>7) > 0
self._zero = self._y == 0
cycles = 2
def TSX(self, op_code):
# Transfer Stack Pointer to Index X
# SP -> X N Z C I D V
# + + - - - -
# addressing assembler opc bytes cyles
# --------------------------------------------
# implied TSX BA 1 2
self._x = self._sp
self._negative = (self._x>>7) > 0
self._zero = self._x == 0
cycles = 2
def TXA(self, op_code):
# Transfer Index X to Accumulator
# X -> A N Z C I D V
# | |
# Repository: pgriewank/ASR_tools
#todo
#make timesteps adaptive
#read in dz
import numpy as np
import math
from netCDF4 import Dataset
import os
from datetime import datetime,timedelta
#from unionfind import Unionfind
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
from matplotlib.pyplot import cm
from cusize_functions import *
import time as ttiimmee
import sys
import pickle
#from cdo import *
import pandas as pd
# --- run configuration ------------------------------------------------------
# NOTE: the repeated assignments below are deliberate scratch-work style;
# only the LAST assignment of each variable takes effect.
directory_clustering = '/data/inscape/phil/lasso_clustering/'
directory_clustering = '/data/testbed/lasso/clustering/'
directory_data = '/data/testbed/lasso/sims/' #+date
directory_data = '/data/inscape/lasso/sims/' #+date
# Simulation cases to process; narrowed down to the single active case.
dates = ['20160611_micro']
dates = ['20160611']
dates = ['20160830']
dates = ['bomex']
# Per-case input paths (filled by the loop below).
filename_w = []
filename_ql = []
filename_qt = []
filename_clus =[]
filename_couv =[]
for date in dates:
    filename_w.append(directory_data+date+'/w.nc')
    filename_ql.append(directory_data+date+'/ql.nc')
    filename_qt.append(directory_data+date+'/qt.nc')
    #filename_clus.append(directory_data+date+'/couvreux_clustering_20sig_filt_v2.pkl')
    filename_clus.append(directory_clustering+'couvreux_'+date+'_clustering_20sig_filt_v2.pkl')
    #filename_couv.append(directory_clustering+'couv_prop_sig2_filt_'+date+'.pkl')
    filename_couv.append(directory_clustering+'couv_prop_sig2_filt_'+date+'.pkl')
#col_names = ['Volume','sq Area','Radius','V_h','w','w profile','ql profile','qv profile','Area profile','Area','height','cl h','cl A','cl V','dry w','dry h','dry A','dry V','dry w','time','x','y','z','base','z max cf','w_ref','w_ref_vec','w_below_vec','w_below','w_bottom']
#We are getting rid of the bottom/below things because they are tricky and maybe no necessary
col_names = ['Volume','sq Area','Radius','V_h','w', 'w profile','ql profile','qv profile','Area profile','Area','height', 'wet h','wet A','wet V','wet w', 'dry w','dry h','dry A','dry V' ,'time','x','y','z','base','z max cf','w flux','qt flux','qt total flux','qt fluc','w90 profile']
# Grid spacing -- presumably metres, matching the model output; the later
# dz overrides the first. TODO confirm units against the simulation files.
dx = 25
dy = 25
dz = 25
dz = 23.4375
dA = dx*dy   # horizontal cell area
dV = dx*dy*dz  # cell volume
ql_min = 1e-6 #cutoff for cloudy
# Domain dimensions read from the w field of the first case.
n_z,n_x,n_y = get_zxy_dimension(filename_w[0],'w')
#got to make timesteps automated
timesteps = 70
#Time loop that loads all the cluss identified via clustering and calculates their properties
for d in range(len(dates)):
#loading w
file_w = Dataset(filename_w[d],read='r')
file_ql = Dataset(filename_ql[d],read='r')
file_qt = Dataset(filename_qt[d],read='r')
seconds_since_start = file_w.variables['time'][:]
try:
time_init = datetime.strptime(dates[d][0:7]+'0600','%Y%m%d%H%M')
except:
time_init = datetime.strptime('2020'+'01'+'01'+'0600','%Y%m%d%H%M')
#loading clustering from file
time1 = ttiimmee.time()
with open(filename_clus[d],'rb') as f: # Python 3: open(..., 'rb')
print('reading in clustering results via pickle in '+filename_clus[d])
cluster_cell_list_time, idx_3d_cluster_cells_time = pickle.load(f,encoding='latin1')
time2 = ttiimmee.time()
print(' reading in the clustering took so many seconds:',(time2-time1))
if timesteps > len(cluster_cell_list_time):
timesteps = len(cluster_cell_list_time)
print(' limiting timesteps to len(cluster_cell_list_time):',timesteps)
#and calculating varous sizes as well as mean vertical velocity
couv_A_all = np.zeros(0)
couv_w_all = np.zeros(0)
couv_V_all = np.zeros(0)
couv_h_all = np.zeros(0)
couv_base_all = np.zeros(0)
couv_max_cf_all = np.zeros(0)
couv_prof_w_all = np.zeros([0,n_z])
couv_prof_w90_all = np.zeros([0,n_z])
couv_prof_A_all = np.zeros([0,n_z])
couv_prof_qv_all = np.zeros([0,n_z])
couv_prof_ql_all = np.zeros([0,n_z])
couv_prof_cf_all = np.zeros([0,n_z])
#fluxes
couv_prof_flux_w_all = np.zeros([0,n_z]) #Poor mans mass flux
couv_prof_flux_qt_all = np.zeros([0,n_z])
couv_prof_total_flux_qt_all = np.zeros([0,n_z])
#fluctuations
couv_prof_fluc_qt_all = np.zeros([0,n_z])
couv_t_all = []
couv_x_max_all = []
couv_y_max_all = []
couv_z_max_all = []
couv_wet_w_all = np.zeros(0)
couv_wet_V_all = np.zeros(0)
couv_wet_A_all = np.zeros(0)
couv_wet_h_all = np.zeros(0)
couv_dry_w_all = np.zeros(0)
couv_dry_V_all = np.zeros(0)
couv_dry_A_all = np.zeros(0)
couv_dry_h_all = np.zeros(0)
couv_wet_prof_A_all = np.zeros([0,n_z])
couv_dry_prof_A_all = np.zeros([0,n_z])
#couv_w_below_all = np.zeros(0)
#couv_w_bottom_all = np.zeros(0)
#couv_w_ref_all = np.zeros(0)
#couv_w_ref_vec_all = []
#couv_w_below_vec_all = []
for t in range(timesteps):
#for t in range(20,21):
ncluss = len(cluster_cell_list_time[t])
time_now = time_init + timedelta(seconds=float(seconds_since_start[t]))
print('timestep and ncluss ',t,ncluss)
print('datetime ',t,time_now)
if ncluss>0:
time1 = ttiimmee.time()
#load data
w = grab_3d_field(file_w,t,'w')
ql = grab_3d_field(file_ql,t,'ql')
qt = grab_3d_field(file_qt,t,'qt')
qv = qt-ql
qt_mean_prof = np.mean(qt,axis=1)
qt_mean_prof = np.mean(qt_mean_prof,axis=1)
w_qt_fluc = qt*0.0
for n in range(n_z):
w_qt_fluc[n,:,:]=w[n,:,:]*(qt[n,:,:]-qt_mean_prof[n])
w_qt = w*qt
couv_w = np.zeros(ncluss)
couv_V = np.zeros(ncluss)
couv_A = np.zeros(ncluss)
couv_h = np.zeros(ncluss)
couv_max_cf = np.zeros(ncluss)
couv_base = np.zeros(ncluss)
couv_prof_w = np.zeros([ncluss,n_z])
couv_prof_w90 = np.zeros([ncluss,n_z])
couv_prof_A = np.zeros([ncluss,n_z])
couv_prof_ql = np.zeros([ncluss,n_z])
couv_prof_qv = np.zeros([ncluss,n_z])
couv_prof_cf = np.zeros([ncluss,n_z])
couv_prof_flux_w = np.zeros([ncluss,n_z])
couv_prof_flux_qt = np.zeros([ncluss,n_z])
couv_prof_total_flux_qt = np.zeros([ncluss,n_z])
couv_prof_fluc_qt = np.zeros([ncluss,n_z])
couv_t = []
couv_x_max = []
couv_y_max = []
couv_z_max = []
couv_wet_w = np.zeros(ncluss)
couv_wet_V = np.zeros(ncluss)
couv_wet_A = np.zeros(ncluss)
couv_wet_h = np.zeros(ncluss)
couv_dry_w = np.zeros(ncluss)
couv_dry_V = np.zeros(ncluss)
couv_dry_A = np.zeros(ncluss)
couv_dry_h = np.zeros(ncluss)
couv_wet_prof_A = np.zeros([ncluss,n_z])
couv_dry_prof_A = np.zeros([ncluss,n_z])
#setting to nan incase they don't exist
couv_wet_w[:] = 'nan'
couv_wet_V[:] = 'nan'
couv_wet_A[:] = 'nan'
couv_wet_h[:] = 'nan'
couv_dry_w[:] = 'nan'
couv_dry_V[:] = 'nan'
couv_dry_A[:] = 'nan'
couv_dry_h[:] = 'nan'
#couv_w_ref_vec = []
#couv_w_below_vec = []
#couv_w_below = np.zeros(ncluss) #imitate Lareau, get the layer beneath the clus everywhere within 300 m of the CBL
#couv_w_bottom = np.zeros(ncluss) #get the lowest clus layer everywhere within 300 m of the CBL
#couv_w_ref = np.zeros(ncluss)
#calculate the z lvl of maximum amount of clus fraction at that timestep
idx_z = idx_3d_cluster_cells_time[t][0]
z_max_cf = np.argmax(np.bincount(idx_z))
print('z_max_cf :',z_max_cf)
# #Calculating cbl height + 300m using a critical value for the horizontal variability of w following Lareau
# w_var = 1.0
# z_var=0
# while w_var > 0.08:
# z_var += 1
# w_var = np.var(w[z_var,:])
# cbl_idx = z_var
# cbl_idx_max = cbl_idx+int(300/dz)
# print('cbl height + 300 m index :',cbl_idx_max)
for i in range(ncluss):
idx_z = idx_3d_cluster_cells_time[t][0][cluster_cell_list_time[t][i]]
idx_x = idx_3d_cluster_cells_time[t][1][cluster_cell_list_time[t][i]]
idx_y = idx_3d_cluster_cells_time[t][2][cluster_cell_list_time[t][i]]
couv_w[i] = np.mean(w[idx_z,idx_x,idx_y])
couv_V[i] = (float(len(cluster_cell_list_time[t][i]))*dV)**(1./3.)
couv_A[i] = func_proj_A(idx_x,idx_y,dA)
couv_h[i] = (max(idx_z)-min(idx_z)+1)*dz
couv_max_cf[i] = z_max_cf
couv_prof_w[i,:],tmp =func_vert_mean(idx_z,idx_x,idx_y,w)
couv_prof_w90[i,:],tmp =func_vert_percentile(idx_z,idx_x,idx_y,w,90)
couv_prof_A[i,:] = tmp*dA
couv_prof_qv[i,:],tmp =func_vert_mean(idx_z,idx_x,idx_y,qv)
couv_prof_ql[i,:],tmp =func_vert_mean(idx_z,idx_x,idx_y,ql)
#getting the fluxes
couv_prof_flux_w[i,:],tmp =func_vert_mean(idx_z,idx_x,idx_y,w)
couv_prof_flux_w[i,:] = couv_prof_flux_w[i,:]*couv_prof_A[i,:]
#couv_prof_flux_qt[i,:],tmp =func_vert_mean(idx_z,idx_x,idx_y,w*qt)
#lets see if it is cheaper to calculate this once
couv_prof_total_flux_qt[i,:],tmp =func_vert_mean(idx_z,idx_x,idx_y,w_qt)
couv_prof_total_flux_qt[i,:] = couv_prof_total_flux_qt[i,:]*couv_prof_A[i,:]
couv_prof_flux_qt[i,:],tmp =func_vert_mean(idx_z,idx_x,idx_y,w_qt_fluc)
couv_prof_flux_qt[i,:] = couv_prof_flux_qt[i,:]*couv_prof_A[i,:]
couv_prof_fluc_qt[i,:] = couv_prof_qv[i,:]+couv_prof_ql[i,:]-qt_mean_prof
# #Speed stuff which is currently not used
# couv_w_ref[i] = couv_prof_w[i,z_max_cf]
# ############################################################################################
# #Here we determine the cells which are 1 cell below the clus and no higher than cbl_idx_max
# #Using this we calculate the mean value of w
# #As well as the 95th percentile
# #And for a reason I forgot I also pass along all w values
# ############################################################################################
# idx_xy = np.vstack([idx_x,idx_y])
# idx_xy_unique = np.unique(idx_xy,axis=1)
# idx_xy_z_below = np.zeros(idx_xy_unique.shape[1],dtype=np.uint8)
# for ii in range(idx_xy_unique.shape[1]):
# #Searches for where the x and y values of the unique value
# idx_z_xy = idx_z[np.where((idx_xy_unique[0,ii]==idx_xy[0])*(idx_xy_unique[1,ii]==idx_xy[1]))[0]]
# #z_min = min(idx_z_xy)-1
# idx_xy_z_below[ii] = min(idx_z_xy)-1
# #print(z_min)
# w_below_vec = w[idx_xy_z_below,idx_xy_unique[0,:],idx_xy_unique[1,:]].ravel()
# w_below_vex_cbl300 = w_below_vec[idx_xy_z_below<cbl_idx_max]
# if w_below_vex_cbl300.size:
# couv_w_below_vec.append(w_below_vec)
# couv_w_below[i] = np.mean(w_below_vec)
# w_below_up = w_below_vex_cbl300[ w_below_vex_cbl300>0.1]
# if w_below_up.size:
# couv_w_below_95_up[i] = np.percentile(w_below_up,95)
# else:
# couv_w_below_95_up[i] = 'nan'
# else:
# couv_w_below_vec.append([])
# couv_w_below[i] = 'nan'
# #Getting the bottom clus layer
# w_bottom_vec = w[idx_xy_z_below+1,idx_xy_unique[0,:],idx_xy_unique[1,:]].ravel()
# w_bottom_vec_cbl300 = w_bottom_vec[idx_xy_z_below+1<cbl_idx_max]
# if w_bottom_vec_cbl300.size:
# couv_w_bottom[i] = np.mean(w_bottom_vec)
# else:
# couv_w_bottom[i] = 'nan'
# #print('idx_xy_z_below',idx_xy_z_below)
# #print('idx_z',idx_z)
# #print('w_bottom_vec',w_bottom_vec)
# #print('w',w[idx_z,idx_x,idx_y])
# #print('wtf clus bottom vs clus w',couv_w_bottom[i],couv_w[i])
# #if i>100:
# # []+absd
# ############################################################################################
# #getting the 95th percentile of all cells in the z_ref level
# #As well as the mean w at that level
# #And for good measure I also just pass along the whole vector of w
# ############################################################################################
# ind_z_ref = np.where(idx_z==z_max_cf)[0]
# w_ref_lvl = w[idx_z[ind_z_ref],idx_x[ind_z_ref],idx_y[ind_z_ref]].ravel()
# w_ref_up = w_ref_lvl[w_ref_lvl>0.1]
# if w_ref_lvl.size:
# couv_w_ref_95[i] = np.percentile(w_ref_lvl,95)
# couv_w_ref_vec.append(w_ref_lvl)
# else:
# couv_w_ref_95[i] = 'nan'
# couv_w_ref_vec.append([])
# if w_ref_up.size:
# couv_w_ref_95_up[i] = np.percentile(w_ref_up,95)
# couv_w_ref_up_n[i] = len(w_ref_up)
# else:
# couv_w_ref_95_up[i] = 'nan'
# couv_w_ref_up_n[i] = 0
# if np.isnan(np.nanmax(tmp)):
# print('wtf')
# print(tmp)
couv_base[i]=np.min(idx_z)*dz
couv_t.append(time_now)
couv_x_max.append(np.argmax(np.bincount(idx_x)))
couv_y_max.append(np.argmax(np.bincount(idx_y)))
couv_z_max.append(np.argmax(np.bincount(idx_z)))
#Now the new part, separating the cluster into dry and wet
ql_tmp = ql[idx_z,idx_x,idx_y]
idx_z_dry = idx_z[ql_tmp<ql_min]
idx_x_dry = idx_x[ql_tmp<ql_min]
idx_y_dry = idx_y[ql_tmp<ql_min]
if idx_z_dry.size>0:
couv_dry_w[i] = np.mean(w[idx_z_dry,idx_x_dry,idx_y_dry])
couv_dry_V[i] = (float(len(idx_z_dry))*dV)**(1./3.)
couv_dry_A[i] = func_proj_A(idx_x_dry,idx_y_dry,dA)
couv_dry_h[i] = (max(idx_z_dry)-min(idx_z_dry)+1)*dz
idx_z_wet = idx_z[ql_tmp>=ql_min]
idx_x_wet = idx_x[ql_tmp>=ql_min]
idx_y_wet = idx_y[ql_tmp>=ql_min]
if idx_z_wet.size>0:
#print('idx_z_wet',idx_z_wet)
#print('ql_tmp',ql_tmp)
#print('ql[idx_z_wet,idx_x_wet,idx_y_wet]',ql[idx_z_wet,idx_x_wet,idx_y_wet])
#print((float(len(idx_z_wet))*dV)**(1./3.),'(float(len(idx_z_wet))*dV)**(1./3.)')
#print((float(len(idx_z))*dV)**(1./3.),'(float(len(idx_z))*dV)**(1./3.)')
#print((float(len(idx_z))*dV)**(1./3.)-(float(len(idx_z_wet))*dV)**(1./3.))
couv_wet_w[i] = np.mean(w[idx_z_wet,idx_x_wet,idx_y_wet])
couv_wet_V[i] = (float(len(idx_z_wet))*dV)**(1./3.)
couv_wet_A[i] = func_proj_A(idx_x_wet,idx_y_wet,dA)
couv_wet_h[i] = (max(idx_z_wet)-min(idx_z_wet)+1)*dz
if couv_wet_V[i]>couv_V[i]:
print('wtf is happening',couv_wet_V[i],couv_V[i])
#I try to get the cf by calculating the Area profiles of both dry and wet and the ratio
#actually, it might be best to calculate that anyway
#I have a feeling this will throw up some weird shit when
tmp1,tmpwet =func_vert_mean(idx_z_wet,idx_x_wet,idx_y_wet,w)
couv_wet_prof_A[i,:] = tmpwet*dA
tmp1,tmpdry =func_vert_mean(idx_z_dry,idx_x_dry,idx_y_dry,w)
couv_dry_prof_A[i,:] = tmpdry*dA
couv_prof_cf[i,:] = tmpwet/(tmpdry+tmpwet)
else:
couv_prof_cf[i,:] = couv_prof_A[i,:]*0.0
couv_V_all = np.hstack([couv_V_all,couv_V])
couv_w_all = np.hstack([couv_w_all,couv_w])
couv_A_all = np.hstack([couv_A_all,couv_A])
couv_h_all = np.hstack([couv_h_all,couv_h])
# couv_w_ref_all = np.hstack([couv_w_ref_all,couv_w_ref])
# couv_w_below_all = np.hstack([couv_w_below_all,couv_w_below])
# couv_w_bottom_all = np.hstack([couv_w_bottom_all,couv_w_bottom])
# couv_w_ref_95_all = np.hstack([couv_w_ref_95_all,couv_w_ref_95])
# couv_w_ref_95_up_all = np.hstack([couv_w_ref_95_up_all,couv_w_ref_95_up])
# couv_w_below_95_up_all = np.hstack([couv_w_below_95_up_all,couv_w_below_95_up])
# couv_w_ref_up_n_all = np.hstack([couv_w_ref_up_n_all,couv_w_ref_up_n])
# couv_w_ref_vec_all.extend(couv_w_ref_vec)
# couv_w_below_vec_all.extend(couv_w_below_vec)
couv_max_cf_all = np.hstack([couv_max_cf_all,couv_max_cf])
couv_base_all = np.hstack([couv_base_all,couv_base])
couv_t_all.extend(couv_t)
couv_x_max_all.extend(couv_x_max)
couv_y_max_all.extend(couv_y_max)
couv_z_max_all.extend(couv_z_max)
couv_wet_V_all = np.hstack([couv_wet_V_all,couv_wet_V])
couv_wet_w_all = np.hstack([couv_wet_w_all,couv_wet_w])
couv_wet_A_all = np.hstack([couv_wet_A_all,couv_wet_A])
couv_wet_h_all = np.hstack([couv_wet_h_all,couv_wet_h])
couv_dry_V_all = np.hstack([couv_dry_V_all,couv_dry_V])
couv_dry_w_all = np.hstack([couv_dry_w_all,couv_dry_w])
couv_dry_A_all = np.hstack([couv_dry_A_all,couv_dry_A])
| |
= DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_deidentify_template(
dlp.GetDeidentifyTemplateRequest(), name="name_value",
)
def test_list_deidentify_templates(
    transport: str = "grpc", request_type=dlp.ListDeidentifyTemplatesRequest
):
    """The client issues one ListDeidentifyTemplates RPC and wraps the
    response in a pager exposing the response fields."""
    client = DlpServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Every proto3 field is optional at runtime and the transport is
    # mocked, so an empty request suffices.
    request = request_type()

    # Fake the gRPC stub method with a canned response.
    with mock.patch.object(
        type(client._transport.list_deidentify_templates), "__call__"
    ) as call:
        call.return_value = dlp.ListDeidentifyTemplatesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_deidentify_templates(request)

    # Exactly one stub invocation, carrying the expected request type.
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0] == dlp.ListDeidentifyTemplatesRequest()

    # The response is wrapped in a pager that surfaces the token.
    assert isinstance(response, pagers.ListDeidentifyTemplatesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_deidentify_templates_from_dict():
    """Exercise the same flow with a plain dict as the request type."""
    test_list_deidentify_templates(request_type=dict)
@pytest.mark.asyncio
async def test_list_deidentify_templates_async(transport: str = "grpc_asyncio"):
    """Async variant: the awaited call returns an async pager built from
    the mocked gRPC response."""
    client = DlpServiceAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = dlp.ListDeidentifyTemplatesRequest()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_deidentify_templates), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # Bug fix: the canned token had been corrupted to "next_<PASSWORD>"
        # (an anonymization artifact), contradicting the assertion below;
        # restore the value the test actually checks.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dlp.ListDeidentifyTemplatesResponse(
                next_page_token="next_page_token_value",
            )
        )

        response = await client.list_deidentify_templates(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListDeidentifyTemplatesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_deidentify_templates_field_headers():
    """Verify that routing field headers are derived from the request's parent."""
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dlp.ListDeidentifyTemplatesRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_deidentify_templates), "__call__"
    ) as call:
        call.return_value = dlp.ListDeidentifyTemplatesResponse()
        client.list_deidentify_templates(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_deidentify_templates_field_headers_async():
    """Async variant: verify routing field headers are derived from the request."""
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = dlp.ListDeidentifyTemplatesRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_deidentify_templates), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dlp.ListDeidentifyTemplatesResponse()
        )
        await client.list_deidentify_templates(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_deidentify_templates_flattened():
    """Verify the flattened `parent=` keyword populates the request object."""
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_deidentify_templates), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = dlp.ListDeidentifyTemplatesResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_deidentify_templates(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
def test_list_deidentify_templates_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_deidentify_templates(
            dlp.ListDeidentifyTemplatesRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_deidentify_templates_flattened_async():
    """Async variant: flattened `parent=` keyword populates the request object."""
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_deidentify_templates), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (a redundant plain-response assignment that was immediately
        # overwritten by the FakeUnaryUnaryCall below has been removed)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            dlp.ListDeidentifyTemplatesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_deidentify_templates(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_deidentify_templates_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises."""
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_deidentify_templates(
            dlp.ListDeidentifyTemplatesRequest(), parent="parent_value",
        )
def test_list_deidentify_templates_pager():
    """Verify that the sync pager iterates items across all pages."""
    # bugfix: instantiate the credentials (the class object was being passed),
    # matching every other test in this module.
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_deidentify_templates), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                ],
                next_page_token="abc",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[], next_page_token="def",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[dlp.DeidentifyTemplate(),], next_page_token="ghi",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                ],
            ),
            RuntimeError,
        )

        # The pager must carry the routing metadata so follow-up page
        # requests are routed the same way as the first.
        expected_metadata = (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_deidentify_templates(request={})
        assert pager._metadata == expected_metadata

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, dlp.DeidentifyTemplate) for i in results)
def test_list_deidentify_templates_pages():
    """Verify that the sync pager exposes per-page raw responses and tokens."""
    # bugfix: instantiate the credentials (the class object was being passed),
    # matching every other test in this module.
    client = DlpServiceClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.list_deidentify_templates), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                ],
                next_page_token="abc",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[], next_page_token="def",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[dlp.DeidentifyTemplate(),], next_page_token="ghi",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_deidentify_templates(request={}).pages)
        # The last page has no next_page_token, hence the trailing "".
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_deidentify_templates_async_pager():
    """Verify that the async pager iterates items across all pages."""
    # bugfix: instantiate the credentials (the class object was being passed),
    # matching every other test in this module.
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_deidentify_templates),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                ],
                next_page_token="abc",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[], next_page_token="def",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[dlp.DeidentifyTemplate(),], next_page_token="ghi",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_deidentify_templates(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, dlp.DeidentifyTemplate) for i in responses)
@pytest.mark.asyncio
async def test_list_deidentify_templates_async_pages():
    """Verify that the async pager exposes per-page raw responses and tokens."""
    # bugfix: instantiate the credentials (the class object was being passed),
    # matching every other test in this module.
    client = DlpServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._client._transport.list_deidentify_templates),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                ],
                next_page_token="abc",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[], next_page_token="def",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[dlp.DeidentifyTemplate(),], next_page_token="ghi",
            ),
            dlp.ListDeidentifyTemplatesResponse(
                deidentify_templates=[
                    dlp.DeidentifyTemplate(),
                    dlp.DeidentifyTemplate(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page in (await client.list_deidentify_templates(request={})).pages:
            pages.append(page)
        # The last page has no next_page_token, hence the trailing "".
        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page.raw_page.next_page_token == token
def test_delete_deidentify_template(
    transport: str = "grpc", request_type=dlp.DeleteDeidentifyTemplateRequest
):
    """Verify delete_deidentify_template issues the RPC and returns None."""
    client = DlpServiceClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client._transport.delete_deidentify_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_deidentify_template(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == dlp.DeleteDeidentifyTemplateRequest()

    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_deidentify_template_from_dict():
    """Run the delete test with a plain dict as the request body."""
    test_delete_deidentify_template(request_type=dict)
@pytest.mark.asyncio
async def test_delete_deidentify_template_async(transport: str = "grpc_asyncio"):
client = DlpServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = dlp.DeleteDeidentifyTemplateRequest()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client._client._transport.delete_deidentify_template), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_deidentify_template(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the response is the type that we | |
# Repository: rthartley/reacombiner
import subprocess
from pathlib import PurePath
from time import strptime, mktime
import rpp
from ReaCombiner import db, file_utils
from ReaCombiner import rppFile
from ReaCombiner.data import *
# --- Global GUI configuration and layout (PySimpleGUI) ---
# NOTE(review): `sg` is not imported in the visible import block above;
# presumably it arrives via `from ReaCombiner.data import *` — confirm.
sg.theme('LightGreen')
sg.SetOptions(element_padding=(5, 5))
bg_color = "cadetblue"  # background for the read-only detail text fields
window: sg.Window = None   # the currently visible window (main or help)
window0: sg.Window = None  # the main window, kept while help is shown

# ------ Menu Definition ------ #
menu_def = [['File', ['Add Project', 'Delete Project', 'Print Project', 'Scrape Folder', 'Exit']],
            ['Run', ['Run Reaper']],
            ['Help', 'About...']]

# Projects pane: table of projects, sort radio buttons, and detail fields.
projectTableHeadings = [sg.Button('Project'), sg.Button('Mix'), sg.Button('Last Modified')]
projectTable = sg.Table([], auto_size_columns=False,
                        col_widths=[20, 12, 20], justification="left",
                        key='PROJECTS', num_rows=15, enable_events=True,
                        headings=[b.get_text() for b in projectTableHeadings])
# noinspection SpellCheckingInspection
projectSorts = [[sg.Text('Sort by', size=(8, 1), justification='r'),
                 sg.Radio('Name', 'sort', default=True, size=(6, 1), key='-SORT-NAME-', enable_events=True),
                 sg.Radio('Mix', 'sort', size=(6, 1), key='-SORT-MIX-', enable_events=True),
                 sg.Radio('Date', 'sort', size=(6, 1), key='-SORT-DATE-', enable_events=True),
                 sg.Radio('A-Z', 'upordown', size=(4, 1), default=True, key='-SORT-UP-', enable_events=True),
                 sg.Radio('Z-A', 'upordown', size=(4, 1), default=False, key='-SORT-DOWN-', enable_events=True)]]
projectTexts = [
    [sg.Text('Location', justification='r', pad=((0, 10), (15, 5))),
     sg.Text(size=(45, 2), key='location', auto_size_text=True, background_color=bg_color, pad=((0, 0), (15, 5)))],
    [sg.Text('Tempo', justification='r', pad=((0, 10), (0, 5))),
     sg.Text(size=(20, 1), key='tempo', background_color=bg_color, pad=((0, 0), (0, 5)))],
    [sg.Text('Record Path', justification='r', pad=((0, 10), (0, 0))),
     sg.Text(size=(45, 2), key='record_path', auto_size_text=True, background_color=bg_color, pad=((0, 0), (0, 5)))],
    [sg.Text('Sample Rate', justification='r', pad=((0, 10), (0, 0))),
     sg.Text(size=(10, 1), key='sample_rate', background_color=bg_color, pad=((0, 0), (0, 5)))],
    [sg.Frame("Project Notes",
              [[sg.Text(size=(40, 10),
                        key='project_notes', auto_size_text=True, background_color=bg_color)]], pad=((0, 0), (15, 15)),
              )]]
projectTableLayout = [[projectTable]] + projectSorts + projectTexts

# Tracks pane: table of tracks plus per-track detail fields.
trackTableHeadings = ['Num', 'Track Name']
trackTable = sg.Table([], max_col_width=15, auto_size_columns=False, num_rows=15,
                      col_widths=[4, 25], justification="left",
                      key='TRACKS', enable_events=True, headings=trackTableHeadings)
trackTexts = [[sg.Text('Main Send', justification='r', pad=((0, 10), (15, 5))),
               sg.Text(size=(5, 1), key='main_send', background_color=bg_color, pad=((0, 0), (15, 5)))],
              [sg.Text('Vol', justification='r', pad=((0, 10), (0, 5))),
               sg.Text(size=(10, 1), key='vol', background_color=bg_color, pad=((0, 0), (0, 5)))],
              [sg.Text('Pan', justification='r', pad=((0, 10), (0, 5))),
               sg.Text(size=(10, 1), key='pan', background_color=bg_color, pad=((0, 0), (0, 5)))],
              [sg.Text('Aux Recvs', justification='r', pad=((0, 10), (0, 5))),
               sg.Text(size=(25, 5), key='aux_recvs', background_color=bg_color, pad=((0, 0), (0, 5)))],
              [sg.Frame("Track Notes", [[sg.Text(size=(40, 10), key='track_notes', background_color=bg_color)]],
                        pad=((0, 0), (15, 15)))]]
trackTableLayout = [[trackTable]] + trackTexts

# Items pane: items table, plugin table, and item detail fields.
itemTableHeadings = ['Item Name', 'Source', 'Position']
itemTable = sg.Table([], auto_size_columns=False, num_rows=15, key='ITEMS', enable_events=True,
                     col_widths=[25, 6, 20], justification="left", headings=itemTableHeadings)
pluginTableHeadings = ['Name', 'File', 'Preset']
pluginTable = sg.Table([], max_col_width=15, auto_size_columns=False, num_rows=15,
                       col_widths=[24, 12, 12], key='PLUGINS', headings=pluginTableHeadings,
                       justification="left")
pluginTableLayout = [[pluginTable]]
itemTexts = [
    [sg.Text('File', justification='r', pad=((0, 10), (15, 0))),
     sg.Text(size=(40, 2), auto_size_text=True, background_color=bg_color, key='file', pad=((0, 0), (15, 0)))],
    [sg.Frame('Plugins', [[pluginTable]], pad=(10, 10))]]
itemTableLayout = [[itemTable]] + itemTexts

# ------ GUI Definition ------ #
layout = [
    [sg.Menu(menu_def, )],
    [
        sg.Frame('Projects', projectTableLayout, pad=((0, 10), (0,)), vertical_alignment='top'),
        sg.Frame('Tracks', trackTableLayout, vertical_alignment='top', pad=((0, 10), (0, 0))),
        sg.Frame('Items', itemTableLayout, vertical_alignment='top')],
]
def projectNameUpper(project):
    """Sort key: the project's name, upper-cased for case-insensitive order."""
    name = project.name
    return name.upper()
def projectMixUpper(project):
    """Sort key: the project's mix name, upper-cased for case-insensitive order."""
    mix = project.mix
    return mix.upper()
def projectEpochSecs(project):
    """Sort key: the project's last-modified date as seconds since the epoch."""
    date_string = project.date
    return epochSecs(date_string)
def sortProjectsBy(values: dict):
    """Translate the sort radio-button state in *values* into a key function.

    Returns projectNameUpper, projectMixUpper or projectEpochSecs depending
    on which of the -SORT-NAME-/-SORT-MIX-/-SORT-DATE- radios is selected.
    """
    if values['-SORT-NAME-']:
        return projectNameUpper
    if values['-SORT-MIX-']:
        return projectMixUpper
    return projectEpochSecs
def findTrackNotes(project: Project):
    """Collect the per-track notes stored by the S&M extension.

    Returns a dict mapping the note element's first attribute (the track id)
    to the joined note text, or None when the project has no EXTENSIONS
    section or no S&M_TRACKNOTES entries.
    """
    extensions = project.find('EXTENSIONS')
    if not extensions:
        return None
    note_elements = extensions.findall('S&M_TRACKNOTES')
    if not note_elements:
        return None
    notes_by_track = {}
    for element in note_elements:
        # Each stored line carries a one-character prefix that is dropped.
        text = "\n".join(element[i][1:] for i in range(len(element)))
        notes_by_track[element.attrib[0]] = text
    return notes_by_track
# noinspection SpellCheckingInspection
def addNewProject(fname, sortBy=projectNameUpper, reverse=False):
    """Parse the Reaper project file *fname*, persist it to the database and
    the in-memory model, and refresh the GUI tables.

    :param fname: path of the .rpp project file
    :param sortBy: key function used to re-sort the project list afterwards
    :param reverse: sort descending when True
    """
    projectFile = rppFile.openFile(fname)
    if projectFile is None:
        return
    # rppFile.printStruct(projectFile)
    # Collect the project-level details in positional order; the Project
    # constructor and db.addProject below rely on this exact ordering.
    dtls = rppFile.getFileDetails(fname)
    dtls.append(projectFile.find('TEMPO')[1])
    dtls.append(projectFile.find('RECORD_PATH')[1])
    dtls.append(projectFile.find('SAMPLERATE')[1])
    notes = projectFile.find('NOTES')
    if notes is not None:
        # Each note line carries a one-character prefix that is dropped.
        txt = "\n".join([notes[n][1:] for n in range(0, len(notes))])
    else:
        txt = ""
    dtls.append(txt)
    newProject = Project(allProjects.maxProjectNum + 1, dtls[0], dtls[1], dtls[2], dtls[3], dtls[4], dtls[5],
                         dtls[6],
                         dtls[7])
    rslt = allProjects.findProject(newProject)
    if rslt == "_EXISTS_":
        file_utils.errorMsg('Project already loaded: ' + dtls[0] + '/' + dtls[1])
        return
    elif rslt:
        # An older version of this project exists: replace it.
        actualDeleteOldProject(rslt)
    pnum = db.addProject(dtls)
    allProjects.addProject(newProject)
    allProjects.sortProjects(sortBy, reverse)
    updateProjectTable(allProjects.getProjects())
    tracks = projectFile.findall('TRACK')
    # print('Project has %d tracks' % len(tracks))
    clearTables()
    tnotes = findTrackNotes(projectFile)
    for n in range(0, len(tracks)):
        track = tracks[n]
        tnum = n + 1  # track numbers are 1-based in the GUI and DB
        trackDtls = [track.find('NAME')[1], track.find('MAINSEND')[1], track.find('VOLPAN')[1],
                     track.find('VOLPAN')[2],
                     ",".join([lst[1] for lst in track.findall('AUXRECV')])]
        try:
            txt = tnotes[track.attrib[0]]
        except KeyError:
            txt = ""
        trackDtls.append(txt)
        newTrack = Track(tnum, trackDtls[0], trackDtls[1], trackDtls[2], trackDtls[3], trackDtls[4], trackDtls[5])
        newProject.addTrack(newTrack)
        db.addTrack([pnum, tnum] + trackDtls)
        items = track.findall('ITEM')
        for inum in range(0, len(items)):
            item = items[inum]
            src = item.find('SOURCE')
            itemDtls = [item.find('NAME')[1], src.attrib[0], item.find('POSITION')[1]]
            fl = src.find('FILE')
            if fl is not None:
                itemDtls.append(fl[1])
            else:
                itemDtls.append('')
            newItem = Item(inum, itemDtls[0], itemDtls[1], itemDtls[2], itemDtls[3])
            newTrack.addItem(newItem)
            db.addItem([pnum, tnum, inum] + itemDtls)
        fxchain = track.find('FXCHAIN')
        if fxchain is not None:
            # Walk the FX chain pairing each VST/JS element with the
            # PRESETNAME entry that follows it (if any).
            vst = None
            items = fxchain.find('.')
            for inum in range(0, len(items)):
                item = items[inum]
                if isinstance(item, list) and item[0] == 'PRESETNAME' and vst is not None:
                    preset = item[1]
                    # print('PRESETNAME=' + item[1] + ' goes with VST ' + str(vst))
                    pluginDtls = [vst[0], vst[1], preset]
                    newPlugin = Plugin(inum, pluginDtls[0], pluginDtls[1], pluginDtls[2])
                    newTrack.addPlugin(newPlugin)
                    db.addPlugin([pnum, tnum, inum] + pluginDtls)
                    vst = None
                elif isinstance(item, rpp.element.Element):
                    if item.tag in pluginTypes:
                        if vst is not None:
                            # Previous plugin had no PRESETNAME: flush it now.
                            pluginDtls = [vst[0], vst[1], '']
                            newPlugin = Plugin(inum, pluginDtls[0], pluginDtls[1], pluginDtls[2])
                            newTrack.addPlugin(newPlugin)
                            db.addPlugin([pnum, tnum, inum] + pluginDtls)
                        if item.tag == 'JS':
                            vst = ['JS:' + item.attrib[0]] + item.attrib[1:2]
                        else:
                            vst = item.attrib[0:2]
                        # print('VST=' + str(vst))
            if vst is not None:
                # Trailing plugin with no preset entry after it.
                pluginDtls = [vst[0], vst[1], '']
                newPlugin = Plugin(len(items), pluginDtls[0], pluginDtls[1], pluginDtls[2])
                newTrack.addPlugin(newPlugin)
                db.addPlugin([pnum, tnum, len(items)] + pluginDtls)
def actualDeleteOldProject(project: Project):
    """Remove *project* from the database and in-memory model, then refresh
    the projects table (no confirmation — see deleteOldProject)."""
    pnum = project.projectNum
    db.deleteProject(pnum)
    allProjects.deleteProject(pnum)
    clearTables()
    rows = [[p.name, p.mix, p.date] for p in allProjects.getProjects()]
    projectTable.update(rows)
def deleteOldProject(project: Project):
    """Ask for confirmation, then irreversibly delete *project*."""
    answer = sg.popup_ok_cancel(" Are you absolutely sure? - there is no undo")
    if answer == 'OK':
        actualDeleteOldProject(project)
def createMyWindow():
    """Build the main window from the module-level layout, hide it, store it
    in the global `window0`, and return it."""
    global window0
    window0 = sg.Window("ReaCombiner", layout, default_element_size=(12, 1),
                        auto_size_text=False, auto_size_buttons=False,
                        default_button_element_size=(12, 1),
                        finalize=True, resizable=True)
    # Created hidden; showMyWindow() un-hides it later.
    window0.Hide()
    return window0
def chunk(lst, upto):
    """Comma-join *lst*, breaking it into newline-separated segments.

    While the remaining joined text is longer than *upto*, emit the shortest
    prefix whose comma-joined length reaches *upto*, then continue with the
    rest.
    """
    segments = []
    while len(",".join(lst)) > upto:
        take = 0
        while len(",".join(lst[:take])) < upto:
            take += 1
        segments.append(",".join(lst[:take]))
        segments.append('\n')
        lst = lst[take:]
    segments.append(",".join(lst))
    return "".join(segments)
def chunkStr(str, upto):
    """Break *str* into newline-separated pieces of at most *upto* characters.

    (The parameter name shadows the builtin `str`; kept for interface
    compatibility with existing keyword callers.)
    """
    pieces = []
    while len(str) > upto:
        pieces.append(str[:upto] + '\n')
        str = str[upto:]
    pieces.append(str)
    return "".join(pieces)
# noinspection SpellCheckingInspection
def clearTables():
    """Blank out the track/item detail text fields and reset the track,
    item and plugin tables to empty."""
    window0.find_element('main_send').update('')
    window0.find_element('vol').update('')
    window0.find_element('pan').update('')
    window0.find_element('aux_recvs').update('')
    # bugfix: was `window0.find('file')` — sg.Window exposes find_element,
    # not find; every other line here already uses find_element.
    window0.find_element('file').update('')
    newShowTracks(trackTable)
    newShowItems(itemTable)
    newShowPlugins(pluginTable)
def epochSecs(dt: str):
    """Convert a ctime-style date string to local-time seconds since the epoch."""
    return mktime(strptime(dt))
def updateProjectTable(sps: list):
    """Refresh the projects table from the given project list, then clear
    the dependent track/item/plugin views."""
    rows = [entry.getProjectDetails() for entry in sps]
    projectTable.update(rows)
    clearTables()
def close():
    """Close the main window (if one exists) and drop both window globals."""
    global window, window0
    if window0 is not None:
        window0.close()
    window = None
    window0 = None
# noinspection SpellCheckingInspection
def showMyWindow(projects: Projects):
allProjects.updateProjects(projects.getProjects())
updateProjectTable(allProjects.getProjects())
window = window0
window.UnHide()
# ------ Loop & Process button menu choices ------ #
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Exit':
break
# ------ Process menu choices ------ #
if event == 'About...':
with open("../docs/README.txt", "r", encoding="utf-8") as input_file:
layout = [[sg.Multiline(input_file.read(), size=(80, 25), autoscroll=True, key='_OUTPUT_')],
[sg.Button('OK', key='_OK_')]]
window1 = sg.Window('Help').Layout(layout)
window.Hide()
window = window1
window1.Read(timeout=0)
elif event == '_OK_':
window.Hide()
window = window0
window0.UnHide()
elif event == 'Run Reaper':
# print(event)
if len(values['PROJECTS']) > 0:
row = values['PROJECTS'][0]
project = allProjects.getProject(row)
path = PurePath(project.location, project.name, project.mix + '.rpp')
env = os.environ.copy()
subprocess.Popen(['reaper', str(path)], env=env)
else:
file_utils.errorMsg('First select a project to run')
elif event == 'Add Project':
fname = file_utils.browseFile()
addNewProject(fname[0], sortProjectsBy(values), values['-SORT-DOWN-'])
elif event == 'Delete Project':
# print(event)
if len(values['PROJECTS']) > 0:
row = values['PROJECTS'][0]
project = allProjects.getProject(row)
deleteOldProject(project)
else:
file_utils.errorMsg('First select a project to run')
elif event == 'Print Project':
print(event)
if len(values['PROJECTS']) > 0:
row = values['PROJECTS'][0]
project = allProjects.getProject(row)
project.print()
else:
file_utils.errorMsg('First select a project to run')
elif event == 'Scrape Folder':
files = file_utils.scrapeDirectory()
selectedFiles = file_utils.selectProjects(files)
for file in selectedFiles:
addNewProject(file, sortProjectsBy(values), values['-SORT-DOWN-'])
elif event == 'PROJECTS':
# print(values['PROJECTS'])
if len(values['PROJECTS']) > 0:
row = values['PROJECTS'][0]
project = allProjects.getProject(row)
window.find_element('location').update(chunkStr(project.location, 50)) # 50 is text width
window.find_element('tempo').update(project.tempo)
window.find_element('record_path').update(
chunkStr(project.recordPath, 50)) # 50 is text width
window.find_element('sample_rate').update(project.sampleRate)
window.find_element('project_notes').update(project.projectNotes)
clearTables()
newShowTracks(trackTable, project)
elif event == 'TRACKS':
# print(values['TRACKS'])
if len(values['PROJECTS']) > 0 and len(values['TRACKS']) > 0:
prow = values['PROJECTS'][0]
project = allProjects.getProject(row)
trow = values['TRACKS'][0]
tracks = project.getTracks()
trackNum = trackTable.get()[trow][0]
track = tracks[trackNum]
window.find_element('main_send').update('yes' if track.mainSend == "1" else 'no')
window.find_element('vol').update(track.vol)
window.find_element('pan').update(track.pan)
ar = window.find_element('aux_recvs')
lst = track.auxReceives.split(",")
if lst == ['']:
mstr = ''
else:
mstr = chunk([str(int(n) + 1) for n in lst], 25) # 25 is width of aux_recvs
ar.update(mstr)
window.find_element('track_notes').update(track.trackNotes)
newShowItems(itemTable, track)
newShowPlugins(pluginTable, track)
elif event == 'ITEMS':
# print(values['ITEMS'])
if len(values['PROJECTS']) > 0 and len(values['TRACKS']) > 0 and len(values['ITEMS']) > 0:
prow = values['PROJECTS'][0]
project = allProjects.getProject(row)
trow = values['TRACKS'][0]
tracks = project.getTracks()
trackNum = trackTable.get()[trow][0]
track = tracks[trackNum]
irow = values['ITEMS'][0]
window.find_element('file').update(chunkStr(track.getItems()[irow].file, 50))
elif event == '-SORT-NAME-' or event == '-SORT-MIX-' or event == '-SORT-DATE-' or event == '-SORT-UP-' or event == '-SORT-DOWN-':
allProjects.sortProjects(sortProjectsBy(values), values['-SORT-DOWN-'])
updateProjectTable(allProjects.getProjects())
else:
# print(event, values)
file_utils.errorMsg("Got an unknown event " | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Optional, Union, Dict
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import ClickedStatement, ClickedArgument, User, MarkedArgument, MarkedStatement, \
Argument, Statement
from dbas.lib import get_author_data, Relations, get_global_url
from dbas.strings.lib import start_with_capital, start_with_small
from .keywords import Keywords as _
from .translator import Translator
# Nickname representing users who are not logged in.
nick_of_anonymous_user = 'anonymous'
# Inline HTML tag used for every highlight emitted by this module.
tag_type = 'span'
# Opening tags for the argumentation-type / attitude highlights; each one is
# closed with `end_tag` below.
start_attack = '<{} data-argumentation-type="attack">'.format(tag_type)
start_argument = '<{} data-argumentation-type="argument">'.format(tag_type)
start_position = '<{} data-argumentation-type="position">'.format(tag_type)
start_content = '<{} class="triangle-content-text">'.format(tag_type)
start_pro = '<{} data-attitude="pro">'.format(tag_type)
start_con = '<{} data-attitude="con">'.format(tag_type)
start_tag = '<{}>'.format(tag_type)
end_tag = '</{}>'.format(tag_type)
def get_text_for_add_premise_container(lang, confrontation, premise, attack_type, conclusion, is_supportive):
    """
    Based on the users reaction, text will be build. This text can be used for the container where users can
    add their statements

    :param lang: ui_locales
    :param confrontation: chosen confrontation
    :param premise: current premise
    :param attack_type: type of the attack
    :param conclusion: current conclusion
    :param is_supportive: boolean
    :return: string
    """
    _t = Translator(lang)
    # Strip trailing dots/spaces; rstrip also copes with empty strings,
    # which the previous char-by-char while-loop did not.
    premise = premise.rstrip('. ')
    # bugfix: this used to assign premise[:-1], clobbering the conclusion and
    # looping forever whenever the trimmed premise ended with a space.
    conclusion = conclusion.rstrip('. ')
    confrontation = start_with_capital(confrontation)

    # different cases
    if attack_type == Relations.UNDERMINE:
        return '{} {} ...'.format(_t.get(_.itIsFalseThat), premise)
    elif attack_type == Relations.SUPPORT:
        intro = _t.get(_.itIsFalseThat)
        outro = _t.get(_.doesNotHold)
        if is_supportive:
            intro = _t.get(_.itIsTrueThat)
            outro = _t.get(_.hold)
        return '{} {} {} ...'.format(intro, conclusion, outro)
    elif attack_type == Relations.UNDERCUT:
        return '{}, {} ...'.format(confrontation, _t.get(_.butIDoNotBelieveCounterFor).format(conclusion))
    elif attack_type == Relations.REBUT:
        mid = _t.get(_.iAcceptCounterThat) if is_supportive else _t.get(_.iAcceptArgumentThat)
        return '{} {} {} ...'.format(confrontation, mid, conclusion)
    else:
        return ''
def get_header_for_users_confrontation_response(db_argument, lang, premise, attack_type, conclusion, start_lower_case,
                                                is_supportive, is_logged_in, redirect_from_jump=False):
    """
    Based on the users reaction, text will be build. This text can be used for the speech bubbles where users
    justify an argument they have chosen.

    :param db_argument: Argument
    :param lang: ui_locales
    :param premise: current premise
    :param attack_type: type of the attack
    :param conclusion: current conclusion
    :param start_lower_case: boolean
    :param is_supportive: boolean
    :param is_logged_in: boolean
    :param redirect_from_jump: boolean
    :return: tuple (user message string, follow-up question string — empty when not logged in)
    """
    _t = Translator(lang)
    # Drop a single trailing period from both texts.
    if premise[-1] == '.':
        premise = premise[:-1]
    if conclusion[-1] == '.':
        conclusion = conclusion[:-1]

    # pretty print: build the "Right, ...", "It is false that ...",
    # "It is true that ..." lead-ins with the requested capitalisation.
    r = _t.get(_.right)[0:1].upper()
    f = _t.get(_.itIsFalseThat)[0:1].upper()
    t = _t.get(_.itIsTrueThat)[0:1].upper()
    if start_lower_case:
        r = _t.get(_.right)[0:1].lower()
        f = _t.get(_.itIsFalseThat)[0:1].lower()
        t = _t.get(_.itIsTrueThat)[0:1].lower()
    r += _t.get(_.right)[1:] + ', '
    f += _t.get(_.itIsFalseThat)[1:]
    t += _t.get(_.itIsTrueThat)[1:]
    if lang == 'de':
        # German needs an extra clause after the lead-in.
        r += start_with_small(_t.get(_.itIsTrueThat)) + ' '
        f = _t.get(_.wrong) + ', ' + start_with_small(_t.get(_.itIsFalseThat)) + ' '
    if redirect_from_jump:
        r = _t.get(_.maybeItIsTrueThat) + ' '

    # different cases
    user_msg = __get_user_msg_for_users_confrontation_response(db_argument, attack_type, premise, conclusion, f, t, r,
                                                               is_supportive, _t)
    if not user_msg:
        user_msg = ''

    # is logged in? Only logged-in users are asked to give a reason.
    if is_logged_in:
        return user_msg, _t.get(_.canYouGiveAReasonForThat)
    else:
        return user_msg, ''
def __get_user_msg_for_users_confrontation_response(db_argument, attack_type, premise, conclusion, itisfalsethat,
                                                    itistruethat, right, is_supportive, _t):
    """
    Dispatch to the relation-specific message builder for the confrontation.

    :param db_argument: Argument
    :param attack_type: one of the Relations members
    :param premise: String
    :param conclusion: String
    :param itisfalsethat: String
    :param itistruethat: String
    :param right: String
    :param is_supportive: Boolean
    :param _t: Translator
    :return: String, or None for an unknown attack type
    """
    if attack_type == Relations.UNDERMINE:
        return __get_user_msg_for_users_undermine_response(premise, _t.get(_.that))
    elif attack_type == Relations.SUPPORT:
        return __get_user_msg_for_users_support_response(conclusion, itistruethat, itisfalsethat, is_supportive, _t)
    elif attack_type == Relations.UNDERCUT:
        return __get_user_msg_for_users_undercut_response(db_argument, premise, conclusion, right, _t)
    elif attack_type == Relations.REBUT:
        return __get_user_msg_for_users_rebut_response(premise, conclusion, right, is_supportive, _t)
    return None
def __get_user_msg_for_users_undermine_response(premise, that):
    """
    Compose the undermine response; the literal '{}' pair is filled in later
    by the caller with highlighting tags.

    :param premise: String
    :param that: String
    :return: String
    """
    return that + ' {}' + premise + '{}'
def __get_user_msg_for_users_support_response(conclusion, itistruethat, itisfalsethat, is_supportive, _t):
    """
    Compose the support response; the literal '{}' pair is filled in later
    by the caller with highlighting tags.

    :param conclusion: String
    :param itistruethat: String
    :param itisfalsethat: String
    :param is_supportive: String
    :param _t: Translator
    :return: String
    """
    intro = itistruethat if is_supportive else itisfalsethat
    outro = _t.get(_.hold) if is_supportive else _t.get(_.doesNotHold)
    return '{}' + intro + ' ' + conclusion + ' ' + outro + '{}.'
def __get_user_msg_for_users_undercut_response(db_argument, premise, conclusion, right, _t):
    """
    Compose the undercut response; the literal '{}' pair is filled in later
    by the caller with highlighting tags.

    :param db_argument: Argument
    :param premise: String
    :param conclusion: String
    :param right: String
    :param _t: Translator
    :return: String
    """
    rejection = None
    # Special case: undercutting an undercut, German UI only.
    if db_argument.conclusion_uid is None and _t.get_lang() == 'de':
        prefix = _t.get(_.itIsTrueThatAnonymous)
        if conclusion.lower().startswith(prefix.lower()):
            conclusion = conclusion[len(prefix):]
        rejection = _t.get(_.butThisDoesNotRejectArgument)
    if rejection is None:
        keyword = _.butIDoNotBelieveArgumentFor if db_argument.is_supportive else _.butIDoNotBelieveCounterFor
        rejection = _t.get(keyword)
    return right + premise + '. {}' + rejection.format(conclusion) + '{}'
def __get_user_msg_for_users_rebut_response(premise, conclusion, right, is_supportive, _t):
    """
    Compose the rebut response; the literal '{}' pair is filled in later
    by the caller with highlighting tags.

    :param premise: String
    :param conclusion: String
    :param right: String
    :param is_supportive:
    :param _t: Translator
    :return: String
    """
    intro_key = _.iAcceptCounterThat if is_supportive else _.iAcceptArgumentThat
    mid_key = (_.howeverIHaveMuchStrongerArgumentRejectingThat if is_supportive
               else _.howeverIHaveMuchStrongerArgumentAcceptingThat)
    parts = ['{}', right, premise, ', ', _t.get(intro_key), ' ', conclusion, '. ',
             _t.get(mid_key), ' ', conclusion, '.', '{}']
    return ''.join(parts)
def get_relation_text_dict_without_substitution(lang, with_no_opinion_text, premise, conclusion, is_dont_know=False):
    """
    Returns the four different reaction possibilities without any replacement based on the gender of the confrontation

    :param lang: Language.ui_locales
    :param with_no_opinion_text: Boolean
    :param premise: String
    :param conclusion: String
    :param is_dont_know: Boolean
    :return: dict()
    """
    # Plain pass-through: the premise/conclusion texts are used verbatim.
    return __get_relation_text_dict(lang, with_no_opinion_text, premise, conclusion,
                                    is_dont_know=is_dont_know)
def get_relation_text_dict_with_substitution(lang, with_no_opinion_text, is_dont_know=False, attack_type=None,
                                             gender=''):
    """
    Returns the four different reaction possibilities with premise/conclusion phrases
    substituted according to the gender of the confrontation.

    :param lang: Language.ui_locales
    :param with_no_opinion_text: Boolean
    :param is_dont_know: Boolean
    :param attack_type: String
    :param gender: String ('f', 'm' or anything else for the neutral "their" forms)
    :return: dict()
    """
    _t = Translator(lang)
    # Phrase keys per gender: (assertion, reason, statement, position, opinion).
    gendered_keys = {
        'f': (_.herAssertion, _.herReason, _.herStatement, _.herPosition, _.opinion_her),
        'm': (_.hisAssertion, _.hisReason, _.hisStatement, _.hisPosition, _.opinion_his),
    }
    neutral_keys = (_.theirAssertion, _.theirReason, _.theirStatement, _.theirPosition, _.opinion)
    keys = gendered_keys.get(gender, neutral_keys)
    assertion, reason, statement, position, opinion = (_t.get(k) for k in keys)

    if lang == 'de':
        premise = assertion if is_dont_know else statement
        conclusion = reason if is_dont_know else assertion
    else:
        premise = statement
        if is_dont_know:
            conclusion = opinion
        elif attack_type == Relations.UNDERMINE or attack_type == Relations.REBUT:
            conclusion = position
        else:
            conclusion = _t.get(_.myArgument)

    return __get_relation_text_dict(lang, with_no_opinion_text, premise, conclusion, is_dont_know)
def __get_relation_text_dict(lang, with_no_opinion_text, premise, conclusion, is_dont_know=False):
    """
    Builds the texts of the different reaction types for a given argument.

    :param lang: Language.ui_locales
    :param with_no_opinion_text: Boolean
    :param premise: String
    :param conclusion: String
    :param is_dont_know: Boolean
    :return: dict()
    """
    _t = Translator(lang)
    # Wrap premise/conclusion in the highlighting tags used by the frontend.
    premise = start_attack + premise + end_tag
    conclusion = start_argument + conclusion + end_tag

    ret_dict = {}
    if with_no_opinion_text:
        ret_dict['step_back_text'] = '{}. ({})'.format(_t.get(_.goStepBack), _t.get(_.noOtherAttack))
        ret_dict['no_opinion_text'] = _t.get(_.showMeAnotherArgument) + '.'

    ret_dict['undermine_text'] = _t.get(_.reaction_text_undermine).format(premise)
    ret_dict['support_text'] = _t.get(_.reaction_text_support).format(premise)

    if is_dont_know:
        reason_tagged = start_argument + _t.get(_.reason) + end_tag
        ret_dict['undercut_text'] = _t.get(_.reaction_text_undercut_for_dont_know).format(premise, reason_tagged)
        ret_dict['rebut_text'] = _t.get(_.reaction_text_rebut_for_dont_know).format(conclusion)
    else:
        ret_dict['undercut_text'] = _t.get(_.reaction_text_undercut).format(premise, conclusion)
        my_position = start_position + _t.get(_.myPosition) + end_tag
        ret_dict['rebut_text'] = _t.get(_.reaction_text_rebut).format(premise, conclusion, my_position)
    return ret_dict
def get_jump_to_argument_text_list(lang):
    """
    Returns the answer set for the jumping step.

    :param lang: ui_locales
    :return: Array with [Conclusion is (right, wrong), Premise is (right, wrong)
        Premise does not lead to the conclusion, both hold]
    """
    _t = Translator(lang)
    premise = start_attack + _t.get(_.reason) + end_tag
    conclusion = start_argument + _t.get(_.assertion) + end_tag
    # Answers 0-3 take both phrases; answer 4 only needs the premise.
    answers = [_t.get(key).format(conclusion, premise)
               for key in (_.jumpAnswer0, _.jumpAnswer1, _.jumpAnswer2, _.jumpAnswer3)]
    answers.append(_t.get(_.jumpAnswer4).format(premise))
    return answers
def get_support_to_argument_text_list(lang):
    """
    Returns the answer set for the supporting step.

    :param lang: ui_locales
    :return: Array with [Conclusion is (right, wrong), Premise is (right, wrong)
        Premise does not lead to the conclusion, both hold]
    """
    _t = Translator(lang)
    premise = start_attack + _t.get(_.reason) + end_tag
    conclusion = start_argument + _t.get(_.assertion) + end_tag
    # Note the deliberate 0, 3, 2, 1 ordering of the answer keys.
    return [
        _t.get(_.supportAnswer0).format(premise),
        _t.get(_.supportAnswer3).format(premise),
        _t.get(_.supportAnswer2).format(premise, conclusion),
        _t.get(_.supportAnswer1).format(premise),
    ]
def get_text_for_confrontation(lang, nickname, premise, conclusion, sys_conclusion, supportive, attack,
confrontation, reply_for_argument, user_is_attacking, user_arg, sys_arg,
color_html=True):
"""
Text for the confrontation of the system
:param lang: ui_locales
:param nickname: nickname
:param premise: String
:param conclusion: String
:param sys_conclusion: String
:param supportive: String
:param attack: String
:param confrontation: String
:param reply_for_argument: Boolean
:param user_is_attacking: Boolean
:param user_arg: Argument
:param sys_arg: Argument
:param color_html: Boolean
:return: String
"""
my_start_argument = ''
my_end_tag = ''
if color_html:
my_start_attack = start_attack
my_start_argument = start_argument
my_end_tag = end_tag
confrontation = my_start_attack + confrontation + my_end_tag
conclusion = my_start_argument + conclusion + my_end_tag
if attack == Relations.UNDERMINE:
premise = my_start_argument + premise + my_end_tag
sys_conclusion = my_start_argument + sys_conclusion + my_end_tag
# build some confrontation text
if attack == Relations.UNDERMINE:
confrontation_text, gender = __get_confrontation_text_for_undermine(nickname, premise, lang, sys_arg,
my_start_argument, my_end_tag,
confrontation)
elif attack == Relations.UNDERCUT:
confrontation_text, gender = __get_confrontation_text_for_undercut(nickname, lang,
premise, conclusion, confrontation,
supportive, sys_arg)
elif attack == Relations.REBUT:
confrontation_text, gender = __get_confrontation_text_for_rebut(lang, nickname, reply_for_argument,
user_arg, | |
'allocation_state': {'key': 'allocationState', 'type': 'str'},
'allocation_state_transition_time': {'key': 'allocationStateTransitionTime', 'type': 'iso-8601'},
'errors': {'key': 'errors', 'type': '[MachineLearningServiceError]'},
'current_node_count': {'key': 'currentNodeCount', 'type': 'int'},
'target_node_count': {'key': 'targetNodeCount', 'type': 'int'},
'node_state_counts': {'key': 'nodeStateCounts', 'type': 'NodeStateCounts'},
}
    def __init__(
        self,
        *,
        vm_size: Optional[str] = None,
        vm_priority: Optional[Union[str, "VmPriority"]] = None,
        scale_settings: Optional["ScaleSettings"] = None,
        user_account_credentials: Optional["UserAccountCredentials"] = None,
        subnet: Optional["ResourceId"] = None,
        remote_login_port_public_access: Optional[Union[str, "RemoteLoginPortPublicAccess"]] = "NotSpecified",
        **kwargs
    ):
        """Initialize AmlCompute properties from keyword-only writable fields.

        The remaining attributes (allocation state, errors, node counts) are
        read-only: they are populated by the service and initialized to None
        here so serialization ignores them on outgoing requests.
        """
        super(AmlComputeProperties, self).__init__(**kwargs)
        self.vm_size = vm_size
        self.vm_priority = vm_priority
        self.scale_settings = scale_settings
        self.user_account_credentials = user_account_credentials
        self.subnet = subnet
        self.remote_login_port_public_access = remote_login_port_public_access
        # Server-populated, read-only fields (see the readonly _validation entries).
        self.allocation_state = None
        self.allocation_state_transition_time = None
        self.errors = None
        self.current_node_count = None
        self.target_node_count = None
        self.node_state_counts = None
class AmlUserFeature(msrest.serialization.Model):
    """A single feature that can be enabled for a workspace.

    :param id: Specifies the feature ID.
    :type id: str
    :param display_name: Specifies the feature name.
    :type display_name: str
    :param description: Describes the feature for user experience.
    :type description: str
    """

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, *, id: Optional[str] = None, display_name: Optional[str] = None,
                 description: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.id = id
        self.display_name = display_name
        self.description = description
class ClusterUpdateParameters(msrest.serialization.Model):
    """Parameters for updating an AmlCompute cluster.

    :param scale_settings: Desired scale settings for the amlCompute.
    :type scale_settings: ~azure.mgmt.machinelearningservices.models.ScaleSettings
    """

    # Note the flattened wire key: the setting lives under 'properties'.
    _attribute_map = {
        'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'},
    }

    def __init__(self, *, scale_settings: Optional["ScaleSettings"] = None, **kwargs):
        super().__init__(**kwargs)
        self.scale_settings = scale_settings
class ComponentsSgqdofSchemasIdentityPropertiesUserassignedidentitiesAdditionalproperties(msrest.serialization.Model):
    """Properties of a user-assigned identity.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar principal_id: The principal id of user assigned identity.
    :vartype principal_id: str
    :ivar client_id: The client id of user assigned identity.
    :vartype client_id: str
    """

    _validation = {
        'principal_id': {'readonly': True},
        'client_id': {'readonly': True},
    }

    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'client_id': {'key': 'clientId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are read-only and filled in by the service.
        self.principal_id = None
        self.client_id = None
class ComputeInstance(Compute):
    """An Azure Machine Learning compute instance.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param compute_type: Required. The type of compute. Constant filled by server. Possible
     values include: "AKS", "AmlCompute", "ComputeInstance", "DataFactory", "VirtualMachine",
     "HDInsight", "Databricks", "DataLakeAnalytics".
    :type compute_type: str or ~azure.mgmt.machinelearningservices.models.ComputeType
    :param compute_location: Location for the underlying compute.
    :type compute_location: str
    :ivar provisioning_state: The provision state of the cluster. Valid values are Unknown,
     Updating, Provisioning, Succeeded, and Failed. Possible values include: "Unknown",
     "Updating", "Creating", "Deleting", "Succeeded", "Failed", "Canceled".
    :vartype provisioning_state: str or
     ~azure.mgmt.machinelearningservices.models.ProvisioningState
    :param description: The description of the Machine Learning compute.
    :type description: str
    :ivar created_on: The date and time when the compute was created.
    :vartype created_on: ~datetime.datetime
    :ivar modified_on: The date and time when the compute was last modified.
    :vartype modified_on: ~datetime.datetime
    :param resource_id: ARM resource id of the underlying compute.
    :type resource_id: str
    :ivar provisioning_errors: Errors during provisioning.
    :vartype provisioning_errors:
     list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
    :ivar is_attached_compute: True when the compute was provisioned by the user and brought
     from outside, False when the machine learning service provisioned it.
    :vartype is_attached_compute: bool
    :param properties: Compute Instance properties.
    :type properties: ~azure.mgmt.machinelearningservices.models.ComputeInstanceProperties
    """

    _validation = {
        'compute_type': {'required': True},
        'provisioning_state': {'readonly': True},
        'created_on': {'readonly': True},
        'modified_on': {'readonly': True},
        'provisioning_errors': {'readonly': True},
        'is_attached_compute': {'readonly': True},
    }

    _attribute_map = {
        'compute_type': {'key': 'computeType', 'type': 'str'},
        'compute_location': {'key': 'computeLocation', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
        'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'provisioning_errors': {'key': 'provisioningErrors', 'type': '[MachineLearningServiceError]'},
        'is_attached_compute': {'key': 'isAttachedCompute', 'type': 'bool'},
        'properties': {'key': 'properties', 'type': 'ComputeInstanceProperties'},
    }

    def __init__(self, *, compute_location: Optional[str] = None, description: Optional[str] = None,
                 resource_id: Optional[str] = None,
                 properties: Optional["ComputeInstanceProperties"] = None, **kwargs):
        super().__init__(compute_location=compute_location, description=description,
                         resource_id=resource_id, **kwargs)
        # Polymorphic discriminator: constant for this subtype.
        self.compute_type = 'ComputeInstance'  # type: str
        self.properties = properties
class ComputeInstanceApplication(msrest.serialization.Model):
    """An Aml Instance application together with its connectivity endpoint URI.

    :param display_name: Name of the ComputeInstance application.
    :type display_name: str
    :param endpoint_uri: Application' endpoint URI.
    :type endpoint_uri: str
    """

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
    }

    def __init__(self, *, display_name: Optional[str] = None,
                 endpoint_uri: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        self.display_name = display_name
        self.endpoint_uri = endpoint_uri
class ComputeInstanceConnectivityEndpoints(msrest.serialization.Model):
    """All connectivity endpoints and properties for a ComputeInstance.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar public_ip_address: Public IP Address of this ComputeInstance.
    :vartype public_ip_address: str
    :ivar private_ip_address: Private IP Address of this ComputeInstance (local to the VNET in
     which the compute instance is deployed).
    :vartype private_ip_address: str
    """

    _validation = {
        'public_ip_address': {'readonly': True},
        'private_ip_address': {'readonly': True},
    }

    _attribute_map = {
        'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
        'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only: filled in by the service on responses.
        self.public_ip_address = None
        self.private_ip_address = None
class ComputeInstanceCreatedBy(msrest.serialization.Model):
    """Information on the user who created this ComputeInstance.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar user_name: Name of the user.
    :vartype user_name: str
    :ivar user_org_id: Uniquely identifies user' Azure Active Directory organization.
    :vartype user_org_id: str
    :ivar user_id: Uniquely identifies the user within his/her organization.
    :vartype user_id: str
    """

    _validation = {
        'user_name': {'readonly': True},
        'user_org_id': {'readonly': True},
        'user_id': {'readonly': True},
    }

    _attribute_map = {
        'user_name': {'key': 'userName', 'type': 'str'},
        'user_org_id': {'key': 'userOrgId', 'type': 'str'},
        'user_id': {'key': 'userId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All read-only, server-populated.
        self.user_name = None
        self.user_org_id = None
        self.user_id = None
class ComputeInstanceLastOperation(msrest.serialization.Model):
    """The last operation performed on a ComputeInstance.

    :param operation_name: Name of the last operation. Possible values include: "Create",
     "Start", "Stop", "Restart", "Reimage", "Delete".
    :type operation_name: str or ~azure.mgmt.machinelearningservices.models.OperationName
    :param operation_time: Time of the last operation.
    :type operation_time: ~datetime.datetime
    :param operation_status: Operation status. Possible values include: "InProgress",
     "Succeeded", "CreateFailed", "StartFailed", "StopFailed", "RestartFailed",
     "ReimageFailed", "DeleteFailed".
    :type operation_status: str or ~azure.mgmt.machinelearningservices.models.OperationStatus
    """

    _attribute_map = {
        'operation_name': {'key': 'operationName', 'type': 'str'},
        'operation_time': {'key': 'operationTime', 'type': 'iso-8601'},
        'operation_status': {'key': 'operationStatus', 'type': 'str'},
    }

    def __init__(self, *, operation_name: Optional[Union[str, "OperationName"]] = None,
                 operation_time: Optional[datetime.datetime] = None,
                 operation_status: Optional[Union[str, "OperationStatus"]] = None, **kwargs):
        super().__init__(**kwargs)
        self.operation_name = operation_name
        self.operation_time = operation_time
        self.operation_status = operation_status
class ComputeInstanceProperties(msrest.serialization.Model):
"""Compute Instance properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param vm_size: Virtual Machine Size.
:type vm_size: str
:param subnet: Virtual network subnet resource ID the compute nodes belong to.
:type subnet: ~azure.mgmt.machinelearningservices.models.ResourceId
:param application_sharing_policy: Policy for sharing applications on this compute instance
among users of parent workspace. If Personal, only the creator can access applications on this
compute instance. When Shared, any workspace user can access applications on this instance
depending on his/her assigned role. Possible values include: "Personal", "Shared". Default
value: "Shared".
:type application_sharing_policy: str or
~azure.mgmt.machinelearningservices.models.ApplicationSharingPolicy
:param ssh_settings: Specifies policy and settings for SSH access.
:type ssh_settings: ~azure.mgmt.machinelearningservices.models.ComputeInstanceSshSettings
:ivar connectivity_endpoints: Describes all connectivity endpoints available for this
ComputeInstance.
:vartype connectivity_endpoints:
~azure.mgmt.machinelearningservices.models.ComputeInstanceConnectivityEndpoints
:ivar applications: Describes available applications and their endpoints on this
ComputeInstance.
:vartype applications:
list[~azure.mgmt.machinelearningservices.models.ComputeInstanceApplication]
:ivar created_by: Describes information on user who created this ComputeInstance.
:vartype created_by: ~azure.mgmt.machinelearningservices.models.ComputeInstanceCreatedBy
:ivar errors: Collection of errors encountered on this ComputeInstance.
:vartype errors: list[~azure.mgmt.machinelearningservices.models.MachineLearningServiceError]
:ivar state: The current state of this ComputeInstance. Possible values include: "Creating",
"CreateFailed", "Deleting", "Running", "Restarting", "JobRunning", "SettingUp", "SetupFailed",
"Starting", "Stopped", "Stopping", "UserSettingUp", "UserSetupFailed", "Unknown", "Unusable".
:vartype state: str or ~azure.mgmt.machinelearningservices.models.ComputeInstanceState
:ivar last_operation: The last operation on ComputeInstance.
:vartype last_operation:
~azure.mgmt.machinelearningservices.models.ComputeInstanceLastOperation
"""
_validation = {
'connectivity_endpoints': {'readonly': True},
'applications': {'readonly': True},
'created_by': {'readonly': True},
'errors': {'readonly': True},
'state': {'readonly': True},
'last_operation': {'readonly': True},
}
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'ResourceId'},
'application_sharing_policy': {'key': 'applicationSharingPolicy', 'type': 'str'},
'ssh_settings': {'key': 'sshSettings', 'type': 'ComputeInstanceSshSettings'},
'connectivity_endpoints': {'key': 'connectivityEndpoints', 'type': 'ComputeInstanceConnectivityEndpoints'},
'applications': {'key': 'applications', 'type': '[ComputeInstanceApplication]'},
'created_by': {'key': 'createdBy', 'type': 'ComputeInstanceCreatedBy'},
'errors': {'key': 'errors', 'type': '[MachineLearningServiceError]'},
'state': {'key': 'state', 'type': 'str'},
'last_operation': {'key': 'lastOperation', 'type': 'ComputeInstanceLastOperation'},
}
def __init__(
self,
*,
vm_size: Optional[str] = None,
subnet: | |
''' This notebook will perform some amount of wrangling, repeat all previous
Natural Language Processing (NLP) preprocessing, and conduct feature engineering.
The feature engineering leverages work conducted in Step_5_Create_Stop_and_Unique_words
and will be used to domain-specifc scoring (like sentiment) and expanded,
domain-specific stopwords list.
This script will have a companion notebook in the 'notebooks'
folder of this Git repository.'''
import s3fs
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import re
from tqdm.autonotebook import tqdm
tqdm.pandas(desc="progress-bar", leave=False)
import string
import spacy
from spacy.lang import punctuation
from spacy.lang.en import English
from spacy import displacy
nlp = spacy.load("en_core_web_lg")
import unicodedata # might need to pip install unicodedate2 on aws sagemaker
import contractions
from contractions import contractions_dict ## pip installed this
from wordcloud import WordCloud, STOPWORDS #pip install
from textblob import TextBlob
!python -m textblob.download_corpora
from afinn import Afinn
import nltk
import nltk.corpus
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import ToktokTokenizer
from nltk.corpus import stopwords
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import preprocess_string
from gensim.parsing.preprocessing import STOPWORDS
from gensim.models import word2vec
import multiprocessing as mp
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.decomposition import TruncatedSVD
import warnings
warnings.filterwarnings('ignore')
cores = mp.cpu_count()
import warnings
from datetime import datetime
warnings.filterwarnings('ignore')
# Extend ASCII punctuation with the unicode quote/dash variants seen in lyrics.
punctuation = string.punctuation + '”' + '“' + '–' + '““' + "’’" + '”'
# NLTK English stopwords (a list), kept under the name `stopword` (singular).
stopword = stopwords.words('english')
# Rebinds `stopwords` to a set, shadowing the nltk module imported above;
# `STOPWORDS` here is the last one imported (gensim's, which shadowed wordcloud's).
stopwords = set(STOPWORDS)
wordnet_lemmatizer = WordNetLemmatizer()
#File Admin Issues
import os
import io
import boto3
from dotenv import load_dotenv
load_dotenv(verbose=True)
def aws_session(region_name='us-east-1'):
    """Build a boto3 session from credentials found in the environment / .env file."""
    # NOTE(review): 'AWS_ACCESS_KEY_SECRET' is a nonstandard variable name
    # (boto3's own convention is AWS_SECRET_ACCESS_KEY) — confirm the .env file
    # in this repo really uses these keys.
    access_key = os.getenv('AWS_ACCESS_KEY_ID')
    secret_key = os.getenv('AWS_ACCESS_KEY_SECRET')
    return boto3.session.Session(aws_access_key_id=access_key,
                                 aws_secret_access_key=secret_key,
                                 region_name=region_name)
def make_bucket(name, acl):
    """Create an S3 bucket with the given name and canned ACL; return the Bucket."""
    s3 = aws_session().resource('s3')
    return s3.create_bucket(Bucket=name, ACL=acl)
def upload_file_to_bucket(bucket_name, file_path):
    """Upload file_path to bucket_name with a public-read ACL; return its public URL."""
    s3 = aws_session().resource('s3')
    _, file_name = os.path.split(file_path)
    s3.Bucket(bucket_name).upload_file(
        Filename=file_path,
        Key=file_name,
        ExtraArgs={'ACL': 'public-read'}
    )
    return f"https://{bucket_name}.s3.amazonaws.com/{file_name}"
# Load the working datasets from S3.
# NOTE(review): key/secret are placeholder-masked; s3fs can also pick up AWS
# credentials from the environment if these are omitted.
fs = s3fs.S3FileSystem(anon=False,key='####',secret='#####')
g_df = pd.read_csv('s3://music-lyrics-chain/g_df')#entire dataset, index, song_name, lyrics, genre
g_stop = pd.read_csv('s3://music-lyrics-chain/g_stopwords')#from 80% g_train dataset, domain specific stop words
hiphop = pd.read_csv('s3://music-lyrics-chain/uniquely_hiphop')# from 80% g_train dataset, uniquely hiphop
pop = pd.read_csv('s3://music-lyrics-chain/uniquely_pop')# from 80% g_train dataset, uniquely pop
rock = pd.read_csv('s3://music-lyrics-chain/uniquely_rock')# from 80% g_train dataset, uniquely rock
g_stop = g_stop.dropna(subset=['All'])#to fix a known issue in this df
g_df.drop(columns=['Unnamed: 0'], axis=1, inplace=True)#drop a useless column
# With appreciation for the Fake News Way
def remove_special_characters(text):
    """
    Removes special characters from the text document.

    Keeps ASCII letters, digits, the punctuation set .,!?/:;"' and whitespace;
    every other character is stripped.
    """
    # BUG FIX: the previous pattern used the range `a-zA-z`. In ASCII order,
    # `A-z` also spans the characters `[ \ ] ^ _` and the backtick, so those
    # were silently kept. `a-zA-Z` matches letters only.
    pat = r'[^a-zA-Z0-9.,!?/:;\"\'\s]'
    return re.sub(pat, '', text)
def remove_extra_whitespace_tabs(text):
    """
    Collapses every run of whitespace/tabs to a single space and trims both ends.
    """
    # str.split() with no argument splits on any whitespace run and drops
    # leading/trailing whitespace, so join+split does the whole job.
    return ' '.join(text.split())
def remove_digits(text):
    """
    Removes all digits from the text document, lowercases the result and
    normalizes whitespace to single spaces.
    """
    no_digits = ''.join(ch for ch in text if not ch.isdigit())
    return ' '.join(no_digits.lower().split())
def remove_newlines(text):
    """
    Replaces escaped newline sequences, real newlines/carriage returns and
    stray backslashes with spaces (escaped forms first, same as the chained
    replace order).
    """
    for token in ('\\n', '\\r', '\n', '\r', '\\'):
        text = text.replace(token, ' ')
    return text
#normalize to the NFKD (Normalization Form Compatibility Decomposition) form
#that present in the Unicode standard to remain compatible with other encodings
def remove_accented_chars(text):
    """
    Transliterates accented characters to their closest ASCII equivalent via
    NFKD decomposition; characters with no ASCII mapping are dropped.
    """
    decomposed = unicodedata.normalize('NFKD', text)
    return decomposed.encode('ascii', 'ignore').decode('utf-8', 'ignore')
import contractions
#contractions.fix(g_df['lyrics'][10])
#expands contractions found in the text
def expand_contractions(text):
    """
    Expands English contractions (e.g. "can't" -> "cannot") and then drops any
    remaining apostrophes.
    """
    expanded = contractions.fix(text)
    return expanded.replace("'", "")
# replace punctuation characters with spaces
def replace_punctuation(text):
    """
    Replaces every punctuation character (plus a few unicode quotes/dashes)
    with a single space.
    """
    filters = string.punctuation + '”' + '“' + '–' + '!' + '?' + '.' + ','  # duplicates are harmless
    table = str.maketrans({ch: ' ' for ch in filters})
    return text.translate(table)
# Remove stopwords and remove words with 2 or less characters
def stops_letters(text):
    """
    Tokenizes with gensim's simple_preprocess and drops gensim stopwords,
    NLTK stopwords (module-level `stopword` list) and tokens of <= 2 chars.
    """
    kept = [tok for tok in gensim.utils.simple_preprocess(text)
            if tok not in gensim.parsing.preprocessing.STOPWORDS
            and len(tok) > 2
            and tok not in stopword]
    return " ".join(kept)
#Removes any word that starts with either http or https
def remove_urls(vTEXT):
    """Strips every http:// or https:// URL (up to the next whitespace)."""
    return re.sub(r'http[s]?://\S+', '', vTEXT, flags=re.MULTILINE)
#Remove words that starts with www
def remove_www(vTEXT):
    """Strips every token that starts with 'www' (up to the next whitespace)."""
    return re.sub(r'www\S+', '', vTEXT, flags=re.MULTILINE)
#Standard NLP run through.
# Order matters: URLs/www tokens must go before special-character stripping,
# and contractions must be expanded before punctuation is blanked out.
g_df['lyrics'] = g_df['lyrics'].apply(remove_urls)
g_df['lyrics'] = g_df['lyrics'].apply(remove_www)
g_df['lyrics'] = g_df['lyrics'].apply(remove_special_characters)
g_df['lyrics'] = g_df['lyrics'].apply(remove_extra_whitespace_tabs)
g_df['lyrics'] = g_df['lyrics'].apply(remove_digits)
g_df['lyrics'] = g_df['lyrics'].apply(remove_accented_chars)
g_df['lyrics'] = g_df['lyrics'].apply(expand_contractions)
g_df['lyrics'] = g_df['lyrics'].apply(replace_punctuation)
# word counts
g_df['full_word_count'] = g_df["lyrics"].apply(lambda x: len(str(x).split(" ")))
# Character counts
g_df['full_character_count'] = g_df["lyrics"].apply(lambda x: sum(len(word) for word in str(x).split(" ")))
#average word length
g_df['full_avg_word_length'] = g_df['full_character_count'] / g_df['full_word_count']
#Gensim stopword removal. Creating a medium sized lyrics set. I'll run a couple feature engineering
#functions on it. Then create a smaller set with the domain specific stopwords list and compare the two.
g_df['med_lyrics'] =g_df['lyrics'].apply(stops_letters)
# word counts
g_df['med_word_count'] = g_df["med_lyrics"].apply(lambda x: len(str(x).split(" ")))
# Character counts
g_df['med_character_count'] = g_df["med_lyrics"].apply(lambda x: sum(len(word) for word in str(x).split(" ")))
#average word length
g_df['med_avg_word_length'] = g_df['med_character_count'] / g_df['med_word_count']
#Feature engineering, Affinity score.
afinn = Afinn()
def get_affinity_scores(lyrics):
    """
    Returns a per-document AFINN sentiment score for each string in *lyrics*.

    :param lyrics: list of document strings
    :return: list of floats; empty documents score 0
    """
    # Fix: removed the dead `count` accumulator, which was incremented but
    # never read. The returned scores are unchanged.
    # NOTE(review): the score is normalized by len(t) in *characters*, not
    # words — confirm that is the intended normalization.
    scores = []
    for t in lyrics:
        if len(t) > 0:
            scores.append(afinn.score(t) / len(t))
        else:
            scores.append(0)
    return scores
# AFINN affinity score for the gensim-reduced ("med") lyrics.
new_affin = get_affinity_scores(g_df['med_lyrics'].tolist())
g_df['med_content_affin'] = new_affin
#Feature engineering, Sentiment score and label
""" Something was broken in this. The sent_score was always the same number
and the labels were incorrect sometimes. I fixed it with some changes however
the med_sent_score is cast as a list, an object. Need it as a Float.
Will fix later."""
def sentiment_check(text):
    """Labels text 'negative' / 'neutral' / 'positive' from TextBlob polarity."""
    polarity = TextBlob(text).sentiment.polarity
    if polarity > 0:
        return 'positive'
    if polarity < 0:
        return 'negative'
    return 'neutral'
# Categorical sentiment label for the "med" lyrics.
g_df['med_sent_label'] = g_df['med_lyrics'].apply(sentiment_check)
print("Label done. Current Time =", datetime.now())
def new_sent_ck(text):
    """
    Returns the TextBlob sentiment polarity of *text* as a float.

    Fix: the previous version wrapped the polarity in a one-element list,
    which made the *_sent_score column object-typed — the exact issue the
    author's "Need it as a Float. Will fix later." note describes. Returning
    the bare float gives the column a numeric dtype.
    """
    return TextBlob(text).sentiment.polarity
# Numeric sentiment score for the "med" lyrics.
g_df['med_sent_score'] = g_df['med_lyrics'].apply(new_sent_ck)
print("Both med_sent tasks done. Current Time =", datetime.now())
#Feature engineering, giant string for a vectorizer, later.
# punkt (tokenizer) and wordnet (lemmatizer) corpora are required below.
import nltk
nltk.download('punkt')
nltk.download('wordnet')
def lemmatized_word(text):
    """
    Lemmatizes every token and rejoins them into one space-separated string
    that a vectorizer can accept.
    """
    tokens = nltk.word_tokenize(text)
    return " ".join(wordnet_lemmatizer.lemmatize(tok) for tok in tokens)
# Lemmatized single-string representation for later vectorization.
g_df['med_vector'] = g_df['med_lyrics'].apply(lemmatized_word)
print("Vector done. Current Time =", datetime.now())
#Clean up med_lyrics for any NaN values, which will stop the next function.
g_df.dropna(axis=0, subset=['med_lyrics'], inplace=True)
#Feature engineering, create domain specific scores based on words unique to particular genres.
def genre_count(text, words=None, weight=None):
    """
    Scores *text* by counting tokens that appear in a genre word list; each
    matching token adds *weight* to the score.

    For backward compatibility with the surrounding script (which re-binds the
    module-level ``stop_words`` list and ``digit`` weight before each genre
    pass and calls this via ``Series.apply``), both parameters default to
    those globals.

    :param text: document to score
    :param words: iterable of genre-specific words (default: global stop_words)
    :param weight: score increment per matching token (default: global digit)
    :return: accumulated score, or None when nothing matched
        (preserved quirk: zero-match rows become NaN in the result columns)
    """
    if words is None:
        words = stop_words
    if weight is None:
        weight = digit
    result = 0
    # Idiom fix: iterate tokens directly instead of range(len(...)).
    for token in word_tokenize(text):
        if token in words:
            result += weight
    # Intentionally falls through (returns None) when result == 0, matching
    # the original behavior that downstream code relies on.
    if result != 0:
        return result
#Set Rock! words...
# NOTE(review): the NLTK stopword list assigned on the next line is discarded
# immediately by the `stop_words = []` re-assignment (same in each genre
# block below); only the genre-unique word lists end up in stop_words.
stop_words = nltk.corpus.stopwords.words('english')
stop_words = []
rock2 = rock['Word'].to_dict()
rock3 = list(rock2.values())
digit = .01
stop_words.extend(rock3)
print(len(stop_words), 'Rock!')
print("Current Time =", datetime.now())
#Run genre_count with Rock!
# genre_count reads the stop_words/digit globals set just above.
g_df['med_rock_genre_count'] =g_df['med_lyrics'].apply(genre_count)
#Reset to Hip Hop...
stop_words = nltk.corpus.stopwords.words('english')
stop_words = []
hiphop2 = hiphop['Word'].to_dict()
hiphop3 = list(hiphop2.values())
digit = 100
stop_words.extend(hiphop3)
print(len(stop_words), 'Hip Hop')
print("Current Time =", datetime.now())
#Run genre_count with Hip Hop
g_df['med_hiphop_genre_count'] =g_df['med_lyrics'].apply(genre_count)
#Reset to Pop...
stop_words = nltk.corpus.stopwords.words('english')
stop_words = []
pop2 = pop['Word'].to_dict()
pop3 = list(pop2.values())
digit = 1
stop_words.extend(pop3)
print(len(stop_words), 'Pop')
print("Current Time =", datetime.now())
#Run genre_count with Pop
g_df['med_pop_genre_count'] =g_df['med_lyrics'].apply(genre_count)
print("Current Time =", datetime.now())
# New column with all genre_count numbers added up.
# NOTE(review): genre_count returns None (NaN) for zero matches, so this sum
# is NaN for any row missing one of the three counts — confirm that's intended.
g_df['med_genre_count'] = g_df['med_rock_genre_count']+g_df['med_hiphop_genre_count']+g_df['med_pop_genre_count']
#Create the smaller lyrics set from the domain-specific stopwords.
import nltk
import nltk.corpus
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
stop_words = nltk.corpus.stopwords.words('english')
g_stop2 = g_stop['All'].to_dict()
g_stop3 = list(g_stop2.values())
stop_words.extend(g_stop3)
def stops_word(text):
    """
    Removes every token found in the module-level stop_words list.

    NOTE: intentionally returns the str() of a Python list (i.e.
    "[word, word]") with apostrophes stripped; the bracket/comma cleanup
    happens in the script right after this function is applied.
    """
    kept = [tok for tok in word_tokenize(text) if tok not in stop_words]
    return str(kept).replace("'", "")
g_df['sml_lyrics'] = g_df['lyrics'].apply(stops_word)
# BUG FIX: this print statement was missing its closing parenthesis and the
# call parens on datetime.now, which made the whole script a syntax error.
print("sml_lyrics complete. Current Time =", datetime.now())
# stops_word returns str(list) output; strip the list-formatting artifacts.
g_df['sml_lyrics'] = g_df['sml_lyrics'].str.replace(',', '')  # fixes the strings-with-commas issue
g_df['sml_lyrics'] = g_df['sml_lyrics'].str.replace('[', '')
g_df['sml_lyrics'] = g_df['sml_lyrics'].str.replace(']', '')
#Gensim stopword removal. Same as what was run on med_lyrics. IOT limit differences between
#sml_ and med_ portions of dataset to just domain-specific stopwords and scoring.
g_df['sml_lyrics'] = g_df['sml_lyrics'].apply(stops_letters)
# word counts
g_df['sml_word_count'] = g_df["sml_lyrics"].apply(lambda x: len(str(x).split(" ")))
# Character counts
g_df['sml_character_count'] = g_df["sml_lyrics"].apply(lambda x: sum(len(word) for word in str(x).split(" ")))
#average word length
g_df['sml_avg_word_length'] = g_df['sml_character_count'] / g_df['sml_word_count']
#Feature engineering, Affinity score.
afinn = Afinn()
new_affin = get_affinity_scores(g_df['sml_lyrics'].tolist())
g_df['sml_content_affin'] = new_affin
print("affinity score. Current Time =", datetime.now())
#Feature engineering, Sentiment score and label
""" Something was broken in this. The sent_score was always the same number
and the labels were incorrect sometimes. I fixed it with some changes however
the sml_sent_score is cast as a list, an object. Need it as a Float.
Will fix later."""
def sentiment_check (text):
    """Label *text* as 'negative', 'neutral' or 'positive' by TextBlob polarity."""
    score = TextBlob(text).sentiment.polarity
    if score > 0:
        return 'positive'
    if score == 0:
        return 'neutral'
    return 'negative'
# Per-song sentiment label (negative/neutral/positive) from sentiment_check.
g_df['sml_sent_label'] = g_df['sml_lyrics'].apply(sentiment_check)
print("Label done. Current Time =", datetime.now())
def new_sent_ck (text):
    """Return the TextBlob polarity of *text* as a plain float.

    BUG FIX: the original wrapped the score in a one-element list, which made
    the resulting sml_sent_score column object-dtype — exactly the defect the
    author's note above flags ("Need it as a Float. Will fix later.").
    Returning the float directly fixes the dtype.
    """
    return float(TextBlob(text).sentiment.polarity)
# Numeric sentiment score per song via new_sent_ck.
g_df['sml_sent_score'] = g_df['sml_lyrics'].apply(new_sent_ck)
print("Both sml_sent tasks done. Current Time =", datetime.now())
#Feature engineering, giant string for a vectorizer, later.
g_df['sml_vector'] = g_df['sml_lyrics'].apply(lemmatized_word)
print("sml_vector done. Current Time =", datetime.now())
#Clean up sml_lyrics for any NaN values, which will stop the next function.
g_df.dropna(axis=0, subset=['sml_lyrics'], inplace=True)
#Feature engineering, create domain specific scores based on words unique to particulary genres.
def genre_count(text):
result = | |
'laser')
elif laser == 'em':
if isinstance(filter, bool):
if cycle not in cycle_dict[laser]:
cycle_dict[laser][cycle] = filter
else:
error('ConfigFile::Duplicated emission filter cycle')
else:
error('ConfigFile::Invalid filter for', laser, 'laser')
else:
error('ConfigFile:Invalid laser')
# Add default/home to cycles with out filters specified
method = config.get('experiment', 'method')
method = config[method]
start_cycle = 1
if method.get('pre recipe', fallback = None):
start_cycle = 0
last_cycle = int(config.get('experiment','cycles'))+1
# Get/check default filters
default_filters = {}
fallbacks = {'red':'home', 'green':'home', 'em':'True'}
for laser in colors:
filter = method.get('default '+laser+' filter', fallback = fallbacks[laser])
try:
filter = float(filter)
except:
pass
if laser in ex_dict.keys():
if filter in ex_dict[laser].keys():
default_filters[laser] = filter
elif laser == 'em':
if filter in ['True', 'False']:
default_filters[laser] = filter
# Assign default filters to missing cycles
for cycle in range(start_cycle,last_cycle):
for laser in colors:
if cycle not in cycle_dict[laser]:
cycle_dict[laser][cycle] = default_filters[laser]
return cycle_dict
def LED(AorB, indicate):
    """Control front LEDs to communicate what the HiSeq is doing.

    **Parameters:**
    - AorB (str): Flowcell position (A or B), or all.
    - indicate (str): Current action of the HiSeq or state of the flowcell.

    =========== =========== ===================================================
    LED MODE    indicator   HiSeq Action / Flowcell State
    =========== =========== ===================================================
    off         off         The flowcell is not in use.
    yellow      error       There is an error with the flowcell.
    green       startup     The HiSeq is starting up or shutting down
    pulse green user        The HiSeq requires user input
    blue        sleep       The flowcell is holding or waiting.
    pulse blue  awake       HiSeq valve, pump, or temperature action on the flowcell.
    sweep blue  imaging     HiSeq is imaging the flowcell.
    =========== =========== ===================================================
    """
    # Dispatch table replaces the original if/elif chain; unknown indicators
    # are silently ignored, matching the original behavior.
    modes = {
        'startup': 'green',
        'user': 'pulse green',
        'error': 'yellow',
        'sleep': 'blue',
        'awake': 'pulse blue',
        'imaging': 'sweep blue',
        'off': 'off',
    }
    # Resolve which flowcell positions to signal: one, all, or none.
    fc = []
    if AorB in flowcells.keys():
        fc = [AorB]
    elif AorB == 'all':
        fc = [*flowcells.keys()]
    mode = modes.get(indicate)
    for AorB in fc:
        if mode is not None:
            hs.f.LED(AorB, mode)
    return True
def userYN(*args):
    """Ask a user a Yes/No question and return True if Yes, False if No."""
    # Build the prompt exactly as before: every arg followed by a space.
    prompt = ''.join(str(a) + ' ' for a in args)
    # Keep asking until the user answers Y or N (case-insensitive).
    while True:
        reply = input(prompt + '? Y/N = ').upper().strip()
        if reply == 'Y':
            return True
        if reply == 'N':
            return False
def do_flush():
    """Flush all, some, or none of lines.

    Interactively asks which reagent lines to flush, confirms the selection,
    then for each selected port moves every flowcell's valve to that port and
    pumps the flush volume, both in parallel threads.  Relies on the
    module-level globals ``hs`` (HiSeq) and ``flowcells``.
    """
    # Use the first flowcell's valve as the reference for available ports.
    AorB_ = [*flowcells.keys()][0]
    port_dict = hs.v24[AorB_].port_dict
    # Select lines to flush
    LED('all', 'user')
    confirm = False
    while not confirm:
        flush_ports = input("Flush all, some, or none of the lines? ")
        if flush_ports.strip().lower() == 'all':
            flush_all = True  # NOTE(review): assigned but never read afterwards.
            flush_ports = [*port_dict.keys()]
            # Variable-port aliases are expanded to their per-cycle targets
            # during the flush loop below, so drop the aliases themselves.
            for vp in hs.v24[AorB_].variable_ports:
                if vp in flush_ports:
                    flush_ports.remove(vp)
            confirm = userYN('Confirm flush all lines')
        elif flush_ports.strip().lower() in ['none', 'N', 'n', '']:
            # NOTE(review): after .lower() the 'N'/'n' entries can never
            # match; only 'none' and '' are reachable here.
            flush_ports = []
            confirm = userYN('Confirm skip flushing lines')
        else:
            # Parse a comma-separated list of port names and/or numbers.
            good =[]
            bad = []
            for fp in flush_ports.split(','):
                fp = fp.strip()
                if fp in port_dict.keys():
                    good.append(fp)
                else:
                    try:
                        fp = int(fp)
                        if fp in range(1,hs.v24[AorB_].n_ports+1):
                            good.append(fp)
                        else:
                            bad.append(fp)
                    except:
                        bad.append(fp)
            if len(bad) > 0:
                print('Valid ports:', *good)
                print('Invalid ports:', *bad)
                confirm = not userYN('Re-enter lines to flush')
            else:
                confirm = userYN('Confirm only flushing',*good)
                if confirm:
                    flush_ports = good
    if len(flush_ports) > 0:
        # Make sure the hardware is physically ready before pumping.
        while not userYN('Temporary flowcell(s) locked on to stage'): pass
        while not userYN('All valve input lines in water'): pass
        while not userYN('Ready to flush'): pass
        LED('all', 'startup')
        # Flush ports
        speed = flowcells[AorB_].pump_speed['flush']
        volume = flowcells[AorB_].volume['flush']
        for port in flush_ports:
            if port in hs.v24[AorB_].variable_ports:
                # Deliberately extends the list being iterated so the
                # cycle-specific targets of a variable port get flushed too.
                flush_ports.append(*hs.v24[AorB_].port_dict[port].values())
            else:
                hs.message('Flushing ' + str(port))
                # Move every flowcell's valve to the port in parallel...
                for fc in flowcells.values():
                    AorB = fc.position
                    fc.thread = threading.Thread(target=hs.v24[AorB].move,
                                                 args=(port,))
                    fc.thread.start()
                # ...and busy-wait until every valve move has finished.
                alive = True
                while alive:
                    alive_ = []
                    for fc in flowcells.values():
                        alive_.append(fc.thread.is_alive())
                    alive = any(alive_)
                # Pump the flush volume through every flowcell in parallel.
                for fc in flowcells.values():
                    AorB = fc.position
                    fc.thread = threading.Thread(target=hs.p[AorB].pump,
                                                 args=(volume, speed,))
                    fc.thread.start()
                alive = True
                while alive:
                    alive_ = []
                    for fc in flowcells.values():
                        alive_.append(fc.thread.is_alive())
                    alive = any(alive_)
##########################################################
## Prime Lines ###########################################
##########################################################
def do_prime(flush_YorN):
    """Prime lines with all reagents in config if prompted.

    **Parameters:**
    - flush_YorN (bool): whether a flush was performed beforehand; controls
      which setup prompts are shown.

    Uses the module-level globals ``hs``, ``flowcells`` and ``config``.
    """
    LED('all', 'user')
    ## Prime lines
    confirm = False
    while not confirm:
        prime_YorN = userYN("Prime lines")
        if prime_YorN:
            confirm = userYN("Confirm prime lines")
        else:
            confirm = userYN("Confirm skip priming lines")
    # LED('all', 'startup')
    # hs.z.move([0,0,0])
    # hs.move_stage_out()
    #LED('all', 'user')
    if prime_YorN:
        if flush_YorN:
            while not userYN('Temporary flowcell(s) locked on to stage'): pass
        while not userYN('Valve input lines in reagents'): pass
        while not userYN('Ready to prime lines'): pass
        #Flush all lines
        LED('all', 'startup')
        # NOTE(review): this while True always exits via the unconditional
        # break below — it acts as a single-pass labeled block.
        while True:
            AorB_ = [*flowcells.keys()][0]
            port_dict = hs.v24[AorB_].port_dict
            speed = flowcells[AorB_].pump_speed['prime']
            for port in port_dict.keys():
                # Only fixed (int-numbered) ports are primed directly.
                if isinstance(port_dict[port], int):
                    hs.message('Priming ' + str(port))
                    # Move each flowcell's valve to the port in parallel.
                    for fc in flowcells.values():
                        port_num = port_dict[port]
                        AorB = fc.position
                        fc.thread = threading.Thread(target=hs.v24[AorB].move,
                                                     args=(port,))
                        fc.thread.start()
                    # Busy-wait for all valve moves to finish.
                    alive = True
                    while alive:
                        alive_ = []
                        for fc in flowcells.values():
                            alive_.append(fc.thread.is_alive())
                        alive = any(alive_)
                    # Pump the appropriate volume (side/sample/main port).
                    for fc in flowcells.values():
                        if port_num in hs.v24[AorB].side_ports:
                            volume = fc.volume['side']
                        elif port_num == hs.v24[AorB].sample_port:
                            volume = fc.volume['sample']
                        else:
                            volume = fc.volume['main']
                        AorB = fc.position
                        fc.thread = threading.Thread(target=hs.p[AorB].pump,
                                                     args=(volume, speed,))
                        fc.thread.start()
                    alive = True
                    while alive:
                        alive_ = []
                        for fc in flowcells.values():
                            alive_.append(fc.thread.is_alive())
                        alive = any(alive_)
            break
        # Rinse flowcells.  NOTE(review): `port` and `AorB` here carry their
        # values from the last iteration of the priming loop above.
        method = config.get('experiment', 'method') # Read method specific info
        method = config[method]
        rinse_port = method.get('rinse', fallback = None)
        rinse = rinse_port in hs.v24[AorB].port_dict
        if rinse_port == port: # Option to skip rinse if last reagent pump was rinse reagent
            rinse = False
        # Get rinse reagents
        if not rinse:
            LED('all', 'user')
            print('Last reagent pumped was', port)
            if userYN('Rinse flowcell'):
                while not rinse:
                    if rinse_port not in hs.v24[AorB].port_dict:
                        rinse_port = input('Specify rinse reagent: ')
                        rinse = rinse_port in hs.v24[AorB].port_dict
                        if not rinse:
                            print('ERROR::Invalid rinse reagent')
                            print('Choose from:', *list(hs.v24[AorB].port_dict.keys()))
        if rinse:
            # Simultaneously Rinse Flowcells
            for fc in flowcells.values():
                fc.thread = threading.Thread(target=do_rinse,
                                             args=(fc,rinse_port,))
                fc.thread.start()
            alive = True
            # Wait for rinsing to complete
            while alive:
                alive_ = []
                for fc in flowcells.values():
                    alive_.append(fc.thread.is_alive())
                alive = any(alive_)
    # Final hardware checklist before continuing the experiment.
    LED('all', 'user')
    while not userYN('Temporary flowcell(s) removed'): pass
    while not userYN('Experiment flowcell(s) locked on to stage'): pass
    if not prime_YorN:
        while not userYN('Valve input lines in reagents'): pass
    while not userYN('Door closed'): pass
##########################################################
def do_nothing():
    """No-op; used where the recipe machinery expects a callable thread target."""
    return None
##########################################################
## iterate over lines, send to pump, and print response ##
##########################################################
def do_recipe(fc):
"""Do the next event in the recipe.
**Parameters:**
- fc (flowcell): The current flowcell.
"""
AorB = fc.position
fc.thread = None
# Skip to first line of recipe on initial cycle
if fc.cycle == 1 and fc.first_line is not None:
for i in range(fc.first_line):
line = fc.recipe.readline()
fc.first_line = None
#get instrument and command
instrument = None
while instrument is None:
line = fc.recipe.readline()
if line:
instrument, command = parse_line(line)
else:
break
if line:
# Move reagent valve
if instrument == 'PORT':
#Move to cycle specific reagent if it is variable a reagent
if fc.cycle <= fc.total_cycles:
if command in hs.v24[AorB].variable_ports:
command = hs.v24[AorB].port_dict[command][fc.cycle]
log_message = 'Move to ' + command
fc.thread = threading.Thread(target = hs.v24[AorB].move,
args = (command,))
if fc.cycle <= fc.total_cycles:
LED(AorB, 'awake')
# Pump reagent into flowcell
elif instrument == 'PUMP':
volume = int(command)
speed = fc.pump_speed['reagent']
log_message = 'Pumping ' + str(volume) + ' uL'
fc.thread = threading.Thread(target = hs.p[AorB].pump,
args = (volume, speed,))
if fc.cycle <= fc.total_cycles:
LED(AorB, 'awake')
# Incubate flowcell in reagent for set time
elif instrument == 'HOLD':
if command.isdigit():
holdTime = float(command)*60
log_message = 'Flowcell holding for ' + str(command) + ' min.'
if hs.virtual:
fc.thread = threading.Timer(holdTime/hs.speed_up, fc.endHOLD)
#fc.thread = threading.Timer(holdTime, fc.endHOLD)
else:
fc.thread = threading.Timer(holdTime, fc.endHOLD)
elif command == 'STOP':
hs.message('PySeq::Paused')
LED(AorB, 'user')
input("Press enter to continue...")
log_message = ('Continuing...')
fc.thread = threading.Thread(target = do_nothing)
if fc.cycle <= fc.total_cycles:
LED(AorB, 'sleep')
# Wait for other flowcell to finish event before continuing with current flowcell
elif instrument == 'WAIT':
if command == 'TEMP':
fc.thread = threading.Thread(target = hs.T.wait_fc_T,
args=(AorB, fc.temperature,))
log_message = ('Waiting to reach '+str(fc.temperature)+'°C')
elif fc.waits_for is not None:
if command in flowcells[fc.waits_for].events_since_IMAG:
log_message = command + ' has | |
"""
Utilities to parse files in CHILDES CHAT format.
"""
import sys, os
import re
import ast
import fileinput
from itertools import chain
from typing import List, Tuple, Dict, Union
from pathlib import Path
from collections import OrderedDict
from logger import logger
from helpers.sentence import Sentence
from helpers.token import Token
from helpers.clean_utterance import normalise_utterance
from features import mor2feats, is_key
# Accumulates every raw MOR feature marker seen while parsing (see parse_sub).
all_feats = set()
# Punctuation marks as they appear in MOR codes.
PUNCT = re.compile("([,.;?!:”])")
# Bare feature codes: an optional digit followed by capital letters (e.g. "3S").
FEAT = re.compile(r"^\d?[A-Z]+$")
EMPTY = ['.', '0 .', '']
# ---- define unidentifiable patterns to omit----
unidentifiable = [
    r"yyy", # phono_coding
    r"www", # untranscribed
    r"^&", # phono_fragments
]
TO_OMIT = re.compile("|".join(unidentifiable))
# define a mapping between MOR codes and UPOS tags.
# Codes with a ':' subtype fall back to the prefix before ':' (see to_upos).
MOR2UPOS = {
    "adj":"ADJ",
    "post":"ADP", # adpositions
    "prep":"ADP", # adpositions
    "adv":"ADV",
    "adv:tem":"ADV",
    "v:aux":"AUX",
    "aux":"AUX",
    "coord":"CCONJ",
    "qn":"DET",
    "det":"DET",
    "quant":"DET", # jpn
    "co":"INTJ", # interjection, interaction
    "n":"NOUN",
    "on":"NOUN", # onomatopoeia
    "onoma":"NOUN", # jpn onomatopoeia
    "part":"PART",
    "pro":"PRON",
    "n:prop":"PROPN",
    "conj":"SCONJ",
    "comp":"SCONJ",
    "num":"NUM",
    "v":"VERB",
    "inf":"PART", # infinitive particles
    "cop":"AUX",
    "mod":"AUX",
    "art":"DET", # article, PronType=Art
    "prepart":"DET", # preposition with article
    "vimp":"VERB", # verb imperative Mood=Imp
    "vpfx":"ADP", # preverb/verbal particles, according to UD website shouldn't be PART
    # ---- punctuation marks ----
    "end":"PUNCT",
    "beg":"PUNCT",
    "cm":"PUNCT",
    "bq":"PUNCT",
    "eq":"PUNCT",
    "bq2":"PUNCT",
    "eq2":"PUNCT",
    # ---- hard to decide ----
    "fil":"INTJ", # ?
    "neg":"PART", # ?
    "wplay":"INTJ", # wordplay
    "bab":"INTJ", # babbling
    "sing":"INTJ", # singing
    # ---- empty or X? ----
    "none":"X",
    "dia":"X", # dialect
    "test":"X", # test words like "wug"
    "meta":"X", # metalinguistic use
    "phon":"X", # phonologically consistent form
    "fam":"X", # family-specific form
    "uni":"X", # Unibet transcription
    "L2":"X", # second-language form
    "neo":"X", # neologism
    "chi":"X", # child-invented form
}
# define a mapping between GRs and UD deprels.
# 'beg' and 'end' are handled conditionally in conditional_deprel instead.
GR2DEPREL = {
    'mod':'nmod',
    'coord':'cc',
    'subj':'nsubj',
    'com':'discourse',
    'pq':'det',
    'neg':'advmod',
    'cjct':'advcl',
    'cpred':'ccomp',
    'obj2':'iobj',
    'incroot':'root',
    'xmod':'acl',
    # 'beg':'vocative', # or parataxis, change head from 0 to root's index
    'date':'flat',
    'comp':'ccomp',
    'xjct':'advcl',
    'pred':'xcomp',
    'name':'flat:name',
    'srl':'xcomp',
    'jct':'advmod',
    'link':'mark',
    'app':'appos',
    'cmod':'ccomp',
    # 'end':'parataxis',
    'enum':'conj',
    # ---- has alternative ----
    'poss':'case', # or "nmod:poss"
    'quant':'det', # if numbers 'nummod'
    'postmod':'amod', # or "xcomp"
    # ---- as is ----
    'obj':'obj',
    'csubj':'csubj',
    'conj':'conj',
    'punct':'punct',
    'det':'det',
    'root':'root',
    'aux':'aux',
    # ---- punctuations ----
    'endp':'punct',
    'begp':'punct',
    'lp':'punct',
    # ---- undecided ----
    'om':'discourse:omission',
    'cpobj':'acl:relcl',
    'cobj':'ccomp',
    'njct':'nmod',
    'pobj':'obl',
    'inf':'mark',
}
def parse_chat(fp):
    """ For a open file in CHAT format, get the utterances grouped with
    their dependent tiers, meta data including headers and comments, and
    the final lines of the file.
    """
    current_meta, meta_groups = [], []
    pending_tiers, utterance_groups = [], []
    trailing = []
    raw = fp.readlines()
    pos = 0
    full_line = ""
    while pos < len(raw):
        # Join tab-initiated continuation lines onto the preceding line.
        if not raw[pos].startswith("\t"):
            full_line = raw[pos].strip()
            pos += 1
            while pos < len(raw) and raw[pos].startswith("\t"):
                full_line += " " + raw[pos].strip()
                pos += 1
        # Dispatch on the marker: '*' utterance, '@' meta, '%' dependent tier.
        if full_line.startswith("*"):
            # Close out the meta collected so far.
            meta_groups.append(current_meta)
            current_meta = []
            # Attach any pending tiers to the previous utterance.
            if pending_tiers:
                utterance_groups[-1].extend(pending_tiers)
                pending_tiers = []
            # Start a new utterance group (list so tiers can be appended).
            utterance_groups.append([full_line])
        elif full_line.startswith("@"):
            if pending_tiers:
                utterance_groups[-1].extend(pending_tiers)
                pending_tiers = []
            current_meta.append(full_line)
            # At EOF, whatever meta remains becomes the trailing lines.
            if full_line == "@End":
                trailing.extend(current_meta)
        elif full_line.startswith("%"):
            pending_tiers.append(full_line)
    return meta_groups, utterance_groups, trailing
def check_token(surface: str) -> Tuple[str, str]:
    """Adopted and modified from coltekin/childes-tr/misc/parse-chat.py

    For a given surface form of the token, return (surface, clean), where
    clean is the token form without CHAT codes.
    """
    if surface is None:
        return None, None
    # Unidentifiable material (and phonological forms) gets an empty clean form.
    if re.match(TO_OMIT, surface):
        return surface, ''
    # Strip CHAT markup to normalise the token form.
    cleaned = (surface.replace(' ', '')
                      .replace('xxx', '')   # unintelligible, 'wxxxord' --> 'word'
                      .replace('(', '').replace(')', '')
                      .replace('0', '')     # 0token is omitted token
                      .replace('‡', ',')    # prefixed interactional marker
                      .replace('„', ','))   # suffixed interactional marker
    # Drop any @endings.
    at_pos = cleaned.find("@")
    if at_pos != -1:
        cleaned = cleaned[:at_pos]
    return surface, cleaned
# def to_deprel(gr: str) -> str:
# Directly translate a given GR (grammatical relation) to its UD counterpart
# using predefined dictionary GR2DEPREL.
# If the given gr is in GR2DEPREL, return its corresponding deprel,
# otherwise return gr (and give a warning).
# if not gr: # empty or None
# return gr
# if not gr in GR2DEPREL:
# logger.warning(f"{gr} does not have a corresponding deprel in GR2DEPREL.")
# return GR2DEPREL[gr] if gr in GR2DEPREL else gr
def root_token(tokens: "List[Token]") -> int:
    """Get index of the real root in multi-root sentences.

    Scans every token and keeps the LAST candidate (same as the original
    sequential overwrite); returns None when no positive-index root exists.
    """
    candidates = [
        tok.index for tok in tokens
        if tok.deprel in ('root', 'incroot')
        or (tok.head == '0' and tok.deprel not in ('vocative', 'parataxis'))
    ]
    best = candidates[-1] if candidates else -1
    return best if best > 0 else None
def change_head_to_root(tok: Token, tokens: List[Token], is_multi=False, i=-1):
    """Store original head in MISC, change head to root.

    For multi-word tokens (is_multi with component index i) the MISC entry of
    component i is updated; otherwise the scalar misc/head are updated.
    """
    if is_multi and i >= 0:
        parts = list(tok.misc)
        # BUG FIX: the branches were inverted relative to the scalar case
        # below — a non-empty MISC entry was overwritten with "head=..." and
        # an empty one got a spurious leading '|'. Append with '|' when the
        # entry is non-empty, assign when it is empty.
        if parts[i]:
            parts[i] += f"|head={str(tok.head[i])}"
        else:
            parts[i] = f"head={str(tok.head[i])}"
        tok.misc = tuple(parts)
        tok.head[i] = root_token(tokens)
    else:
        if not tok.misc:
            tok.misc = f"head={str(tok.head)}"
        else:
            tok.misc += f"|head={str(tok.head)}"
        tok.head = root_token(tokens)
def conditional_deprel(gr: str, tok: Token, tokens: List[Token], is_multi=False, upos: str=None, lemma: str=None, i=-1) -> str:
    """Translate a given GR (grammatical relation) to its UD counterpart
    using conditional mapping for several selected cases, the rest uses dictionary mapping.
    Conditional mapping is based on grammatical information of other
    tokens in the sentence.

    To-Dos:
    --------
    - be extended to include more cases
    - use morph dict to organise input
    """
    if gr == 'beg':
        # print("beg")
        if upos and upos not in ['INTJ', 'PROPN', 'NOUN']:
            logger.info("BEG but not vocative.")
            # print(tokens)
            change_head_to_root(tok, tokens, is_multi, i)
            # print("change head")
            return 'parataxis'
        elif upos in ['PROPN', 'NOUN']:
            # print(tokens)
            change_head_to_root(tok, tokens, is_multi, i)
            # print("change head")
            return 'vocative'
        elif upos == 'INTJ':
            # print(tokens)
            change_head_to_root(tok, tokens, is_multi, i)
            # print("change head")
            return 'discourse'
        # NOTE(review): when upos is empty/None no branch above matches and
        # the function falls through, returning None — confirm intended.
    elif gr == 'end':
        change_head_to_root(tok, tokens, is_multi, i)
        return 'parataxis'
    # ---- dict translation ----
    elif not gr in GR2DEPREL:
        logger.warning(f"{gr} does not have a corresponding deprel in GR2DEPREL.")
        return gr
    else:
        return GR2DEPREL[gr]
def gr2deprel(tok: Token, tokens: List[Token], is_multi=False):
    """Stores original GR in MISC, translate a given GR (grammatical relation)
    to its UD counterpart and modify deps accordingly, also works for multi-word tokens.
    """
    upos = ''
    lemma = ''
    if is_multi:
        # Record each component's original GR in its MISC slot, keeping ONE
        # slot per component.
        # BUG FIX: the previous tuple comprehensions filtered on a truthy GR,
        # which dropped slots and misaligned MISC with deprel/head (the
        # commented-out "readable version" kept every slot). Components
        # without a GR now keep their existing MISC text unchanged.
        if tok.misc:
            old_misc = list(tok.misc)
        else:
            old_misc = [''] * len(tok.deprel)
        new_misc = []
        for i, s in enumerate(old_misc):
            gr = tok.deprel[i] if i < len(tok.deprel) else None
            if s and gr:
                new_misc.append(s + f"|gr={gr}")
            elif gr:
                new_misc.append(f"gr={gr}")
            else:
                new_misc.append(s)
        tok.misc = tuple(new_misc)
        for i, gr in enumerate(tok.deprel):
            upos = tok.upos[i] if tok.upos else ''
            lemma = tok.lemma[i] if tok.lemma else ''
            tok.deprel[i] = conditional_deprel(gr, tok, tokens, is_multi, upos, lemma, i)
            tok.deps[i] = f"{tok.head[i]}:{tok.deprel[i]}"
    else:
        if not tok.misc:
            tok.misc = f"gr={tok.deprel}"
        else:
            tok.misc += f"|gr={tok.deprel}"
        gr = tok.deprel # less confusing name
        upos = tok.upos
        # NOTE(review): lemma stays '' for plain tokens (tok.lemma is not
        # consulted), matching the original behaviour.
        tok.assign_ud_deprel(conditional_deprel(gr, tok, tokens, is_multi, upos, lemma))
        tok.deps = f"{tok.head}:{tok.deprel}"
def to_ud_values(tokens: List[Token]) -> List[Token]:
    """ Translate CHAT annotations to UD values, currently for GRs to deprels.

    Multi-word tokens carry list-valued deprel fields and are dispatched with
    is_multi=True; tokens without any deprel are left untouched.  This method
    can be extended for future feature-type conversions. (To-Do)
    """
    for token in tokens:
        if type(token.deprel) is list:  # multi-word token
            gr2deprel(token, tokens, is_multi=True)
        elif token.deprel:              # normal token
            gr2deprel(token, tokens)
    return tokens
def to_upos(mor_code: str) -> str:
    """If the given mor_code is in the predefined MOR2UPOS dict, return the
    corresponding upos, otherwise return mor_code.

    Codes not starting with a letter (e.g. '+...') yield None; unknown codes
    fall back to the prefix before ':' and finally to the code itself.
    """
    if not mor_code: # empty or None
        return mor_code
    elif not mor_code[:1].isalpha(): # for '+...' and things alike
        return None
    if not mor_code in MOR2UPOS:
        prefix = mor_code.split(':')[0]
        if prefix.lower() in MOR2UPOS:
            # BUG FIX: the previous code tested prefix.lower() but indexed
            # MOR2UPOS[prefix] without lowering, raising KeyError for
            # upper-case codes whose lowercase prefix is mapped.
            return MOR2UPOS[prefix.lower()]
        if not re.match(PUNCT, mor_code):
            logger.warning(f"{mor_code} does not have a corresponding UPOS in MOR2UPOS.")
        return mor_code
    return MOR2UPOS[mor_code]
def parse_gra(gra_segment: str) -> Tuple[str, str]:
    """Split a %gra segment 'index|head|RELATION' into (head, lowercased relation)."""
    fields = gra_segment.split('|')
    return fields[1], fields[-1].lower()
def parse_sub(sub_segment: str)-> Tuple[Union[str, None], List[str], str, str]:
    """Parse one sub-word segment of a MOR code.

    Returns (lemma, feat_str, translation, feat):
      - lemma: the lemma when one is encoded, else None
      - feat_str: UD-style features from mor2feats, or '' (see NOTE below)
      - translation: text after '=', if any
      - feat: raw MOR feature markers joined with '^'
    """
    lemma = None
    feat_str = []
    feat = ''
    lemma_feats, _, translation = sub_segment.partition('=') # translation
    # Raw feature markers: '&', '|' or '-' followed by word characters.
    tmps = re.findall(r"[&|-]\w+", lemma_feats)
    if tmps: # has feature
        # feat_str = [f"FEAT={t[1:].title()}" for t in tmps] # need to convert to UD feats
        feat_str = [mor2feats(t) for t in tmps]
        feat_str = list(filter(None, feat_str))
        feat_str.sort()
        # logger.info(feat_str)
        feats = [f"{t}" for t in tmps]
        feat = '^'.join(feats)
    tmp = re.split(r'[|&#-]', lemma_feats)
    # lemma = tmp[0]
    if tmp[0] == 'I' or not re.match(FEAT, tmp[0]): # !!! sometimes lemma is encoded
        lemma = tmp[0]
        all_feats.update(tmps[1:])
    elif tmp and re.match(FEAT, tmp[0]):
        # logger.info(tmp[0])
        # logger.info(feat_str)
        # logger.info(feat)
        feat_str = tmp[0] + '^' + feat if feat else tmp[0]
        all_feats.update(tmps)
    # NOTE(review): a string assigned to feat_str in the elif above is wiped
    # back to '' here because it is not a list — confirm this is intended.
    if not feat_str or not isinstance(feat_str, list):
        feat_str = ''
    return lemma, feat_str, translation, feat
def parse_mor(mor_segment: str):
"""Given a word-level MOR segment, extract the POS tag, lemma, features and other information
to be stored in the MISC field.
"""
lemma = None
feat_str = []
translation = None
feat = None
miscs = []
pos, _, lemma_feats = mor_segment.partition("|") # split by first |
if pos == lemma_feats:
miscs.append(f"form={mor_segment.replace('|', '@')}")
if '#' in pos: # has prefix
miscs.append(f"prefix={pos.split('#')[0]}")
pos = pos.split('#')[-1]
if pos == '' or '+' in pos: # punct
lemma = lemma_feats.replace('+', '') # special case of punctuations
miscs.append(f"form={pos}")
elif '+' in lemma_feats: # compound
tmps = re.split(r"\+\w+?\|", lemma_feats)
l, f, t, feat = zip(*(parse_sub(tmp) for tmp in tmps[1:])) # tmp[0] is empty string
lemma = ''.join(l)
if any(t): translation = '+'.join(t) # or leave empty
feat_str = list(chain(*f)) # or leave empty
if any(feat): miscs.append(f"feats={'+'.join(feat)}") # or leave empty
ctmps = re.split(r"\+", lemma_feats)
# components = [f"{tuple(ctmp.split('|'))}" for | |
<gh_stars>0
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Gory implementation details of CAS service.
Append only currently. Once object is added, it can't be removed.
Upload protocol:
1) Client asks the server to initiate a new upload session (for hash X).
2) Server starts Resumable Upload protocol to a temporary file in GS.
3) Client uploads data to this temporary file (using resumable upload_id for
authentication).
4) Client finalizes the upload (thus making the temp file visible).
5) Client notifies server that upload has finished.
6) Server starts hash verification task.
7) On successful verification, server copies the file to the final location.
8) Meanwhile client polls server for verification operation status.
9) Once verification finishes, client polls 'PUBLISHED' status.
a) Concurrent uploads of a same file are fine, upload session moves to
PUBLISHED state whenever corresponding hash becomes available in
the store, regardless of who exactly uploaded it.
Features of Google Storage used or taken into consideration:
* upload_id is enough to authenticate the request (no access_token needed).
* upload_id is NOT consumed when upload is finalized and may be reused.
* Each object has ETag that identified its content.
* There's copy-object-if-etag-matches atomic operation.
* Lifecycle management for temporary files, to cleanup old garbage.
Also this module is sensitive to implementation details of 'cloudstorage'
library since it uses its non-public APIs:
* StreamingBuffer._api.api_url and StreamingBuffer._path_with_token.
* ReadBuffer._etag.
* storage_api._get_storage_api(...) and _StorageApi it returns.
"""
import base64
import hashlib
import logging
import random
import re
import string
import urllib
import webapp2
from google.appengine import runtime
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
# We use cloud storage guts to implement "copy if ETag matches".
import cloudstorage
from cloudstorage import api_utils
from cloudstorage import errors
from cloudstorage import storage_api
from components import auth
from components import decorators
from components import utils
import config
# TODO(vadimsh): Garbage collect expired UploadSession. Right now only the
# public upload_session_id expires, rendering sessions unreachable by clients.
# But the entities themselves unnecessarily stay in the datastore.

# How long to keep signed fetch URL alive (seconds; 1 hour).
FETCH_URL_EXPIRATION_SEC = 60 * 60
# How long to keep pending upload session alive (seconds; 6 hours).
SESSION_EXPIRATION_TIME_SEC = 6 * 60 * 60
# Chunks to read when verifying the hash (1 MiB).
READ_BUFFER_SIZE = 1024 * 1024
# Hash algorithms we are willing to accept: name -> (factory, hex digest len).
SUPPORTED_HASH_ALGOS = {
  'SHA1': (hashlib.sha1, 40),
}
# Return values of task queue task handling function.
TASK_DONE = 1
TASK_RETRY = 2
def is_supported_hash_algo(hash_algo):
  """Return True when |hash_algo| names an algorithm CAS accepts."""
  supported = SUPPORTED_HASH_ALGOS
  return hash_algo in supported
def is_valid_hash_digest(hash_algo, hash_digest):
  """True if given hex digest looks like a valid hex digest for given algo.

  |hash_algo| must already be supported (asserted).
  """
  assert is_supported_hash_algo(hash_algo)
  _, digest_size = SUPPORTED_HASH_ALGOS[hash_algo]
  # Return a real bool instead of the Match/None object from re.match, so
  # the function matches its documented "True if ..." contract; callers that
  # only test truthiness are unaffected.
  return bool(re.match('^[0-9a-f]{%d}$' % digest_size, hash_digest))
def get_cas_service():
  """Factory method that returns configured CASService instance.

  If the service is not configured, returns None. Also acts as a mocking point
  for unit tests.
  """
  conf = config.cached()
  # Both GS paths must be configured for the service to exist.
  if not conf.cas_gs_path or not conf.cas_gs_temp:
    return None
  try:
    # Validate paths without their trailing '/', the same normalized form
    # that CASService stores.
    cloudstorage.validate_file_path(conf.cas_gs_path.rstrip('/'))
    cloudstorage.validate_file_path(conf.cas_gs_temp.rstrip('/'))
  except ValueError as err:
    logging.error("Invalid CAS config: %s", err)
    return None
  service_account_key = auth.ServiceAccountKey(
      client_email=conf.service_account_email,
      private_key=conf.service_account_pkey,
      private_key_id=conf.service_account_pkey_id)
  if utils.is_local_dev_server():  # pragma: no branch
    # Patch the cloudstorage lib on the dev server — presumably to emulate
    # GS behavior locally; see the hacks module. TODO(review): confirm.
    from . import hacks
    hacks.patch_cloudstorage_lib(service_account_key)
  return CASService(
      conf.cas_gs_path.rstrip('/'),
      conf.cas_gs_temp.rstrip('/'),
      service_account_key)
class NotFoundError(Exception):
  """Raised by 'open' when the file is not in CAS.

  See CASService.open for the raising site.
  """
class UploadIdSignature(auth.TokenKind):
  """Token to use to generate and validate signed upload_session_id."""
  # Signed ids expire on the same schedule as pending upload sessions.
  expiration_sec = SESSION_EXPIRATION_TIME_SEC
  secret_key = auth.SecretKey('upload_session_id_signing', scope='local')
  version = 1
class CASService(object):
"""CAS implementation on top of Google Storage."""
def __init__(self, gs_path, gs_temp, service_account_key=None):
  """Initializes the service.

  Args:
    gs_path: GS path where verified (published) objects live.
    gs_temp: GS path for temporary upload files.
    service_account_key: key used when signing fetch URLs (see
        generate_fetch_url), or None if fetch is not configured.
  """
  # Trailing '/' is stripped so '/'-prefixed components can be appended.
  self._gs_path = gs_path.rstrip('/')
  self._gs_temp = gs_temp.rstrip('/')
  self._service_account_key = service_account_key
  self._retry_params = api_utils.RetryParams()
  # Fail fast on malformed configuration.
  cloudstorage.validate_file_path(self._gs_path)
  cloudstorage.validate_file_path(self._gs_temp)
def is_fetch_configured(self):
"""True if service account credentials are configured."""
return (
self._service_account_key and
self._service_account_key.private_key_id)
def is_object_present(self, hash_algo, hash_digest):
  """True if the given object is in the store."""
  assert is_valid_hash_digest(hash_algo, hash_digest)
  verified_path = self._verified_gs_path(hash_algo, hash_digest)
  return self._is_gs_file_present(verified_path)
def generate_fetch_url(self, hash_algo, hash_digest, filename=None):
  """Returns a signed URL that can be used to fetch an object.

  See https://developers.google.com/storage/docs/accesscontrol#Signed-URLs
  for more info about signed URLs.

  Args:
    hash_algo: supported hash algorithm name.
    hash_digest: valid hex digest identifying the object.
    filename: if given, sets the download filename via
        response-content-disposition (must not contain '"').
  """
  assert is_valid_hash_digest(hash_algo, hash_digest)
  assert self.is_fetch_configured()
  # Generate the signature.
  gs_path = self._verified_gs_path(hash_algo, hash_digest)
  expires = str(int(utils.time_time() + FETCH_URL_EXPIRATION_SEC))
  # Canonical string-to-sign for a GET request, per the signed-URL spec
  # linked above: method, MD5, content type, expiry, path.
  to_sign = '\n'.join([
      'GET',
      '', # Content-MD5, not provided
      '', # Content-Type, not provided
      expires,
      gs_path,
  ])
  signature = self._rsa_sign(self._service_account_key.private_key, to_sign)
  params = [
      ('GoogleAccessId', self._service_account_key.client_email),
      ('Expires', expires),
      ('Signature', signature),
  ]
  # Oddly, response-content-disposition is not signed and can be slapped onto
  # existing signed URL.
  if filename:
    assert '"' not in filename, filename
    params.append((
        'response-content-disposition',
        'attachment; filename="%s"' % filename))
  # Generate the final URL.  (urllib.urlencode is the Python 2 API.)
  query_params = urllib.urlencode(params)
  assert gs_path.startswith('/'), gs_path
  return 'https://storage.googleapis.com%s?%s' % (gs_path, query_params)
def open(self, hash_algo, hash_digest, read_buffer_size=None):
  """Opens a file in CAS for reading.

  Args:
    hash_algo: valid supported hash algorithm to use for verification.
    hash_digest: hex hash digest of the content to be uploaded.
    read_buffer_size: length of chunk of data to read with each RPC.

  Returns:
    File-like object, caller takes ownership and should close it.

  Raises:
    NotFoundError if file is missing.
  """
  read_buffer_size = read_buffer_size or READ_BUFFER_SIZE
  try:
    return cloudstorage.open(
        filename=self._verified_gs_path(hash_algo, hash_digest),
        mode='r',
        read_buffer_size=read_buffer_size,
        retry_params=self._retry_params)
  except cloudstorage.NotFoundError:
    # Translate the GS-level error into the CAS-level one declared above.
    raise NotFoundError()
def start_direct_upload(self, hash_algo):
  """Can be used to upload data to CAS directly from an Appengine handler.

  Opens a temp file for writing (and returns wrapper around it). Hashes the
  data while it is being written, and moves the temp file to an appropriate
  location in CAS once it is closed.

  Args:
    hash_algo: algorithm to use to calculate data hash.

  Returns:
    DirectUpload object to write data to.
  """
  assert is_supported_hash_algo(hash_algo)
  # Timestamp (in seconds) namespaces the temporary file path.
  ts_sec = utils.datetime_to_timestamp(utils.utcnow()) / 1000000.
  temp_path = self._temp_direct_upload_gs_path(ts_sec)
  temp_file = cloudstorage.open(
      filename=temp_path,
      mode='w',
      retry_params=self._retry_params)
  def commit_callback(hash_digest, commit):
    # On commit, copy into the content-addressed location; the temp file is
    # deleted in both the commit and abort cases.
    if commit:
      self._gs_copy(temp_path, self._verified_gs_path(hash_algo, hash_digest))
    self._gs_delete(temp_path)
  return DirectUpload(
      file_obj=temp_file,
      hasher=SUPPORTED_HASH_ALGOS[hash_algo][0](),
      callback=commit_callback)
def create_upload_session(self, hash_algo, hash_digest, caller):
    """Starts a new upload operation.

    Starts an upload regardless of whether the object is already stored or not.
    Generates upload_url for GS resumable upload protocol.

    Args:
      hash_algo: valid supported hash algorithm to use for verification.
      hash_digest: hex hash digest of the content to be uploaded.
      caller: auth.Identity of whoever makes the request.

    Returns:
      tuple (UploadSession object, signed upload session ID).
    """
    assert is_valid_hash_digest(hash_algo, hash_digest)
    # TODO(vadimsh): Check that number of pending uploads opened by |caller|
    # is low enough. To prevent malicious client from creating tons of uploads.
    # New unique ID (long int).
    upload_id = UploadSession.allocate_ids(size=1)[0]
    # Opening a GCS file and not closing it keeps upload session active.
    ts_sec = utils.datetime_to_timestamp(utils.utcnow()) / 1000000.
    temp_gs_location = self._temp_upload_session_gs_path(upload_id, ts_sec)
    temp_file = cloudstorage.open(
        filename=temp_gs_location,
        mode='w',
        retry_params=self._retry_params)
    # See cloudstorage/storage_api.py, StreamingBuffer for _path_with_token.
    # NOTE(review): this reaches into cloudstorage private attributes
    # (_api, _path_with_token) and may break across library upgrades.
    upload_url = '%s%s' % (temp_file._api.api_url, temp_file._path_with_token)
    # New session. The final location is precomputed; the object is moved
    # there only after verification succeeds.
    upload_session = UploadSession(
        id=upload_id,
        hash_algo=hash_algo,
        hash_digest=hash_digest,
        temp_gs_location=temp_gs_location,
        final_gs_location=self._verified_gs_path(hash_algo, hash_digest),
        upload_url=upload_url,
        status=UploadSession.STATUS_UPLOADING,
        created_by=caller)
    upload_session.put()
    # Generate signed ID. It will be usable only by |caller|.
    upload_session_id = UploadIdSignature.generate(
        message=[caller.to_bytes()],
        embedded={'id': '%s' % upload_session.key.id()})
    return upload_session, upload_session_id
def fetch_upload_session(self, upload_session_id, caller):
    """Returns an existing non-expired upload session given its signed ID.

    Args:
      upload_session_id: signed upload session ID, see create_upload_session.
      caller: auth.Identity of whoever makes the request.

    Returns:
      UploadSession object, or None if session is expired, missing or signature
      is not valid.
    """
    try:
        # The signature binds the session to |caller|; validation fails for
        # anyone else or once the token expires.
        payload = UploadIdSignature.validate(
            upload_session_id, [caller.to_bytes()])
        session_id = long(payload['id'])
    except (auth.InvalidTokenError, KeyError, ValueError):
        logging.error('Using invalid or expired upload_session_id')
        return None
    return UploadSession.get_by_id(session_id)
def maybe_finish_upload(self, upload_session):
    """Called whenever a client checks the status of the upload session.

    If the session is still UPLOADING, transactionally switches it to
    VERIFYING and enqueues the task-queue task that performs verification.

    Args:
      upload_session: UploadSession object.

    Returns:
      Updated UploadSession object.
    """
    # Fast check before starting the transaction.
    if upload_session.status != UploadSession.STATUS_UPLOADING:
        return upload_session
    # Move to VERIFYING state, adding the verification task.
    @ndb.transactional
    def run():
        # Re-read inside the transaction; the fast check above is racy.
        refreshed = upload_session.key.get()
        if refreshed.status != UploadSession.STATUS_UPLOADING:  # pragma: no cover
            return refreshed
        # Transactional enqueue: the task exists iff the state change commits.
        success = utils.enqueue_task(
            url='/internal/taskqueue/cas-verify/%d' % refreshed.key.id(),
            queue_name='cas-verify',
            transactional=True)
        if not success:  # pragma: no cover
            raise datastore_errors.TransactionFailedError()
        refreshed.status = UploadSession.STATUS_VERIFYING
        refreshed.put()
        return refreshed
    return run()
def verify_pending_upload(self, unsigned_upload_id):
"""Task queue task that checks the hash of a pending upload, finalizes it.
Args:
unsigned_upload_id: long int ID of upload session to check.
Returns:
TASK_RETRY if task should be retried, TASK_DONE if not.
"""
upload_session = UploadSession.get_by_id(unsigned_upload_id)
if upload_session is None:
logging.error('Verifying missing upload session:\n%d', unsigned_upload_id)
return TASK_DONE
if | |
add a repository's owner as a collaborator.")
collaborators = self.list_collaborators(repo)
if collaborator in (c['username'] for c in collaborators):
raise ValueError(
"{0} is already a collaborator of {1}.".format(
collaborator, repo))
db_privileges = [p.upper() for p in db_privileges]
file_privileges = [p.lower() for p in file_privileges]
invalid_db_privileges = set(db_privileges) - {
'SELECT', 'INSERT', 'UPDATE', 'DELETE',
'TRUNCATE', 'REFERENCES', 'TRIGGER'}
if len(invalid_db_privileges) > 0:
raise ValueError(
"Unsupported db privileges: \"{0}\"".format(
','.join(invalid_db_privileges)))
invalid_file_privileges = set(file_privileges) - {'read', 'write'}
if len(invalid_file_privileges) > 0:
raise ValueError(
"Unsupported file privileges: \"{0}\"".format(
','.join(invalid_file_privileges)))
try:
app = App.objects.get(app_id=collaborator)
collaborator_obj, _ = Collaborator.objects.get_or_create(
app=app, repo_name=repo, repo_base=self.repo_base)
except App.DoesNotExist:
user = User.objects.get(username=collaborator)
collaborator_obj, _ = Collaborator.objects.get_or_create(
user=user, repo_name=repo, repo_base=self.repo_base)
# convert privileges list to string and save the object
db_privilege_str = ', '.join(db_privileges).upper()
file_privilege_str = ', '.join(file_privileges).lower()
collaborator_obj.permission = db_privilege_str
collaborator_obj.file_permission = file_privilege_str
collaborator_obj.save()
return self.user_con.add_collaborator(
repo=repo,
collaborator=collaborator,
db_privileges=db_privileges
)
def delete_collaborator(self, repo, collaborator):
    """
    Removes a user's or app's privileges on a repo.

    Returns True on success.
    Raises LookupError when repo or collaborator does not exist.
    Raises User.DoesNotExist if collaborator owns repo.
    Raises PermissionDenied on insufficient permissions.
    """
    with _superuser_connection(self.repo_base) as conn:
        collaborators = conn.list_collaborators(repo=repo)
        collaborators = [c.get('username') for c in collaborators]
        # Current user must be the repo's owner or the collaborator to be
        # removed and must be an existing collaborator. If not the owner
        # and removing someone else, current user must have CREATE db
        # privileges.
        if (self.username not in [collaborator, self.repo_base] or
                self.username not in collaborators):
            # has_repo_db_privilege raises PermissionDenied on failure.
            DataHubManager.has_repo_db_privilege(
                self.username, self.repo_base, repo, 'CREATE')
        # The reason we're enforcing permission checks this way is to deal
        # with the edge case where a user removes himself as a collaborator
        # from another user's repo.
        if collaborator not in collaborators:
            raise LookupError('Failed to delete collaborator.'
                              ' %s is not a collaborator in the specified '
                              'repository.' % collaborator)
        # Drop the Django-side record first, then revoke db-level grants
        # through the superuser connection.
        collab = User.objects.get(username=collaborator)
        Collaborator.objects.get(
            user=collab, repo_name=repo, repo_base=self.repo_base).delete()
        result = conn.delete_collaborator(
            repo=repo, collaborator=collaborator)
        return result
def list_repo_files(self, repo):
    """
    Lists a repo's files.

    Returns an empty list on bad repo names.
    Raises PermissionDenied on insufficient privileges, even for bad repo
    names.
    """
    # check for permissions
    DataHubManager.has_repo_file_privilege(
        self.username, self.repo_base, repo, 'read')
    # make a directory for files, if it doesn't already exist
    repo_dir = DataHubManager.create_user_data_folder(self.repo_base, repo)
    # os.listdir already returns a list; the previous identity
    # comprehension ([f for f in os.listdir(...)]) copied it for no gain.
    return os.listdir(repo_dir)
def list_repo_cards(self, repo):
    """
    Lists a repo's cards.

    Returns an empty list on bad repo names.
    Raises PermissionDenied on insufficient privileges, even for bad repo
    names.
    """
    # Read access on the repo's files is required even to enumerate cards.
    DataHubManager.has_repo_file_privilege(
        self.username, self.repo_base, repo, 'read')
    matching = Card.objects.all().filter(
        repo_base=self.repo_base, repo_name=repo)
    return [card.card_name for card in matching]
def list_collaborators(self, repo):
    """
    returns a list of objects with keys 'username' and 'permissions'.

    'permissions' are tied to the database being queried, and left to the
    user to be interpreted. For postgres, see
    http://www.postgresql.org/docs/9.4/static/sql-grant.html

    An example response:
    # [{'username': 'foo_user', 'permissions': 'UC'},
       {'username': 'bar_user', 'permissions': 'U'}]

    Doesn't raise any exceptions, though it really should raise
    PermissionDenied if the current user isn't a collaborator.
    """
    # Database-level view of who can touch the repo.
    with _superuser_connection(self.repo_base) as conn:
        db_collabs = conn.list_collaborators(repo=repo)
    # Merge in the DataHub-side file permissions for the same users.
    names = [entry['username'] for entry in db_collabs]
    dh_collabs = Collaborator.objects.filter(user__username__in=names,
                                             repo_base=self.repo_base,
                                             repo_name=repo)
    for entry in db_collabs:
        entry['file_permissions'] = next(
            (dh.file_permission for dh in dh_collabs
             if dh.user.username == entry['username']),
            '')
    return db_collabs
def save_file(self, repo, data_file):
    """
    Saves a file to a repo.

    Raises PermissionDenied on insufficient privileges.
    """
    DataHubManager.has_repo_file_privilege(
        self.username, self.repo_base, repo, 'write')
    DataHubManager.create_user_data_folder(self.repo_base, repo)
    target_path = user_data_path(
        self.repo_base, repo, clean_file_name(data_file.name))
    # Stream the upload chunk by chunk so large files are not held in memory.
    with open(target_path, 'wb+') as destination:
        for chunk in data_file.chunks():
            destination.write(chunk)
def delete_file(self, repo, file_name):
    """
    Deletes a file from a repo.

    Raises PermissionDenied on insufficient privileges.
    """
    DataHubManager.has_repo_file_privilege(
        self.username, self.repo_base, repo, 'write')
    os.remove(user_data_path(self.repo_base, repo, file_name))
def get_file(self, repo, file_name):
    """
    Gets the contents of a file in a repo.

    Raises PermissionDenied on insufficient privileges.
    """
    DataHubManager.has_repo_file_privilege(
        self.username, self.repo_base, repo, 'read')
    file_path = user_data_path(self.repo_base, repo, file_name)
    # Use a context manager so the handle is always closed; the previous
    # version leaked the file object (and shadowed the `file` builtin).
    with open(file_path) as f:
        return f.read()
def export_table(self, repo, table, file_format='CSV',
                 delimiter=',', header=True):
    """
    Exports a table to a file in the same repo.

    Defaults to CSV format with header row.
    Raises LookupError on invalid repo or table.
    Raises ProgrammingError on invalid combinations of file_format,
    delimiter, and header.
    Raises PermissionDenied on insufficient privileges.
    """
    # Sanitize names before using them in paths/identifiers.
    repo = clean_str(repo, '')
    table = clean_str(table, '')
    # Exporting writes into the repo, so CREATE privilege is required.
    DataHubManager.has_repo_db_privilege(
        self.username, self.repo_base, repo, 'CREATE')
    # Ensure the destination folder hierarchy exists.
    DataHubManager.create_user_data_folder(self.repo_base, repo)
    destination = user_data_path(
        self.repo_base, repo, clean_file_name(table), file_format)
    qualified_name = '%s.%s' % (repo, table)
    self.user_con.export_table(
        table_name=qualified_name,
        file_path=destination,
        file_format=file_format,
        delimiter=delimiter,
        header=header)
def export_view(self, repo, view, file_format='CSV',
                delimiter=',', header=True):
    """
    Exports a view to a file in the same repo.

    Defaults to CSV format with header row.
    Raises LookupError on invalid repo or view.
    Raises ProgrammingError on invalid combinations of file_format,
    delimiter, and header.
    Raises PermissionDenied on insufficient privileges.
    """
    # Sanitize names before using them in paths/identifiers.
    repo = clean_str(repo, '')
    view = clean_str(view, '')
    # Exporting writes into the repo, so CREATE privilege is required.
    DataHubManager.has_repo_db_privilege(
        self.username, self.repo_base, repo, 'CREATE')
    # Ensure the destination folder hierarchy exists.
    DataHubManager.create_user_data_folder(self.repo_base, repo)
    destination = user_data_path(
        self.repo_base, repo, clean_file_name(view), file_format)
    qualified_name = '%s.%s' % (repo, view)
    self.user_con.export_view(
        view_name=qualified_name,
        file_path=destination,
        file_format=file_format,
        delimiter=delimiter,
        header=header)
def update_card(self, repo, card_name, new_query=None,
                new_name=None, public=None):
    """
    Updates a card's name, query, and/or public visibility.

    Returns the card on success.
    Raises ValueError if new_name is the empty string.
    Raises TypeError on invalid public parameter.
    Raises PermissionDenied on insufficient privileges or bad new_query.
    """
    DataHubManager.has_repo_file_privilege(
        self.username, self.repo_base, repo, 'write')
    card = Card.objects.get(
        repo_base=self.repo_base, repo_name=repo, card_name=card_name)
    # update the card
    if new_query is not None:
        # Queries for cards must work for the current user.
        try:
            self.execute_sql(new_query)
        except Exception:
            raise PermissionDenied(
                'Either missing required privileges or bad query')
        card.query = new_query
    if new_name is not None:
        if len(new_name) < 1:
            raise ValueError("new_name must be longer than zero "
                             "characters")
        card.card_name = new_name
    if public is not None:
        # isinstance instead of `type(public) is not bool`: idiomatic and
        # equivalent here since bool cannot be subclassed.
        if not isinstance(public, bool):
            raise TypeError("public must be of type bool")
        card.public = public
    card.save()
    return card
def get_card(self, repo, card_name):
    """
    Gets a card in a repo.

    Public cards are returned without a privilege check; private cards
    require 'read' file privilege on the repo.
    Raises PermissionDenied on insufficient privileges.
    """
    # This goes through manage.py because, it requires a check that the
    # user actually has repo access.
    card = Card.objects.get(
        repo_base=self.repo_base, repo_name=repo, card_name=card_name)
    if not card.public:
        DataHubManager.has_repo_file_privilege(
            self.username, self.repo_base, repo, 'read')
    # (fixed: the card was previously re-fetched here with an identical
    # query; the first lookup already holds the row)
    return card
def create_card(self, repo, card_name, query):
    """
    Creates a card in a repo from a given query.

    Returns the card on success.
    Raises IntegrityError if card with same name already exists.
    Raises PermissionDenied on insufficient privileges or bad query.
    """
    DataHubManager.has_repo_file_privilege(
        self.username, self.repo_base, repo, 'write')
    # A card may only be created if its query runs successfully as the
    # current database user.
    try:
        self.execute_sql(query)
    except Exception:
        raise PermissionDenied(
            'Either missing required privileges or bad query')
    card, _ = Card.objects.get_or_create(
        repo_base=self.repo_base, repo_name=repo,
        card_name=card_name, query=query)
    return card
def export_card(self, repo, card_name, file_format='CSV'):
    """
    Exports the results of a card to a new file in the repo.

    Any existing file with that name is overwritten.
    Raises PermissionDenied on insufficient privileges or bad query.
    """
    DataHubManager.has_repo_file_privilege(
        self.username, self.repo_base, repo, 'write')
    card = Card.objects.get(repo_base=self.repo_base,
                            repo_name=repo, card_name=card_name)
    # The card's query must run successfully as the current database user
    # before its results may be exported.
    try:
        self.execute_sql(card.query)
    except Exception:
        raise PermissionDenied(
            'Either missing required privileges or bad query')
    # create the user data folder if it doesn't already exist
    DataHubManager.create_user_data_folder(self.repo_base, repo)
    destination = user_data_path(
        self.repo_base, repo, clean_file_name(card_name), file_format)
    self.user_con.export_query(query=card.query,
                               file_path=destination,
                               file_format=file_format)
def | |
_eval_func(x: ConstantType, y: ConstantType) -> bool:
"""calculation function for 2 elements"""
return bool(x) & bool(y)
@staticmethod
def _simplify(children: list[Node], env: Optional[Environment] = None) -> list[Node]:
    """returns a simplified version of the tree

    Mutates ``children`` in place and applies at most one rewrite per
    call, returning as soon as a rule fires (presumably the caller
    re-runs simplification until a fixed point -- TODO confirm).
    """
    for i, child in enumerate(children):
        if isinstance(child, Constant):
            if child.evaluate():
                # A truthy constant is the AND identity: drop it.
                del children[i]
                return children if len(children) else [Boolean(True)]
            else:
                # A falsy constant short-circuits the whole conjunction.
                return [Boolean(False)]
        elif isinstance(child, And):
            # Flatten nested AND: And(a, And(b, c)) -> And(a, b, c).
            del children[i]
            return children + list(child.children)
        elif isinstance(child, Not):
            # x & ~x is always false.
            if child.child in children:
                return [Boolean(False)]
    return children
class Or(ArbitraryLogicalOperator):
    """logical OR operator node"""
    __slots__ = ()
    symbol = '|'
    wolfram_func = 'Or'

    @staticmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> bool:
        """calculation function for 2 elements"""
        return bool(x) | bool(y)

    @staticmethod
    def _simplify(children: list[Node], env: Optional[Environment] = None) -> list[Node]:
        """returns a simplified version of the tree

        Mutates ``children`` in place and applies at most one rewrite per
        call (presumably re-run by the caller until stable -- TODO confirm).
        """
        for i, child in enumerate(children):
            if isinstance(child, Constant):
                if not child.evaluate():
                    # A falsy constant is the OR identity: drop it.
                    del children[i]
                    return children if len(children) else [Boolean(False)]
                else:
                    # A truthy constant short-circuits the whole disjunction.
                    return [Boolean(True)]
            elif isinstance(child, Or):
                # Flatten nested OR: Or(a, Or(b, c)) -> Or(a, b, c).
                del children[i]
                return children + list(child.children)
            elif isinstance(child, Not):
                # x | ~x is always true.
                if child.child in children:
                    return [Boolean(True)]
        return children
class Xor(ArbitraryLogicalOperator):
    """logical XOR operator node"""
    __slots__ = ()
    symbol = '^'
    wolfram_func = 'Xor'

    @staticmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> bool:
        """calculation function for 2 elements"""
        return bool(x) ^ bool(y)

    @staticmethod
    def _simplify(children: list[Node], env: Optional[Environment] = None) -> list[Node]:
        """returns a simplified version of the tree

        Mutates ``children`` in place; handles at most one constant child
        per call.
        """
        if len(children) > 1:
            for i, child in enumerate(children):
                if isinstance(child, Constant):
                    if not child.evaluate():
                        # XOR with a falsy constant is the identity: drop it.
                        del children[i]
                        return children if len(children) else [Boolean(False)]
                    else:
                        # XOR with a truthy constant negates the rest:
                        # 1 ^ a ^ b == ~(a ^ b).
                        del children[i]
                        if len(children) > 1:
                            return [Not(Xor(*children)).simplify(env)]
                        elif len(children) == 1:
                            return [Not(children[0]).simplify(env)]
                        else:
                            return [Boolean(False)]
        return children
def Nand(*args: Node) -> Not:
    """logical NAND operator node: the negated conjunction of *args*"""
    conjunction = And(*args)
    return Not(conjunction)
def Nor(*args: Node) -> Not:
    """logical NOR operator node: the negated disjunction of *args*"""
    disjunction = Or(*args)
    return Not(disjunction)
def Xnor(*args: Node) -> Not:
    """logical XNOR operator node: the negated exclusive-or of *args*"""
    exclusive = Xor(*args)
    return Not(exclusive)
class ComparisonOperator(ArbitraryOperator, metaclass=ABCMeta):
    """Abstract base class for comparison operators

    Subclasses supply a pairwise ``_eval_func``; evaluation chains it over
    consecutive children (a < b < c style).
    """
    __slots__ = ()
    _parentheses_needed = '(ComparisonOperator, )'

    def evaluate(self, env: Optional[Environment] = None) -> bool:
        """Evaluates the expression tree using the values from env, returns bool"""
        try:
            consecutive = zip(self.children, self.children[1:])
            return all(self._eval_func(left.evaluate(env), right.evaluate(env))
                       for left, right in consecutive)
        except Exception as ex:
            raise EvaluationError from ex

    def derivative(self, variable: str) -> Node:
        """returns an expression tree representing the (partial) derivative to the passed variable of this tree"""
        # A comparison is piecewise constant, so its derivative is zero.
        return Integer(0)

    @staticmethod
    def _simplify(children: list[Node], env: Optional[Environment] = None) -> list[Node]:
        """returns a simplified version of the tree"""
        # No structural rewrites apply to comparisons.
        return children
class IsEqual(ComparisonOperator):
    """Equality operator node"""
    __slots__ = ()
    symbol = '=='
    wolfram_func = 'EqualTo'

    @staticmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> bool:
        """calculation function for 2 elements"""
        both_real = isinstance(x, (int, float)) and isinstance(y, (int, float))
        if both_real:
            # Exact match first; otherwise tolerate float rounding error.
            return x == y or isclose(x, y)
        return x == y
def NotEqual(*args: Node) -> Node:
    """Inequality operator node: the negation of chained equality"""
    equality = IsEqual(*args)
    return Not(equality)
class GreaterThan(ComparisonOperator):
    """Greater-than operator node"""
    __slots__ = ()
    symbol = '>'
    wolfram_func = 'Greater'

    @staticmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> bool:
        """calculation function for 2 elements"""
        # Demote complex values with a zero imaginary part to plain reals.
        if isinstance(x, complex) and x.imag == 0:
            x = x.real
        if isinstance(y, complex) and y.imag == 0:
            y = y.real
        # Ordering is undefined for genuinely complex operands.
        if isinstance(x, complex) or isinstance(y, complex):
            raise EvaluationError from TypeError('Comparison not defined in complex space')
        return x > y
class LessThan(ComparisonOperator):
    """Less-than operator node"""
    __slots__ = ()
    symbol = '<'
    wolfram_func = 'Less'

    @staticmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> bool:
        """calculation function for 2 elements"""
        # Demote complex values with a zero imaginary part to plain reals.
        if isinstance(x, complex) and x.imag == 0:
            x = x.real
        if isinstance(y, complex) and y.imag == 0:
            y = y.real
        # Ordering is undefined for genuinely complex operands.
        if isinstance(x, complex) or isinstance(y, complex):
            raise EvaluationError from TypeError('Comparison not defined in complex space')
        return x < y
class GreaterEqual(ComparisonOperator):
    """Greater-equal operator node"""
    __slots__ = ()
    symbol = '>='
    wolfram_func = 'GreaterEqual'

    @staticmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> bool:
        """calculation function for 2 elements"""
        # Demote complex values with a zero imaginary part to plain reals.
        if isinstance(x, complex) and x.imag == 0:
            x = x.real
        if isinstance(y, complex) and y.imag == 0:
            y = y.real
        # Ordering is undefined for genuinely complex operands.
        if isinstance(x, complex) or isinstance(y, complex):
            raise EvaluationError from TypeError('Comparison not defined in complex space')
        return x >= y
class LessEqual(ComparisonOperator):
    """Less-equal operator node"""
    __slots__ = ()
    symbol = '<='
    wolfram_func = 'LessEqual'

    @staticmethod
    def _eval_func(x: ConstantType, y: ConstantType) -> bool:
        """calculation function for 2 elements"""
        # (fixed: the docstring had been duplicated as a stray no-op
        # string statement)
        if isinstance(x, complex) and x.imag == 0:
            x = x.real
        if isinstance(y, complex) and y.imag == 0:
            y = y.real
        if not (isinstance(x, complex) or isinstance(y, complex)):
            return x <= y
        else:
            raise EvaluationError from TypeError('Comparison not defined in complex space')
class UnaryOperator(Node, metaclass=ABCMeta):
    """Abstract Base Class for single-input operator in expression tree"""
    __slots__ = 'child',
    symbol = ''
    wolfram_func = ''

    def __init__(self, child: Node) -> None:
        assert isinstance(child, Node)
        self.child = child
        super().__init__()

    def __repr__(self) -> str:
        return f'{type(self).__name__}({self.child!r})'

    def dependencies(self) -> set[str]:
        """returns set of all variables present in the tree"""
        return self.child.dependencies()

    def infix(self) -> str:
        """returns infix representation of the tree"""
        inner = self.child.infix()
        return f'{self.symbol}({inner})'

    def list_nodes(self) -> list[Node]:
        """returns a list of all nodes in the tree"""
        return [self, *self.child.list_nodes()]

    def mathml(self) -> str:
        """returns the MathML representation of the tree"""
        body = mathml_tag('i', self.symbol) \
               + mathml_tag('fenced', self.child.mathml())
        return mathml_tag('row', body)

    def simplify(self, env: Optional[Environment] = None) -> Node:
        """returns a simplified version of the tree"""
        # First try to fold the whole subtree into a constant.
        try:
            return Nodeify(self.evaluate(env)).simplify()
        except EvaluationError:
            pass
        # Otherwise simplify the operand and retry constant folding.
        rebuilt = self.__class__(self.child.simplify(env))
        try:
            return Nodeify(rebuilt.evaluate(env))
        except EvaluationError:
            pass
        return rebuilt

    def substitute(self, var: str, sub: Node) -> Node:
        """substitute a variable with an expression inside this tree, returns the resulting tree"""
        return self.__class__(self.child.substitute(var, sub))

    def wolfram(self) -> str:
        """return wolfram language representation of the tree"""
        return f'{self.wolfram_func}[{self.child.wolfram()}]'
class Sine(UnaryOperator):
    """Sine operator node in radians"""
    __slots__ = ()
    symbol = 'sin'
    wolfram_func = 'Sin'

    def derivative(self, variable: str) -> Node:
        """returns an expression tree representing the (partial) derivative to the passed variable of this tree"""
        # d/dv sin(u) = cos(u) * du/dv
        return Product(Cosine(self.child),
                       self.child.derivative(variable))

    def evaluate(self, env: Optional[Environment] = None) -> ConstantType:
        """Evaluates the expression tree using the values from env, returns int or float"""
        child_ans = self.child.evaluate(env)
        if isinstance(child_ans, complex):
            if child_ans.imag == 0:
                child_ans = child_ans.real
            else:
                raise EvaluationError from TypeError('mod of complex number')
        try:
            # Reduce modulo one full period to return exact values at the
            # quarter points. NOTE: this must be `% (2 * pi)`; the previous
            # `% 2 * pi` parsed as `(x % 2) * pi` and e.g. returned 0 for
            # sin(2).
            if (mod2pi := child_ans % (2 * pi)) == 0 or mod2pi == pi:
                return 0
            elif mod2pi == pi / 2:
                return 1
            elif mod2pi == pi + pi / 2:
                return -1
            else:
                return sin(child_ans)
        except Exception as ex:
            raise EvaluationError from ex
class Cosine(UnaryOperator):
    """Cosine operator node in radians"""
    __slots__ = ()
    symbol = 'cos'
    wolfram_func = 'Cos'

    def derivative(self, variable: str) -> Node:
        """returns an expression tree representing the (partial) derivative to the passed variable of this tree"""
        # d/dv cos(u) = -sin(u) * du/dv
        return Subtraction(Integer(0),
                           Product(Sine(self.child),
                                   self.child.derivative(variable)))

    def evaluate(self, env: Optional[Environment] = None) -> ConstantType:
        """Evaluates the expression tree using the values from env, returns int or float"""
        child_ans = self.child.evaluate(env)
        if isinstance(child_ans, complex):
            if child_ans.imag == 0:
                child_ans = child_ans.real
            else:
                raise EvaluationError from TypeError('mod of complex number')
        try:
            # Reduce modulo one full period to return exact values at the
            # quarter points. NOTE: this must be `% (2 * pi)`; the previous
            # `% 2 * pi` parsed as `(x % 2) * pi` and e.g. returned 1 for
            # cos(2).
            if (mod2pi := child_ans % (2 * pi)) == 0:
                return 1
            elif mod2pi == pi:
                return -1
            elif mod2pi == pi / 2 or mod2pi == pi + pi / 2:
                return 0
            else:
                return cos(child_ans)
        except Exception as ex:
            raise EvaluationError from ex
class Tangent(UnaryOperator):
    """Tangent operator node in radians"""
    __slots__ = ()
    symbol = 'tan'
    wolfram_func = 'Tan'

    def derivative(self, variable: str) -> Node:
        """returns an expression tree representing the (partial) derivative to the passed variable of this tree"""
        # d/dv tan(u) = u' / cos(u)**2
        denominator = Exponent(Cosine(self.child), Integer(2))
        return Division(self.child.derivative(variable), denominator)

    def evaluate(self, env: Optional[Environment] = None) -> ConstantType:
        """Evaluates the expression tree using the values from env, returns int or float"""
        value = self.child.evaluate(env)
        if isinstance(value, complex):
            if value.imag != 0:
                raise EvaluationError from TypeError('mod of complex number')
            value = value.real
        try:
            # Reduce modulo pi for exact zeros and to detect the poles.
            mod_pi = value % pi
            if mod_pi == 0:
                return 0
            if mod_pi == pi / 2:
                raise EvaluationError from ValueError('tan of k*pi+pi/2 is infinity')
            return tan(value)
        except Exception as ex:
            raise EvaluationError from ex
class ArcSine(UnaryOperator):
"""Arcsine operator node in radians"""
__slots__ = ()
symbol = 'asin'
wolfram_func = 'ArcSin'
def derivative(self, variable: str) -> Node:
"""returns an expression tree | |
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class Referral(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'created': 'datetime',
'campaign_id': 'int',
'advocate_profile_integration_id': 'str',
'friend_profile_integration_id': 'str',
'start_date': 'datetime',
'expiry_date': 'datetime',
'code': 'str',
'usage_counter': 'int',
'usage_limit': 'int'
}
attribute_map = {
'id': 'id',
'created': 'created',
'campaign_id': 'campaignId',
'advocate_profile_integration_id': 'advocateProfileIntegrationId',
'friend_profile_integration_id': 'friendProfileIntegrationId',
'start_date': 'startDate',
'expiry_date': 'expiryDate',
'code': 'code',
'usage_counter': 'usageCounter',
'usage_limit': 'usageLimit'
}
def __init__(self, id=None, created=None, campaign_id=None, advocate_profile_integration_id=None, friend_profile_integration_id=None, start_date=None, expiry_date=None, code=None, usage_counter=None, usage_limit=None, local_vars_configuration=None):  # noqa: E501
    """Referral - a model defined in OpenAPI"""  # noqa: E501
    if local_vars_configuration is None:
        local_vars_configuration = Configuration()
    self.local_vars_configuration = local_vars_configuration

    # Private backing fields; assigned through the property setters below
    # so client-side validation runs.
    self._id = None
    self._created = None
    self._campaign_id = None
    self._advocate_profile_integration_id = None
    self._friend_profile_integration_id = None
    self._start_date = None
    self._expiry_date = None
    self._code = None
    self._usage_counter = None
    self._usage_limit = None
    self.discriminator = None

    # Required fields: set unconditionally (setters raise when None and
    # client_side_validation is enabled).
    self.id = id
    self.created = created
    self.campaign_id = campaign_id
    self.advocate_profile_integration_id = advocate_profile_integration_id
    # Optional fields: only assigned when explicitly provided.
    if friend_profile_integration_id is not None:
        self.friend_profile_integration_id = friend_profile_integration_id
    if start_date is not None:
        self.start_date = start_date
    if expiry_date is not None:
        self.expiry_date = expiry_date
    self.code = code
    self.usage_counter = usage_counter
    self.usage_limit = usage_limit
@property
def id(self):
    """Gets the id of this Referral.  # noqa: E501

    Unique ID for this entity.  # noqa: E501

    :return: The id of this Referral.  # noqa: E501
    :rtype: int
    """
    return self._id

@id.setter
def id(self, id):
    """Sets the id of this Referral.

    Unique ID for this entity.  # noqa: E501

    :param id: The id of this Referral.  # noqa: E501
    :type: int
    """
    cfg = self.local_vars_configuration
    if cfg.client_side_validation and id is None:  # noqa: E501
        raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
    self._id = id
@property
def created(self):
    """Gets the created of this Referral.  # noqa: E501

    The exact moment this entity was created.  # noqa: E501

    :return: The created of this Referral.  # noqa: E501
    :rtype: datetime
    """
    return self._created

@created.setter
def created(self, created):
    """Sets the created of this Referral.

    The exact moment this entity was created.  # noqa: E501

    :param created: The created of this Referral.  # noqa: E501
    :type: datetime
    """
    cfg = self.local_vars_configuration
    if cfg.client_side_validation and created is None:  # noqa: E501
        raise ValueError("Invalid value for `created`, must not be `None`")  # noqa: E501
    self._created = created
@property
def campaign_id(self):
    """Gets the campaign_id of this Referral.  # noqa: E501

    ID of the campaign from which the referral received the referral code.  # noqa: E501

    :return: The campaign_id of this Referral.  # noqa: E501
    :rtype: int
    """
    return self._campaign_id

@campaign_id.setter
def campaign_id(self, campaign_id):
    """Sets the campaign_id of this Referral.

    ID of the campaign from which the referral received the referral code.  # noqa: E501

    :param campaign_id: The campaign_id of this Referral.  # noqa: E501
    :type: int
    """
    cfg = self.local_vars_configuration
    if cfg.client_side_validation and campaign_id is None:  # noqa: E501
        raise ValueError("Invalid value for `campaign_id`, must not be `None`")  # noqa: E501
    self._campaign_id = campaign_id
@property
def advocate_profile_integration_id(self):
    """Gets the advocate_profile_integration_id of this Referral.  # noqa: E501

    The Integration Id of the Advocate's Profile  # noqa: E501

    :return: The advocate_profile_integration_id of this Referral.  # noqa: E501
    :rtype: str
    """
    return self._advocate_profile_integration_id

@advocate_profile_integration_id.setter
def advocate_profile_integration_id(self, advocate_profile_integration_id):
    """Sets the advocate_profile_integration_id of this Referral.

    The Integration Id of the Advocate's Profile  # noqa: E501

    :param advocate_profile_integration_id: The advocate_profile_integration_id of this Referral.  # noqa: E501
    :type: str
    """
    cfg = self.local_vars_configuration
    if cfg.client_side_validation and advocate_profile_integration_id is None:  # noqa: E501
        raise ValueError("Invalid value for `advocate_profile_integration_id`, must not be `None`")  # noqa: E501
    self._advocate_profile_integration_id = advocate_profile_integration_id
@property
def friend_profile_integration_id(self):
    """Gets the friend_profile_integration_id of this Referral.  # noqa: E501

    An optional Integration ID of the Friend's Profile  # noqa: E501

    :return: The friend_profile_integration_id of this Referral.  # noqa: E501
    :rtype: str
    """
    return self._friend_profile_integration_id

@friend_profile_integration_id.setter
def friend_profile_integration_id(self, friend_profile_integration_id):
    """Sets the friend_profile_integration_id of this Referral.

    An optional Integration ID of the Friend's Profile  # noqa: E501

    :param friend_profile_integration_id: The friend_profile_integration_id of this Referral.  # noqa: E501
    :type: str
    """
    # Optional field: no client-side validation is performed.
    self._friend_profile_integration_id = friend_profile_integration_id
@property
def start_date(self):
    """Timestamp at which point the referral code becomes valid.

    :rtype: datetime
    """
    return self._start_date
@start_date.setter
def start_date(self, start_date):
    """Set the timestamp at which point the referral code becomes valid.

    :param start_date: datetime or `None`; this field is not validated
        client-side
    """
    self._start_date = start_date
@property
def expiry_date(self):
    """Expiry date of the referral code.

    The referral never expires if this is omitted, zero, or negative.

    :rtype: datetime
    """
    return self._expiry_date
@expiry_date.setter
def expiry_date(self, expiry_date):
    """Set the expiry date of the referral code.

    :param expiry_date: datetime or `None`; omitted/zero/negative means the
        referral never expires, and no client-side validation is applied
    """
    self._expiry_date = expiry_date
@property
def code(self):
    """The actual referral code.

    :rtype: str
    """
    return self._code
@code.setter
def code(self, code):
    """Set the actual referral code.

    :param code: str of at least 4 characters; must not be `None` when
        client-side validation is enabled
    :raises ValueError: if validation is on and the value is `None` or
        shorter than 4 characters
    """
    validate = self.local_vars_configuration.client_side_validation
    if validate and code is None:
        raise ValueError("Invalid value for `code`, must not be `None`")  # noqa: E501
    if validate and code is not None and len(code) < 4:
        raise ValueError("Invalid value for `code`, length must be greater than or equal to `4`")  # noqa: E501
    self._code = code
@property
def usage_counter(self):
    """The number of times this referral code has been successfully used.

    :rtype: int
    """
    return self._usage_counter
@usage_counter.setter
def usage_counter(self, usage_counter):
    """Set how many times this referral code has been successfully used.

    :param usage_counter: int; must not be `None` when client-side
        validation is enabled
    :raises ValueError: if validation is on and the value is `None`
    """
    validate = self.local_vars_configuration.client_side_validation
    if validate and usage_counter is None:
        raise ValueError("Invalid value for `usage_counter`, must not be `None`")  # noqa: E501
    self._usage_counter = usage_counter
@property
def usage_limit(self):
    """The number of times a referral code can be used.

    A value of 0 means no per-code limit, but campaign usage limits still
    apply.

    :rtype: int
    """
    return self._usage_limit
@usage_limit.setter
def usage_limit(self, usage_limit):
"""Sets the usage_limit of this Referral.
The number of times a referral code can be used. This can be set to 0 for no limit, but any campaign usage limits will still apply. # noqa: E501
:param usage_limit: The usage_limit of this Referral. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and usage_limit is None: # noqa: E501
raise ValueError("Invalid value for `usage_limit`, must not be `None`") # | |
Mass_tooth_stator : float, [kg]
Teeth and copper mass
Mass_yoke_rotor : float, [kg]
Rotor yoke mass
Mass_yoke_stator : float, [kg]
Stator yoke mass
rotor_mass : float, [kg]
Total rotor mass
stator_mass : float, [kg]
Total stator mass
"""
def initialize(self):
    """Declare component options by delegating to the parent class."""
    super(PMSG_Outer, self).initialize()
def setup(self):
    """Declare the structural, magnetic, and mass I/O of the outer-rotor PMSG.

    Delegates the shared generator I/O to the parent class, then adds the
    variables specific to this component. Entries without an explicit ``val``
    keep OpenMDAO's default initial value.
    """
    super(PMSG_Outer, self).setup()
    n_pc = self.options["n_pc"]

    # PMSG structural and electromagnetic inputs, declared data-driven as
    # (name, keyword-arguments) pairs.
    for in_name, in_kwargs in [
        ("P_mech", dict(units="W")),
        ("N_c", dict(val=0.0)),
        ("b", dict(val=0.0)),
        ("c", dict(val=0.0)),
        ("E_p", dict(val=0.0, units="V")),
        ("h_yr", dict(val=0.0, units="m")),
        ("h_ys", dict(val=0.0, units="m")),
        ("h_sr", dict(val=0.0, units="m")),
        ("h_ss", dict(val=0.0, units="m")),
        ("t_r", dict(val=0.0, units="m")),
        ("t_s", dict(val=0.0, units="m")),
        ("y_sh", dict(units="m")),
        ("theta_sh", dict(val=0.0, units="rad")),
        ("D_nose", dict(val=0.0, units="m")),
        ("y_bd", dict(units="m")),
        ("theta_bd", dict(val=0.0, units="rad")),
        ("u_allow_pcent", dict(val=0.0)),
        ("y_allow_pcent", dict(val=0.0)),
        ("z_allow_deg", dict(val=0.0, units="deg")),
        # Magnetic loading
        ("B_tmax", dict(val=0.0, units="T")),
    ]:
        self.add_input(in_name, **in_kwargs)

    for out_name, out_kwargs in [
        # Magnetic loading
        ("B_smax", dict(val=0.0, units="T")),
        ("B_symax", dict(val=0.0, units="T")),
        ("tau_p", dict(val=0.0, units="m")),
        ("q", dict(val=0.0, units="N/m**2")),
        ("len_ag", dict(val=0.0, units="m")),
        # Stator design
        ("h_t", dict(val=0.0, units="m")),
        ("tau_s", dict(val=0.0, units="m")),
        # Electrical performance (J_actual is evaluated per power-curve point)
        ("J_actual", dict(val=np.zeros(n_pc), units="A/m**2")),
        ("T_e", dict(val=0.0, units="N*m")),
        # Structural deflections
        ("twist_r", dict(val=0.0, units="deg")),
        ("twist_s", dict(val=0.0, units="deg")),
        # Mass outputs
        ("Structural_mass_rotor", dict(val=0.0, units="kg")),
        ("Structural_mass_stator", dict(val=0.0, units="kg")),
        ("Mass_tooth_stator", dict(val=0.0, units="kg")),
        ("Mass_yoke_rotor", dict(val=0.0, units="kg")),
        ("Mass_yoke_stator", dict(val=0.0, units="kg")),
        ("rotor_mass", dict(val=0.0, units="kg")),
        ("stator_mass", dict(val=0.0, units="kg")),
    ]:
        self.add_output(out_name, **out_kwargs)
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Unpack inputs
rad_ag = float(inputs["rad_ag"])
len_s = float(inputs["len_s"])
p = float(inputs["p"])
b = float(inputs["b"])
c = float(inputs["c"])
h_m = float(inputs["h_m"])
h_ys = float(inputs["h_ys"])
h_yr = float(inputs["h_yr"])
h_s = float(inputs["h_s"])
h_ss = float(inputs["h_ss"])
h_0 = float(inputs["h_0"])
B_tmax = float(inputs["B_tmax"])
E_p = float(inputs["E_p"])
P_mech = float(inputs["P_mech"])
P_av_v = float(inputs["machine_rating"])
h_sr = float(inputs["h_sr"])
t_r = float(inputs["t_r"])
t_s = float(inputs["t_s"])
R_sh = 0.5 * float(inputs["D_shaft"])
R_no = 0.5 * float(inputs["D_nose"])
y_sh = float(inputs["y_sh"])
y_bd = float(inputs["y_bd"])
rho_Fes = float(inputs["rho_Fes"])
rho_Fe = float(inputs["rho_Fe"])
sigma = float(inputs["sigma"])
shaft_rpm = inputs["shaft_rpm"]
# Grab constant values
B_r = float(inputs["B_r"])
E = float(inputs["E"])
G = float(inputs["G"])
P_Fe0e = float(inputs["P_Fe0e"])
P_Fe0h = float(inputs["P_Fe0h"])
cofi = float(inputs["cofi"])
h_w = float(inputs["h_w"])
k_fes = float(inputs["k_fes"])
k_fills = float(inputs["k_fills"])
m = int(discrete_inputs["m"])
mu_0 = float(inputs["mu_0"])
mu_r = float(inputs["mu_r"])
p = float(inputs["p"])
phi = float(inputs["phi"])
ratio_mw2pp = float(inputs["ratio_mw2pp"])
resist_Cu = float(inputs["resist_Cu"])
v = float(inputs["v"])
"""
#Assign values to universal constants
B_r = 1.279 # Tesla remnant flux density
E = 2e11 # N/m^2 young's modulus
ratio = 0.8 # ratio of magnet width to pole pitch(bm/self.tau_p)
mu_0 = np.pi*4e-7 # permeability of free space
mu_r = 1.06 # relative permeability
cofi = 0.85 # power factor
#Assign values to design constants
h_0 = 0.005 # Slot opening height
h_w = 0.004 # Slot wedge height
m = 3 # no of phases
#b_s_tau_s = 0.45 # slot width to slot pitch ratio
k_fills = 0.65 # Slot fill factor
P_Fe0h = 4 # specific hysteresis losses W/kg @ 1.5 T
P_Fe0e = 1 # specific hysteresis losses W/kg @ 1.5 T
k_fes = 0.8 # Iron fill factor
#Assign values to universal constants
phi = 90*2*np.pi/360 # tilt angle (rotor tilt -90 degrees during transportation)
v = 0.3 # Poisson's ratio
G = 79.3e9
"""
######################## Electromagnetic design ###################################
K_rad = len_s / (2 * rad_ag) # Aspect ratio
# Calculating air gap length
dia = 2 * rad_ag # air gap diameter
len_ag = 0.001 * dia # air gap length
r_s = rad_ag - len_ag # Stator outer radius
b_so = 2 * len_ag # Slot opening
tau_p = np.pi * dia / (2 * p) # pole pitch
# Calculating winding factor
Slot_pole = b / c
S = Slot_pole * 2 * p * m
testval = S / (m * np.gcd(int(S), int(p)))
if float(np.round(testval, 3)).is_integer():
k_w = winding_factor(int(S), b, c, int(p), m)
b_m = ratio_mw2pp * tau_p # magnet width
alpha_p = np.pi / 2 * ratio_mw2pp
tau_s = np.pi * (dia - 2 * len_ag) / S
# Calculating Carter factor for statorand effective air gap length
gamma = (
4
/ np.pi
* (
b_so / 2 / (len_ag + h_m / mu_r) * np.arctan(b_so / 2 / (len_ag + h_m / mu_r))
- np.log(np.sqrt(1 + (b_so / 2 / (len_ag + h_m / mu_r)) ** 2))
)
)
k_C = tau_s / (tau_s - gamma * (len_ag + h_m / mu_r)) # carter coefficient
g_eff = k_C * (len_ag + h_m / mu_r)
# angular frequency in radians
om_m = 2 * np.pi * shaft_rpm / 60
om_e = p * om_m
freq = om_e / 2 / np.pi # outout frequency
# Calculating magnetic loading
B_pm1 = B_r * h_m / mu_r / (g_eff)
B_g = B_r * h_m / (mu_r * g_eff) * (4 / np.pi) * np.sin(alpha_p)
B_symax = B_pm1 * b_m / (2 * h_ys) * k_fes
B_rymax = B_pm1 * b_m * k_fes / (2 * h_yr)
b_t = B_pm1 * tau_s / B_tmax
N_c = 2 # Number of turns per coil
q = (B_g) ** 2 / 2 / mu_0
# Stator winding length ,cross-section and resistance
l_Cus = 2 * (len_s + np.pi / 4 * (tau_s + b_t)) # length of a turn
# Calculating no-load voltage induced in the stator
N_s = np.rint(E_p / (np.sqrt(2) * len_s * r_s * k_w * om_m * B_g))
# Z = P_av_v / (m*E_p)
# Calculating leakage inductance in stator
V_1 = E_p / 1.1
I_n = P_av_v / 3 / cofi / V_1
J_s = 6.0
A_Cuscalc = I_n / J_s
A_slot = 2 * N_c * A_Cuscalc * (10 ** -6) / k_fills
tau_s_new = np.pi * (dia - 2 * len_ag - 2 * h_w - 2 * h_0) / S
b_s2 = tau_s_new - b_t # Slot top width
b_s1 = np.sqrt(b_s2 ** 2 - 4 * np.pi * A_slot / S)
b_s = (b_s1 + b_s2) * 0.5
N_coil = 2 * S
P_s = mu_0 * (h_s / 3 / b_s + h_w * 2 / (b_s2 + b_so) + h_0 / b_so) # Slot permeance function
L_ssigmas = S / 3 * 4 * N_c ** 2 * len_s * P_s # slot leakage inductance
L_ssigmaew = (
N_coil * N_c ** 2 * mu_0 * tau_s * np.log((0.25 * np.pi * tau_s ** 2) / (0.5 * h_s * b_s))
) # end winding leakage inductance
L_aa = 2 * np.pi / 3 * (N_c ** 2 * mu_0 * len_s * r_s / g_eff)
L_m = L_aa
L_ssigma = L_ssigmas + L_ssigmaew
L_s = L_m + L_ssigma
G_leak = np.abs((1.1 * E_p) ** 4 - (1 / 9) * (P_av_v * om_e * L_s) ** 2)
# Calculating stator current and electrical loading
I_s = np.sqrt(2 * (np.abs((E_p * 1.1) ** 2 - G_leak ** 0.5)) / (om_e * L_s) ** 2)
A_1 = 6 * I_s * N_s / np.pi / dia
J_actual = I_s / (A_Cuscalc * 2 ** 0.5)
L_Cus = N_s * l_Cus
R_s = inputs["resist_Cu"] * (N_s) * l_Cus / (A_Cuscalc * (10 ** -6))
B_smax = np.sqrt(2) * I_s * mu_0 / g_eff
# Calculating Electromagnetically active mass
wedge_area = (b_s * 0.5 - b_so * 0.5) * (2 * h_0 + h_w)
V_Cus = m * L_Cus * (A_Cuscalc * (10 ** -6)) # copper volume
h_t = h_s + h_w + h_0
V_Fest = len_s * S * (b_t * (h_s + h_w + h_0) + wedge_area) # volume of iron in stator tooth
V_Fesy = (
len_s
* np.pi
* ((rad_ag - len_ag - h_s - h_w - h_0) ** 2 - (rad_ag - len_ag - h_s - h_w - h_0 - h_ys) ** 2)
) # volume of iron in stator yoke
V_Fery = len_s * np.pi * ((rad_ag + h_m + h_yr) ** 2 - (rad_ag + h_m) ** 2)
Copper = V_Cus[-1] * inputs["rho_Copper"]
M_Fest = V_Fest * rho_Fe # Mass of stator tooth
M_Fesy = V_Fesy * rho_Fe # Mass of stator yoke
M_Fery = V_Fery * rho_Fe # | |
* chunksize, (i + 1) * chunksize),),
columns,
categories,
lock,
)
for i in range(0, int(ceil(len(x) / chunksize)))
}
meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)
result = DataFrame(dsk, new_name, meta, divisions)
if index:
assert index in x.names
a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
q = np.linspace(0, 100, len(x) // chunksize + 2)
divisions = tuple(da.percentile(a, q).compute())
return set_partition(result, index, divisions, **kwargs)
else:
return result
def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
    """Get DataFrame from bcolz.ctable

    Parameters
    ----------
    x: bcolz.ctable
    slc: slice
    columns: list of column names or None

    >>> import bcolz
    >>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
    >>> dataframe_from_ctable(x, slice(1, 3))
       a   b
    1  2  20
    2  3  30

    >>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
        b
    1  20
    2  30

    >>> dataframe_from_ctable(x, slice(1, 3), columns='b')
    1    20
    2    30
    Name: b, dtype: int...
    """
    import bcolz

    # Default to every column; a tuple of names is normalized to a list so
    # bcolz column selection below behaves consistently.
    if columns is None:
        columns = x.dtype.names
    if isinstance(columns, tuple):
        columns = list(columns)
    # Selecting by a list yields a ctable; selecting by a single string
    # yields a carray — the two cases are dispatched separately below.
    x = x[columns]
    # `slc` may be a plain slice or a 1-tuple containing one; clamp the stop
    # to the table length so out-of-range requests return what exists.
    # NOTE(review): a slice with stop=None would fail the `<` comparison here
    # — callers appear to always pass explicit bounds; confirm upstream.
    if type(slc) is slice:
        start = slc.start
        stop = slc.stop if slc.stop < len(x) else len(x)
    else:
        start = slc[0].start
        stop = slc[0].stop if slc[0].stop < len(x) else len(x)
    # Build the positional index for the resulting pandas object.
    idx = pd.Index(range(start, stop))

    # Serialize reads when a lock is provided (bcolz access is not assumed
    # thread-safe); release happens in `finally` even if a read raises.
    if lock:
        lock.acquire()
    try:
        if isinstance(x, bcolz.ctable):
            # One in-memory chunk per column, then optionally re-encode the
            # categorical columns from their category codes.
            chunks = [x[name][slc] for name in columns]
            if categories is not None:
                chunks = [
                    pd.Categorical.from_codes(
                        np.searchsorted(categories[name], chunk), categories[name], True
                    )
                    if name in categories
                    else chunk
                    for name, chunk in zip(columns, chunks)
                ]
            result = pd.DataFrame(
                dict(zip(columns, chunks)), columns=columns, index=idx
            )

        elif isinstance(x, bcolz.carray):
            # Single-column case: `columns` is the column's name (a string).
            chunk = x[slc]
            if categories is not None and columns and columns in categories:
                chunk = pd.Categorical.from_codes(
                    np.searchsorted(categories[columns], chunk),
                    categories[columns],
                    True,
                )
            result = pd.Series(chunk, name=columns, index=idx)
    finally:
        if lock:
            lock.release()
    return result
def _partition_from_array(data, index=None, initializer=None, **kwargs):
"""Create a Dask partition for either a DataFrame or Series.
Designed to be used with :func:`dask.blockwise.blockwise`. ``data`` is the array
from which the partition will be created. ``index`` can be:
1. ``None``, in which case each partition has an independent RangeIndex
2. a `tuple` with two elements, the start and stop values for a RangeIndex for
this partition, which gives a continuously varying RangeIndex over the
whole Dask DataFrame
3. an instance of a ``pandas.Index`` or a subclass thereof
The ``kwargs`` _must_ contain an ``initializer`` key which is set by calling
``type(meta)``.
"""
if isinstance(index, tuple):
index = pd.RangeIndex(*index)
return initializer(data, index=index, **kwargs)
def from_dask_array(x, columns=None, index=None, meta=None):
    """Create a Dask DataFrame from a Dask Array.

    Converts a 2d array into a DataFrame and a 1d array into a Series.

    Parameters
    ----------
    x : da.Array
    columns : list or string
        list of column names if DataFrame, single string if Series
    index : dask.dataframe.Index, optional
        An optional *dask* Index to use for the output Series or DataFrame.

        The default output index depends on whether `x` has any unknown
        chunks. If there are any unknown chunks, the output has ``None``
        for all the divisions (one per chunk). If all the chunks are known,
        a default index with known divisions is created.

        Specifying `index` can be useful if you're conforming a Dask Array
        to an existing dask Series or DataFrame, and you would like the
        indices to match.
    meta : object, optional
        An optional `meta` parameter can be passed for dask
        to specify the concrete dataframe type to be returned.
        By default, pandas DataFrame is used.

    Examples
    --------
    >>> import dask.array as da
    >>> import dask.dataframe as dd
    >>> x = da.ones((4, 2), chunks=(2, 2))
    >>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
    >>> df.compute()
         a    b
    0  1.0  1.0
    1  1.0  1.0
    2  1.0  1.0
    3  1.0  1.0

    See Also
    --------
    dask.bag.to_dataframe: from dask.bag
    dask.dataframe._Frame.values: Reverse conversion
    dask.dataframe._Frame.to_records: Reverse conversion
    """
    # Infer the empty pandas prototype (DataFrame vs Series, dtypes, columns).
    meta = _meta_from_array(x, columns, index, meta=meta)

    name = "from-dask-array-" + tokenize(x, columns)
    # Inputs for the blockwise call below: the array itself plus, depending on
    # the branch taken, either an Index collection or per-chunk index bounds.
    graph_dependencies = [x]
    arrays_and_indices = [x.name, "ij" if x.ndim == 2 else "i"]
    numblocks = {x.name: x.numblocks}

    if index is not None:
        # An index is explicitly given by the caller, so we can pass it through to the
        # initializer after a few checks.
        if index.npartitions != x.numblocks[0]:
            msg = (
                "The index and array have different numbers of blocks. "
                "({} != {})".format(index.npartitions, x.numblocks[0])
            )
            raise ValueError(msg)
        divisions = index.divisions
        graph_dependencies.append(index)
        # The index is aligned blockwise with the array's first dimension.
        arrays_and_indices.extend([index._name, "i"])
        numblocks[index._name] = (index.npartitions,)

    elif np.isnan(sum(x.shape)):
        # The shape of the incoming array is not known in at least one dimension. As
        # such, we can't create an index for the entire output DataFrame and we set
        # the divisions to None to represent that.
        divisions = [None] * (len(x.chunks[0]) + 1)

    else:
        # The shape of the incoming array is known and we don't have an explicit index.
        # Create a mapping of chunk number in the incoming array to
        # (start row, stop row) tuples. These tuples will be used to create a sequential
        # RangeIndex later on that is continuous over the whole DataFrame.
        divisions = [0]
        stop = 0
        index_mapping = {}
        for i, increment in enumerate(x.chunks[0]):
            stop += increment
            index_mapping[(i,)] = (divisions[i], stop)
            divisions.append(stop)

        # Divisions are index *labels*, so the final division must be the last
        # row's label rather than the total row count.
        divisions[-1] -= 1

        arrays_and_indices.extend([BlockwiseDepDict(mapping=index_mapping), "i"])

    if is_series_like(meta):
        kwargs = {"dtype": x.dtype, "name": meta.name, "initializer": type(meta)}
    else:
        kwargs = {"columns": meta.columns, "initializer": type(meta)}

    # One _partition_from_array call per chunk row, concatenating 2d chunks
    # along axis 1 so each output partition sees full rows.
    blk = blockwise(
        _partition_from_array,
        name,
        "i",
        *arrays_and_indices,
        numblocks=numblocks,
        concatenate=True,
        # kwargs passed through to the DataFrame/Series initializer
        **kwargs,
    )

    graph = HighLevelGraph.from_collections(name, blk, dependencies=graph_dependencies)
    return new_dd_object(graph, name, meta, divisions)
def _link(token, result):
"""A dummy function to link results together in a graph
We use this to enforce an artificial sequential ordering on tasks that
don't explicitly pass around a shared resource
"""
return None
def _df_to_bag(df, index=False, format="tuple"):
if isinstance(df, pd.DataFrame):
if format == "tuple":
return list(map(tuple, df.itertuples(index)))
elif format == "dict":
if index:
return [
{**{"index": idx}, **values}
for values, idx in zip(df.to_dict("records"), df.index)
]
else:
return df.to_dict(orient="records")
elif isinstance(df, pd.Series):
if format == "tuple":
return list(df.items()) if index else list(df)
elif format == "dict":
return df.to_frame().to_dict(orient="records")
def to_bag(df, index=False, format="tuple"):
    """Create Dask Bag from a Dask DataFrame

    Parameters
    ----------
    index : bool, optional
        If True, the elements are tuples of ``(index, value)``, otherwise
        they're just the ``value``. Default is False.
    format : {"tuple", "dict", "frame"}, optional
        Whether to return a bag of tuples, dictionaries, or
        dataframe-like objects. Default is "tuple". If "frame",
        the original partitions of ``df`` will not be transformed
        in any way.

    Examples
    --------
    >>> bag = df.to_bag()  # doctest: +SKIP
    """
    from dask.bag.core import Bag

    if not isinstance(df, (DataFrame, Series)):
        raise TypeError("df must be either DataFrame or Series")

    if format == "frame":
        # Pass the partitions through untouched: reuse the existing graph and
        # collection name, only dropping the meta to get a Bag.
        graph = df.dask
        name = df._name
    else:
        name = "to_bag-" + tokenize(df, index, format)
        keys = df.__dask_keys__()
        # One conversion task per partition, layered on top of the optimized
        # upstream graph.
        graph = {
            (name, i): (_df_to_bag, block, index, format)
            for i, block in enumerate(keys)
        }
        graph.update(df.__dask_optimize__(df.__dask_graph__(), keys))
    return Bag(graph, name, df.npartitions)
def to_records(df):
    """Create Dask Array from a Dask Dataframe

    Warning: This creates a dask.array without precise shape information.
    Operations that depend on shape information, like slicing or reshaping,
    will not work.

    Examples
    --------
    >>> df.to_records()  # doctest: +SKIP

    See Also
    --------
    dask.dataframe._Frame.values
    dask.dataframe.from_dask_array
    """
    # Apply pandas' to_records independently to every partition.
    return df.map_partitions(M.to_records)
# TODO: type this -- causes lots of papercuts
@insert_meta_param_description
def from_delayed(
dfs,
meta=None,
divisions=None,
prefix="from-delayed",
verify_meta=True,
):
"""Create Dask DataFrame from many Dask Delayed objects
Parameters
----------
dfs : list of Delayed or Future
An iterable of ``dask.delayed.Delayed`` objects, such as come from
``dask.delayed`` or an iterable of ``distributed.Future`` objects,
such as come from ``client.submit`` interface. These comprise the individual
partitions of the resulting dataframe.
$META
divisions : tuple, str, optional
Partition boundaries along the index.
For tuple, see https://docs.dask.org/en/latest/dataframe-design.html#partitions
For string 'sorted' will compute the delayed values to find index
values. Assumes that the indexes are mutually sorted.
If None, then won't use index information
prefix : str, optional
Prefix to prepend to the keys.
verify_meta : bool, optional
If True check that the partitions have consistent metadata, defaults to True.
"""
from dask.delayed import Delayed
if isinstance(dfs, | |
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import itertools
import os
import os.path as osp
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import Callable, Iterable, List, Optional, Sequence, Union
import numpy as np
import torch as to
import torch.nn as nn
import yaml
import pyrado
from pyrado.logger import set_log_prefix_dir
from pyrado.utils import get_class_name
from pyrado.utils.data_types import dict_path_access, update_matching_keys_recursively
from pyrado.utils.input_output import print_cbt, select_query
from pyrado.utils.ordering import natural_sort
class Experiment:
    """
    Class for defining experiments

    This is a path-like object, and as such it can be used everywhere a normal path would be used.

    Experiment folder path:
        <base_dir>/<env_name>/<algo_name>/<timestamp>--<extra_info>
    """

    def __init__(
        self,
        env_name: str,
        algo_name: str,
        extra_info: str = None,
        exp_id: str = None,
        timestamp: datetime = None,
        base_dir: str = pyrado.TEMP_DIR,
        include_slurm_id: bool = True,
    ):
        """
        Constructor

        :param env_name: environment trained on
        :param algo_name: algorithm trained with, usually also includes the policy type, e.g. 'a2c_fnn'
        :param extra_info: additional information on the experiment (free form)
        :param exp_id: combined timestamp and extra_info, usually the final folder name
        :param timestamp: experiment creation timestamp
        :param base_dir: base storage directory
        :param include_slurm_id: if a SLURM ID is present in the environment variables, include it in the experiment ID
        """
        # Collect the SLURM job id (and task id for array jobs) so parallel
        # cluster runs end up in distinct experiment folders.
        slurm_id = None
        if include_slurm_id and "SLURM_ARRAY_JOB_ID" in os.environ:
            slurm_id = str(os.environ["SLURM_ARRAY_JOB_ID"])
            if "SLURM_ARRAY_TASK_ID" in os.environ:
                slurm_id += "-" + str(os.environ["SLURM_ARRAY_TASK_ID"])

        if exp_id is None:
            # Create exp id from timestamp and info
            if timestamp is None:
                timestamp = datetime.now()
            exp_id = timestamp.strftime(pyrado.timestamp_format)
            if extra_info is not None:
                exp_id = exp_id + "--" + extra_info
            if slurm_id is not None:
                exp_id += "_slurm-" + slurm_id
        else:
            # Try to parse extra_info from exp id; the id has the form
            # <time_str> or <time_str>--<extra_info>
            sd = exp_id.split("--", 1)
            if len(sd) == 1:
                time_str = sd[0]
            else:
                time_str, extra_info = sd

            # Parse time string; an underscore marks the full timestamp
            # format, otherwise it is a date-only id
            if "_" in time_str:
                timestamp = datetime.strptime(time_str, pyrado.timestamp_format)
            else:
                timestamp = datetime.strptime(time_str, pyrado.timestamp_date_format)

        # Store values
        self.env_name = env_name
        self.algo_name = algo_name
        self.extra_info = extra_info
        self.exp_id = exp_id
        self.timestamp = timestamp
        self.base_dir = base_dir

    def __fspath__(self):
        """Allows to use the experiment object where the experiment path is needed."""
        return osp.join(self.base_dir, self.env_name, self.algo_name, self.exp_id)

    def __str__(self):
        """Get an information string."""
        return f"{self.env_name}/{self.algo_name}/{self.exp_id}"

    @property
    def prefix(self):
        """Combination of experiment and algorithm"""
        return osp.join(self.env_name, self.algo_name)

    def matches(self, hint: str) -> bool:
        """Check if this experiment matches the given hint."""
        # Split hint into <env>/<algo>/<id>
        parts = Path(hint).parts
        if len(parts) == 1:
            # Filter by env name only
            (env_name,) = parts
            return self.env_name == env_name
        elif len(parts) == 2:
            # Filter by env and algorithm name
            env_name, algo_name = parts
            return self.env_name == env_name and self.algo_name == algo_name
        elif len(parts) == 3:
            # Filter by env name, algorithm name, and experiment id
            env_name, algo_name, eid = parts
            return self.env_name == env_name and self.algo_name == algo_name and self.exp_id == eid
        else:
            # Fix: the original message was garbled ("fThe hint int contains ...")
            raise pyrado.ValueErr(msg=f"The hint contains {len(parts)} parts, but should be <= 3!")
def setup_experiment(
    env_name: str,
    algo_name: str,
    extra_info: str = None,
    base_dir: str = pyrado.TEMP_DIR,
    include_slurm_id: bool = True,
):
    """
    Set up a new experiment for recording.

    :param env_name: environment trained on
    :param algo_name: algorithm trained with, usually also includes the policy type, e.g. 'a2c_fnn'
    :param extra_info: additional information on the experiment (free form)
    :param base_dir: base storage directory
    :param include_slurm_id: if a SLURM ID is present in the environment variables, include them in the experiment ID
    :return: the freshly created `Experiment`
    """
    exp = Experiment(env_name, algo_name, extra_info, base_dir=base_dir, include_slurm_id=include_slurm_id)

    # Materialize the experiment folder (Experiment is path-like) and point
    # the global logger at it.
    os.makedirs(exp, exist_ok=True)
    set_log_prefix_dir(exp)

    return exp
def _childdirs(parent: str):
"""Yield only direct child directories."""
for cn in os.listdir(parent):
cp = osp.join(parent, cn)
if osp.isdir(cp):
yield cn
def _le_env_algo(env_name: str, algo_name: str, base_dir: str):
    """Yield every experiment stored under ``<base_dir>/<env_name>/<algo_name>``."""
    parent = osp.join(base_dir, env_name, algo_name)
    for exp_id in _childdirs(parent):
        yield Experiment(env_name, algo_name, exp_id=exp_id, base_dir=base_dir)
def _le_env(env_name: str, base_dir: str):
    """Yield every experiment of one environment, across all of its algorithms."""
    for algo in _childdirs(osp.join(base_dir, env_name)):
        yield from _le_env_algo(env_name, algo, base_dir)
def _le_base(base_dir: str):
    """Yield every experiment below ``base_dir``, across all environments."""
    for env in _childdirs(base_dir):
        yield from _le_env(env, base_dir)
def _le_select_filter(env_name: str, algo_name: str, base_dir: str):
    """Dispatch to the listing generator matching the given optional filters."""
    if env_name is not None and algo_name is not None:
        return _le_env_algo(env_name, algo_name, base_dir)
    if env_name is not None:
        return _le_env(env_name, base_dir)
    # No filters at all: walk the whole base directory
    return _le_base(base_dir)
def list_experiments(
    env_name: str = None, algo_name: str = None, base_dir: str = None, *, temp: bool = True, perma: bool = True
):
    """
    List all stored experiments.

    :param env_name: filter by env name
    :param algo_name: filter by algorithm name. Requires env_name to be used too
    :param base_dir: explicit base dir if desired. May also be a list of bases.
        Uses `pyrado.TEMP_DIR` and `pyrado.EXP_DIR` if not specified.
    :param temp: set to `False` to not look in the `pyrado.TEMP` directory
    :param perma: set to `False` to not look in the `pyrado.PERMA` directory
    """
    # Normalize the requested base directories into a list, then scan each one.
    if base_dir is None:
        bases = []
        if temp:
            bases.append(pyrado.TEMP_DIR)
        if perma:
            bases.append(pyrado.EXP_DIR)
    elif isinstance(base_dir, (str, bytes, os.PathLike)):
        bases = [base_dir]
    else:
        bases = list(base_dir)

    for bd in bases:
        yield from _le_select_filter(env_name, algo_name, bd)
def _select_latest(exps: Iterable) -> Union[Experiment, None]:
    """
    Select the most recent experiment from an iterable of experiments. Return `None` if there are no experiments.

    :param exps: iterable of experiments
    :return: latest experiment or `None`
    """
    # max with default=None handles the empty iterable; ties keep the first
    # occurrence, matching a stable reverse sort.
    return max(exps, key=lambda exp: exp.timestamp, default=None)
def _select_all(exps: Iterable) -> Union[List[Experiment], None]:
    """
    Select all experiments from an iterable of experiments and sort them from latest to oldest.
    Return `None` if there are no experiments.

    :param exps: iterable of experiments
    :return: temporally sorted experiments or `None`
    """
    ordered = sorted(exps, key=lambda exp: exp.timestamp, reverse=True)
    return ordered or None  # an empty result maps to None
def select_by_hint(exps: Sequence[Experiment], hint: str):
    """Resolve a hint to the newest matching experiment (or pass through an absolute path)."""
    if osp.isabs(hint):
        # An absolute path already names the experiment folder directly
        return hint

    # Keep only the experiments matching the hint and take the most recent one
    matching = (exp for exp in exps if exp.matches(hint))
    latest = _select_latest(matching)
    if latest is None:
        print_cbt(f"No experiment matching hint {hint}", "r")
    return latest
def create_experiment_formatter(
show_hparams: Optional[List[str]] = None, show_extra_info: bool = True
) -> Callable[[Experiment], str]:
"""
Returns an experiment formatter (i.e. a function that takes an experiment and produces a string) to be used in the
ask-for-experiments dialog. It produces useful information like the timestamp based on the experiments' data.
:param show_hparams: list of "paths" to hyper-parameters that to be shown in the selection dialog; sub-dicts can be
references with a dot, e.g. `env.dt`
:param show_extra_info: whether to show the information stored in the `extra_info` field of the experiment
:return: a function that serves as the formatter
"""
def formatter(exp: | |
# Repository: z3z1ma/dbt-osmosis
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import (Any, Dict, Iterable, Iterator, List, Mapping,
MutableMapping, Optional, Set, Tuple, Union)
import agate
import dbt.config.runtime as dbt_config
import dbt.parser.manifest as dbt_parser
from dbt.adapters.factory import (Adapter, get_adapter, register_adapter,
reset_adapters)
from dbt.contracts.connection import AdapterResponse
from dbt.contracts.graph.manifest import ManifestNode, NodeType
from dbt.contracts.graph.parsed import ColumnInfo, ParsedModelNode
from dbt.exceptions import CompilationException, RuntimeException
from dbt.flags import DEFAULT_PROFILES_DIR, set_from_args
from dbt.task.deps import DepsTask
from dbt.tracking import disable_tracking
from pydantic import BaseModel
from rich.progress import track
from ruamel.yaml import YAML
from dbt_osmosis.core.exceptions import (InvalidOsmosisConfig,
MissingOsmosisConfig,
SanitizationRequired)
from dbt_osmosis.core.logging import logger
# Opt out of dbt's anonymous usage tracking for every dbt-osmosis run.
disable_tracking()
# Rich-markup template printed after an audit run, summarising documentation
# coverage and the actions taken for a single table.
AUDIT_REPORT = """
:white_check_mark: [bold]Audit Report[/bold]
-------------------------------
Database: [bold green]{database}[/bold green]
Schema: [bold green]{schema}[/bold green]
Table: [bold green]{table}[/bold green]
Total Columns in Database: {total_columns}
Total Documentation Coverage: {coverage}%
Action Log:
Columns Added to dbt: {n_cols_added}
Column Knowledge Inherited: {n_cols_doc_inherited}
Extra Columns Removed: {n_cols_removed}
"""
# TODO: Let user supply a custom config file / csv of strings which we consider "not-documented placeholders", these are just my own
# Column descriptions equal to any of these strings are treated as "not
# really documented" (note the empty string is included).
PLACEHOLDERS = [
    "Pending further documentation",
    "Pending further documentation.",
    "No description for this column",
    "No description for this column.",
    "Not documented",
    "Not documented.",
    "Undefined",
    "Undefined.",
    "",
]
# dbt patch_path values look like "<prefix>://<relative path>"; splitting on
# this marker yields the file path (see DbtOsmosis.get_patch_path).
FILE_ADAPTER_POSTFIX = "://"
class PseudoArgs:
    """Minimal stand-in for dbt's parsed CLI namespace.

    dbt's RuntimeConfig/flags machinery only reads attributes off the args
    object, so a plain attribute bag is sufficient.
    """

    def __init__(
        self,
        threads: Optional[int] = 1,
        target: Optional[str] = None,
        profiles_dir: Optional[str] = None,
        project_dir: Optional[str] = None,
        vars: Optional[str] = "{}",
    ):
        self.threads = threads
        if target:
            self.target = target  # We don't want target in args context if it is None
        self.profiles_dir = profiles_dir or DEFAULT_PROFILES_DIR
        self.project_dir = project_dir
        self.vars = vars  # json.dumps str
        # NOTE(review): presumably required for DepsTask compatibility
        # (cf. the track_package_install monkey patch) -- confirm.
        self.dependencies = []
        self.single_threaded = threads == 1
class OsmosisConfig(str, Enum):
    """Accepted values of a model's `dbt-osmosis` config: the naming scheme
    for where its schema yml should live (see get_target_schema_path)."""
    SchemaYaml = "schema.yml"  # one shared schema.yml per directory
    FolderYaml = "folder.yml"  # yml named after the parent folder (fqn[-2])
    ModelYaml = "model.yml"  # one yml per model, named after the model
    SchemaModelYaml = "schema/model.yml"  # per-model yml inside a schema/ dir
class SchemaFile(BaseModel):
    """Pairs a node's declared (target) schema yml path with the path the
    documentation currently lives at, if any."""

    target: Path
    current: Optional[Path] = None

    @property
    def is_valid(self) -> bool:
        """True when the schema file already lives at its declared target."""
        return self.target == self.current
class RestructureQuantum(BaseModel):
    """One unit of a project restructure plan."""
    # YAML document (as a dict) to be written at the plan's target path.
    output: Dict[str, Any] = {}
    # Existing schema files -> model names inside them that this output
    # supersedes.  NOTE(review): semantics inferred from the name -- confirm
    # against the code that executes the plan.
    supersede: Dict[Path, List[str]] = {}
class DbtOsmosis:
    def __init__(
        self,
        fqn: Optional[str] = None,
        target: Optional[str] = None,
        profiles_dir: Optional[str] = None,
        project_dir: Optional[str] = None,
        threads: Optional[int] = 1,
        dry_run: bool = False,
    ):
        """Bootstrap a dbt runtime, verify warehouse connectivity, and parse
        the project manifest.

        :param fqn: optional dot-separated selector limiting which nodes
            dbt-osmosis operates on (see `_filter_model`)
        :param target: dbt profile target; omitted from args when falsy
        :param profiles_dir: dbt profiles dir (defaults to dbt's default)
        :param project_dir: dbt project directory
        :param threads: dbt thread count (1 implies single-threaded)
        :param dry_run: when True, consumers should avoid writing files
        """
        # Build pseudo args
        args = PseudoArgs(
            threads=threads,
            target=target,
            profiles_dir=profiles_dir,
            project_dir=project_dir,
        )
        self.args = args
        # Load dbt + verify connection to data warehhouse
        set_from_args(args, args)
        self.project, self.profile = dbt_config.RuntimeConfig.collect_parts(args)
        self.config = dbt_config.RuntimeConfig.from_parts(self.project, self.profile, args)
        # Adapter registration is process-global in dbt: reset first so a
        # repeated instantiation does not reuse a stale adapter.
        reset_adapters()
        register_adapter(self.config)
        self.adapter = self._verify_connection(get_adapter(self.config))
        # Parse project
        self.dbt = dbt_parser.ManifestLoader.get_full_manifest(self.config)
        # Selector Passed in From CLI
        self.fqn = fqn
        # Utilities
        self.yaml = self._build_yaml_parser()
        self.dry_run = dry_run
        self.track_package_install = (
            lambda *args, **kwargs: None
        ) # Monkey patching to make self compatible with DepsTask
@staticmethod
def _verify_connection(adapter: Adapter) -> Adapter:
try:
with adapter.connection_named("debug"):
adapter.debug_query()
except Exception as exc:
raise Exception("Could not connect to Database") from exc
else:
return adapter
@staticmethod
def _build_yaml_parser() -> YAML:
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.width = 800
yaml.preserve_quotes = True
yaml.default_flow_style = False
return yaml
    @property
    def project_name(self) -> str:
        """Name of the dbt project under management."""
        return self.project.project_name
    @property
    def project_root(self) -> str:
        """Absolute root directory of the dbt project."""
        return self.project.project_root
    def rebuild_dbt_manifest(self, reset: bool = False) -> None:
        """Re-parse the project, replacing `self.dbt` with a fresh manifest."""
        self.dbt = dbt_parser.ManifestLoader.get_full_manifest(self.config, reset=reset)
    @property
    def manifest(self) -> Dict[str, Any]:
        """The manifest as a plain dict (dbt's `flat_graph` representation)."""
        return self.dbt.flat_graph
@staticmethod
def get_patch_path(node: ManifestNode) -> Path:
return Path(node.patch_path.split(FILE_ADAPTER_POSTFIX)[-1])
    def execute_macro(
        self,
        macro: str,
        kwargs: Optional[Dict[str, Any]] = None,
        run_compiled_sql: bool = False,
        fetch: bool = False,
    ) -> Tuple[
        str, Optional[AdapterResponse], Optional[agate.Table]
    ]:  # returns Macro `return` value from Jinja be it string, SQL, or dict
        """Wraps adapter execute_macro

        :param macro: name of the macro to render
        :param kwargs: keyword arguments forwarded to the macro
        :param run_compiled_sql: also execute the rendered SQL when True
        :param fetch: fetch the executed SQL's result table when True
        :return: (rendered macro output, adapter response, result table);
            the last two are None unless `run_compiled_sql` is True
        """
        # Both rendering and execution happen on one named connection so the
        # optional execute reuses the macro's connection context.
        with self.adapter.connection_named("dbt-osmosis"):
            compiled_macro = self.adapter.execute_macro(
                macro_name=macro, manifest=self.dbt, kwargs=kwargs
            )
            if run_compiled_sql:
                resp, table = self.adapter.execute(compiled_macro, fetch=fetch)
                return compiled_macro, resp, table
        return compiled_macro, None, None
def _filter_model(self, node: ManifestNode) -> bool:
"""Validates a node as being a targetable model. Validates both models and sources."""
fqn = self.fqn or ".".join(node.fqn[1:])
fqn_parts = fqn.split(".")
logger().debug("%s: %s -> %s", node.resource_type, fqn, node.fqn[1:])
return (
# Verify Resource Type
node.resource_type in (NodeType.Model, NodeType.Source)
# Verify Package == Current Project
and node.package_name == self.project_name
# Verify Materialized is Not Ephemeral if NodeType is Model [via short-circuit]
and (node.resource_type != NodeType.Model or node.config.materialized != "ephemeral")
# Verify FQN Length [Always true if no fqn was supplied]
and len(node.fqn[1:]) >= len(fqn_parts)
# Verify FQN Matches Parts [Always true if no fqn was supplied]
and all(left == right for left, right in zip(fqn_parts, node.fqn[1:]))
)
def filtered_models(
self, subset: Optional[MutableMapping[str, ManifestNode]] = None
) -> Iterator[Tuple[str, ManifestNode]]:
"""Generates an iterator of valid models"""
for unique_id, dbt_node in (
subset.items() if subset else chain(self.dbt.nodes.items(), self.dbt.sources.items())
):
if self._filter_model(dbt_node):
yield unique_id, dbt_node
def get_osmosis_config(self, node: ManifestNode) -> Optional[OsmosisConfig]:
"""Validates a config string. If input is a source, we return the resource type str instead"""
if node.resource_type == NodeType.Source:
return None
osmosis_config = node.config.get("dbt-osmosis")
if not osmosis_config:
raise MissingOsmosisConfig(
f"Config not set for model {node.name}, we recommend setting the config at a directory level through the `dbt_project.yml`"
)
try:
return OsmosisConfig(osmosis_config)
except ValueError as exc:
raise InvalidOsmosisConfig(
f"Invalid config for model {node.name}: {osmosis_config}"
) from exc
def get_schema_path(self, node: ManifestNode) -> Optional[Path]:
"""Resolve absolute schema file path for a manifest node"""
schema_path = None
if node.resource_type == NodeType.Model and node.patch_path:
schema_path: str = node.patch_path.partition(FILE_ADAPTER_POSTFIX)[-1]
elif node.resource_type == NodeType.Source:
if hasattr(node, "source_name"):
schema_path: str = node.path
if schema_path:
return Path(self.project_root).joinpath(schema_path)
def get_target_schema_path(self, node: ManifestNode) -> Path:
"""Resolve the correct schema yml target based on the dbt-osmosis config for the model / directory"""
osmosis_config = self.get_osmosis_config(node)
if not osmosis_config:
return Path(node.root_path, node.original_file_path)
# Here we resolve file migration targets based on the config
if osmosis_config == OsmosisConfig.SchemaYaml:
schema = "schema"
elif osmosis_config == OsmosisConfig.FolderYaml:
schema = node.fqn[-2]
elif osmosis_config == OsmosisConfig.ModelYaml:
schema = node.name
elif osmosis_config == OsmosisConfig.SchemaModelYaml:
schema = "schema/" + node.name
else:
raise InvalidOsmosisConfig(f"Invalid dbt-osmosis config for model: {node.fqn}")
return Path(node.root_path, node.original_file_path).parent / Path(f"{schema}.yml")
    @staticmethod
    def get_database_parts(node: ManifestNode) -> Tuple[str, str, str]:
        """Return (database, schema, identifier); the identifier falls back to
        the node name when the node has no `alias` attribute."""
        return node.database, node.schema, getattr(node, "alias", node.name)
def get_base_model(self, node: ManifestNode) -> Dict[str, Any]:
"""Construct a base model object with model name, column names populated from database"""
columns = self.get_columns(node)
return {
"name": node.alias or node.name,
"columns": [{"name": column_name} for column_name in columns],
}
def bootstrap_existing_model(
self, model_documentation: Dict[str, Any], node: ManifestNode
) -> Dict[str, Any]:
"""Injects columns from database into existing model if not found"""
model_columns: List[str] = [
c["name"].lower() for c in model_documentation.get("columns", [])
]
database_columns = self.get_columns(node)
for column in database_columns:
if column.lower() not in model_columns:
logger().info(":syringe: Injecting column %s into dbt schema", column)
model_documentation.setdefault("columns", []).append({"name": column})
return model_documentation
    def get_columns(self, node: ManifestNode) -> List[str]:
        """Get all columns in a list for a model

        Returns [] when the relation does not exist in the target database
        or introspection fails; both cases are logged rather than raised.
        """
        parts = self.get_database_parts(node)
        table = self.adapter.get_relation(*parts)
        columns = []
        if not table:
            logger().info(
                ":cross_mark: Relation %s.%s.%s does not exist in target database, cannot resolve columns",
                *parts,
            )
            return columns
        try:
            columns = [c.name for c in self.adapter.get_columns_in_relation(table)]
        except CompilationException as error:
            # Adapter raises CompilationException for unresolvable relations;
            # treat it the same as a missing table.
            logger().info(
                ":cross_mark: Could not resolve relation %s.%s.%s against database active tables during introspective query: %s",
                *parts,
                str(error),
            )
        return columns
@staticmethod
def assert_schema_has_no_sources(schema: Mapping) -> Mapping:
"""Inline assertion ensuring that a schema does not have a source key"""
if schema.get("sources"):
raise SanitizationRequired(
"Found `sources:` block in a models schema file. We require you separate sources in order to organize your project."
)
return schema
def build_schema_folder_mapping(
self,
target_node_type: Optional[Union[NodeType.Model, NodeType.Source]] = None,
) -> Dict[str, SchemaFile]:
"""Builds a mapping of models or sources to their existing and target schema file paths"""
if target_node_type == NodeType.Source:
# Source folder mapping is reserved for source importing
target_nodes = self.dbt.sources
elif target_node_type == NodeType.Model:
target_nodes = self.dbt.nodes
else:
target_nodes = {**self.dbt.nodes, **self.dbt.sources}
# Container for output
schema_map = {}
logger().info("...building project structure mapping in memory")
# Iterate over models and resolve current path vs declarative target path
for unique_id, dbt_node in self.filtered_models(target_nodes):
schema_path = self.get_schema_path(dbt_node)
osmosis_schema_path = self.get_target_schema_path(dbt_node)
schema_map[unique_id] = SchemaFile(target=osmosis_schema_path, current=schema_path)
return schema_map
def draft_project_structure_update_plan(self) -> Dict[Path, RestructureQuantum]:
"""Build project structure update plan based on `dbt-osmosis:` configs set across dbt_project.yml and model files.
The update plan includes injection of undocumented models. Unless this plan is constructed and executed by the `commit_project_restructure` function,
dbt-osmosis will only operate on models it is aware of through the existing documentation.
Returns:
MutableMapping: Update plan where dict keys consist of targets and contents consist of outputs which match the contents of the `models` to be | |
import argparse
import logging
import os
import platform
import re
import threading
from enum import Enum
from os import environ
from subprocess import CalledProcessError
from typing import Iterable, Optional
from packaging import version
from packaging.version import InvalidVersion
from . import CONFIG_VERSION_PATH, Configuration, git, Repo, StateError, SUBMODULE_PATH
from .commands import capture_command, CommandError
from .github import GithubFacade
from .version import (
add_dev_to_version,
format_version_debian,
format_version_pep440,
get_explicit_version,
get_version,
)
logger = logging.getLogger(__name__)
class BranchKind(Enum):
    """Branch categories recognised by the version tooling.  Values are
    regex patterns tested with `re.match` (anchored at the start only --
    see get_branch_kind)."""
    MASTER = "master"
    RELEASE = "release/v.*"
    VARIANT = "variant/.*"
def check_remotes(config: Configuration):
    """Fail fast unless both repositories have origin and upstream remotes."""
    def require_remote(repo, remote, remote_name):
        if not remote:
            raise StateError(f"{repo}: Repository must have an {remote_name} remote. Your work flow is not supported.")
    # Check in a fixed order so the first missing remote is reported.
    for repo in (config.open, config.enterprise):
        require_remote(repo, repo.origin_remote, "origin")
        require_remote(repo, repo.upstream_remote, "upstream")
def show_subcommand(args):
    """Implementation of `show`: compute and print the requested version part.

    NOTE(review): reads args.clean, presumably added by one of the
    setup_global_* helpers -- confirm.
    """
    ver = get_version(
        args.configuration,
        commit=args.commit,
        variant=args.variant,
        pretend_master=args.master,
        pretend_clean=args.clean,
    )
    if args.component != "full":
        # A single component (major/minor/micro/local) was requested.
        print(getattr(ver, args.component))
    else:
        print(args.format(ver))
def setup_show_subcommand(subparsers):
    """Register the `show` subcommand: component/format selectors plus
    pretend/override flags and an optional commit positional."""
    parser = subparsers.add_parser(
        "show", help="Computes and prints the version (or components of it) in the current tree."
    )
    # Exactly one version component may be requested; default is "full".
    group_component = parser.add_mutually_exclusive_group()
    group_component.add_argument(
        "--major", help="Output the major version", dest="component", action="store_const", const="major"
    )
    group_component.add_argument(
        "--minor", help="Output the minor version", dest="component", action="store_const", const="minor"
    )
    group_component.add_argument(
        "--patch",
        "--micro",
        help="Output the patch (a.k.a. micro) version",
        dest="component",
        action="store_const",
        const="micro",
    )
    group_component.add_argument(
        "--local",
        help="Output the local version (variant and development tag)",
        dest="component",
        action="store_const",
        const="local",
    )
    # Intentionally disabled component selectors (kept for reference):
    # group_component.add_argument(
    #     "--dev-tag",
    #     help="Output the development tag (the commit counts and hashes)",
    #     dest="component",
    #     action="store_const",
    #     const="dev_tag",
    # )
    # group_component.add_argument(
    #     "--variant", help="Output the variant", dest="component", action="store_const", const="variant"
    # )
    group_component.add_argument(
        "--full",
        help="Output the full version including all components [default]",
        dest="component",
        action="store_const",
        const="full",
    )
    parser.set_defaults(component="full")
    # Output format: Debian (~ separators) or PEP 440/Conda (default).
    group_format = parser.add_mutually_exclusive_group()
    group_format.add_argument(
        "--debian",
        help="Output a Debian-compatible version number (with ~)",
        dest="format",
        action="store_const",
        const=format_version_debian,
    )
    group_format.add_argument(
        "--conda",
        "--pep440",
        help="Output a Conda-compatible version number (with .) [default]",
        dest="format",
        action="store_const",
        const=format_version_pep440,
    )
    parser.set_defaults(format=format_version_pep440)
    parser.add_argument(
        "--pretend-master",
        help="Pretend that the working tree or commit is committed to master and clean.",
        action="store_true",
        dest="master",
    )
    parser.add_argument(
        "--override-variant",
        help="Specify the variant tag. (environment variable: BUILD_VARIANT)",
        type=str,
        dest="variant",
        default=environ.get("BUILD_VARIANT"),
    )
    parser.add_argument(
        "commit",
        type=str,
        nargs="?",
        help="Get the version for a clean checkout of the given commit in the repo of the current directory. (Note: "
        "Works best from the enterprise repo or in open only mode.)",
        default=None,
    )
    setup_global_log_arguments(parser)
    setup_global_repo_arguments(parser)
    parser.set_defaults(subcommand_impl=show_subcommand)
def provenance_subcommand(args):
    """Print build-provenance key/value pairs using the --format template.

    Collects best-effort host facts (hostname, user, platform, lsb_release,
    selected environment variables) plus branch/remote/hash details of the
    configured repositories.  Output order follows insertion order below.
    """
    values = dict()
    # Host facts are optional; include only what the platform reports.
    if platform.node():
        values.update(hostname=platform.node())
    if hasattr(os, "getlogin"):
        values.update(user=os.getlogin())
    if platform.platform():
        values.update(platform=platform.platform())
    try:
        values.update(lsb_release=capture_command("lsb_release", "-ds"))
    except CalledProcessError:
        # lsb_release is not installed everywhere; skip silently.
        pass
    environment_to_capture = {"CONDA_PREFIX"}
    for var in environment_to_capture:
        if var in environ:
            values[var.lower()] = environ[var]
    config: Configuration = args.configuration
    katana_repo_root = config.open.dir
    values.update(katana_repo_root=katana_repo_root.absolute())
    values.update(katana_branch=git.get_branch_checked_out(katana_repo_root))
    values.update(katana_upstream=config.open.upstream_url)
    values.update(katana_origin=config.open.origin_url)
    values.update(katana_hash=git.get_hash(git.HEAD, katana_repo_root))
    if config.has_enterprise:
        katana_enterprise_repo_path = config.enterprise.dir
        values.update(katana_enterprise_repo_path=katana_enterprise_repo_path.absolute())
        values.update(katana_enterprise_branch=git.get_branch_checked_out(katana_enterprise_repo_path))
        values.update(katana_enterprise_upstream=config.enterprise.upstream_url)
        values.update(katana_enterprise_origin=config.enterprise.origin_url)
        values.update(
            # The submodule is expected to be dirty during version work;
            # exclude it from the dirtiness calculation.
            katana_enterprise_hash=git.get_hash(git.HEAD, katana_enterprise_repo_path, exclude_dirty=(SUBMODULE_PATH,))
        )
    # Allow literal \n and \t escapes in the user-supplied format string.
    format_str = args.format
    format_str = format_str.replace("\\n", "\n").replace("\\t", "\t")
    print("".join(format_str.format(k, v, k=k, K=k.upper(), v=v) for k, v in values.items()), end="")
def setup_provenance_subcommand(subparsers):
    """Register the `provenance` subcommand: mutually exclusive output
    formats with YAML as the default (see set_defaults below)."""
    parser = subparsers.add_parser(
        "provenance", help="Prints a provenance description for inclusion in artifacts. This is not a version.",
    )
    group_format = parser.add_mutually_exclusive_group()
    group_format.add_argument(
        "--define", help="Format as #defines.", dest="format", action="store_const", const='#define {K} "{v}" \n'
    )
    group_format.add_argument(
        "--yaml", help="Format as YAML.", dest="format", action="store_const", const='{k}: "{v}"\n'
    )
    group_format.add_argument(
        "--python", help="Format as Python.", dest="format", action="store_const", const='{k} = "{v}"\n'
    )
    group_format.add_argument(
        "--format", "-f", help="Provide a format string for each value. Use the source luck.", dest="format", type=str
    )
    setup_global_log_arguments(parser)
    setup_global_repo_arguments(parser)
    parser.set_defaults(subcommand_impl=provenance_subcommand, format='{k}: "{v}"\n')
def bump_checks(args):
    """Validate preconditions for `bump` and sync both repos to the branch.

    Side effects: switches both working trees to the current branch via
    `git.switch`.  Raises on dirty trees, unsupported branch kinds, a
    HEAD matching upstream, or a non-increasing next version.
    """
    config: Configuration = args.configuration
    check_clean(args, config)
    current_branch = get_current_branch_from_either_repository(config)
    kind = get_branch_kind(current_branch, (BranchKind.MASTER, BranchKind.RELEASE, BranchKind.VARIANT))
    check_at_branch(current_branch, config)
    # Put both working trees on the branch being bumped.
    git.switch(current_branch, config.enterprise, config.dry_run)
    git.switch(current_branch, config.open, config.dry_run)
    prev_version, variant = get_explicit_version(git.HEAD, True, config.open, no_dev=False)
    next_version = version.Version(args.next_version)
    check_branch_version(current_branch, kind, next_version, prev_version)
def get_current_branch_from_either_repository(config: Configuration):
    """Return the checked-out branch name, preferring the open repo.

    Falls back to the enterprise repo when the open repo's HEAD is
    detached; raises StateError when neither repo has a branch.
    """
    branch = git.get_branch_checked_out(config.open, ref_only=True)
    if not branch and config.has_enterprise:
        branch = git.get_branch_checked_out(config.enterprise, ref_only=True)
    if not branch:
        raise StateError("Operation is not supported without a branch checked out (currently HEAD is detached).")
    return branch
def check_branch_version(
    current_branch: str, kind: BranchKind, next_version: version.Version, prev_version: Optional[version.Version]
):
    """Validate `next_version` against the branch kind it is being set on.

    Raises ValueError for user-correctable mistakes and StateError for
    inconsistent repository state.  Checks run in order, so the first
    violated rule determines the error reported.
    """
    # Release branches pin their semantic (release) version.
    if prev_version and kind == BranchKind.RELEASE and prev_version.release != next_version.release:
        raise ValueError("The semantic version cannot be changed on a release branch.")
    expected_release_branch_name = "release/v" + ".".join(str(i) for i in next_version.release)
    if kind == BranchKind.RELEASE and current_branch != expected_release_branch_name:
        raise StateError(
            f"The semantic version does not match the release branch name: {expected_release_branch_name} != {current_branch}"
        )
    # Only variant branches may carry a PEP 440 local segment (variant tag).
    if prev_version and kind != BranchKind.VARIANT and prev_version.local:
        raise StateError(
            f"The non-variant branch {current_branch} has a variant. This should not happen. Somebody broke the rules."
        )
    if kind != BranchKind.VARIANT and next_version.local:
        raise ValueError(f"The variant cannot be set on the non-variant branch {current_branch}.")
    # On variant/<tag> branches the local segment must equal <tag>.
    if kind == BranchKind.VARIANT and next_version.local != current_branch.split("/", maxsplit=1)[1]:
        branch_variant = current_branch.split("/", maxsplit=1)[1]
        raise StateError(
            f"The variant in the version and the variant in the branch name must be the same: {next_version.local} != {branch_variant}"
        )
    if prev_version and next_version <= prev_version:
        raise ValueError(f"The next version ({next_version}) must be greater than the current one ({prev_version})")
    if prev_version and kind == BranchKind.VARIANT and next_version.release != prev_version.release:
        raise ValueError(
            "To change the version of a variant branch, merge master into the variant branch. Bumping the version directly on the variant branch is not allowed."
        )
def get_branch_kind(current_branch, kinds: Iterable[BranchKind]):
    """Return the first kind whose pattern matches `current_branch`
    (anchored match), raising StateError when none do."""
    matched = next((kind for kind in kinds if re.match(kind.value, current_branch)), None)
    if matched is not None:
        return matched
    kinds_str = ", ".join(k.value for k in kinds)
    raise StateError(f"The current branch ({current_branch}) should be one of: {kinds_str}")
def check_at_branch(branch, config):
    """Verify that HEAD in each configured repo matches the upstream tip of
    `branch`, raising StateError when it does not.

    NOTE(review): other code reads config.open.dir; confirm Configuration
    actually exposes katana_repo_path / katana_enterprise_repo_path.
    """
    check_remotes(config)
    if git.get_hash(f"{config.open.upstream_remote}/{branch}", config.open) != git.get_hash(git.HEAD, config.open):
        # BUG FIX: the message previously claimed HEAD *was* up to date even
        # though this branch only runs when the hashes differ.
        raise StateError(f"{config.katana_repo_path} HEAD is not up to date with {branch}")
    if config.has_enterprise and git.get_hash(
        f"{config.enterprise.upstream_remote}/{branch}", config.enterprise
    ) != git.get_hash(git.HEAD, config.enterprise):
        raise StateError(f"{config.katana_enterprise_repo_path} HEAD is not up to date with {branch}")
def bump_subcommand(args):
    """Implementation of `bump`: validate state, then open version-bump PRs
    in the open repo and (when configured) the enterprise repo."""
    bump_checks(args)
    config: Configuration = args.configuration
    facade = GithubFacade(config)
    # no_dev=True: compare against the last explicit (tagged) version.
    prev_version, variant = get_explicit_version(git.HEAD, True, config.open, no_dev=True)
    next_version = version.Version(args.next_version)
    base_branch = git.get_branch_checked_out(config.open)
    return bump_both_repos(config, facade, prev_version, next_version, base_branch)
def check_branch_not_exist(config: Configuration, branch_name):
    """Raise StateError if `branch_name` already exists in either repo.

    :param config: repository configuration (enterprise repo optional)
    :param branch_name: candidate branch name to probe with git.ref_exists
    """
    check_remotes(config)
    if git.ref_exists(branch_name, config.open):
        raise StateError(f"Branch {branch_name} already exists in {config.open.dir.name}")
    # BUG FIX: the enterprise repo was previously probed unconditionally,
    # which breaks open-only setups; every other enterprise access in this
    # module is guarded by config.has_enterprise (cf. bump_both_repos).
    if config.has_enterprise and git.ref_exists(branch_name, config.enterprise):
        raise StateError(f"Branch {branch_name} already exists in {config.enterprise.dir.name}")
def bump_both_repos(config: Configuration, g: GithubFacade, prev_version, next_version, base):
    """Open version-bump PRs against `base` in the open repo and, when
    configured, the enterprise repo.

    Side effects: writes the version file (skipped on dry-run), creates and
    pushes a bump branch in each repo, opens PRs, then restores the branch
    that was originally checked out.  Returns a list of TODO strings for
    the operator.
    """
    check_remotes(config)
    next_version_str = format_version_pep440(next_version)
    current_branch = git.get_branch_checked_out(config.open)
    if config.dry_run:
        print(next_version_str)
    else:
        # Record the new version in the open repo's version file.
        with open(config.open.dir / CONFIG_VERSION_PATH, "wt", encoding="utf-8") as fi:
            fi.write(next_version_str)
            fi.write("\n")
    title = f"Bump version to {next_version} on {base}"
    main_body = f"Previous version {prev_version}.\n(Automatically generated with `scripts/version`)"
    branch_name = f"bump/v{next_version_str}"
    check_branch_not_exist(config, branch_name)
    def bump_create_branch_and_pr(repo: Repo, files, pr_body) -> "PullRequest":
        # Branch, commit the given files, push to origin, and open a PR
        # against `base` on upstream.
        git.create_branch(
            branch_name, dir=repo, dry_run=config.dry_run,
        )
        git.switch(
            branch_name, dir=repo, dry_run=config.dry_run,
        )
        git.commit(
            msg=f"{title}\n\n{main_body}", files=files, dir=repo, dry_run=config.dry_run,
        )
        git.push(repo.origin_remote, branch_name, dir=repo, dry_run=config.dry_run)
        return g.create_pr(repo.upstream_url, repo.origin_url, branch_name, base, title, pr_body)
    open_pr = bump_create_branch_and_pr(config.open, files=[config.open.dir / CONFIG_VERSION_PATH], pr_body=main_body,)
    enterprise_pr = None
    if config.has_enterprise:
        # The enterprise PR bumps the submodule pointer and references the
        # open PR it depends on ("After: ..." is parsed by PR_AFTER_RE).
        enterprise_pr = bump_create_branch_and_pr(
            config.enterprise,
            files=[config.enterprise.dir / SUBMODULE_PATH],
            pr_body=f"After: {open_pr.base.repo.full_name}#{open_pr.number}\n\n{main_body}",
        )
    # Leave both working trees on the branch the user started from.
    git.switch(current_branch, config.enterprise, dry_run=config.dry_run)
    git.switch(current_branch, config.open, dry_run=config.dry_run)
    todos = [f"TODO: Review and merge {open_pr.html_url} as soon as possible."]
    if enterprise_pr:
        todos.append(
            f"""
            TODO: Review {enterprise_pr.html_url} as soon as possible. Once the above PR is
            merged run 'scripts/version update_dependent_pr {enterprise_pr.number}' to update
            the PR based on the open commit hash and then merge it.""".strip()
        )
    return todos
def setup_bump_subcommand(subparsers):
    """Register the `bump` subcommand and its arguments."""
    parser = subparsers.add_parser("bump", help="Bump the version.",)
    parser.add_argument("next_version", type=str)
    for add_global_arguments in (
        setup_global_log_arguments,
        setup_global_repo_arguments,
        setup_global_action_arguments,
    ):
        add_global_arguments(parser)
    parser.set_defaults(subcommand_impl=bump_subcommand)
PR_AFTER_RE = re.compile(
r"""
After:\s*(
(?P<username>[\w.-]+)/(?P<repository>[\w.-]+)\#(?P<external_number>[0-9]+)|
\#(?P<internal_number>[0-9]+)
)
""",
re.IGNORECASE | re.VERBOSE,
)
def update_dependent_pr_subcommand(args):
    """Amend an enterprise PR's submodule pointer to its merged open PR.

    Finds the open-repo PR referenced by the enterprise PR's 'After:'
    annotation, verifies it is merged, then amends the enterprise PR's
    single commit to point the submodule at the merge commit and
    force-pushes.  Returns a list of TODO strings for the operator.
    """
    config: Configuration = args.configuration
    check_remotes(config)
    check_clean(args, config)
    g = GithubFacade(config)
    enterprise_pr = g.get_pr(config.enterprise.upstream_url, number=args.number)
    if enterprise_pr.commits > 1:
        # Amending is only well-defined when the PR is a single commit.
        raise NotImplementedError(
            "update_dependent_pr only supports single commit PRs. (It could be implemented if needed.)"
        )
    after_match = PR_AFTER_RE.search(enterprise_pr.body)
    if not after_match:
        raise ValueError(
            f"PR {enterprise_pr.base.repo.full_name}#{enterprise_pr.number} does not have an 'After:' annotation."
        )
    if after_match.group("external_number"):
        repo_full_name = "{username}/{repository}".format(**after_match.groupdict())
        open_repo = g.github.get_repo(repo_full_name)
        open_pr = open_repo.get_pull(int(after_match.group("external_number")))
        if not open_pr.merged:
            raise StateError(f"The dependency {open_repo.full_name}#{open_pr.number} is not merged.")
        # Remember where each tree was so it can be restored afterwards.
        enterprise_original_branch = git.get_branch_checked_out(config.enterprise)
        open_original_branch = git.get_branch_checked_out(config.open)
        git.switch(enterprise_pr.head.ref, config.enterprise, config.dry_run)
        git.switch(open_pr.merge_commit_sha, config.open, config.dry_run)
        # Fold the new submodule hash into the PR's existing commit.
        git.commit_amend([SUBMODULE_PATH], config.enterprise, config.dry_run)
        git.push(config.enterprise.origin_remote, enterprise_pr.head.ref, config.enterprise, config.dry_run, force=True)
        git.switch(enterprise_original_branch, config.enterprise, config.dry_run)
        git.switch(open_original_branch, config.open, config.dry_run)
        return [f"TODO: Merge {enterprise_pr.html_url} as soon as possible."]
    else:
        raise StateError(
            "PR does not have an acceptable 'After:' annotation. Only external PR references are supported. "
            f"(Was '{after_match.group(0)}')"
        )
def setup_update_dependent_pr_subcommand(subparsers):
    """Register the `update_dependent_pr` subcommand."""
    parser = subparsers.add_parser(
        "update_dependent_pr",
        help="Update an enterprise PR to match a merged PR in a submodule.",
    )
    parser.add_argument("number", help="The PR number in Github.", type=int)
    for add_global_arguments in (
        setup_global_log_arguments,
        setup_global_repo_arguments,
        setup_global_action_arguments,
    ):
        add_global_arguments(parser)
    parser.set_defaults(subcommand_impl=update_dependent_pr_subcommand)
def tag_subcommand(args):
config: Configuration = args.configuration
check_remotes(config)
check_clean(args, config)
commit = git.HEAD
current_branch = get_current_branch_from_either_repository(config)
kind = get_branch_kind(current_branch, (BranchKind.RELEASE, BranchKind.VARIANT))
if (
not git.is_ancestor_of(commit, f"{config.open.upstream_remote}/{current_branch}", config.open)
and args.require_upstream
and not args.pretend_upstream
):
raise StateError(f"HEAD of {current_branch} is not upstream")
if (
not git.is_ancestor_of(commit, f"{config.enterprise.upstream_remote}/{current_branch}", config.enterprise)
and args.require_upstream
and not args.pretend_upstream
):
raise StateError(f"HEAD of {current_branch} is not upstream")
next_version = version.Version(args.version)
check_branch_version(current_branch, kind, next_version, prev_version=None)
tag_name = | |
start time/frame for later
blankscreen.frameNStart = frameN # exact frame index
blankscreen.tStart = t # local t and not account for scr refresh
blankscreen.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(blankscreen, 'tStartRefresh') # time at next scr refresh
blankscreen.setAutoDraw(True)
if blankscreen.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > blankscreen.tStartRefresh + 0.5-frameTolerance:
# keep track of stop time/frame for later
blankscreen.tStop = t # not accounting for scr refresh
blankscreen.frameNStop = frameN # exact frame index
win.timeOnFlip(blankscreen, 'tStopRefresh') # time at next scr refresh
blankscreen.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in BreakComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Break"-------
for thisComponent in BreakComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# ------Prepare to start Routine "Instructions1"-------
# Shows textInstructions until the participant presses 'space' (no fixed duration).
continueRoutine = True
# update component parameters for each repeat
textInstructionsResp.keys = []
textInstructionsResp.rt = []
_textInstructionsResp_allKeys = []
# keep track of which components have finished
Instructions1Components = [textInstructions, textInstructionsResp]
for thisComponent in Instructions1Components:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Instructions1Clock.reset(-_timeToFirstFrame)  # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Instructions1"-------
while continueRoutine:
    # get current time
    t = Instructions1Clock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=Instructions1Clock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *textInstructions* updates
    if textInstructions.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        textInstructions.frameNStart = frameN  # exact frame index
        textInstructions.tStart = t  # local t and not account for scr refresh
        textInstructions.tStartRefresh = tThisFlipGlobal  # on global time
        win.timeOnFlip(textInstructions, 'tStartRefresh')  # time at next scr refresh
        textInstructions.setAutoDraw(True)
    # *textInstructionsResp* updates
    waitOnFlip = False
    if textInstructionsResp.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
        # keep track of start time/frame for later
        textInstructionsResp.frameNStart = frameN  # exact frame index
        textInstructionsResp.tStart = t  # local t and not account for scr refresh
        textInstructionsResp.tStartRefresh = tThisFlipGlobal  # on global time
        win.timeOnFlip(textInstructionsResp, 'tStartRefresh')  # time at next scr refresh
        textInstructionsResp.status = STARTED
        # keyboard checking is just starting
        waitOnFlip = True
        win.callOnFlip(textInstructionsResp.clock.reset)  # t=0 on next screen flip
        win.callOnFlip(textInstructionsResp.clearEvents, eventType='keyboard')  # clear events on next screen flip
    # only poll the keyboard once the clock reset has taken effect (next flip)
    if textInstructionsResp.status == STARTED and not waitOnFlip:
        theseKeys = textInstructionsResp.getKeys(keyList=['space'], waitRelease=False)
        _textInstructionsResp_allKeys.extend(theseKeys)
        if len(_textInstructionsResp_allKeys):
            textInstructionsResp.keys = _textInstructionsResp_allKeys[-1].name  # just the last key pressed
            textInstructionsResp.rt = _textInstructionsResp_allKeys[-1].rt
            # a response ends the routine
            continueRoutine = False
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in Instructions1Components:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Instructions1"-------
for thisComponent in Instructions1Components:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "Instructions1" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "Break"-------
# Fixed-duration (0.5 s) blank-screen break, run against the non-slip routineTimer.
continueRoutine = True
routineTimer.add(0.500000)
# update component parameters for each repeat
# keep track of which components have finished
BreakComponents = [blankscreen]
for thisComponent in BreakComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
BreakClock.reset(-_timeToFirstFrame)  # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Break"-------
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = BreakClock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=BreakClock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *blankscreen* updates
    if blankscreen.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        blankscreen.frameNStart = frameN  # exact frame index
        blankscreen.tStart = t  # local t and not account for scr refresh
        blankscreen.tStartRefresh = tThisFlipGlobal  # on global time
        win.timeOnFlip(blankscreen, 'tStartRefresh')  # time at next scr refresh
        blankscreen.setAutoDraw(True)
    if blankscreen.status == STARTED:
        # is it time to stop? (based on global clock, using actual start)
        if tThisFlipGlobal > blankscreen.tStartRefresh + 0.5-frameTolerance:
            # keep track of stop time/frame for later
            blankscreen.tStop = t  # not accounting for scr refresh
            blankscreen.frameNStop = frameN  # exact frame index
            win.timeOnFlip(blankscreen, 'tStopRefresh')  # time at next scr refresh
            blankscreen.setAutoDraw(False)
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in BreakComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Break"-------
for thisComponent in BreakComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# ------Prepare to start Routine "Instructions2"-------
# Shows imgInstructions until the participant presses 'space' (no fixed duration).
continueRoutine = True
# update component parameters for each repeat
imgInstructionsResp.keys = []
imgInstructionsResp.rt = []
_imgInstructionsResp_allKeys = []
# keep track of which components have finished
Instructions2Components = [imgInstructions, imgInstructionsResp]
for thisComponent in Instructions2Components:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Instructions2Clock.reset(-_timeToFirstFrame)  # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Instructions2"-------
while continueRoutine:
    # get current time
    t = Instructions2Clock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=Instructions2Clock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *imgInstructions* updates
    if imgInstructions.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        imgInstructions.frameNStart = frameN  # exact frame index
        imgInstructions.tStart = t  # local t and not account for scr refresh
        imgInstructions.tStartRefresh = tThisFlipGlobal  # on global time
        win.timeOnFlip(imgInstructions, 'tStartRefresh')  # time at next scr refresh
        imgInstructions.setAutoDraw(True)
    # *imgInstructionsResp* updates
    waitOnFlip = False
    if imgInstructionsResp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        imgInstructionsResp.frameNStart = frameN  # exact frame index
        imgInstructionsResp.tStart = t  # local t and not account for scr refresh
        imgInstructionsResp.tStartRefresh = tThisFlipGlobal  # on global time
        win.timeOnFlip(imgInstructionsResp, 'tStartRefresh')  # time at next scr refresh
        imgInstructionsResp.status = STARTED
        # keyboard checking is just starting
        waitOnFlip = True
        win.callOnFlip(imgInstructionsResp.clock.reset)  # t=0 on next screen flip
        win.callOnFlip(imgInstructionsResp.clearEvents, eventType='keyboard')  # clear events on next screen flip
    # only poll the keyboard once the clock reset has taken effect (next flip)
    if imgInstructionsResp.status == STARTED and not waitOnFlip:
        theseKeys = imgInstructionsResp.getKeys(keyList=['space'], waitRelease=False)
        _imgInstructionsResp_allKeys.extend(theseKeys)
        if len(_imgInstructionsResp_allKeys):
            imgInstructionsResp.keys = _imgInstructionsResp_allKeys[-1].name  # just the last key pressed
            imgInstructionsResp.rt = _imgInstructionsResp_allKeys[-1].rt
            # a response ends the routine
            continueRoutine = False
    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in Instructions2Components:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished
    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Instructions2"-------
for thisComponent in Instructions2Components:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# check responses
if imgInstructionsResp.keys in ['', [], None]:  # No response was made
    imgInstructionsResp.keys = None
# the Routine "Instructions2" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine | |
the first *n* rows of the given Dataset"""
try:
df = next(
pd.read_sql_table(
table_name=self._table.name,
schema=self._table.schema,
con=self.engine,
chunksize=n,
)
)
except (ValueError, NotImplementedError):
# it looks like MetaData that is used by pd.read_sql_table
# cannot work on a temp table.
# If it fails, we are trying to get the data using read_sql
head_sql_str = "select * from "
if self._table.schema and self.engine.dialect.name.lower() != "bigquery":
head_sql_str += self._table.schema + "."
elif self.engine.dialect.name.lower() == "bigquery":
head_sql_str += "`" + self._table.name + "`"
else:
head_sql_str += self._table.name
head_sql_str += " limit {0:d}".format(n)
# Limit is unknown in mssql! Use top instead!
if self.engine.dialect.name.lower() == "mssql":
head_sql_str = "select top({n}) * from {table}".format(
n=n, table=self._table.name
)
df = pd.read_sql(head_sql_str, con=self.engine)
return PandasDataset(
df,
expectation_suite=self.get_expectation_suite(
discard_failed_expectations=False,
discard_result_format_kwargs=False,
discard_catch_exceptions_kwargs=False,
discard_include_config_kwargs=False,
),
)
def get_row_count(self):
count_query = sa.select([sa.func.count()]).select_from(self._table)
return int(self.engine.execute(count_query).scalar())
def get_column_count(self):
return len(self.columns)
def get_table_columns(self) -> List[str]:
return [col["name"] for col in self.columns]
    def get_column_nonnull_count(self, column):
        """Return the number of non-null values in *column*.

        Computed in a single table scan: count all rows, count the rows
        whose value is "null-like" (in ``ignore_values`` or SQL NULL),
        and return the difference.
        """
        ignore_values = [None]
        count_query = sa.select(
            [
                sa.func.count().label("element_count"),
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.or_(
                                    sa.column(column).in_(ignore_values),
                                    # Below is necessary b/c sa.in_() uses `==` but None != None
                                    # But we only consider this if None is actually in the list of ignore values
                                    sa.column(column).is_(None)
                                    if None in ignore_values
                                    else False,
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("null_count"),
            ]
        ).select_from(self._table)
        count_results = dict(self.engine.execute(count_query).fetchone())
        # `or 0` guards against NULL results (e.g. SUM() over an empty table).
        element_count = int(count_results.get("element_count") or 0)
        null_count = int(count_results.get("null_count") or 0)
        return element_count - null_count
def get_column_sum(self, column):
return self.engine.execute(
sa.select([sa.func.sum(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_max(self, column, parse_strings_as_datetimes=False):
if parse_strings_as_datetimes:
raise NotImplementedError
return self.engine.execute(
sa.select([sa.func.max(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_min(self, column, parse_strings_as_datetimes=False):
if parse_strings_as_datetimes:
raise NotImplementedError
return self.engine.execute(
sa.select([sa.func.min(sa.column(column))]).select_from(self._table)
).scalar()
    def get_column_value_counts(self, column, sort="value", collate=None):
        """Return a pandas Series mapping each distinct value of *column*
        to its occurrence count.  NULL values are excluded.

        The Series is named "count" and its index is named "value".

        :param sort: "value" (order by the values), "count" (descending
            frequency) or "none" (backend-determined order)
        :param collate: optional collation name applied when sorting by value
        :raises ValueError: for an unrecognized *sort* option
        """
        if sort not in ["value", "count", "none"]:
            raise ValueError("sort must be either 'value', 'count', or 'none'")
        query = (
            sa.select(
                [
                    sa.column(column).label("value"),
                    sa.func.count(sa.column(column)).label("count"),
                ]
            )
            .where(sa.column(column) != None)
            .group_by(sa.column(column))
        )
        if sort == "value":
            # NOTE: depending on the way the underlying database collates columns,
            # ordering can vary. postgresql collate "C" matches default sort
            # for python and most other systems, but is not universally supported,
            # so we use the default sort for the system, unless specifically overridden
            if collate is not None:
                query = query.order_by(sa.column(column).collate(collate))
            else:
                query = query.order_by(sa.column(column))
        elif sort == "count":
            query = query.order_by(sa.column("count").desc())
        results = self.engine.execute(query.select_from(self._table)).fetchall()
        series = pd.Series(
            [row[1] for row in results],
            index=pd.Index(data=[row[0] for row in results], name="value"),
            name="count",
        )
        return series
def get_column_mean(self, column):
return self.engine.execute(
sa.select([sa.func.avg(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_unique_count(self, column):
return self.engine.execute(
sa.select([sa.func.count(sa.func.distinct(sa.column(column)))]).select_from(
self._table
)
).scalar()
def get_column_median(self, column):
nonnull_count = self.get_column_nonnull_count(column)
element_values = self.engine.execute(
sa.select([sa.column(column)])
.order_by(sa.column(column))
.where(sa.column(column) != None)
.offset(max(nonnull_count // 2 - 1, 0))
.limit(2)
.select_from(self._table)
)
column_values = list(element_values.fetchall())
if len(column_values) == 0:
column_median = None
elif nonnull_count % 2 == 0:
# An even number of column values: take the average of the two center values
column_median = (
float(
column_values[0][0]
+ column_values[1][0] # left center value # right center value
)
/ 2.0
) # Average center values
else:
# An odd number of column values, we can just take the center value
column_median = column_values[1][0] # True center value
return column_median
    def get_column_quantiles(
        self, column: str, quantiles, allow_relative_error: bool = False
    ):
        """Return the discrete quantiles of *column* for the given fractions.

        Builds one ``percentile_disc`` expression per requested quantile,
        with dialect-specific syntax for mssql and BigQuery.  For dialects
        that only offer an approximate implementation (e.g. Redshift),
        *allow_relative_error* must be True to permit approximate results.

        :raises ValueError: when the dialect cannot compute quantiles under
            the requested exact/approximate mode
        """
        if self.sql_engine_dialect.name.lower() == "mssql":
            # mssql requires over(), so we add an empty over() clause
            selects = [
                sa.func.percentile_disc(quantile)
                .within_group(sa.column(column).asc())
                .over()
                for quantile in quantiles
            ]
        elif self.sql_engine_dialect.name.lower() == "bigquery":
            # BigQuery does not support "WITHIN", so we need a special case for it
            selects: List = [
                sa.func.percentile_disc(sa.column(column), quantile).over()
                for quantile in quantiles
            ]
        else:
            selects: List = [
                sa.func.percentile_disc(quantile).within_group(sa.column(column).asc())
                for quantile in quantiles
            ]
        try:
            quantiles = self.engine.execute(
                sa.select(selects).select_from(self._table)
            ).fetchone()
        except ProgrammingError:
            # ProgrammingError: (psycopg2.errors.SyntaxError) Aggregate function "percentile_disc" is not supported;
            # use approximate percentile_disc or percentile_cont instead.
            if self.attempt_allowing_relative_error():
                # Redshift does not have a percentile_disc method, but does support an approximate version.
                if allow_relative_error:
                    sql_approx: str = get_approximate_percentile_disc_sql(
                        selects=selects, sql_engine_dialect=self.sql_engine_dialect
                    )
                    selects = [sa.text(sql_approx)]
                    try:
                        # Retry once with the approximate-percentile SQL.
                        quantiles = self.engine.execute(
                            sa.select(selects).select_from(self._table)
                        ).fetchone()
                    except ProgrammingError as pe:
                        exception_message: str = "An SQL syntax Exception occurred."
                        exception_traceback: str = traceback.format_exc()
                        exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
                        logger.error(exception_message)
                        raise pe
                else:
                    raise ValueError(
                        f'The SQL engine dialect "{str(self.sql_engine_dialect)}" does not support computing quantiles '
                        "without approximation error; set allow_relative_error to True to allow approximate quantiles."
                    )
            else:
                raise ValueError(
                    f'The SQL engine dialect "{str(self.sql_engine_dialect)}" does not support computing quantiles with '
                    "approximation error; set allow_relative_error to False to disable approximate quantiles."
                )
        return list(quantiles)
def get_column_stdev(self, column):
if self.sql_engine_dialect.name.lower() != "mssql":
res = self.engine.execute(
sa.select([sa.func.stddev_samp(sa.column(column))])
.select_from(self._table)
.where(sa.column(column) != None)
).fetchone()
else:
# stdev_samp is not a recognized built-in function name but stdevp does exist for mssql!
res = self.engine.execute(
sa.select([sa.func.stdevp(sa.column(column))])
.select_from(self._table)
.where(sa.column(column) != None)
).fetchone()
return float(res[0])
    def get_column_hist(self, column, bins):
        """return a list of counts corresponding to bins

        Each interior bin counts values in ``[bins[i], bins[i+1])``; the
        final bin is closed on both ends.  An infinite first/last edge is
        simply omitted from the generated SQL, leaving that side unbounded.
        NULL values are excluded.

        Args:
            column: the name of the column for which to get the histogram
            bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
        """
        case_conditions = []
        idx = 0
        bins = list(bins)
        # If we have an infinte lower bound, don't express that in sql
        if (bins[0] == -np.inf) or (bins[0] == -float("inf")):
            case_conditions.append(
                sa.func.sum(
                    sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)
                ).label("bin_" + str(idx))
            )
            idx += 1
        # Interior bins: left-closed, right-open intervals.
        for idx in range(idx, len(bins) - 2):
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    bins[idx] <= sa.column(column),
                                    sa.column(column) < bins[idx + 1],
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("bin_" + str(idx))
            )
        # Final bin: unbounded above if the last edge is +inf, otherwise
        # closed on both ends so the maximum value is counted.
        if (bins[-1] == np.inf) or (bins[-1] == float("inf")):
            case_conditions.append(
                sa.func.sum(
                    sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)
                ).label("bin_" + str(len(bins) - 1))
            )
        else:
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    bins[-2] <= sa.column(column),
                                    sa.column(column) <= bins[-1],
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("bin_" + str(len(bins) - 1))
            )
        query = (
            sa.select(case_conditions)
            .where(sa.column(column) != None,)
            .select_from(self._table)
        )
        hist = list(self.engine.execute(query).fetchone())
        return hist
def get_column_count_in_range(
self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
):
if min_val is None and max_val is None:
raise ValueError("Must specify either min or max value")
if min_val is not None and max_val is not None and min_val > max_val:
raise ValueError("Min value must be <= to max value")
min_condition = None
max_condition = None
if min_val is not None:
if strict_min:
min_condition = sa.column(column) > min_val
else:
min_condition = sa.column(column) >= min_val
if max_val is not None:
if strict_max:
max_condition = sa.column(column) < max_val
else:
max_condition = sa.column(column) <= max_val
if min_condition is not None and max_condition is not None:
condition = sa.and_(min_condition, max_condition)
elif min_condition is not None:
condition = min_condition
else:
condition = max_condition
query = query = (
sa.select([sa.func.count((sa.column(column)))])
.where(sa.and_(sa.column(column) != None, condition))
.select_from(self._table)
)
return self.engine.execute(query).scalar()
def create_temporary_table(self, table_name, custom_sql, schema_name=None):
"""
Create Temporary table based on sql query. This will be used as a basis for executing expectations.
WARNING: this feature is new in v0.4.
It hasn't been tested in all SQL dialects, and may change based on community feedback.
:param custom_sql:
"""
###
# NOTE: 20200310 - The update to support snowflake transient table creation revealed several
# import cases that are not fully handled.
# The snowflake-related change updated behavior to allow both custom_sql and schema to be specified. But
# the underlying incomplete handling of schema remains.
#
# Several cases we need to consider:
#
# 1. Distributed backends (e.g. Snowflake and BigQuery) often use a `<database>.<schema>.<table>`
# syntax, but currently we are biased towards only allowing schema.table
#
# 2. In the wild, we see people using several ways to declare the schema they want to use:
# a. In the connection string, the original RFC only specifies database, but schema is supported by some
# backends (Snowflake) as a query parameter.
# b. As a default for a user (the equivalent of USE SCHEMA being provided at the beginning of a session)
# c. As part of individual queries.
#
# 3. We currently don't make it possible to select from a table in | |
<filename>manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py
# Copyright (c) 2015 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp Data ONTAP cDOT multi-SVM storage driver library.
This library extends the abstract base library and completes the multi-SVM
functionality needed by the cDOT multi-SVM Manila driver. This library
variant creates Data ONTAP storage virtual machines (i.e. 'vservers')
as needed to provision shares.
"""
import re
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from manila import exception
from manila.i18n import _
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp import utils as na_utils
from manila import utils
LOG = log.getLogger(__name__)
SUPPORTED_NETWORK_TYPES = (None, 'flat', 'vlan')
SEGMENTED_NETWORK_TYPES = ('vlan',)
DEFAULT_MTU = 1500
class NetAppCmodeMultiSVMFileStorageLibrary(
lib_base.NetAppCmodeFileStorageLibrary):
@na_utils.trace
def check_for_setup_error(self):
if self._have_cluster_creds:
if self.configuration.netapp_vserver:
msg = ('Vserver is specified in the configuration. This is '
'ignored when the driver is managing share servers.')
LOG.warning(msg)
else: # only have vserver creds, which is an error in multi_svm mode
msg = _('Cluster credentials must be specified in the '
'configuration when the driver is managing share servers.')
raise exception.InvalidInput(reason=msg)
# Ensure one or more aggregates are available.
if not self._find_matching_aggregates():
msg = _('No aggregates are available for provisioning shares. '
'Ensure that the configuration option '
'netapp_aggregate_name_search_pattern is set correctly.')
raise exception.NetAppException(msg)
(super(NetAppCmodeMultiSVMFileStorageLibrary, self).
check_for_setup_error())
@na_utils.trace
def _get_vserver(self, share_server=None, vserver_name=None):
if share_server:
backend_details = share_server.get('backend_details')
vserver = backend_details.get(
'vserver_name') if backend_details else None
if not vserver:
msg = _('Vserver name is absent in backend details. Please '
'check whether Vserver was created properly.')
raise exception.VserverNotSpecified(msg)
elif vserver_name:
vserver = vserver_name
else:
msg = _('Share server not provided')
raise exception.InvalidInput(reason=msg)
if not self._client.vserver_exists(vserver):
raise exception.VserverNotFound(vserver=vserver)
vserver_client = self._get_api_client(vserver)
return vserver, vserver_client
def _get_ems_pool_info(self):
return {
'pools': {
'vserver': None,
'aggregates': self._find_matching_aggregates(),
},
}
@na_utils.trace
def _handle_housekeeping_tasks(self):
"""Handle various cleanup activities."""
self._client.prune_deleted_nfs_export_policies()
self._client.prune_deleted_snapshots()
self._client.remove_unused_qos_policy_groups()
(super(NetAppCmodeMultiSVMFileStorageLibrary, self).
_handle_housekeeping_tasks())
@na_utils.trace
def _find_matching_aggregates(self):
"""Find all aggregates match pattern."""
aggregate_names = self._client.list_non_root_aggregates()
pattern = self.configuration.netapp_aggregate_name_search_pattern
return [aggr_name for aggr_name in aggregate_names
if re.match(pattern, aggr_name)]
    @na_utils.trace
    def setup_server(self, network_info, metadata=None):
        """Creates and configures new Vserver."""
        vlan = network_info['segmentation_id']
        # Map allocation id -> IP address; serialized into the backend
        # details so the allocations can be recovered later.
        ports = {}
        for network_allocation in network_info['network_allocations']:
            ports[network_allocation['id']] = network_allocation['ip_address']
        # Serialize setup per VLAN.  NOTE(review): external=True presumably
        # makes this an inter-process lock — confirm against
        # manila.utils.synchronized semantics.
        @utils.synchronized('netapp-VLAN-%s' % vlan, external=True)
        def setup_server_with_lock():
            LOG.debug('Creating server %s', network_info['server_id'])
            self._validate_network_type(network_info)
            vserver_name = self._get_vserver_name(network_info['server_id'])
            server_details = {
                'vserver_name': vserver_name,
                'ports': jsonutils.dumps(ports)
            }
            try:
                self._create_vserver(vserver_name, network_info)
            except Exception as e:
                # Attach the partial server details so the caller can clean up.
                e.detail_data = {'server_details': server_details}
                raise
            return server_details
        return setup_server_with_lock()
@na_utils.trace
def _validate_network_type(self, network_info):
"""Raises exception if the segmentation type is incorrect."""
if network_info['network_type'] not in SUPPORTED_NETWORK_TYPES:
msg = _('The specified network type %s is unsupported by the '
'NetApp clustered Data ONTAP driver')
raise exception.NetworkBadConfigurationException(
reason=msg % network_info['network_type'])
@na_utils.trace
def _get_vserver_name(self, server_id):
return self.configuration.netapp_vserver_name_template % server_id
    @na_utils.trace
    def _create_vserver(self, vserver_name, network_info):
        """Creates Vserver with given parameters if it doesn't exist.

        Creates the vserver (reusing an existing ipspace for the same VLAN
        port when possible), then configures LIFs, routes, NFS and security
        services.  Any configuration failure tears the vserver back down
        before re-raising.
        """
        if self._client.vserver_exists(vserver_name):
            msg = _('Vserver %s already exists.')
            raise exception.NetAppException(msg % vserver_name)
        # NOTE(lseki): If there's already an ipspace created for the same VLAN
        # port, reuse it. It will be named after the previously created share
        # server's neutron subnet id.
        node_name = self._client.list_cluster_nodes()[0]
        port = self._get_node_data_port(node_name)
        vlan = network_info['segmentation_id']
        ipspace_name = self._client.get_ipspace_name_for_vlan_port(
            node_name, port, vlan) or self._create_ipspace(network_info)
        LOG.debug('Vserver %s does not exist, creating.', vserver_name)
        self._client.create_vserver(
            vserver_name,
            self.configuration.netapp_root_volume_aggregate,
            self.configuration.netapp_root_volume,
            self._find_matching_aggregates(),
            ipspace_name)
        vserver_client = self._get_api_client(vserver=vserver_name)
        # security_services stays None until read below, so the cleanup path
        # knows whether any were configured before the failure.
        security_services = None
        try:
            self._create_vserver_lifs(vserver_name,
                                      vserver_client,
                                      network_info,
                                      ipspace_name)
            self._create_vserver_admin_lif(vserver_name,
                                           vserver_client,
                                           network_info,
                                           ipspace_name)
            self._create_vserver_routes(vserver_client,
                                        network_info)
            vserver_client.enable_nfs(
                self.configuration.netapp_enabled_share_protocols)
            security_services = network_info.get('security_services')
            if security_services:
                self._client.setup_security_services(security_services,
                                                     vserver_client,
                                                     vserver_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to configure Vserver.")
                # NOTE(dviroel): At this point, the lock was already acquired
                # by the caller of _create_vserver.
                self._delete_vserver(vserver_name,
                                     security_services=security_services,
                                     needs_lock=False)
def _get_valid_ipspace_name(self, network_id):
"""Get IPspace name according to network id."""
return 'ipspace_' + network_id.replace('-', '_')
@na_utils.trace
def _create_ipspace(self, network_info):
"""If supported, create an IPspace for a new Vserver."""
if not self._client.features.IPSPACES:
return None
if (network_info['network_allocations'][0]['network_type']
not in SEGMENTED_NETWORK_TYPES):
return client_cmode.DEFAULT_IPSPACE
# NOTE(cknight): Neutron needs cDOT IP spaces because it can provide
# overlapping IP address ranges for different subnets. That is not
# believed to be an issue for any of Manila's other network plugins.
ipspace_id = network_info.get('neutron_subnet_id')
if not ipspace_id:
return client_cmode.DEFAULT_IPSPACE
ipspace_name = self._get_valid_ipspace_name(ipspace_id)
self._client.create_ipspace(ipspace_name)
return ipspace_name
@na_utils.trace
def _create_vserver_lifs(self, vserver_name, vserver_client, network_info,
ipspace_name):
"""Create Vserver data logical interfaces (LIFs)."""
nodes = self._client.list_cluster_nodes()
node_network_info = zip(nodes, network_info['network_allocations'])
for node_name, network_allocation in node_network_info:
lif_name = self._get_lif_name(node_name, network_allocation)
self._create_lif(vserver_client, vserver_name, ipspace_name,
node_name, lif_name, network_allocation)
@na_utils.trace
def _create_vserver_admin_lif(self, vserver_name, vserver_client,
network_info, ipspace_name):
"""Create Vserver admin LIF, if defined."""
network_allocations = network_info.get('admin_network_allocations')
if not network_allocations:
LOG.info('No admin network defined for Vserver %s.',
vserver_name)
return
node_name = self._client.list_cluster_nodes()[0]
network_allocation = network_allocations[0]
lif_name = self._get_lif_name(node_name, network_allocation)
self._create_lif(vserver_client, vserver_name, ipspace_name,
node_name, lif_name, network_allocation)
@na_utils.trace
def _create_vserver_routes(self, vserver_client, network_info):
    """Create Vserver route and set gateways."""
    # NOTE(gouthamr): Use the gateway from the tenant subnet/s
    # for the static routes. Do not configure a route for the admin
    # subnet because fast path routing will work for incoming
    # connections and there are no requirements for outgoing
    # connections on the admin network yet.
    seen_gateways = set()
    for allocation in network_info['network_allocations']:
        gateway = allocation['gateway']
        if gateway in seen_gateways:
            continue
        vserver_client.create_route(gateway)
        seen_gateways.add(gateway)
@na_utils.trace
def _get_node_data_port(self, node):
    """Return the first data port on *node* matching the configured pattern.

    :raises NetAppException: if no eligible port is found.
    """
    pattern = self.configuration.netapp_port_name_search_pattern
    for port_name in self._client.list_node_data_ports(node):
        if re.match(pattern, port_name):
            return port_name
    raise exception.NetAppException(
        _('Could not find eligible network ports on node %s on which '
          'to create Vserver LIFs.') % node)
def _get_lif_name(self, node_name, network_allocation):
"""Get LIF name based on template from manila.conf file."""
lif_name_args = {
'node': node_name,
'net_allocation_id': network_allocation['id'],
}
return self.configuration.netapp_lif_name_template % lif_name_args
@na_utils.trace
def _create_lif(self, vserver_client, vserver_name, ipspace_name,
                node_name, lif_name, network_allocation):
    """Creates LIF for Vserver."""
    ip_address = network_allocation['ip_address']
    netmask = utils.cidr_to_netmask(network_allocation['cidr'])
    vlan = network_allocation['segmentation_id']
    # Fall back to the driver default when the allocation carries no MTU.
    mtu = network_allocation.get('mtu') or DEFAULT_MTU
    port = self._get_node_data_port(node_name)

    already_exists = vserver_client.network_interface_exists(
        vserver_name, node_name, port, ip_address, netmask, vlan)
    if not already_exists:
        self._client.create_network_interface(
            ip_address, netmask, vlan, node_name, port, vserver_name,
            lif_name, ipspace_name, mtu)
@na_utils.trace
def get_network_allocations_number(self):
    """Get number of network interfaces to be created."""
    nodes = self._client.list_cluster_nodes()
    return len(nodes)
@na_utils.trace
def get_admin_network_allocations_number(self, admin_network_api):
    """Get number of network allocations for creating admin LIFs."""
    if admin_network_api:
        return 1
    return 0
@na_utils.trace
def teardown_server(self, server_details, security_services=None):
    """Teardown share server."""
    vserver = None
    if server_details:
        vserver = server_details.get('vserver_name')

    # Missing or unknown Vservers are logged, not raised, so the share
    # server record can still be deleted.
    if not vserver:
        LOG.warning("Vserver not specified for share server being "
                    "deleted. Deletion of share server record will "
                    "proceed anyway.")
        return

    if not self._client.vserver_exists(vserver):
        LOG.warning("Could not find Vserver for share server being "
                    "deleted: %s. Deletion of share server "
                    "record will proceed anyway.", vserver)
        return

    self._delete_vserver(vserver, security_services=security_services)
@na_utils.trace
def _delete_vserver(self, vserver, security_services=None,
                    needs_lock=True):
    """Delete a Vserver plus IPspace and security services as needed.

    :param vserver: name of the Vserver to delete.
    :param security_services: passed through to the client delete call.
    :param needs_lock: when True, the delete runs under an external
        inter-process lock named after the VLAN id (see below).
    """
    ipspace_name = self._client.get_vserver_ipspace(vserver)
    vserver_client = self._get_api_client(vserver=vserver)
    network_interfaces = vserver_client.get_network_interfaces()
    interfaces_on_vlans = []
    vlans = []
    # Interfaces whose home port contains '-' are treated as living on a
    # VLAN port (name form appears to be '<port>-<vlan-id>' -- see
    # _delete_vserver_vlans); collect them so their VLAN config can be
    # removed after the Vserver is gone.
    for interface in network_interfaces:
        if '-' in interface['home-port']:
            interfaces_on_vlans.append(interface)
            vlans.append(interface['home-port'])
    if vlans:
        # Collapse the sorted, de-duplicated VLAN port names into one
        # string and take the trailing segment as the lock id.
        # NOTE(review): with several distinct VLANs this derives the lock
        # name from only the last VLAN id -- confirm that is intended.
        vlans = '-'.join(sorted(set(vlans))) if vlans else None
        vlan_id = vlans.split('-')[-1]
    else:
        vlan_id = None

    def _delete_vserver_without_lock():
        # Delete the Vserver itself, then its IPspace (only when no other
        # data Vserver still uses it), then the VLAN port configuration.
        self._client.delete_vserver(vserver,
                                    vserver_client,
                                    security_services=security_services)
        if ipspace_name and not self._client.ipspace_has_data_vservers(
                ipspace_name):
            self._client.delete_ipspace(ipspace_name)
        self._delete_vserver_vlans(interfaces_on_vlans)

    # The lock name is computed at function-definition time from vlan_id,
    # serializing deletes that touch the same VLAN across processes.
    @utils.synchronized('netapp-VLAN-%s' % vlan_id, external=True)
    def _delete_vserver_with_lock():
        _delete_vserver_without_lock()

    if needs_lock:
        return _delete_vserver_with_lock()
    else:
        return _delete_vserver_without_lock()
@na_utils.trace
def _delete_vserver_vlans(self, network_interfaces_on_vlans):
    """Delete Vserver's VLAN configuration from ports.

    Each interface's 'home-port' is expected to be '<port>-<vlan-id>'.
    Failures are logged and do not stop the remaining deletions.
    """
    for interface in network_interfaces_on_vlans:
        try:
            home_port = interface['home-port']
            # Split on the last '-' only: the VLAN id is the suffix, and
            # the physical port name itself may contain dashes (a plain
            # split('-') would raise ValueError in that case).
            port, vlan = home_port.rsplit('-', 1)
            node = interface['home-node']
            self._client.delete_vlan(node, port, vlan)
        except exception.NetAppException:
            LOG.exception("Deleting Vserver VLAN failed.")
def get_configured_ip_versions(self):
    """Return the enabled IP versions: [4], plus 6 when IPv6 is on."""
    net_options = self._client.get_net_options()
    if net_options['ipv6-enabled']:
        return [4, 6]
    return [4]
def manage_server(self, context, share_server, identifier, driver_options):
    """Manages a vserver by renaming it and returning backend_details."""
    new_name = self._get_vserver_name(share_server['id'])
    old_name = self._get_correct_vserver_old_name(identifier)
    # Rename only when the backend name differs from the expected one.
    if old_name != new_name:
        self._client.rename_vserver(old_name, new_name)
    return new_name, {'vserver_name': new_name}
def unmanage_server(self, server_details, security_services=None):
    """Unmanage a share server: no backend action is required."""
    return None
def get_share_server_network_info(
self, context, share_server, identifier, driver_options):
"""Returns a list of IPs for each vserver network interface."""
vserver_name = self._get_correct_vserver_old_name(identifier)
vserver, vserver_client = self._get_vserver(vserver_name=vserver_name)
interfaces = vserver_client.get_network_interfaces()
| |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from edibles import DATADIR
from edibles import PYTHONDIR
from edibles.utils.edibles_spectrum import EdiblesSpectrum
class EdiblesOracle:
"""
This class will process the EDIBLES obs log and target info files.
Users can then query the oracle for observations matching specific criteria.
"""
def __init__(self):
    """Read the obs log and all formatted per-sightline parameter tables.

    Loads DR4_ObsLog.csv plus the sightline_data files (E(B-V), spectral
    type, log N(HI), log N(H2), f(H2), RV, AV and the observed-object
    list) into pandas DataFrames.
    """
    print(DATADIR)
    folder = Path(PYTHONDIR + "/data")
    sightline_folder = folder / "sightline_data"
    self.obslog = pd.read_csv(folder / "DR4_ObsLog.csv")
    self.ebvlog = pd.read_csv(sightline_folder / "Formatted_EBV.csv")
    self.sptypelog = pd.read_csv(sightline_folder / "Formatted_SpType.csv")
    self.nhilog = pd.read_csv(sightline_folder / "Formatted_LogN(HI).csv")
    self.nhiilog = pd.read_csv(sightline_folder / "Formatted_LogN(H2).csv")
    self.fh2log = pd.read_csv(sightline_folder / "Formatted_f(H2).csv")
    self.rvlog = pd.read_csv(sightline_folder / "Formatted_RV.csv")
    self.avlog = pd.read_csv(sightline_folder / "Formatted_AV.csv")
    self.object_log = pd.read_csv(sightline_folder / "ObservedObjects.csv",
                                  names=["object"], header=0)
def _getObsListFilteredByObsLogParameters(self, object=None, Wave=None, WaveMin=None, WaveMax=None, MergedOnly=False, OrdersOnly=False):
'''Filter all the observations in the ObsLog by the parameters
contained in the obslog, i.e. by object (if specified), wavelength
range or merged versus specific orders. '''
# We will use Boolean matches for all filter criteria.
#print('Inside the function: object is', object)
bool_object_matches = np.zeros(len(self.obslog.index),dtype=bool)
#print(object.dtype)
if object is None:
bool_object_matches = np.ones(len(self.ebvlog.index),dtype=bool)
elif (isinstance(object, np.ndarray) | isinstance(object, list)):
for thisobject in object:
#print("Object in loop:", thisobject)
#print(self.obslog.Object == thisobject)
bool_object_matches = (self.obslog.Object == thisobject) | (bool_object_matches)
#print(bool_object_matches.sum())
else:
bool_object_matches = self.ebvlog.object == object
#print('Inside the function: number of matches is ', bool_object_matches.sum())
# Do we have to filter out merged or single-order spectra? Note that if both
# MergedOnly and OrdersOnly are True, only the Merged spectra will be returned.
if MergedOnly and OrdersOnly:
print("EDIBLES Oracle WARNING: ONLY RETURNING MERGED SPECTRA")
bool_order_matches = self.obslog.Order != "Z"
if OrdersOnly is True:
bool_order_matches = self.obslog.Order != "ALL"
if MergedOnly is True:
bool_order_matches = self.obslog.Order == "ALL"
#print(bool_order_matches)
bool_wave_matches = np.ones(len(self.obslog.index),dtype=bool)
if Wave:
bool_wave_matches = (self.obslog.WaveMin < Wave) & (self.obslog.WaveMax > Wave)
if WaveMin:
bool_wave_matches = (self.obslog.WaveMax > WaveMin) & (bool_wave_matches)
if WaveMax:
bool_wave_matches = (self.obslog.WaveMin < WaveMax) & (bool_wave_matches)
ind = np.where(bool_object_matches & bool_order_matches & bool_wave_matches)
#print(ind)
print("**Filtered File List**")
print(self.obslog.iloc[ind].Filename)
return self.obslog.iloc[ind].Filename
def FilterEngine(self, object, log, value, unc_lower, unc_upper, reference_id):
    """Generic filter over one sightline parameter table *log*.

    :param object: None (all objects), a single object name, or a
        list/ndarray of object names.  (Scalar names previously fell
        through to an error message and matched nothing; they are now
        handled consistently with the obslog filter.)
    :param value: exact value to match, or None.
    :param unc_lower: open lower bound on log.value, or None.
    :param unc_upper: open upper bound on log.value, or None.
    :param reference_id: None -> preferred values only; 'All' -> no
        reference filtering; otherwise keep only that reference id.
    :returns: DataFrame with the matching 'object' and 'value' columns.
    """
    # Object mask.
    if object is None:
        bool_object_matches = np.ones(len(log.index), dtype=bool)
    elif isinstance(object, (np.ndarray, list)):
        bool_object_matches = np.zeros(len(log.index), dtype=bool)
        for thisobject in object:
            bool_object_matches = (log.object == thisobject) | bool_object_matches
    else:
        # Single object name.
        bool_object_matches = log.object == object

    # Parameter mask -- applied only for criteria that are specified.
    bool_value_matches = np.ones(len(log.index), dtype=bool)
    if value is not None:
        # Only keep sightline if the value is an exact match.
        bool_value_matches = (log.value == value)
    if unc_lower is not None:
        bool_value_matches = (log.value > unc_lower) & bool_value_matches
    if unc_upper is not None:
        bool_value_matches = (log.value < unc_upper) & bool_value_matches

    # Reference handling: no reference -> preferred values only;
    # 'All' -> keep every reference; otherwise filter on that reference.
    if reference_id is None:
        bool_value_matches = (log.preferred_flag == 1) & bool_value_matches
    elif reference_id == 'All':
        pass
    else:
        bool_value_matches = (log.reference_id == reference_id) & bool_value_matches

    bool_combined_matches = bool_object_matches & bool_value_matches
    matching_objects_df = log.loc[bool_combined_matches, ['object', 'value']]
    print('getFilteredObslist: Found a total of ', bool_object_matches.sum(), ' object match(es).')
    print('getFilteredObslist: Found a total of ', bool_value_matches.sum(), ' parameter match(es).')
    print('getFilteredObslist: Found a total of ', bool_combined_matches.sum(), ' combined match(es).')
    return matching_objects_df
def getFilteredObjects(self,object=None, Wave=None, \
EBV=None, EBV_min=None, EBV_max=None, EBV_reference=None, \
SpType=None, SpType_min=None, SpType_max=None, SpType_reference=None, \
WaveMin=None, WaveMax=None, LogNHI=None,LogNHI_min=None,LogNHI_max=None,\
LogNHI_reference=None,LogNHII=None,LogNHII_min=None,LogNHII_max=None, \
LogNHII_reference=None, fH2=None,fH2_min=None,fH2_max=None, \
fH2_reference=None, RV=None,RV_min=None,RV_max=None, \
RV_reference=None, AV=None,AV_min=None,AV_max=None, \
AV_reference=None):
'''This method will provide a filtered list of objects that match
the specified criteria on sightline/target parameters as well as
on observational criteria (e.g. wavelength range). This function consists
of two steps:
| 1. Find all targets that match specified target parameters. This is done
for each parameter using the FilterEngine function.
| 2. Find the objects that match all target specifications. '''
# STEP 1: Filter objects for each of the parameters -- but only if parameters are specified!
if (EBV or EBV_min or EBV_max or EBV_reference) is not None:
print("EBV")
matching_objects_ebv = self.FilterEngine(object, self.ebvlog, EBV, EBV_min, EBV_max, EBV_reference)
else:
matching_objects_ebv = self.object_log
if (SpType or SpType_min or SpType_max or SpType_reference) is not None:
print("SP_TYPE")
matching_objects_sptype = self.FilterEngine(object, self.sptypelog, SpType, SpType_min, SpType_max, SpType_reference)
else:
matching_objects_sptype = self.object_log
if (LogNHI or LogNHI_min or LogNHI_max or LogNHI_reference) is not None:
print("LogN(HI)")
matching_objects_lognhi = self.FilterEngine(object, self.nhilog, LogNHI, LogNHI_min, LogNHI_max, LogNHI_reference)
else:
matching_objects_lognhi = self.object_log
if (LogNHII or LogNHII_min or LogNHII_max or LogNHII_reference) is not None:
print("LogN(HII)")
matching_objects_lognhii = self.FilterEngine(object, self.nhiilog, LogNHII, LogNHII_min, LogNHII_max, LogNHII_reference)
else:
matching_objects_lognhii = self.object_log
if (fH2 or fH2_min or fH2_max or fH2_reference) is not None:
print("fH2")
matching_objects_fh2 = self.FilterEngine(object, self.fh2log, fH2, fH2_min, fH2_max, fH2_reference)
else:
matching_objects_fh2 = self.object_log
if (RV or RV_min or RV_max or RV_reference) is not None:
print("RV")
matching_objects_rv = self.FilterEngine(object, self.rvlog, RV, RV_min, RV_max, RV_reference)
else:
matching_objects_rv = self.object_log
if (AV or AV_min or AV_max or AV_reference) is not None:
print("AV")
matching_objects_av = self.FilterEngine(object, self.avlog, AV, AV_min, AV_max, AV_reference)
else:
matching_objects_av = self.object_log
# STEP 2: Find the common objects
ebv_objects = matching_objects_ebv['object']
sptype_objects = matching_objects_sptype['object']
lognhi_objects = matching_objects_lognhi['object']
lognhii_objects = matching_objects_lognhii['object']
fh2_objects = matching_objects_fh2['object']
rv_objects = matching_objects_rv['object']
av_objects = matching_objects_av['object']
#print(lognhi_objects.tolist())
#print(ebv_objects.tolist())
#print(sptype_objects.tolist())
##################
if object is None:
search_list = self.object_log["object"].to_list()
else:
search_list = object
common_objects_set = set(search_list).intersection(ebv_objects.to_list(),sptype_objects.to_list(),lognhi_objects.to_list(),lognhii_objects.to_list(),fh2_objects.to_list(),rv_objects.to_list(),av_objects.to_list())
###################
common_objects_list= list(common_objects_set)
print("***Common Objects***")
if len(common_objects_list) == 0:
print("None")
else:
print(common_objects_list)
return (common_objects_list)
def getFilteredObsList(self,object=None, Wave=None, MergedOnly=False, OrdersOnly=False,\
EBV=None, EBV_min=None, EBV_max=None, EBV_reference=None, \
SpType=None, SpType_min=None, SpType_max=None, SpType_reference=None, \
WaveMin=None, WaveMax=None, LogNHI=None,LogNHI_min=None,LogNHI_max=None,\
LogNHI_reference=None,LogNHII=None,LogNHII_min=None,LogNHII_max=None, \
LogNHII_reference=None, fH2=None,fH2_min=None,fH2_max=None, \
fH2_reference=None, RV=None,RV_min=None,RV_max=None, \
RV_reference=None, AV=None,AV_min=None,AV_max=None, \
AV_reference=None):
'''This method will provide a filtered list of observations that match
the specified criteria on sightline/target parameters as well as
on observational criteria (e.g. wavelength range). This function consists
of three steps:
| 1. Find all targets that match specified target parameters. This is done
for each parameter using the FilterEngine function.
| 2. Find the objects that match all target specifications.
| 3. Find the observations that match specified parameters for only these targets. '''
#print(getFilteredObslist.__dict__)
# STEP 1: Filter objects for each of the parameters -- but only if parameters are specified!
if (EBV or EBV_min or EBV_max or EBV_reference) is not None:
print("EBV")
matching_objects_ebv = self.FilterEngine(object, self.ebvlog, EBV, EBV_min, EBV_max, EBV_reference)
else:
matching_objects_ebv = self.object_log
if (SpType or SpType_min or SpType_max or SpType_reference) is not None:
print("SP_TYPE")
matching_objects_sptype = self.FilterEngine(object, self.sptypelog, SpType, SpType_min, SpType_max, SpType_reference)
else:
matching_objects_sptype = self.object_log
if (LogNHI or LogNHI_min or LogNHI_max or LogNHI_reference) is not None:
print("LogN(HI)")
matching_objects_lognhi = self.FilterEngine(object, self.nhilog, LogNHI, LogNHI_min, LogNHI_max, LogNHI_reference)
else:
matching_objects_lognhi = self.object_log
if (LogNHII or LogNHII_min or LogNHII_max or LogNHII_reference) is not None:
print("LogN(HII)")
matching_objects_lognhii = self.FilterEngine(object, self.nhiilog, LogNHII, LogNHII_min, LogNHII_max, LogNHII_reference)
else:
matching_objects_lognhii = self.object_log
if (fH2 or fH2_min or fH2_max or fH2_reference) is not None:
print("fH2")
matching_objects_fh2 = self.FilterEngine(object, self.fh2log, fH2, fH2_min, fH2_max, fH2_reference)
else:
matching_objects_fh2 = self.object_log
if (RV or RV_min or RV_max or RV_reference) is not None:
print("RV")
| |
car_data['labels']:
onehot[car_data['labels']['model'] + offset] = 1.0
offset += offsets[1]
if 'color' in car_data['labels']:
onehot[car_data['labels']['color'] + offset] = 1.0
offset += offsets[2]
if 'manufacturer' in car_data['labels']:
onehot[car_data['labels']['manufacturer'] + offset] = 1.0
offset += offsets[3]
if 'body' in car_data['labels']:
onehot[car_data['labels']['body'] + offset] = 1.0
offset += offsets[4]
if 'orientation' in car_data['labels']:
angle = rotation_angles[car_data['labels']['rotation']]
onehot[offset] = np.round(np.cos(angle), 4) * 10
onehot[offset + 1] = np.round(np.sin(angle), 4) * 10
# onehot[offset + 2] = 1.0 v8
# onehot[rotation_order[car_data['labels']['orientation']] + offset] = 1.0
# offset += len(orientations)
offset += offsets[5]
if 'ratio' in car_data['labels']:
onehot[car_data['labels']['ratio'] + offset] = 1.0
offset += offsets[6]
if 'background' in car_data['labels']:
onehot[car_data['labels']['background'] + offset] = 1.0
labels.append(onehot)
if add_labels:
tfr.add_labels(np.asarray(labels))
def create_from_images_oversample(
        tfrecord_dir='',
        image_dir='',
        shuffle=False,
        width=256,
        height=256):
    """Build a TFRecord dataset from labeled car images, oversampling
    rotation classes so orientations are roughly balanced.

    Labels are concatenated one-hot blocks per attribute (real/fake,
    model, color, manufacturer, body, rotation, ratio, background) with
    block sizes given by `offsets`.
    """
    label_dir = 'E:/cars/labels_body-80_crop-multiple_update-orientation_update-body_background_update-rotation_v2_v3'
    version = 'v7'
    # NOTE: compare strings with ==, not `is`; identity of str literals is
    # an implementation detail (and a SyntaxWarning on CPython >= 3.8).
    if version == 'v5':
        offsets = [1, 67, 12, 18, 10, 8, 5, 6]  # v5
    if version == 'v7':
        offsets = [1, 67, 12, 18, 10, 2, 5, 6]  # v6, v7, v9
        # offsets = [1, 67, 12, 18, 10, 3, 5, 6]  # v8
    num_images = len(os.listdir(label_dir))
    # Per-rotation-class oversampling factors (empirically derived).
    multipliers = [3.069, 1.0, 1.0, 2.255, 2.706, 5.708, 1.158, 1.457]
    with TFRecordExporter(tfrecord_dir, num_images) as tfr:
        labels = []
        for file in tqdm(sorted(os.listdir(label_dir))):
            label_file = label_dir + '/' + file
            with open(label_file) as json_file:
                car_data = json.load(json_file)
            # Oversample under-represented rotations; the fractional part
            # of the multiplier is applied by stochastic rounding.
            repeat = 1
            if 'rotation' in car_data['labels']:
                multiplier = multipliers[car_data['labels']['rotation']]
                repeat = int(np.floor(multiplier) + int(np.random.uniform() < (multiplier - np.floor(multiplier))))
            for _ in range(repeat):
                img_file = image_dir + '/' + file[:-5]
                img = PIL.Image.open(img_file)
                img = img.resize((width, height), PIL.Image.ANTIALIAS)
                img = np.asarray(img)
                img = img.transpose([2, 0, 1])  # HWC => CHW
                canvas = np.zeros([3, width, width], dtype=np.uint8)
                canvas[:, (width - height) // 2: (width + height) // 2] = img
                # NOTE(review): canvas is built but the raw img is stored --
                # confirm the letterboxed canvas was not intended here.
                tfr.add_image(img)
                label_size = np.sum(offsets)
                onehot = np.zeros(label_size, dtype=np.float32)
                onehot[0] = 1.0  # real/fake slot
                rotation_order = [0, 1, 7, 2, 6, 4, 3, 5]  # v5
                if version == 'v7':
                    pi = np.pi  # v6
                    rotation_angles = [0, 0.25 * pi, 1.75 * pi, 0.5 * pi, 1.5 * pi, 1 * pi, 0.75 * pi, 1.25 * pi]  # v6
                    rotation_angles = np.array(rotation_angles)  # v6
                offset = offsets[0]
                if 'model' in car_data['labels']:
                    onehot[car_data['labels']['model'] + offset] = 1.0
                offset += offsets[1]
                if 'color' in car_data['labels']:
                    onehot[car_data['labels']['color'] + offset] = 1.0
                offset += offsets[2]
                if 'manufacturer' in car_data['labels']:
                    onehot[car_data['labels']['manufacturer'] + offset] = 1.0
                offset += offsets[3]
                if 'body' in car_data['labels']:
                    onehot[car_data['labels']['body'] + offset] = 1.0
                offset += offsets[4]
                if 'rotation' in car_data['labels']:
                    if version == 'v7':
                        # Encode the orientation as a (cos, sin) pair.
                        angle = rotation_angles[car_data['labels']['rotation']]  # v6
                        onehot[offset] = np.round(np.cos(angle), 4)  # v6
                        onehot[offset + 1] = np.round(np.sin(angle), 4)  # v6
                    if version == 'v5':
                        onehot[rotation_order[car_data['labels']['rotation']] + offset] = 1.0  # v5
                offset += offsets[5]
                if 'ratio' in car_data['labels']:
                    onehot[car_data['labels']['ratio'] + offset] = 1.0
                offset += offsets[6]
                if 'background' in car_data['labels']:
                    onehot[car_data['labels']['background'] + offset] = 1.0
                labels.append(onehot)
        tfr.add_labels(np.asarray(labels))
def create_from_images_oversample_classifier(
        tfrecord_dir='./datasets/all_cars_all_labels',
        image_dir='../modified_datasets/cars_flat_ratio_warnings',
        shuffle=False,
        width=256,
        height=256
):
    """Build train and held-out test TFRecord datasets with oversampling.

    Roughly 15% of the source files (including their oversampled repeats)
    are routed to a companion '<tfrecord_dir>_test' dataset; the rest go
    to the training dataset.  Labels are concatenated one-hot attribute
    blocks sized by `offsets`.
    """
    label_dir = 'E:/cars/labels_body-80_crop-multiple_update-orientation_update-body_background_update-rotation_v2_v3'
    version = 'v5'
    offsets = [1, 67, 12, 18, 10, 8, 5, 6]  # v5
    # offsets = [1, 67, 12, 18, 10, 2, 5, 6]  # v6, v7, v9
    # offsets = [1, 67, 12, 18, 10, 3, 5, 6]  # v8
    print('counting images')
    num_images = len(os.listdir(label_dir))
    add_images = True
    add_labels = True
    # Per-rotation-class oversampling factors.
    multipliers = [3.069, 1.0, 1.0, 2.255, 2.706, 5.708, 1.158, 1.457]
    test_images = []
    test_labels = []
    test_tfrecord_dir = tfrecord_dir + '_test'
    with TFRecordExporter(tfrecord_dir, num_images) as tfr:
        labels = []
        for file in tqdm(sorted(os.listdir(label_dir))):
            # Hold out ~15% of files for the test set.
            test = np.random.uniform() < 0.15
            label_file = label_dir + '/' + file
            with open(label_file) as json_file:
                car_data = json.load(json_file)
            repeat = 1
            if 'rotation' in car_data['labels']:
                multiplier = multipliers[car_data['labels']['rotation']]
                # Stochastic rounding of the fractional oversampling part.
                repeat = int(
                    np.floor(multiplier) + int(np.random.uniform() < (multiplier - np.floor(multiplier))))
            for _ in range(repeat):
                if add_images:
                    img_file = image_dir + '/' + file[:-5]
                    img = PIL.Image.open(img_file)
                    img = img.resize((width, height), PIL.Image.ANTIALIAS)
                    img = np.asarray(img)
                    img = img.transpose([2, 0, 1])  # HWC => CHW
                    canvas = np.zeros([3, width, width], dtype=np.uint8)
                    canvas[:, (width - height) // 2: (width + height) // 2] = img
                    # NOTE(review): canvas is built but the raw img is
                    # stored -- confirm that is intended.
                    if not test:
                        tfr.add_image(img)
                    else:
                        test_images.append(img)
                if add_labels:
                    label_size = np.sum(offsets)
                    onehot = np.zeros(label_size, dtype=np.float32)
                    onehot[0] = 1.0  # real/fake slot
                    rotation_order = [0, 1, 7, 2, 6, 4, 3, 5]  # v5
                    # NOTE: compare strings with ==, not `is`; identity of
                    # str literals is an implementation detail.
                    if version == 'v7':
                        pi = np.pi  # v6
                        rotation_angles = [0, 0.25 * pi, 1.75 * pi, 0.5 * pi, 1.5 * pi, 1 * pi, 0.75 * pi,
                                           1.25 * pi]  # v6
                        rotation_angles = np.array(rotation_angles)  # v6
                    offset = offsets[0]
                    if 'model' in car_data['labels']:
                        onehot[car_data['labels']['model'] + offset] = 1.0
                    offset += offsets[1]
                    if 'color' in car_data['labels']:
                        onehot[car_data['labels']['color'] + offset] = 1.0
                    offset += offsets[2]
                    if 'manufacturer' in car_data['labels']:
                        onehot[car_data['labels']['manufacturer'] + offset] = 1.0
                    offset += offsets[3]
                    if 'body' in car_data['labels']:
                        onehot[car_data['labels']['body'] + offset] = 1.0
                    offset += offsets[4]
                    if 'rotation' in car_data['labels']:
                        if version == 'v7':
                            angle = rotation_angles[car_data['labels']['rotation']]  # v6
                            onehot[offset] = np.round(np.cos(angle), 4)  # v6
                            onehot[offset + 1] = np.round(np.sin(angle), 4)  # v6
                        if version == 'v5':
                            onehot[rotation_order[car_data['labels']['rotation']] + offset] = 1.0  # v5
                    offset += offsets[5]
                    if 'ratio' in car_data['labels']:
                        onehot[car_data['labels']['ratio'] + offset] = 1.0
                    offset += offsets[6]
                    if 'background' in car_data['labels']:
                        onehot[car_data['labels']['background'] + offset] = 1.0
                    if not test:
                        labels.append(onehot)
                    else:
                        test_labels.append(onehot)
        if add_labels:
            tfr.add_labels(np.asarray(labels))
    with TFRecordExporter(test_tfrecord_dir, expected_images=len(test_images)) as tfr:
        for img in test_images:
            tfr.add_image(img)
        tfr.add_labels(np.asarray(test_labels))
def create_classifier_dataset(
        tfrecord_dir='./datasets/all_cars_all_labels',
        image_dir='/Users/florian/PycharmProjects/stylegan2/datasets/images_256',
        shuffle=False,
        width=256,
        height=256
):
    """Create a single-attribute classifier dataset (image + one-hot label).

    Only images whose label file carries the selected attribute are
    exported, so images and labels stay aligned.
    """
    label_dir = '/Users/florian/OneDrive/labels_body-80_crop-multiple_update-orientation_update-body_background_update-rotation_v2_v3'
    offsets = [1, 67, 12, 18, 10, 8, 5, 6]
    selected_label = 1  # index into label_names below ('model')
    # Per-rotation oversampling factors; empty disables oversampling.
    multipliers = []
    label_names = [
        'real/fake',
        'model',
        'color',
        'manufacturer',
        'body',
        'rotation',
        'ratio',
        'background'
    ]
    print('counting images')
    num_images = len(os.listdir(label_dir))
    add_images = True
    add_labels = True
    with TFRecordExporter(tfrecord_dir, num_images) as tfr:
        labels = []
        for file in tqdm(sorted(os.listdir(label_dir))):
            # BUG FIX: load the label json before consulting it for the
            # oversampling factor (car_data was previously referenced
            # before assignment, raising NameError on the first file).
            label_file = label_dir + '/' + file
            with open(label_file) as json_file:
                car_data = json.load(json_file)
            repeat = 1
            # Oversample only when factors are actually configured; the
            # previous code indexed the empty multipliers list.
            if multipliers and 'rotation' in car_data['labels']:
                multiplier = multipliers[car_data['labels']['rotation']]
                repeat = int(np.floor(multiplier) + int(np.random.uniform() < (multiplier - np.floor(multiplier))))
            for _ in range(repeat):
                add_current_image = False
                if add_labels:
                    onehot = np.zeros(offsets[selected_label], dtype=np.float32)
                    if label_names[selected_label] in car_data['labels']:
                        onehot[car_data['labels'][label_names[selected_label]]] = 1.0
                        add_current_image = True
                        # Append only when the image is exported too, so
                        # labels and images stay in lockstep (previously a
                        # zero label was appended even for skipped images).
                        labels.append(onehot)
                if add_images and add_current_image:
                    img_file = image_dir + '/' + file[:-5]
                    img = PIL.Image.open(img_file)
                    img = img.resize((width, height), PIL.Image.ANTIALIAS)
                    img = np.asarray(img)
                    img = img.transpose([2, 0, 1])  # HWC => CHW
                    tfr.add_image(img)
        if add_labels:
            tfr.add_labels(np.asarray(labels))
# ----------------------------------------------------------------------------
def execute_cmdline(argv):
prog = argv[0]
parser = argparse.ArgumentParser(
prog=prog,
description='Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN.',
epilog='Type "%s <command> -h" for more information.' % prog)
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
def add_command(cmd, desc, example=None):
epilog = 'Example: %s %s' % (prog, example) if example is not None else None
return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)
p = add_command('display', 'Display images in dataset.',
'display datasets/mnist')
p.add_argument('tfrecord_dir', help='Directory containing dataset')
p = add_command('extract', 'Extract images from dataset.',
'extract datasets/mnist mnist-images')
p.add_argument('tfrecord_dir', help='Directory containing dataset')
p.add_argument('output_dir', help='Directory to extract the images into')
p = add_command('compare', 'Compare two datasets.',
'compare datasets/mydataset datasets/mnist')
p.add_argument('tfrecord_dir_a', help='Directory containing first dataset')
p.add_argument('tfrecord_dir_b', help='Directory containing second dataset')
p.add_argument('--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0)
p = add_command('create_mnist', 'Create dataset for MNIST.',
'create_mnist datasets/mnist ~/downloads/mnist')
p.add_argument('tfrecord_dir', help='New dataset directory to be created')
p.add_argument('mnist_dir', help='Directory containing MNIST')
p = add_command('create_mnistrgb', 'Create dataset for MNIST-RGB.',
'create_mnistrgb datasets/mnistrgb ~/downloads/mnist')
p.add_argument('tfrecord_dir', help='New dataset directory to be created')
p.add_argument('mnist_dir', help='Directory containing MNIST')
p.add_argument('--num_images', help='Number of composite images to create (default: 1000000)', type=int,
default=1000000)
p.add_argument('--random_seed', help='Random seed (default: 123)', type=int, default=123)
p = add_command('create_cifar10', 'Create dataset for CIFAR-10.',
'create_cifar10 datasets/cifar10 ~/downloads/cifar10')
p.add_argument('tfrecord_dir', help='New dataset directory to be created')
p.add_argument('cifar10_dir', help='Directory containing CIFAR-10')
p = add_command('create_cifar100', 'Create dataset for CIFAR-100.',
'create_cifar100 datasets/cifar100 ~/downloads/cifar100')
p.add_argument('tfrecord_dir', help='New dataset directory to be created')
p.add_argument('cifar100_dir', help='Directory containing CIFAR-100')
p = add_command('create_svhn', 'Create dataset for SVHN.',
'create_svhn datasets/svhn ~/downloads/svhn')
p.add_argument('tfrecord_dir', help='New dataset directory to be created')
p.add_argument('svhn_dir', help='Directory containing SVHN')
| |
# -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import pandas as pd
from app import app
import os
from plotly.colors import sequential, n_colors
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import numpy as np
import json
from utilities import writeStyle
# ------------ #
# ------------ #
# DATA SETUP #
# ------------ #
# ------------ #
# Module-level data loading for the quality page: everything below runs once
# at import time and is read by the callbacks further down.
# Directories
data_dir = 'data/quality'  # root for all quality-control TSV inputs
quality_dir = os.path.dirname(__file__) # the cwd relative path of the script file
# Filenames
# Group-level summary TSVs: framewise displacement (all subs / mean / sum),
# tSNR per tissue mask (GM/WM/CSF/whole brain) and the overall QC table.
participants_fn = os.path.join('data', 'participants.tsv')
fdallsubs_fn = os.path.join(data_dir, 'sub-all_task-all_run-all_desc-fdallsubs.tsv')
fdmean_fn = os.path.join(data_dir, 'sub-all_task-all_run-all_desc-fdmean.tsv')
fdsum_fn = os.path.join(data_dir, 'sub-all_task-all_run-all_desc-fdsum.tsv')
tsnrgm_fn = os.path.join(data_dir, 'sub-all_task-all_run-all_desc-tsnrgm.tsv')
tsnrwm_fn = os.path.join(data_dir, 'sub-all_task-all_run-all_desc-tsnrwm.tsv')
tsnrcsf_fn = os.path.join(data_dir, 'sub-all_task-all_run-all_desc-tsnrcsf.tsv')
tsnrbrain_fn = os.path.join(data_dir, 'sub-all_task-all_run-all_desc-tsnrbrain.tsv')
qc_summary_fn = os.path.join(data_dir, 'sub-all_task-all_desc-allQCmetrics.tsv')
# Get data
df_participants = pd.read_csv(participants_fn, sep='\t')
df_fdallsubs = pd.read_csv(fdallsubs_fn, sep='\t')
df_fdmean = pd.read_csv(fdmean_fn, sep='\t')
df_fdsum = pd.read_csv(fdsum_fn, sep='\t')
df_tsnrgm = pd.read_csv(tsnrgm_fn, sep='\t')
df_tsnrwm = pd.read_csv(tsnrwm_fn, sep='\t')
df_tsnrcsf = pd.read_csv(tsnrcsf_fn, sep='\t')
df_tsnrbrain = pd.read_csv(tsnrbrain_fn, sep='\t')
df_qcsummary = pd.read_csv(qc_summary_fn, sep='\t')
df_qcsummary = df_qcsummary.round(2)  # round for display in the summary table
# Dataset specifics
all_subs = list(df_participants['participant_id'])
tasks = ['rest', 'motor', 'emotion']
runs = ['1', '2']
cols_tasksruns = ['rest 1', 'motor 1', 'emotion 1', 'rest 2', 'motor 2', 'emotion 2']
# Option lists ({'label', 'value'} dicts) for the Dash dropdown/radio controls.
sub_opts = [{'label': sub, 'value': sub} for sub in all_subs]
subs2 = ['sub-001', 'sub-010']
sub2_opts = [{'label': sub, 'value': sub} for sub in subs2]
task_opts = [{'label': task.capitalize(), 'value': task} for task in tasks]
run_opts = [{'label': 'Run '+run, 'value': run} for run in runs]
# "v2" naming: one entry per task+run combination, with display names aligned
# by index (tasks_v2[i] is shown as tasks_v2_names[i]).
tasks_v2 = ['rest_run-1', 'fingerTapping', 'emotionProcessing', 'rest_run-2', 'fingerTappingImagined', 'emotionProcessingImagined']
tasks_v2_names = ['Rest 1', 'Finger tapping', 'Emotion processing', 'Rest 2', 'Finger tapping (imagined)', 'Emotion processing (imagined)']
task_opts_v2 = [{'label': tasks_v2_names[i], 'value': task} for i, task in enumerate(tasks_v2)]
tasks_1stlevel_v2 = ['fingerTapping', 'emotionProcessing', 'fingerTappingImagined', 'emotionProcessingImagined']
tasks_1stlevel_v2_names = ['Finger tapping', 'Emotion processing', 'Finger tapping (imagined)', 'Emotion processing (imagined)']
tasks_1stlevel_opts_v2 = [{'label': tasks_1stlevel_v2_names[i], 'value': task} for i, task in enumerate(tasks_1stlevel_v2)]
# Physiology data
# One dataframe per task, keyed 'task-<name>'; presumably columns are
# per-subject signal traces — verify against the TSV headers.
respData = {}
cardData = {}
for i, task in enumerate(tasks_v2):
    txt = 'task-' + task
    respData[txt] = pd.read_csv(os.path.join(data_dir, 'sub-all_task-' + task + '_desc-physioResp.tsv'), sep='\t')
    cardData[txt] = pd.read_csv(os.path.join(data_dir, 'sub-all_task-' + task + '_desc-physioCard.tsv'), sep='\t')
# QC summary data
max_rows = 200  # cap on the number of rows rendered in the HTML table
# Static HTML table of the QC summary; cell styling comes from writeStyle(col).
qctable = html.Table([
    html.Thead(
        html.Tr([html.Th(col) for col in df_qcsummary.columns.values.tolist()])
    ),
    html.Tbody([
        html.Tr([
            html.Td(df_qcsummary.iloc[i][col], style=writeStyle(col)) for col in df_qcsummary.columns],
        ) for i in range(min(len(df_qcsummary), max_rows))
    ]),
],
    className='qcsummary',
)
# ------------ #
# ------------ #
# FIGURES #
# ------------ #
# ------------ #
# Figures built once at import time; several are placeholders that callbacks
# below replace on user interaction.
# HEAD MOVEMENT TAB
# Fig 1: one FD violin per subject, coloured along a 28-step gradient.
colors = n_colors('rgb(5, 200, 200)', 'rgb(200, 10, 10)', 28, colortype='rgb')
data = []
layout = go.Layout(
    xaxis = dict(tickangle=45),
    yaxis=dict(title='Framewise displacement (mm)', range=[-0.3, 3]),
    margin={
        't': 10,
    }
)
fig = go.Figure(layout=layout)
i = 0
for colname, color in zip(all_subs, colors):
    data.append(df_fdallsubs[colname].to_numpy())
    fig.add_trace(go.Violin(y=data[i], line_color=color, name=colname, orientation='v', side='positive', width=1.8, points=False, box_visible=True, meanline_visible=True))
    i += 1
fig.update_layout(xaxis_showgrid=False, xaxis_zeroline=False)
# Fig 2 (A) and (B): placeholders, filled per subject by update_fd_persub_figs.
fig2 = go.Figure()
fig2b = go.Figure()
# TSNR TAB
# Fig 3, 4, 4b
fig3 = go.Figure()  # placeholder, filled by reset_tsnr_imgs
# Fig 4: mean whole-brain tSNR per task/run, one line per subject.
layout = go.Layout(
    xaxis = dict(
        tickmode = 'array',
        tickvals = [0, 1, 2, 3, 4, 5],
        ticktext = tasks_v2_names
    ),
    yaxis=dict(title='Mean whole brain tSNR'),
    margin={
        't': 6,
    }
)
fig4 = go.Figure(layout=layout)
dataTSNRmean = []
for i, sub in enumerate(all_subs):
    X = np.arange(6)
    Y1 = df_tsnrbrain.iloc[i].to_numpy()
    fig4.add_trace(go.Scatter(x=X, y=Y1, mode='lines+markers', name=sub, hovertemplate='<b>' + sub + '</b>: %{x}<br>TSNR mean = %{y:.2f} mm<extra></extra>'))
# Fig 4b: grey-matter tSNR distribution per task/run (violin per task).
layout = go.Layout(
    yaxis = dict(title = 'tSNR'), # , range=[-0.5, 2]
    margin = {
        't': 10,
    })
fig4b = go.Figure(layout=layout)
dataTSNRmean = []
for x, taskrun in enumerate(tasks_v2):
    temp_dat = df_tsnrgm[taskrun].to_numpy()
    dataTSNRmean.append(temp_dat)
    fig4b.add_trace(go.Violin(y=dataTSNRmean[x], line_color=sequential.Inferno[3+x], name=tasks_v2_names[x], points='all', pointpos=-0.4, meanline_visible=True, width=1, side='positive', box_visible=True))
fig4b.update_layout(xaxis_showgrid=True, yaxis_showgrid=True, xaxis_zeroline=False, violinmode='group')
# PHYSIOLOGY TAB
# Fig 5 and 6: placeholders, filled by reset_phys_imgs.
fig5 = go.Figure()
fig6 = go.Figure()
# -------------------------------------------------- #
# -------------------------------------------------- #
# -------------------------------------------------- #
# -------------------------------------------------- #
# -------------------------------------------------- #
# -------------------------------------------------- #
# ------------ #
# ------------ #
# LAYOUT #
# ------------ #
# ------------ #
main_md = dcc.Markdown('''
Hello!
''')
# The fMRwhy toolbox has a BIDS-compatible data quality pipeline for functional and anatomical MRI, fmrwhy_bids_workflowQC, that can be run automatically for a full BIDS-compliant dataset. After running minimal preprocessing steps it generates a subject-specific HTML-report with quality control metrics and visualizations to allow inspection of the data and its derivatives. Individual reports can be accessed in the derivatives directory of the shared BIDS-compliant dataset of this study (see Heunis et al., 2020 for details). Additionally, a web-application named rt-me-fMRI is provided along with this work and accessible at: (insert link when ready). This browser-based app can be used interactively to explore various summaries of data quality metrics, including distributions of framewise displacement (FD) and tSNR, and physiology recordings, as well as the results of this study.
# None of the participant datasets were excluded after inspection of the included quality metrics, even in cases of more than average or severe motion (specifically sub-010, sub-020, and sub-021), since such data could still be useful for data quality related insights or for future denoising methods validation.
# Page layout: a tab bar plus an (initially empty) content container.
# NOTE(review): the 'tab-content' div appears to be populated by a callback
# keyed on the active tab that is not visible in this chunk — confirm.
layout = html.Div([
    dcc.Store(id="store"),
    html.Div([
        dbc.Tabs(
            [
                dbc.Tab(label="Description", tab_id="description"),
                dbc.Tab(label="Head movement", tab_id="head_movement"),
                dbc.Tab(label="tSNR", tab_id="tsnr"),
                dbc.Tab(label="Physiology", tab_id="physio"),
                dbc.Tab(label="Tasks", tab_id="tasks"),
                dbc.Tab(label="QC summary", tab_id="qc_summary"),
            ],
            id="tabs",
            active_tab="description",
        ),],
        # centre the tab bar with 5% side margins
        style={
            'marginBottom': 25,
            'marginTop': 25,
            'marginLeft': '5%',
            'maxWidth': '90%',
            'textAlign': 'left'
        }
    ),
    html.Div(id="tab-content", className="p-4",
        style={
            'marginBottom': 25,
            'marginTop': 25,
            'marginLeft': '5%',
            'maxWidth': '90%',
        }),
])
# ------------ #
# ------------ #
# CALLBACKS #
# ------------ #
# ------------ #
# Callback: rebuild the per-subject framewise-displacement figures whenever a
# new subject is chosen in the 'drop_subs_fd' dropdown.
@app.callback(
    [Output('fig2', 'figure'),
     Output('fig2b', 'figure'),
     Output('sub_fd', 'children')],
    [Input('drop_subs_fd', 'value')]
)
def update_fd_persub_figs(selected_sub):
    """Build FD violin (fig2) and stacked timeseries (fig2b) for one subject."""
    fd_df = pd.read_csv(
        os.path.join(data_dir, selected_sub + '_task-all_run-all_desc-fd.tsv'),
        sep='\t')
    violin_fig = go.Figure(
        layout=go.Layout(yaxis=dict(title='Framewise displacement (mm)')))
    series_fig = make_subplots(rows=6, cols=1, shared_xaxes=True,
                               vertical_spacing=0.02)
    for row, colname in enumerate(tasks_v2):
        trace = fd_df[colname].to_numpy()
        colour = sequential.Inferno[row + 3]
        label = tasks_v2_names[row]
        violin_fig.add_trace(go.Violin(
            y=trace, line_color=colour, name=label, orientation='v',
            side='positive', width=1.5, points=False, box_visible=True,
            meanline_visible=True))
        series_fig.add_trace(
            go.Scatter(y=trace, mode='lines',
                       line=dict(color=colour, width=2), name=label),
            row=row + 1, col=1)
        series_fig.update_yaxes(title_text="", range=[0, 2],
                                showticklabels=False, row=row + 1, col=1)
        if row == 3:
            # shared y-axis label, placed on the middle subplot
            series_fig.update_yaxes(
                title_text="\t\tFramewise Displacement (0-2mm)",
                row=row + 1, col=1)
        if row == 5:
            series_fig.update_xaxes(title_text="Functional volumes",
                                    row=row + 1, col=1)
    violin_fig.update_layout(xaxis_showgrid=False, xaxis_zeroline=False,
                             margin={'t': 10})
    series_fig.update_layout(xaxis_showgrid=False, xaxis_zeroline=False,
                             margin={'t': 10})
    new_heading = 'Framewise displacement distributions and timeseries: ' + selected_sub
    return [violin_fig, series_fig, new_heading]
# # Callback for updating tsnr html and figure based on drop1, radio1, radio2 values
# @app.callback(
# Output('drop_subs_tsnr','value'),
# [Input('fig4', 'clickData')]
# )
# def reset_sub_tsnr_info(clickData):
# if clickData is None:
# raise PreventUpdate
# else:
# selected_sub = all_subs[clickData['points'][0]['curveNumber']]
# return selected_sub
@app.callback(
    Output('fig4', 'figure'),
    [Input('fig4', 'hoverData')]
)
def highlight_trace(hover_data):
    """Thicken the hovered subject's line in the module-level fig4.

    All traces are first reset to the default width, then the hovered
    trace (if any) is emphasized; the mutated fig4 is returned.
    """
    default_width = 2
    for trace in fig4.data:
        trace["line"]["width"] = default_width
    if hover_data:
        hovered = hover_data["points"][0]["curveNumber"]
        fig4.data[hovered]["line"]["width"] = 4.5
    return fig4
# Callback: rebuild the tSNR violin figure for one subject/task combination.
@app.callback(
    Output('fig3', 'figure'),
    [Input('drop_subs_tsnr','value'),
     Input('radio_tasks_tsnr','value')]
)
def reset_tsnr_imgs(sub, task):
    """Plot tSNR distributions for brain/GM/WM/CSF masks of `sub` + `task`."""
    prefix = sub + '_task-' + task + '_echo-2_desc-rapreproc_'

    def _tsnr_values(suffix):
        # one TSV per tissue mask; drop NaN voxels before plotting
        fn = os.path.join(data_dir, prefix + suffix)
        return pd.read_csv(fn, sep='\t').dropna()['tsnr'].to_numpy()

    masks = [('braintsnr.tsv', 'Brain', 5),
             ('GMtsnr.tsv', 'GM', 6),
             ('WMtsnr.tsv', 'WM', 7),
             ('CSFtsnr.tsv', 'CSF', 8)]
    fig3 = go.Figure(layout=go.Layout(
        yaxis=dict(title='Masks'),
        xaxis=dict(title='Temporal signal-to-noise ratio (tSNR)',
                   range=[-20, 250]),
        margin={'t': 0}))
    for suffix, label, shade in masks:
        fig3.add_trace(go.Violin(x=_tsnr_values(suffix),
                                 line_color=sequential.Inferno[shade],
                                 name=label))
    fig3.update_traces(orientation='h', side='positive', width=3, points=False)
    fig3.update_layout(xaxis_showgrid=True, yaxis_showgrid=True,
                       xaxis_zeroline=False, legend={'traceorder': 'reversed'})
    return fig3
# Callback: rebuild the stacked respiratory/cardiac trace figures per task.
@app.callback(
    [Output('fig5', 'figure'),
     Output('fig6', 'figure'),
     Output('resp_heading', 'children'),
     Output('card_heading', 'children')],
    [Input('radio3phys','value')]
)
def reset_phys_imgs(task):
    """Plot respiratory (fig5) and cardiac (fig6) signals of all subjects."""
    key = 'task-' + task

    def _phys_figure(ytitle):
        return go.Figure(layout=go.Layout(
            xaxis=dict(title='Time'),
            yaxis=dict(title=ytitle, range=[-250, 10]),
            margin={'t': 20}))

    resp_fig = _phys_figure('Respiration signals (a.u.)')
    card_fig = _phys_figure('Cardiac signals (a.u.)')
    for offset, sub in enumerate(all_subs):
        # amplify each signal and shift it down so the traces stack vertically
        resp = 5 * respData[key][sub].to_numpy() - offset * 9
        card = 5 * cardData[key][sub].to_numpy() - offset * 9
        resp_fig.add_trace(go.Scatter(y=resp, mode='lines',
                                      line=dict(width=2), name=sub))
        card_fig.add_trace(go.Scatter(y=card, mode='lines',
                                      line=dict(width=1), name=sub))
    for phys_fig in (resp_fig, card_fig):
        phys_fig.update_layout(xaxis_showgrid=False, xaxis_zeroline=False,
                               height=900)
        phys_fig.update_yaxes(showticklabels=False)
    resp_heading = 'Respiratory signals for all subjects: ' + task
    card_heading = 'Cardiac signals for all subjects: ' + task
    return [resp_fig, card_fig, resp_heading, card_heading]
# ------------- #
# ------------- #
# LAYOUT UPDATE #
# ------------- #
# ------------- #
quality1_md = dcc.Markdown('''
The anatomical and functional data of all participants underwent standard preprocessing and quality control steps (depicted below),
using the open source MATLAB-based and Octave-compatible | |
Haskell'
ProgrammingLanguage__Java = 'Programming Language :: Java'
ProgrammingLanguage__JavaScript = 'Programming Language :: JavaScript'
ProgrammingLanguage__Lisp = 'Programming Language :: Lisp'
ProgrammingLanguage__Logo = 'Programming Language :: Logo'
ProgrammingLanguage__ML = 'Programming Language :: ML'
ProgrammingLanguage__Modula = 'Programming Language :: Modula'
ProgrammingLanguage__OCaml = 'Programming Language :: OCaml'
ProgrammingLanguage__ObjectPascal = 'Programming Language :: Object Pascal'
ProgrammingLanguage__ObjectiveC = 'Programming Language :: Objective C'
ProgrammingLanguage__Other = 'Programming Language :: Other'
ProgrammingLanguage__OtherScriptingEngines = 'Programming Language :: Other Scripting Engines'
ProgrammingLanguage__PHP = 'Programming Language :: PHP'
ProgrammingLanguage__PL_SQL = 'Programming Language :: PL/SQL'
ProgrammingLanguage__PROGRESS = 'Programming Language :: PROGRESS'
ProgrammingLanguage__Pascal = 'Programming Language :: Pascal'
ProgrammingLanguage__Perl = 'Programming Language :: Perl'
ProgrammingLanguage__Pike = 'Programming Language :: Pike'
ProgrammingLanguage__Pliant = 'Programming Language :: Pliant'
ProgrammingLanguage__Prolog = 'Programming Language :: Prolog'
ProgrammingLanguage__Python = 'Programming Language :: Python'
ProgrammingLanguage__Python__2 = 'Programming Language :: Python :: 2'
ProgrammingLanguage__Python__2__Only = 'Programming Language :: Python :: 2 :: Only'
ProgrammingLanguage__Python__23 = 'Programming Language :: Python :: 2.3'
ProgrammingLanguage__Python__24 = 'Programming Language :: Python :: 2.4'
ProgrammingLanguage__Python__25 = 'Programming Language :: Python :: 2.5'
ProgrammingLanguage__Python__26 = 'Programming Language :: Python :: 2.6'
ProgrammingLanguage__Python__27 = 'Programming Language :: Python :: 2.7'
ProgrammingLanguage__Python__3 = 'Programming Language :: Python :: 3'
ProgrammingLanguage__Python__3__Only = 'Programming Language :: Python :: 3 :: Only'
ProgrammingLanguage__Python__30 = 'Programming Language :: Python :: 3.0'
ProgrammingLanguage__Python__31 = 'Programming Language :: Python :: 3.1'
ProgrammingLanguage__Python__32 = 'Programming Language :: Python :: 3.2'
ProgrammingLanguage__Python__33 = 'Programming Language :: Python :: 3.3'
ProgrammingLanguage__Python__34 = 'Programming Language :: Python :: 3.4'
ProgrammingLanguage__Python__35 = 'Programming Language :: Python :: 3.5'
ProgrammingLanguage__Python__36 = 'Programming Language :: Python :: 3.6'
ProgrammingLanguage__Python__37 = 'Programming Language :: Python :: 3.7'
ProgrammingLanguage__Python__38 = 'Programming Language :: Python :: 3.8'
ProgrammingLanguage__Python__39 = 'Programming Language :: Python :: 3.9'
ProgrammingLanguage__Python__Implementation = 'Programming Language :: Python :: Implementation'
ProgrammingLanguage__Python__Implementation__CPython = 'Programming Language :: Python :: Implementation :: CPython'
ProgrammingLanguage__Python__Implementation__IronPython = \
'Programming Language :: Python :: Implementation :: IronPython'
ProgrammingLanguage__Python__Implementation__Jython = 'Programming Language :: Python :: Implementation :: Jython'
ProgrammingLanguage__Python__Implementation__MicroPython = \
'Programming Language :: Python :: Implementation :: MicroPython'
ProgrammingLanguage__Python__Implementation__PyPy = 'Programming Language :: Python :: Implementation :: PyPy'
ProgrammingLanguage__Python__Implementation__Stackless = \
'Programming Language :: Python :: Implementation :: Stackless'
ProgrammingLanguage__R = 'Programming Language :: R'
ProgrammingLanguage__REBOL = 'Programming Language :: REBOL'
ProgrammingLanguage__Rexx = 'Programming Language :: Rexx'
ProgrammingLanguage__Ruby = 'Programming Language :: Ruby'
ProgrammingLanguage__Rust = 'Programming Language :: Rust'
ProgrammingLanguage__SQL = 'Programming Language :: SQL'
ProgrammingLanguage__Scheme = 'Programming Language :: Scheme'
ProgrammingLanguage__Simula = 'Programming Language :: Simula'
ProgrammingLanguage__Smalltalk = 'Programming Language :: Smalltalk'
ProgrammingLanguage__Tcl = 'Programming Language :: Tcl'
ProgrammingLanguage__UnixShell = 'Programming Language :: Unix Shell'
ProgrammingLanguage__VisualBasic = 'Programming Language :: Visual Basic'
ProgrammingLanguage__XBasic = 'Programming Language :: XBasic'
ProgrammingLanguage__YACC = 'Programming Language :: YACC'
ProgrammingLanguage__Zope = 'Programming Language :: Zope'
Topic__AdaptiveTechnologies = 'Topic :: Adaptive Technologies'
Topic__ArtisticSoftware = 'Topic :: Artistic Software'
Topic__Communications = 'Topic :: Communications'
Topic__Communications__BBS = 'Topic :: Communications :: BBS'
Topic__Communications__Chat = 'Topic :: Communications :: Chat'
Topic__Communications__Chat__ICQ = 'Topic :: Communications :: Chat :: ICQ'
Topic__Communications__Chat__InternetRelayChat = 'Topic :: Communications :: Chat :: Internet Relay Chat'
Topic__Communications__Chat__UnixTalk = 'Topic :: Communications :: Chat :: Unix Talk'
Topic__Communications__Conferencing = 'Topic :: Communications :: Conferencing'
Topic__Communications__Email = 'Topic :: Communications :: Email'
Topic__Communications__Email__AddressBook = 'Topic :: Communications :: Email :: Address Book'
Topic__Communications__Email__EmailClients_MUA_ = 'Topic :: Communications :: Email :: Email Clients (MUA)'
Topic__Communications__Email__Filters = 'Topic :: Communications :: Email :: Filters'
Topic__Communications__Email__MailTransportAgents = 'Topic :: Communications :: Email :: Mail Transport Agents'
Topic__Communications__Email__MailingListServers = 'Topic :: Communications :: Email :: Mailing List Servers'
Topic__Communications__Email__Post_Office = 'Topic :: Communications :: Email :: Post-Office'
Topic__Communications__Email__Post_Office__IMAP = 'Topic :: Communications :: Email :: Post-Office :: IMAP'
Topic__Communications__Email__Post_Office__POP3 = 'Topic :: Communications :: Email :: Post-Office :: POP3'
Topic__Communications__FIDO = 'Topic :: Communications :: FIDO'
Topic__Communications__Fax = 'Topic :: Communications :: Fax'
Topic__Communications__FileSharing = 'Topic :: Communications :: File Sharing'
Topic__Communications__FileSharing__Gnutella = 'Topic :: Communications :: File Sharing :: Gnutella'
Topic__Communications__FileSharing__Napster = 'Topic :: Communications :: File Sharing :: Napster'
Topic__Communications__HamRadio = 'Topic :: Communications :: Ham Radio'
Topic__Communications__InternetPhone = 'Topic :: Communications :: Internet Phone'
Topic__Communications__Telephony = 'Topic :: Communications :: Telephony'
Topic__Communications__UsenetNews = 'Topic :: Communications :: Usenet News'
Topic__Database = 'Topic :: Database'
Topic__Database__DatabaseEngines_Servers = 'Topic :: Database :: Database Engines/Servers'
Topic__Database__Front_Ends = 'Topic :: Database :: Front-Ends'
Topic__DesktopEnvironment = 'Topic :: Desktop Environment'
Topic__DesktopEnvironment__FileManagers = 'Topic :: Desktop Environment :: File Managers'
Topic__DesktopEnvironment__GNUstep = 'Topic :: Desktop Environment :: GNUstep'
Topic__DesktopEnvironment__Gnome = 'Topic :: Desktop Environment :: Gnome'
Topic__DesktopEnvironment__KDesktopEnvironment_KDE_ = 'Topic :: Desktop Environment :: K Desktop Environment (KDE)'
Topic__DesktopEnvironment__KDesktopEnvironment_KDE___Themes = \
'Topic :: Desktop Environment :: K Desktop Environment (KDE) :: Themes'
Topic__DesktopEnvironment__PicoGUI = 'Topic :: Desktop Environment :: PicoGUI'
Topic__DesktopEnvironment__PicoGUI__Applications = 'Topic :: Desktop Environment :: PicoGUI :: Applications'
Topic__DesktopEnvironment__PicoGUI__Themes = 'Topic :: Desktop Environment :: PicoGUI :: Themes'
Topic__DesktopEnvironment__ScreenSavers = 'Topic :: Desktop Environment :: Screen Savers'
Topic__DesktopEnvironment__WindowManagers = 'Topic :: Desktop Environment :: Window Managers'
Topic__DesktopEnvironment__WindowManagers__Afterstep = 'Topic :: Desktop Environment :: Window Managers :: Afterstep'
Topic__DesktopEnvironment__WindowManagers__Afterstep__Themes = \
'Topic :: Desktop Environment :: Window Managers :: Afterstep :: Themes'
Topic__DesktopEnvironment__WindowManagers__Applets = 'Topic :: Desktop Environment :: Window Managers :: Applets'
Topic__DesktopEnvironment__WindowManagers__Blackbox = 'Topic :: Desktop Environment :: Window Managers :: Blackbox'
Topic__DesktopEnvironment__WindowManagers__Blackbox__Themes = \
'Topic :: Desktop Environment :: Window Managers :: Blackbox :: Themes'
Topic__DesktopEnvironment__WindowManagers__CTWM = 'Topic :: Desktop Environment :: Window Managers :: CTWM'
Topic__DesktopEnvironment__WindowManagers__CTWM__Themes = \
'Topic :: Desktop Environment :: Window Managers :: CTWM :: Themes'
Topic__DesktopEnvironment__WindowManagers__Enlightenment = \
'Topic :: Desktop Environment :: Window Managers :: Enlightenment'
Topic__DesktopEnvironment__WindowManagers__Enlightenment__Epplets = \
'Topic :: Desktop Environment :: Window Managers :: Enlightenment :: Epplets'
Topic__DesktopEnvironment__WindowManagers__Enlightenment__ThemesDR15 = \
'Topic :: Desktop Environment :: Window Managers :: Enlightenment :: Themes DR15'
Topic__DesktopEnvironment__WindowManagers__Enlightenment__ThemesDR16 = \
'Topic :: Desktop Environment :: Window Managers :: Enlightenment :: Themes DR16'
Topic__DesktopEnvironment__WindowManagers__Enlightenment__ThemesDR17 = \
'Topic :: Desktop Environment :: Window Managers :: Enlightenment :: Themes DR17'
Topic__DesktopEnvironment__WindowManagers__FVWM = 'Topic :: Desktop Environment :: Window Managers :: FVWM'
Topic__DesktopEnvironment__WindowManagers__FVWM__Themes = \
'Topic :: Desktop Environment :: Window Managers :: FVWM :: Themes'
Topic__DesktopEnvironment__WindowManagers__Fluxbox = 'Topic :: Desktop Environment :: Window Managers :: Fluxbox'
Topic__DesktopEnvironment__WindowManagers__Fluxbox__Themes = \
'Topic :: Desktop Environment :: Window Managers :: Fluxbox :: Themes'
Topic__DesktopEnvironment__WindowManagers__IceWM = 'Topic :: Desktop Environment :: Window Managers :: IceWM'
Topic__DesktopEnvironment__WindowManagers__IceWM__Themes = \
'Topic :: Desktop Environment :: Window Managers :: IceWM :: Themes'
Topic__DesktopEnvironment__WindowManagers__MetaCity = 'Topic :: Desktop Environment :: Window Managers :: MetaCity'
Topic__DesktopEnvironment__WindowManagers__MetaCity__Themes = \
'Topic :: Desktop Environment :: Window Managers :: MetaCity :: Themes'
Topic__DesktopEnvironment__WindowManagers__Oroborus = 'Topic :: Desktop Environment :: Window Managers :: Oroborus'
Topic__DesktopEnvironment__WindowManagers__Oroborus__Themes = \
'Topic :: Desktop Environment :: Window Managers :: Oroborus :: Themes'
Topic__DesktopEnvironment__WindowManagers__Sawfish = 'Topic :: Desktop Environment :: Window Managers :: Sawfish'
Topic__DesktopEnvironment__WindowManagers__Sawfish__Themes030 = \
'Topic :: Desktop Environment :: Window Managers :: Sawfish :: Themes 0.30'
Topic__DesktopEnvironment__WindowManagers__Sawfish__Themespre_030 = \
'Topic :: Desktop Environment :: Window Managers :: Sawfish :: Themes pre-0.30'
Topic__DesktopEnvironment__WindowManagers__Waimea = 'Topic :: Desktop Environment :: Window Managers :: Waimea'
Topic__DesktopEnvironment__WindowManagers__Waimea__Themes = \
'Topic :: Desktop Environment :: Window Managers :: Waimea :: Themes'
Topic__DesktopEnvironment__WindowManagers__WindowMaker = \
'Topic :: Desktop Environment :: Window Managers :: Window Maker'
Topic__DesktopEnvironment__WindowManagers__WindowMaker__Applets = \
'Topic :: Desktop Environment :: Window Managers :: Window Maker :: Applets'
Topic__DesktopEnvironment__WindowManagers__WindowMaker__Themes = \
'Topic :: Desktop Environment :: Window Managers :: Window Maker :: Themes'
Topic__DesktopEnvironment__WindowManagers__XFCE = 'Topic :: Desktop Environment :: Window Managers :: XFCE'
Topic__DesktopEnvironment__WindowManagers__XFCE__Themes = \
'Topic :: Desktop Environment :: Window Managers :: XFCE :: Themes'
Topic__Documentation = 'Topic :: Documentation'
Topic__Documentation__Sphinx = 'Topic :: Documentation :: Sphinx'
Topic__Education = 'Topic :: Education'
Topic__Education__ComputerAidedInstruction_CAI_ = 'Topic :: Education :: Computer Aided Instruction (CAI)'
Topic__Education__Testing = 'Topic :: Education :: Testing'
Topic__Games_Entertainment = 'Topic :: Games/Entertainment'
Topic__Games_Entertainment__Arcade = 'Topic :: Games/Entertainment :: Arcade'
Topic__Games_Entertainment__BoardGames = 'Topic :: Games/Entertainment :: Board Games'
Topic__Games_Entertainment__FirstPersonShooters = 'Topic :: Games/Entertainment :: First Person Shooters'
Topic__Games_Entertainment__FortuneCookies = 'Topic :: Games/Entertainment :: Fortune Cookies'
Topic__Games_Entertainment__Multi_UserDungeons_MUD_ = 'Topic :: Games/Entertainment :: Multi-User Dungeons (MUD)'
Topic__Games_Entertainment__PuzzleGames = 'Topic :: Games/Entertainment :: Puzzle Games'
Topic__Games_Entertainment__RealTimeStrategy = 'Topic :: Games/Entertainment :: Real Time Strategy'
Topic__Games_Entertainment__Role_Playing = 'Topic :: Games/Entertainment :: Role-Playing'
Topic__Games_Entertainment__Side_Scrolling_ArcadeGames = 'Topic :: Games/Entertainment :: Side-Scrolling/Arcade Games'
Topic__Games_Entertainment__Simulation = 'Topic :: Games/Entertainment :: Simulation'
Topic__Games_Entertainment__TurnBasedStrategy = 'Topic :: Games/Entertainment :: Turn Based Strategy'
Topic__HomeAutomation = 'Topic :: Home Automation'
Topic__Internet = 'Topic :: Internet'
Topic__Internet__FileTransferProtocol_FTP_ = 'Topic :: Internet :: File Transfer Protocol (FTP)'
Topic__Internet__Finger = 'Topic :: Internet :: Finger'
Topic__Internet__LogAnalysis = 'Topic :: Internet :: Log Analysis'
Topic__Internet__NameService_DNS_ = 'Topic :: Internet :: Name Service (DNS)'
Topic__Internet__ProxyServers = 'Topic :: Internet :: Proxy Servers'
Topic__Internet__WAP = 'Topic :: Internet :: WAP'
Topic__Internet__WWW_HTTP = 'Topic :: Internet :: WWW/HTTP'
Topic__Internet__WWW_HTTP__Browsers = 'Topic :: Internet :: WWW/HTTP :: Browsers'
Topic__Internet__WWW_HTTP__DynamicContent = 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'
Topic__Internet__WWW_HTTP__DynamicContent__CGITools_Libraries = \
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries'
Topic__Internet__WWW_HTTP__DynamicContent__ContentManagementSystem = \
| |
# Source repository: daisyden/lpot
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from collections import Counter
from lpot.utils.utility import LazyImport, singleton
from ..utils import logger
from sklearn.metrics import accuracy_score
import numpy as np
# Lazily import the heavy framework packages so only the framework actually
# used at runtime gets imported.
torch_ignite = LazyImport('ignite')
torch = LazyImport('torch')
tf = LazyImport('tensorflow')
mx = LazyImport('mxnet')
@singleton
class TensorflowMetrics(object):
    """Singleton registry of TensorFlow metrics."""

    def __init__(self):
        # copy so later register() calls do not mutate TENSORFLOW_METRICS
        self.metrics = dict(TENSORFLOW_METRICS)
@singleton
class PyTorchMetrics(object):
    """Singleton registry of PyTorch metrics, seeded with ignite wrappers."""

    def __init__(self):
        ignite_metrics = torch_ignite.metrics
        # built-in ignite metrics, wrapped into the common Metric interface
        self.metrics = {
            "topk": WrapPyTorchMetric(ignite_metrics.TopKCategoricalAccuracy),
            "Accuracy": WrapPyTorchMetric(ignite_metrics.Accuracy),
            "Loss": WrapPyTorchMetric(ignite_metrics.Loss),
            "MAE": WrapPyTorchMetric(ignite_metrics.MeanAbsoluteError),
            "RMSE": WrapPyTorchMetric(ignite_metrics.RootMeanSquaredError),
            "MSE": WrapPyTorchMetric(ignite_metrics.MeanSquaredError),
        }
        # user-registered metrics override/extend the built-ins
        self.metrics.update(PYTORCH_METRICS)
@singleton
class MXNetMetrics(object):
    """Singleton registry of MXNet metrics, seeded with mx.metric wrappers."""

    def __init__(self):
        builtin = {
            "Accuracy": mx.metric.Accuracy,
            "MAE": mx.metric.MAE,
            "MSE": mx.metric.MSE,
            "Loss": mx.metric.Loss,
        }
        self.metrics = {name: WrapMXNetMetric(cls) for name, cls in builtin.items()}
        # user-registered metrics override/extend the built-ins
        self.metrics.update(MXNET_METRICS)
@singleton
class ONNXRTQLMetrics(object):
    """Singleton registry of ONNX Runtime (qlinearops) metrics."""

    def __init__(self):
        # copy so later register() calls do not mutate ONNXRT_QL_METRICS
        self.metrics = dict(ONNXRT_QL_METRICS)
@singleton
class ONNXRTITMetrics(object):
    """Singleton registry of ONNX Runtime (integerops) metrics."""

    def __init__(self):
        # copy so later register() calls do not mutate ONNXRT_IT_METRICS
        self.metrics = dict(ONNXRT_IT_METRICS)
# Map each supported framework string to its singleton metrics-registry class.
# Note: "pytorch" and "pytorch_ipex" share the same registry class.
framework_metrics = {"tensorflow": TensorflowMetrics,
                     "mxnet": MXNetMetrics,
                     "pytorch": PyTorchMetrics,
                     "pytorch_ipex": PyTorchMetrics,
                     "onnxrt_qlinearops": ONNXRTQLMetrics,
                     "onnxrt_integerops": ONNXRTITMetrics}
# user/model specific metrics will be registered here
# (populated at import time by the @metric_registry class decorator below)
TENSORFLOW_METRICS = {}
MXNET_METRICS = {}
PYTORCH_METRICS = {}
ONNXRT_QL_METRICS = {}
ONNXRT_IT_METRICS = {}
# The same per-framework dicts, keyed by framework name, used by
# metric_registry to look up where a new metric class should be stored.
registry_metrics = {"tensorflow": TENSORFLOW_METRICS,
                    "mxnet": MXNET_METRICS,
                    "pytorch": PYTORCH_METRICS,
                    "pytorch_ipex": PYTORCH_METRICS,
                    "onnxrt_qlinearops": ONNXRT_QL_METRICS,
                    "onnxrt_integerops": ONNXRT_IT_METRICS}
class METRICS(object):
    """Lookup and registration front-end for one framework's metrics."""

    def __init__(self, framework):
        # the framework string selects which singleton registry to use
        assert framework in ("tensorflow", "pytorch", "pytorch_ipex", "onnxrt_qlinearops",
                             "onnxrt_integerops", "mxnet"), \
            "framework support tensorflow pytorch mxnet onnxrt"
        self.metrics = framework_metrics[framework]().metrics

    def __getitem__(self, metric_type):
        """Return the metric registered under `metric_type`."""
        assert metric_type in self.metrics.keys(), \
            "only support metrics in {}".format(self.metrics.keys())
        return self.metrics[metric_type]

    def register(self, name, metric_cls):
        """Register `metric_cls` under `name`; duplicate names are rejected."""
        assert name not in self.metrics.keys(), 'registered metric name already exists.'
        self.metrics[name] = metric_cls
def metric_registry(metric_type, framework):
    """The class decorator used to register all Metric subclasses.

    Cross-framework metrics are supported by passing a comma-separated
    framework string, e.g. framework='tensorflow, pytorch, mxnet, onnxrt'.

    Args:
        metric_type (str): name under which the decorated class is registered.
        framework (str): comma-separated list of target frameworks.

    Returns:
        callable: a decorator that registers its class and returns it unchanged.
    """
    frameworks = [fwk.strip() for fwk in framework.split(',')]

    def decorator_metric(cls):
        for single_framework in frameworks:
            assert single_framework in [
                "tensorflow",
                "mxnet",
                "onnxrt_qlinearops",
                "onnxrt_integerops",
                "pytorch",
                "pytorch_ipex"], "The framework support tensorflow mxnet pytorch onnxrt"
            if metric_type in registry_metrics[single_framework].keys():
                raise ValueError('Cannot have two metrics with the same name')
            registry_metrics[single_framework][metric_type] = cls
        return cls
    return decorator_metric
class Metric(object):
    """Deferred-construction wrapper around a framework metric class.

    The wrapped class is stored at wrap time; the actual metric instance is
    created when the wrapper itself is called, forwarding any arguments.
    Subclasses implement update/reset/result against ``self._metric``.
    """

    def __init__(self, metric, single_output=False):
        self._metric_cls = metric
        self._single_output = single_output

    def __call__(self, *args, **kwargs):
        # Instantiate lazily with caller-supplied arguments and return the
        # wrapper itself so construction can be chained.
        self._metric = self._metric_cls(*args, **kwargs)
        return self

    @abstractmethod
    def update(self, preds, labels=None, sample_weight=None):
        raise NotImplementedError

    @abstractmethod
    def reset(self):
        raise NotImplementedError

    @abstractmethod
    def result(self):
        raise NotImplementedError

    @property
    def metric(self):
        # The underlying metric instance (only set after __call__).
        return self._metric
class WrapPyTorchMetric(Metric):
    """Adapt a metric object exposing update/reset/compute to the Metric API,
    converting inputs to torch tensors first."""

    def update(self, preds, labels=None, sample_weight=None):
        preds_t = torch.as_tensor(preds)
        # Single-output metrics get the predictions tensor alone; otherwise
        # a (preds, labels) pair is fed to the wrapped metric.
        if self._single_output:
            output = preds_t
        else:
            output = (preds_t, torch.as_tensor(labels))
        self._metric.update(output)

    def reset(self):
        self._metric.reset()

    def result(self):
        return self._metric.compute()
class WrapMXNetMetric(Metric):
    """Adapt a metric object exposing update/reset/get to the Metric API,
    converting inputs to MXNet NDArrays first."""

    def update(self, preds, labels=None, sample_weight=None):
        self._metric.update(labels=mx.nd.array(labels), preds=mx.nd.array(preds))

    def reset(self):
        self._metric.reset()

    def result(self):
        # The wrapped metric's get() returns a (name, value) tuple;
        # only the value is exposed.
        return self._metric.get()[1]
class WrapONNXRTMetric(Metric):
    """Adapt a metric object exposing update/reset/get to the Metric API,
    converting inputs to numpy arrays first."""

    def update(self, preds, labels=None, sample_weight=None):
        self._metric.update(labels=np.array(labels), preds=np.array(preds))

    def reset(self):
        self._metric.reset()

    def result(self):
        # The wrapped metric's get() returns a (name, value) tuple;
        # only the value is exposed.
        return self._metric.get()[1]
def _topk_shape_validate(preds, labels):
# preds shape can be Nxclass_num or class_num(N=1 by default)
# it's more suitable for 'Accuracy' with preds shape Nx1(or 1) output from argmax
if isinstance(preds, int):
preds = [preds]
preds = np.array(preds)
# consider labels just int value 1x1
if isinstance(labels, int):
labels = [labels]
# labels most have 2 axis, 2 cases: N(or Nx1 sparse) or Nxclass_num(one-hot)
# only support 2 dimension one-shot labels
# or 1 dimension one-hot class_num will confuse with N
labels = np.array(labels)
if len(preds.shape) == 1:
N = 1
class_num = preds.shape[0]
preds = preds.reshape([-1, class_num])
elif len(preds.shape) >= 2:
N = preds.shape[0]
preds = preds.reshape([N, -1])
class_num = preds.shape[1]
label_N = labels.shape[0]
assert label_N == N, 'labels batch size should same with preds'
labels = labels.reshape([N, -1])
# one-hot labels will have 2 dimension not equal 1
if labels.shape[1] != 1:
labels = labels.argsort()[..., -1:]
return preds, labels
def _shape_validate(preds, labels):
if isinstance(preds, int):
preds = [preds]
preds = np.array(preds)
if isinstance(labels, int):
labels = [labels]
labels = np.array(labels)
assert preds.shape == labels.shape, 'shape of labels {} does not match \
shape of predictions {}'.format(labels.shape, preds.shape)
return preds, labels
@metric_registry('topk', 'mxnet')
class MxnetTopK(Metric):
    """The class of calculating topk metric, which usually is used in classification.

    Args:
        topk (dict): The dict of topk for configuration.
    """

    def __init__(self, k=1):
        self.k = k
        self.num_correct = 0
        self.num_sample = 0

    def update(self, preds, labels, sample_weight=None):
        preds, labels = _topk_shape_validate(preds, labels)
        # Indices of the k highest-scoring classes per sample.
        topk_indices = preds.argsort()[..., -self.k:]
        if self.k == 1:
            # Fast path: count exact matches directly.
            self.num_correct += accuracy_score(topk_indices, labels, normalize=False)
        else:
            for candidate_row, label in zip(topk_indices, labels):
                # get top-k labels with np.argpartition
                # candidate_row = np.argpartition(candidate_row, -self.k)[-self.k:]
                if label.astype('int32') in candidate_row:
                    self.num_correct += 1
        self.num_sample += len(labels)

    def reset(self):
        self.num_correct = 0
        self.num_sample = 0

    def result(self):
        # Guard against division by zero before any samples were seen.
        if self.num_sample == 0:
            logger.warning("sample num is 0 can't calculate topk")
            return 0
        return self.num_correct / self.num_sample
@metric_registry('F1', 'tensorflow, pytorch, mxnet, onnxrt_qlinearops, onnxrt_integerops')
class F1(Metric):
    """Mean of per-batch F1 scores accumulated across update() calls."""

    def __init__(self):
        self._score_list = []

    def update(self, preds, labels):
        # Local import defers the dependency until the metric is used.
        from .f1 import f1_score
        self._score_list.append(f1_score(preds, labels))

    def reset(self):
        self._score_list = []

    def result(self):
        return np.array(self._score_list).mean()
def _accuracy_shape_check(preds, labels):
if isinstance(preds, int):
preds = [preds]
preds = np.array(preds)
if isinstance(labels, int):
labels = [labels]
labels = np.array(labels)
if len(labels.shape) != len(preds.shape) and len(labels.shape)+1 != len(preds.shape):
raise ValueError(
'labels must have shape of (batch_size, ..) and preds must have'
'shape of (batch_size, num_classes, ...) or (batch_size, ..),'
'but given {} and {}.'.format(labels.shape, preds.shape))
return preds, labels
def _accuracy_type_check(preds, labels):
if len(preds.shape) == len(labels.shape)+1:
num_classes = preds.shape[1]
if num_classes == 1:
update_type = 'binary'
else:
update_type = 'multiclass'
elif len(preds.shape) == len(labels.shape):
if len(preds.shape) == 1 or preds.shape[1] ==1:
update_type = 'binary'
else:
update_type = 'multilabel'
return update_type
@metric_registry('Accuracy', 'tensorflow, onnxrt_qlinearops, onnxrt_integerops')
class Accuracy(Metric):
    """Accumulating accuracy over binary, multiclass and multilabel inputs.

    Predictions and labels are buffered across update() calls; result()
    returns the fraction of buffered elements that match.
    """
    def __init__(self):
        # Buffers of per-sample predictions/labels; `sample` counts the
        # number of scored elements seen so far.
        self.pred_list = []
        self.label_list = []
        self.sample = 0
    def update(self, preds, labels, sample_weight=None):
        # Validate ranks, then dispatch on the detected input layout.
        preds, labels = _accuracy_shape_check(preds, labels)
        update_type = _accuracy_type_check(preds, labels)
        if update_type == 'binary':
            self.pred_list.extend(preds)
            self.label_list.extend(labels)
            self.sample += labels.shape[0]
        elif update_type == 'multiclass':
            # Reduce per-class scores to the predicted class index.
            self.pred_list.extend(np.argmax(preds, axis=1).astype('int32'))
            self.label_list.extend(labels)
            self.sample += labels.shape[0]
        elif update_type == 'multilabel':
            #(N, C, ...) -> (N*..., C)
            num_label = preds.shape[1]
            last_dim = len(preds.shape)
            if last_dim-1 != 1:
                # Move the class axis last, then flatten all other axes.
                trans_list = [0]
                trans_list.extend(list(range(2, len(preds.shape))))
                trans_list.extend([1])
                preds = preds.transpose(trans_list).reshape(-1, num_label)
                labels = labels.transpose(trans_list).reshape(-1, num_label)
            self.sample += preds.shape[0]*preds.shape[1]
            # NOTE(review): multilabel appends whole arrays while the other
            # branches extend with scalars — mixing update types in a single
            # instance would misalign result(); presumably each instance only
            # ever sees one layout. Confirm with callers.
            self.pred_list.append(preds)
            self.label_list.append(labels)
    def reset(self):
        self.pred_list = []
        self.label_list = []
        self.sample = 0
    def result(self):
        # Element-wise match count over everything buffered so far.
        correct_num = np.sum(
            np.array(self.pred_list) == np.array(self.label_list))
        return correct_num / self.sample
@metric_registry('Loss', 'tensorflow, onnxrt_qlinearops, onnxrt_integerops')
class Loss(Metric):
    """Running mean of prediction values, used as an averaged loss."""

    def __init__(self):
        # Delegate to reset() so the initial state is defined in one place.
        self.reset()

    def update(self, preds, labels, sample_weight=None):
        preds, labels = _shape_validate(preds, labels)
        self.sample += labels.shape[0]
        self.sum += sum(preds)

    def reset(self):
        self.sample = 0
        self.sum = 0

    def result(self):
        return self.sum / self.sample
@metric_registry('MAE', 'tensorflow, onnxrt_qlinearops, onnxrt_integerops')
class MAE(Metric):
    """Mean absolute error over all buffered predictions and labels."""

    def __init__(self):
        # Delegate to reset() so the initial state is defined in one place.
        self.reset()

    def update(self, preds, labels, sample_weight=None):
        preds, labels = _shape_validate(preds, labels)
        self.label_list.extend(labels)
        self.pred_list.extend(preds)

    def reset(self):
        self.label_list = []
        self.pred_list = []

    def result(self):
        abs_errors = [abs(label - pred)
                      for label, pred in zip(self.label_list, self.pred_list)]
        return np.mean(abs_errors)
@metric_registry('RMSE', 'tensorflow, mxnet, onnxrt_qlinearops, onnxrt_integerops')
class RMSE(Metric):
    """Root mean squared error; accumulation is delegated to an MSE metric
    and the square root is taken only when the result is requested."""

    def __init__(self):
        self.mse = MSE()

    def reset(self):
        self.mse.reset()

    def update(self, preds, labels, sample_weight=None):
        self.mse.update(preds, labels, sample_weight)

    def result(self):
        return np.sqrt(self.mse.result())
@metric_registry('MSE', 'tensorflow, onnxrt_qlinearops, onnxrt_integerops')
class MSE(Metric):
    """Mean squared error over all buffered predictions and labels."""

    def __init__(self):
        # Delegate to reset() so the initial state is defined in one place.
        self.reset()

    def update(self, preds, labels, sample_weight=None):
        preds, labels = _shape_validate(preds, labels)
        self.pred_list.extend(preds)
        self.label_list.extend(labels)

    def reset(self):
        self.label_list = []
        self.pred_list = []

    def result(self):
        squared_errors = [(label - pred) ** 2.0
                          for label, pred in zip(self.label_list, self.pred_list)]
        return np.mean(squared_errors)
@metric_registry('topk', 'tensorflow')
class TensorflowTopK(Metric):
    """The class of calculating topk metric, which usually is used in classification.

    Args:
        topk (dict): The dict of topk for configuration.
    """
    def __init__(self, k=1):
        # k: how many top-scoring classes count as a correct prediction.
        self.k = k
        self.num_correct = 0
        self.num_sample = 0
    def update(self, preds, labels, sample_weight=None):
        preds, labels = _topk_shape_validate(preds, labels)
        # in_top_k expects a flat vector of sparse label indices.
        labels = labels.reshape([len(labels)])
        # NOTE(review): a fresh graph and TF1-style session are built on every
        # update() call — correct, but costly for many small batches.
        with tf.Graph().as_default() as acc_graph:
            topk = tf.nn.in_top_k(predictions=tf.constant(preds, dtype=tf.float32),
                                  targets=tf.constant(labels, dtype=tf.int32), k=self.k)
            fp32_topk = tf.cast(topk, tf.float32)
            correct_tensor = tf.reduce_sum(input_tensor=fp32_topk)
            with tf.compat.v1.Session() as acc_sess:
                correct = acc_sess.run(correct_tensor)
        self.num_sample += len(labels)
        self.num_correct += correct
    def reset(self):
        self.num_correct = 0
        self.num_sample = 0
# --- begin generated provider file (source repository: muhlba91/pulumi-proxmoxve) ---
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['CertifiArgs', 'Certifi']
@pulumi.input_type
class CertifiArgs:
    # NOTE(review): tfgen-generated input type — keep hand edits to comments
    # only, since regenerating the provider SDK will overwrite this file.
    def __init__(__self__, *,
                 certificate: pulumi.Input[str],
                 node_name: pulumi.Input[str],
                 private_key: pulumi.Input[str],
                 certificate_chain: Optional[pulumi.Input[str]] = None,
                 overwrite: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a Certifi resource.
        :param pulumi.Input[str] certificate: The PEM encoded certificate
        :param pulumi.Input[str] node_name: The node name
        :param pulumi.Input[str] private_key: The PEM encoded private key
        :param pulumi.Input[str] certificate_chain: The PEM encoded certificate chain
        :param pulumi.Input[bool] overwrite: Whether to overwrite an existing certificate
        """
        pulumi.set(__self__, "certificate", certificate)
        pulumi.set(__self__, "node_name", node_name)
        pulumi.set(__self__, "private_key", private_key)
        # Optional inputs are only stored when supplied, leaving them unset otherwise.
        if certificate_chain is not None:
            pulumi.set(__self__, "certificate_chain", certificate_chain)
        if overwrite is not None:
            pulumi.set(__self__, "overwrite", overwrite)
    @property
    @pulumi.getter
    def certificate(self) -> pulumi.Input[str]:
        """
        The PEM encoded certificate
        """
        return pulumi.get(self, "certificate")
    @certificate.setter
    def certificate(self, value: pulumi.Input[str]):
        pulumi.set(self, "certificate", value)
    @property
    @pulumi.getter(name="nodeName")
    def node_name(self) -> pulumi.Input[str]:
        """
        The node name
        """
        return pulumi.get(self, "node_name")
    @node_name.setter
    def node_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "node_name", value)
    @property
    @pulumi.getter(name="privateKey")
    def private_key(self) -> pulumi.Input[str]:
        """
        The PEM encoded private key
        """
        return pulumi.get(self, "private_key")
    @private_key.setter
    def private_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "private_key", value)
    @property
    @pulumi.getter(name="certificateChain")
    def certificate_chain(self) -> Optional[pulumi.Input[str]]:
        """
        The PEM encoded certificate chain
        """
        return pulumi.get(self, "certificate_chain")
    @certificate_chain.setter
    def certificate_chain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate_chain", value)
    @property
    @pulumi.getter
    def overwrite(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to overwrite an existing certificate
        """
        return pulumi.get(self, "overwrite")
    @overwrite.setter
    def overwrite(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "overwrite", value)
@pulumi.input_type
class _CertifiState:
    # NOTE(review): tfgen-generated state type (all fields optional, used for
    # Certifi.get lookups) — keep hand edits to comments only, since
    # regenerating the provider SDK will overwrite this file.
    def __init__(__self__, *,
                 certificate: Optional[pulumi.Input[str]] = None,
                 certificate_chain: Optional[pulumi.Input[str]] = None,
                 expiration_date: Optional[pulumi.Input[str]] = None,
                 file_name: Optional[pulumi.Input[str]] = None,
                 issuer: Optional[pulumi.Input[str]] = None,
                 node_name: Optional[pulumi.Input[str]] = None,
                 overwrite: Optional[pulumi.Input[bool]] = None,
                 private_key: Optional[pulumi.Input[str]] = None,
                 public_key_size: Optional[pulumi.Input[int]] = None,
                 public_key_type: Optional[pulumi.Input[str]] = None,
                 ssl_fingerprint: Optional[pulumi.Input[str]] = None,
                 start_date: Optional[pulumi.Input[str]] = None,
                 subject: Optional[pulumi.Input[str]] = None,
                 subject_alternative_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering Certifi resources.
        :param pulumi.Input[str] certificate: The PEM encoded certificate
        :param pulumi.Input[str] certificate_chain: The PEM encoded certificate chain
        :param pulumi.Input[str] expiration_date: The expiration date
        :param pulumi.Input[str] file_name: The file name
        :param pulumi.Input[str] issuer: The issuer
        :param pulumi.Input[str] node_name: The node name
        :param pulumi.Input[bool] overwrite: Whether to overwrite an existing certificate
        :param pulumi.Input[str] private_key: The PEM encoded private key
        :param pulumi.Input[int] public_key_size: The public key size
        :param pulumi.Input[str] public_key_type: The public key type
        :param pulumi.Input[str] ssl_fingerprint: The SSL fingerprint
        :param pulumi.Input[str] start_date: The start date
        :param pulumi.Input[str] subject: The subject
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alternative_names: The subject alternative names
        """
        # Every field is optional; only supplied values are stored.
        if certificate is not None:
            pulumi.set(__self__, "certificate", certificate)
        if certificate_chain is not None:
            pulumi.set(__self__, "certificate_chain", certificate_chain)
        if expiration_date is not None:
            pulumi.set(__self__, "expiration_date", expiration_date)
        if file_name is not None:
            pulumi.set(__self__, "file_name", file_name)
        if issuer is not None:
            pulumi.set(__self__, "issuer", issuer)
        if node_name is not None:
            pulumi.set(__self__, "node_name", node_name)
        if overwrite is not None:
            pulumi.set(__self__, "overwrite", overwrite)
        if private_key is not None:
            pulumi.set(__self__, "private_key", private_key)
        if public_key_size is not None:
            pulumi.set(__self__, "public_key_size", public_key_size)
        if public_key_type is not None:
            pulumi.set(__self__, "public_key_type", public_key_type)
        if ssl_fingerprint is not None:
            pulumi.set(__self__, "ssl_fingerprint", ssl_fingerprint)
        if start_date is not None:
            pulumi.set(__self__, "start_date", start_date)
        if subject is not None:
            pulumi.set(__self__, "subject", subject)
        if subject_alternative_names is not None:
            pulumi.set(__self__, "subject_alternative_names", subject_alternative_names)
    @property
    @pulumi.getter
    def certificate(self) -> Optional[pulumi.Input[str]]:
        """
        The PEM encoded certificate
        """
        return pulumi.get(self, "certificate")
    @certificate.setter
    def certificate(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate", value)
    @property
    @pulumi.getter(name="certificateChain")
    def certificate_chain(self) -> Optional[pulumi.Input[str]]:
        """
        The PEM encoded certificate chain
        """
        return pulumi.get(self, "certificate_chain")
    @certificate_chain.setter
    def certificate_chain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate_chain", value)
    @property
    @pulumi.getter(name="expirationDate")
    def expiration_date(self) -> Optional[pulumi.Input[str]]:
        """
        The expiration date
        """
        return pulumi.get(self, "expiration_date")
    @expiration_date.setter
    def expiration_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "expiration_date", value)
    @property
    @pulumi.getter(name="fileName")
    def file_name(self) -> Optional[pulumi.Input[str]]:
        """
        The file name
        """
        return pulumi.get(self, "file_name")
    @file_name.setter
    def file_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "file_name", value)
    @property
    @pulumi.getter
    def issuer(self) -> Optional[pulumi.Input[str]]:
        """
        The issuer
        """
        return pulumi.get(self, "issuer")
    @issuer.setter
    def issuer(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "issuer", value)
    @property
    @pulumi.getter(name="nodeName")
    def node_name(self) -> Optional[pulumi.Input[str]]:
        """
        The node name
        """
        return pulumi.get(self, "node_name")
    @node_name.setter
    def node_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_name", value)
    @property
    @pulumi.getter
    def overwrite(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to overwrite an existing certificate
        """
        return pulumi.get(self, "overwrite")
    @overwrite.setter
    def overwrite(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "overwrite", value)
    @property
    @pulumi.getter(name="privateKey")
    def private_key(self) -> Optional[pulumi.Input[str]]:
        """
        The PEM encoded private key
        """
        return pulumi.get(self, "private_key")
    @private_key.setter
    def private_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_key", value)
    @property
    @pulumi.getter(name="publicKeySize")
    def public_key_size(self) -> Optional[pulumi.Input[int]]:
        """
        The public key size
        """
        return pulumi.get(self, "public_key_size")
    @public_key_size.setter
    def public_key_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "public_key_size", value)
    @property
    @pulumi.getter(name="publicKeyType")
    def public_key_type(self) -> Optional[pulumi.Input[str]]:
        """
        The public key type
        """
        return pulumi.get(self, "public_key_type")
    @public_key_type.setter
    def public_key_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "public_key_type", value)
    @property
    @pulumi.getter(name="sslFingerprint")
    def ssl_fingerprint(self) -> Optional[pulumi.Input[str]]:
        """
        The SSL fingerprint
        """
        return pulumi.get(self, "ssl_fingerprint")
    @ssl_fingerprint.setter
    def ssl_fingerprint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ssl_fingerprint", value)
    @property
    @pulumi.getter(name="startDate")
    def start_date(self) -> Optional[pulumi.Input[str]]:
        """
        The start date
        """
        return pulumi.get(self, "start_date")
    @start_date.setter
    def start_date(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_date", value)
    @property
    @pulumi.getter
    def subject(self) -> Optional[pulumi.Input[str]]:
        """
        The subject
        """
        return pulumi.get(self, "subject")
    @subject.setter
    def subject(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subject", value)
    @property
    @pulumi.getter(name="subjectAlternativeNames")
    def subject_alternative_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        The subject alternative names
        """
        return pulumi.get(self, "subject_alternative_names")
    @subject_alternative_names.setter
    def subject_alternative_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "subject_alternative_names", value)
class Certifi(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate: Optional[pulumi.Input[str]] = None,
certificate_chain: Optional[pulumi.Input[str]] = None,
node_name: Optional[pulumi.Input[str]] = None,
overwrite: Optional[pulumi.Input[bool]] = None,
private_key: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a Certifi resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] certificate: The PEM encoded certificate
:param pulumi.Input[str] certificate_chain: The PEM encoded certificate chain
:param pulumi.Input[str] node_name: The node name
:param pulumi.Input[bool] overwrite: Whether to overwrite an existing certificate
:param pulumi.Input[str] private_key: The PEM encoded private key
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CertifiArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a Certifi resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param CertifiArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CertifiArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate: Optional[pulumi.Input[str]] = None,
certificate_chain: Optional[pulumi.Input[str]] = None,
node_name: Optional[pulumi.Input[str]] = None,
overwrite: Optional[pulumi.Input[bool]] = None,
private_key: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CertifiArgs.__new__(CertifiArgs)
if certificate is None and not opts.urn:
raise TypeError("Missing required property 'certificate'")
__props__.__dict__["certificate"] = certificate
__props__.__dict__["certificate_chain"] = certificate_chain
if node_name is None and not opts.urn:
raise TypeError("Missing required property 'node_name'")
__props__.__dict__["node_name"] = node_name
__props__.__dict__["overwrite"] = overwrite
if private_key is None and not opts.urn:
raise TypeError("Missing required property 'private_key'")
__props__.__dict__["private_key"] = private_key
__props__.__dict__["expiration_date"] = None
__props__.__dict__["file_name"] = None
__props__.__dict__["issuer"] = None
__props__.__dict__["public_key_size"] = None
__props__.__dict__["public_key_type"] = None
__props__.__dict__["ssl_fingerprint"] = None
__props__.__dict__["start_date"] = None
__props__.__dict__["subject"] = None
__props__.__dict__["subject_alternative_names"] = None
super(Certifi, __self__).__init__(
'proxmoxve:index/certifi:Certifi',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
certificate: Optional[pulumi.Input[str]] = None,
certificate_chain: Optional[pulumi.Input[str]] = None,
expiration_date: Optional[pulumi.Input[str]] = None,
file_name: Optional[pulumi.Input[str]] = None,
issuer: Optional[pulumi.Input[str]] = None,
node_name: Optional[pulumi.Input[str]] = None,
overwrite: Optional[pulumi.Input[bool]] = None,
private_key: Optional[pulumi.Input[str]] = None,
public_key_size: Optional[pulumi.Input[int]] = None,
public_key_type: Optional[pulumi.Input[str]] = None,
ssl_fingerprint: Optional[pulumi.Input[str]] = None,
start_date: Optional[pulumi.Input[str]] = None,
subject: Optional[pulumi.Input[str]] = None,
subject_alternative_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Certifi':
"""
Get an existing Certifi | |
replace=False):
time_from = spot - 100
time_to = spot + 100
stick_start_bin = np.zeros([len(astroA.stick_bin)])
stick_start_bin[astroA.indices_d['stick_exact_start']] = 1
stick_signal = stick_start_bin[time_from:time_to]
#Obtain running signal
running_signal = astroA.speed_values[time_from:time_to]
fig_running = plotly_utils.plot_scatter_fmt(np.arange(len(running_signal)), running_signal, astype='float')
fig_stick = plotly_utils.plot_scatter_fmt(np.arange(len(stick_signal)), stick_signal, astype='int')
#Obtain available events during this period
interval_events = list(set(np.where(astroA.res_d['tBegin'] > time_from)[0]) & set(np.where(astroA.res_d['tEnd'] < time_to)[0]))
signal_figs = []
for i, event_i in enumerate(interval_events[0:10]):
adj_from = int(time_from % astroA.input_shape[2])
adj_to = int(time_to % astroA.input_shape[2])
if adj_to < adj_from:
print('Skipping: change time from to')
continue
y = astroA.res_d['dff_only'][event_i][adj_from:adj_to]
x = np.arange(0, adj_to-adj_from)
adj_begin = int(astroA.res_d['tBegin'][event_i] % astroA.input_shape[2]) - adj_from
adj_end = int(astroA.res_d['tEnd'][event_i] % astroA.input_shape[2]) - adj_from
print(adj_begin, adj_end)
fig = plotly_utils.plot_scatter_signal(x, y, adj_begin, adj_end, mode='lines', title='scatter', x_title='', y_title='', with_legend=False)
signal_figs.append(fig)
figs.append([fig_running, fig_stick, signal_figs])
return figs
    def get_compare_align_plot_xcorr_all(self, astro_pair_l, align_setting='xcorr', dff_mode=False, behaviour='default', filter_duration=(None, None),
                                         with_border_align=True, n_fake_samples=5, save_results_path=None):
        '''
        Go with each astrocyte pairs
        Calculate day 0-day x correlation
        Calculate random samples correlation
        Normalize s.t. random samples correlation for all pairs is the same (and the 0-x corr)
        Create plot
        '''
        # NOTE(review): dff_mode and with_border_align are accepted but never
        # used in this body — confirm whether they should be forwarded to
        # alignment_counter.
        pair_fakes = []
        pair_corrs_l = []
        days_id_l = []
        for astro_pair in astro_pair_l:
            astro_1, astro_2 = astro_pair[0], astro_pair[1]
            days = (str(astro_pair[0].day), str(astro_pair[1].day))
            days_id = '-'.join(days)
            # Per-pair results are cached as a pickle; recompute only on miss.
            pair_save_results_path = save_results_path + self.get_astro_pair_id(astro_pair) + '.pkl'
            if os.path.isfile(pair_save_results_path):
                d = saving_utils.load_pickle(pair_save_results_path)
            else:
                if align_setting == 'xcorr':
                    #Get move vector
                    move_vector = compare_astro_utils.get_move_vector_xcorr_default(astro_1, astro_2)
                    #self.n_samples_corr_fake
                    d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                              n_fake_samples=n_fake_samples,
                                                              align_setting='param',
                                                              eval_setting='xcorr',
                                                              fake_sample_setting='from_astro',
                                                              move_vector=move_vector,
                                                              p=1,
                                                              behaviour=behaviour,
                                                              filter_duration=filter_duration,
                                                              with_output_details=True)
                elif align_setting == 'xcorr_free':
                    # Free alignment: no precomputed move vector.
                    d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                              n_fake_samples=n_fake_samples,
                                                              align_setting='param',
                                                              eval_setting='xcorr_free',
                                                              fake_sample_setting='from_astro',
                                                              move_vector=None,
                                                              p=1,
                                                              behaviour=behaviour,
                                                              filter_duration=filter_duration,
                                                              with_output_details=True)
                if save_results_path is not None:
                    saving_utils.save_pickle(d, pair_save_results_path)
            print(d)
            pair_fakes.append(d['num_fake'])
            pair_corrs_l.append(d['num_compare'])
            days_id_l.append(days_id)
        # Keep unscaled copies so both raw and (potentially) scaled values are returned.
        pair_fakes_before = np.copy(pair_fakes)
        pair_corrs_l_before = np.copy(pair_corrs_l)
        #print('PAIR FAKES', pair_fakes)
        mean_num_fake = np.mean([np.mean(pair_fake) for pair_fake in pair_fakes])
        pair_corrs_d = {}
        for i in range(len(pair_corrs_l)):
            #mult = mean_num_fake / np.mean(pair_fakes[i])
            #NOT DOING ANY SCALING
            # Scaling is intentionally disabled (mult == 1); the commented
            # line above shows the normalization that was considered.
            mult = 1
            pair_fakes[i] = np.array(pair_fakes[i]) * mult
            pair_corrs_l[i] = pair_corrs_l[i] * mult
            if days_id_l[i] not in pair_corrs_d:
                pair_corrs_d[days_id_l[i]] = []
            pair_corrs_d[days_id_l[i]].append(pair_corrs_l[i])
        print('Pair corrs', pair_corrs_d)
        # First box is the pooled fake-sample distribution, then one per day pair.
        x = ['fake_samples']
        y = [[item for sublist in pair_fakes for item in sublist]]
        for k in pair_corrs_d.keys():
            x.append('days ' + k)
            y.append(pair_corrs_d[k])
        #tstat, pvalue = ttest_ind_from_stats(np.mean(y[0]), np.std(y[0]), len(y[0]), np.mean(y[1]), np.std(y[1]), len(y[1]))
        #print('NUM COMPARE: {}, mode {} behaviour {}'.format(d['num_compare'], dff_mode, behaviour))
        fig = plotly_utils.plot_point_box_revised(x, y, title='Behaviour: {} - correlations'.format(behaviour), x_title='', y_title='Aligned xcorr value')
        return fig, pair_fakes_before, pair_fakes, pair_corrs_l_before, pair_corrs_l, days_id_l
    def get_compare_states_all_xcorr(self, astro_pair, align_setting='xcorr_free', dff_mode='False', n_fake_samples=5, save_pkl_path=None, filter_duration=(None, None),
                                     behaviour_l=['rest', 'running', 'stick_rest', 'stick_run_ind_15', 'default']):
        # NOTE(review): dff_mode defaults to the *string* 'False', which is
        # truthy — confirm whether the boolean False was intended.
        # NOTE(review): behaviour_l is a mutable default argument; it is only
        # read here, but callers should not mutate the returned default.
        astro_1, astro_2 = astro_pair
        print('Working on {}'.format(self.get_astro_pair_id(astro_pair)))
        # Results are cached as a pickle keyed by save_pkl_path.
        if os.path.isfile(save_pkl_path):
            res_d = saving_utils.load_pickle(save_pkl_path)
        else:
            res_d = {}
            for behaviour in behaviour_l:
                print('Current behaviour: ', behaviour)
                # Only compare behaviours present in both astrocytes.
                if (behaviour in astro_1.indices_d) and (behaviour in astro_2.indices_d) and \
                   (behaviour in astro_1.event_subsets) and (behaviour in astro_2.event_subsets):
                    # Fake samples are only generated for the 'default' behaviour.
                    if align_setting == 'xcorr':
                        #Get move vector
                        move_vector = compare_astro_utils.get_move_vector_xcorr_default(astro_1, astro_2)
                        #self.n_samples_corr_fake
                        d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                                  n_fake_samples=n_fake_samples if behaviour == 'default' else 0,
                                                                  align_setting='param',
                                                                  eval_setting='xcorr',
                                                                  fake_sample_setting='from_astro',
                                                                  move_vector=move_vector,
                                                                  p=1,
                                                                  behaviour=behaviour,
                                                                  filter_duration=filter_duration,
                                                                  with_output_details=True,
                                                                  dff_mode=dff_mode)
                    elif align_setting == 'xcorr_free':
                        d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                                  n_fake_samples=n_fake_samples if behaviour == 'default' else 0,
                                                                  align_setting='param',
                                                                  eval_setting='xcorr_free',
                                                                  fake_sample_setting='from_astro',
                                                                  move_vector=None,
                                                                  p=1,
                                                                  behaviour=behaviour,
                                                                  filter_duration=filter_duration,
                                                                  with_output_details=True,
                                                                  dff_mode=dff_mode)
                    res_d[behaviour] = d['num_compare']
                    if behaviour == 'default':
                        res_d['random'] = d['num_fake']
                else:
                    print('Behaviour {} not in one of {} / {}'.format(behaviour, astro_1.id, astro_2.id))
            if save_pkl_path is not None:
                saving_utils.save_pickle(res_d, save_pkl_path)
        # Build box-plot inputs: each behaviour value wrapped in a list,
        # 'random' is already a list of fake-sample values.
        behaviours = [b for b in behaviour_l]
        behaviours.append('random')
        x = []
        y = []
        for k in behaviours:
            if ((k in astro_1.indices_d) and (k in astro_2.indices_d) and (k in astro_1.event_subsets) and (k in astro_2.event_subsets)) or (k=='random'):
                if k != 'random':
                    res_d[k] = [res_d[k]]
                x.append(k)
                y.append(res_d[k])
        #x = behaviour_l
        #y = [res_d['rest'], res_d['running'], res_d['default'], res_d['random']]
        print(y)
        fig = plotly_utils.plot_point_box_revised(x, y, title='Behaviour correlations', x_title='Behaviour', y_title='Xcorr value')
        return fig, res_d
    def get_compare_states_same_astro_all_xcorr(self, astro_pair, align_setting='xcorr_free', dff_mode=False, n_fake_samples=5, save_pkl_path=None, filter_duration=(None, None)):
        # Compare behaviour-state grids (rest vs running, and default vs
        # default for the random baseline) within each astrocyte of the pair.
        # Results are cached as a pickle keyed by save_pkl_path.
        if os.path.isfile(save_pkl_path):
            res_d = saving_utils.load_pickle(save_pkl_path)
        else:
            res_d = {}
            astro_1, astro_2 = astro_pair
            for astro in astro_pair:
                astro_day = astro.day
                for behaviour_pair in [['rest', 'running'], ['default', 'default']]:
                    # Grids come from the single astrocyte being processed,
                    # one per behaviour of the pair.
                    astro_a_grid, _, _,_ = compare_astro_utils.get_filters_compare([astro], p=1, dff_mode=dff_mode, behaviour=behaviour_pair[0])
                    astro_a_grid = astro_a_grid[0]
                    astro_b_grid, _, _,_ = compare_astro_utils.get_filters_compare([astro], p=1, dff_mode=dff_mode, behaviour=behaviour_pair[1])
                    astro_b_grid = astro_b_grid[0]
                    # NOTE(review): alignment_counter is called with astro_1/astro_2
                    # while the grids come from `astro` — presumably the explicit
                    # grid_target/grid_source override the astro arguments; confirm
                    # against compare_astro_utils.alignment_counter.
                    if align_setting == 'xcorr':
                        #Get move vector
                        move_vector = compare_astro_utils.get_move_vector_xcorr_default(astro_1, astro_2)
                        d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                                  n_fake_samples=n_fake_samples if behaviour_pair[0] == 'default' else 0,
                                                                  align_setting='param',
                                                                  eval_setting='xcorr',
                                                                  fake_sample_setting='from_astro',
                                                                  grid_target=astro_a_grid,
                                                                  grid_source=astro_b_grid,
                                                                  move_vector=move_vector,
                                                                  p=1,
                                                                  behaviour='default',
                                                                  filter_duration=filter_duration,
                                                                  with_output_details=True)
                    elif align_setting == 'xcorr_free':
                        d = compare_astro_utils.alignment_counter(astro_1, astro_2,
                                                                  n_fake_samples=n_fake_samples if behaviour_pair[0] == 'default' else 0,
                                                                  align_setting='param',
                                                                  eval_setting='xcorr_free',
                                                                  fake_sample_setting='from_astro',
                                                                  grid_target=astro_a_grid,
                                                                  grid_source=astro_b_grid,
                                                                  move_vector=None,
                                                                  p=1,
                                                                  behaviour='default',
                                                                  filter_duration=filter_duration,
                                                                  with_output_details=True)
                    if behaviour_pair[0] == 'rest':
                        res_d['_'.join(behaviour_pair) + '_{}'.format(astro_day)] = d['num_compare']
                    if behaviour_pair[0] == 'default':
                        res_d['random_{}'.format(astro_day)] = d['num_fake']
            if save_pkl_path is not None:
                saving_utils.save_pickle(res_d, save_pkl_path)
        # Wrap scalar comparison values in lists for the box plot; the
        # 'random_*' entries are already lists of fake-sample values.
        for k in res_d.keys():
            if 'random' not in k:
                res_d[k] = [res_d[k]]
        x = [k for k in res_d.keys()]
        y = [res_d[k] for k in x]
        fig = plotly_utils.plot_point_box_revised(x, y, title='Behaviour correlations', x_title='Behaviour', y_title='Xcorr value')
        return fig, res_d
def get_compare_between_group_xcorr(self, astroA_l_pairs, n_fake_samples=5, dff_mode=False, save_pkl_path=None, filter_duration=[None, None]):
if os.path.isfile(save_pkl_path):
res_d = saving_utils.load_pickle(save_pkl_path)
else:
res_d = {'between' : [], 'random' : [], 'between_id' : []}
for astro_i in range(len(astroA_l_pairs)):
for astro_j in range(astro_i+1, len(astroA_l_pairs)):
astroA_pair_1 = astroA_l_pairs[astro_i]
astroA_pair_2 = astroA_l_pairs[astro_j]
#quick hack, ignore the bad dataset
if astroA_pair_1[0].id == 'm190129_d190226_cx_day_0' or astroA_pair_2[0].id == 'm190129_d190226_cx_day_0':
continue
#continue if we are on same pair
if astroA_pair_1[0].id == astroA_pair_2[0].id:
continue
for i in [0, 1]:
for j in [0, 1]:
astro_pair = [astroA_pair_1[i], astroA_pair_2[j]]
d = compare_astro_utils.alignment_counter(astro_pair[0], astro_pair[1],
n_fake_samples=n_fake_samples,
align_setting='xcorr',
eval_setting='xcorr_random_both',
fake_sample_setting='from_astro',
p=1,
behaviour='default',
dff_mode=dff_mode,
border_nan=True,
with_output_details=True)
res_d['between_id'].append(self.get_astro_pair_id(astro_pair))
res_d['between'].append(d['num_compare'])
res_d['random'].extend(d['num_fake'])
if save_pkl_path is not None:
saving_utils.save_pickle(res_d, save_pkl_path)
x = ['Astro between group', 'Random between group']
y = [res_d['between'], res_d['random']]
fig = plotly_utils.plot_point_box_revised(x, y, title='Between group correlations vs random (95% confidence)', x_title='', y_title='Xcorr value')
return fig, res_d
def get_astro_pair_id(self, astro_pair):
return '_'.join([astro.print_id for astro in astro_pair])
def get_measure_all_bar_plot(self, astroA_l, measure, bh_list=['rest', 'running']):
y_pair_l = [[] for i in range(len(bh_list))]
err_pair_l = [[] for i in range(len(bh_list))]
length_l = [[] for i in range(len(bh_list))]
x = []
for astroA in astroA_l:
x.append(astroA.print_id)
for i, bh in enumerate(bh_list):
measure_res = astroA.res_d[measure][astroA.event_subsets[bh]]
mean, conf_low, conf_high = stat_utils.mean_confidence_interval(measure_res, confidence=0.95)
conf = conf_high - mean
y_pair_l[i].append(mean)
err_pair_l[i].append(conf)
length_l[i].append(len(measure_res))
fig = plotly_utils.plot_group_bar(x, y_pair_l, text_values_l=length_l, title='', text_size=20, x_title='', y_title='', legends=bh_list, std_y=err_pair_l, margin_b=300, margin_r=300)
return fig
def get_measure_all_dot_plot(self, astroA_l, measure, bh_list=['rest', 'running']):
x_l = bh_list
name_l=[]
y_pair_l_l = []
for astroA in astroA_l:
name_l.append(astroA.print_id)
y_pair_l = [[] for i in range(len(bh_list))]
length_l = [[] for i in range(len(bh_list))]
for i, bh in enumerate(bh_list):
if measure != None:
measure_res = astroA.res_d[measure][astroA.event_subsets[bh]]
y_pair_l[i].append(measure_res)
else:
n = (len(astroA.event_subsets[bh]) / len(astroA.indices_d[bh])) * astroA.minute_frames
y_pair_l[i].append([n])
y_pair_l_l.append(y_pair_l)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l=x_l, y_l_l=y_pair_l_l, y_mean=None, name_l=name_l, mode='lines', x_title='', y_title='',
confidence=True, with_stats=True)
return fig, stats_d
    def get_before_after_transition_events(self, astroA, before_bh, inds_bh, after_bh, before_range=20, after_range=50, measure=None,
                                                duration_filter=[None, None]):
        """
        Collect event information around behaviour transitions.

        Finds frames of `inds_bh` that are preceded (within `before_range`
        frames) by `before_bh` and followed (within `after_range` frames) by
        `after_bh`, then gathers the events whose delays fall before/after
        those transition frames.

        :param astroA: astrocyte object providing indices_d, event_subsets and res_d.
        :param before_bh: behaviour required before the transition frames.
        :param inds_bh: behaviour whose frames are the candidate transition points.
        :param after_bh: behaviour required after the transition frames.
        :param before_range: frames to look back (also the min delay).
        :param after_range: frames to look forward (also the max delay).
        :param measure: key into astroA.res_d; when None, event COUNTS are
            returned instead of per-event measure values.
        :param duration_filter: (min, max) event-duration bounds forwarded to
            the delay extraction.
        :return: (before_l, after_l) — lists of measure values, or single-element
            lists of counts when measure is None. ([], []) when no frame qualifies.
        """
        inds = astroA.indices_d[inds_bh]
        #Filter indices
        # Keep only frames with before_bh fully present in the preceding window
        # and after_bh fully present in the following window (prop=1.0 looks
        # like "all frames in range must match" — TODO confirm in aqua_utils).
        indices_filt_before = aqua_utils.filter_range_inds(inds, astroA.indices_d[before_bh], range=(-before_range, -1), prop=1.0)
        indices_filt_after = aqua_utils.filter_range_inds(inds, astroA.indices_d[after_bh], range=(1, after_range), prop=1.0)
        # Transition frames must satisfy both conditions; sorted for determinism.
        indices_filt = np.array(np.sort(list(set(indices_filt_before) & set(indices_filt_after))))
        if len(indices_filt) == 0:
            return [], []
        delay_info_args = {'event_inds_subset' : astroA.event_subsets['default'],
                            'min_delay' : -before_range,
                            'max_delay' : after_range,
                            'min_duration' : duration_filter[0],
                            'max_duration' : duration_filter[1],
                            'unique_events' : False,
                            'return_non_unique_delays_arr' : True
                            }
        # Per transition frame: signal delays and the corresponding event indices.
        _, _, _, signal_delays_l_l, peak_mins_l_l, valid_event_i_l_l = aqua_utils.get_delay_info_from_res(indices_filt, astroA.res_d, **delay_info_args)
        # Accumulator type depends on mode: integer counters when counting,
        # lists when collecting measure values.
        if measure is None:
            before_l = 0
            after_l = 0
        else:
            before_l = []
            after_l = []
        for i, signal_delays_l in enumerate(signal_delays_l_l):
            signal_delays_np = np.array(signal_delays_l)
            if measure is None:
                # Count events strictly before (delay < 0) / after (delay > 0)
                # the transition; delay == 0 events are excluded in both modes.
                before_l += len(signal_delays_np[signal_delays_np < 0])
                after_l += len(signal_delays_np[signal_delays_np > 0])
            else:
                # Measure values for the events valid at this transition frame.
                measure_np = np.array(list(astroA.res_d[measure][valid_event_i_l_l[i]]))
                before_l.extend(list(measure_np[signal_delays_np < 0]))
                after_l.extend(list(measure_np[signal_delays_np > 0]))
        # Wrap counts in lists so both modes return list results.
        if measure is None:
            before_l = [before_l]
            after_l = [after_l]
        return before_l, after_l
def get_measure_all_transition_dot_plot(self, astroA_l, measure, before_bh, inds_bh,
after_bh, before_range=20, after_range=50, duration_filter=[None, None]):
'''
In get measure all dot plot we take a list of behaviours : e.g. [rest, running]
Then we find the events that take place during each behaviour
Then we either measure number of events normalized to minute or the measure values
Here we care about transition. We first find all events that are before transition and then after transition
'''
x_l = [before_bh + '-' + inds_bh, inds_bh + '-' + after_bh]
name_l=[]
y_pair_l_l = []
for astroA in astroA_l:
name_l.append(astroA.print_id)
y_pair_l = [[] for i in range(2)]
length_l = [[] for | |
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 <NAME>, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import datetime
import calendar
import traceback
import dbutils
import gitutils
import htmlutils
import page.utils
import log.html
import reviewing.utils as review_utils
import reviewing.html as review_html
import reviewing.comment as review_comment
import configuration
import diff
import profiling
import linkify
import textutils
try:
    from customization.paths import getModuleFromFile
except ImportError:
    # No site customization installed: fall back to a simple heuristic that
    # treats the first path component as the "module" a file belongs to.
    def getModuleFromFile(repository, filename):
        """Return the top-level directory of `filename` with a trailing
        slash, or None when the path has no directory component."""
        try:
            base, rest = filename.split("/", 1)
            return base + "/"
        except ValueError:
            # No "/" in filename: the 2-element unpack of split() fails.
            return None
class SummaryColumn(log.html.SummaryColumn):
    """Commit-log summary column that tags each commit with the users who
    still have pending changes to review in it."""

    def __init__(self, review):
        log.html.SummaryColumn.__init__(self)
        self.__review = review
        self.__cache = {}

    def fillCache(self, db, review):
        """Pre-load a commit-id -> set(user-id) map of pending assignments."""
        cursor = db.cursor()
        cursor.execute("""SELECT DISTINCT assignee, child
                            FROM fullreviewuserfiles
                            JOIN changesets ON (changesets.id=changeset)
                           WHERE review=%s
                             AND state='pending'""",
                       (review.id,))
        for assignee_id, commit_id in cursor:
            self.__cache.setdefault(commit_id, set()).add(assignee_id)

    def render(self, db, commit, target, overrides={}):
        """Render the cell; adds a 'critic-reviewers' attribute listing the
        pending reviewers as sorted 'fullname:status' pairs."""
        assigned = self.__cache.get(commit.getId(db))
        if assigned:
            labels = ["%s:%s" % (user.fullname, user.status)
                      for user in dbutils.User.fromIds(db, list(assigned))]
            target.setAttribute("critic-reviewers", ",".join(sorted(labels)))
        log.html.SummaryColumn.render(self, db, commit, target, overrides=overrides)
class ApprovalColumn:
    """Commit-log column showing review progress.

    Each cache entry maps a commit id to a 12-element list:
      [0:3]  total nfiles/deleted/inserted across all reviewers,
      [3:6]  reviewed ("approved") nfiles/deleted/inserted across all reviewers,
      [6:9]  totals for the current user's assigned files,
      [9:12] reviewed counts for the current user's assigned files.
    """

    # Column flavours: remaining-work percentage vs. total line counts.
    APPROVED = 1
    TOTAL = 2

    def __init__(self, user, review, type, cache):
        self.__user = user
        self.__review = review
        self.__type = type
        self.__cache = cache

    @staticmethod
    def fillCache(db, user, review, cache, profiler):
        """Populate `cache` with per-commit progress data for `review`/`user`."""
        cursor = db.cursor()
        profiler.check("fillCache")
        # Totals and reviewed counts over every file in the review.
        cursor.execute("""SELECT child, state, COUNT(*), SUM(deleted), SUM(inserted)
                            FROM changesets
                            JOIN reviewfiles ON (changeset=changesets.id)
                           WHERE review=%s
                        GROUP BY child, state""",
                       (review.id,))
        for commit_id, state, nfiles, deleted, inserted in cursor:
            data = cache.get(commit_id)
            if not data: data = cache[commit_id] = [0] * 12
            if state == 'reviewed':
                data[3] += nfiles
                data[4] += deleted
                data[5] += inserted
            data[0] += nfiles
            data[1] += deleted
            data[2] += inserted
        profiler.check("fillCache: total")
        # Same aggregation restricted to the current user's assigned files,
        # taking any draft state changes by this user into account.
        cursor.execute("""SELECT child, COALESCE(reviewfilechanges.to_state, reviewfiles.state) AS effective_state, COUNT(*), SUM(deleted), SUM(inserted)
                            FROM changesets
                            JOIN reviewfiles ON (changeset=changesets.id)
                            JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
                 LEFT OUTER JOIN reviewfilechanges ON (reviewfilechanges.file=reviewfiles.id
                                                   AND reviewfilechanges.uid=reviewuserfiles.uid
                                                   AND reviewfilechanges.state='draft')
                           WHERE review=%s
                             AND reviewuserfiles.uid=%s
                        GROUP BY child, effective_state""",
                       (review.id, user.id))
        for commit_id, state, nfiles, deleted, inserted in cursor:
            data = cache.get(commit_id)
            # BUG FIX: initialize missing entries just like the first loop;
            # previously a commit id absent from the cache made
            # "data[9] += nfiles" raise TypeError on None.
            if not data: data = cache[commit_id] = [0] * 12
            if state == 'reviewed':
                data[9] += nfiles
                data[10] += deleted
                data[11] += inserted
            data[6] += nfiles
            data[7] += deleted
            data[8] += inserted
        profiler.check("fillCache: user")

    def __calculate(self, db, commit):
        """Return the 12-element progress list for `commit` (zeros if unknown)."""
        return self.__cache.get(commit.id, [0] * 12)

    def className(self, db, commit):
        """CSS class: 'approval'/'total', plus ' user' while the current
        user still has unreviewed assigned changes in this commit."""
        if commit:
            (total_nfiles, total_deleted, total_inserted,
             approved_nfiles, approved_deleted, approved_inserted,
             user_total_nfiles, user_total_deleted, user_total_inserted,
             user_approved_nfiles, user_approved_deleted, user_approved_inserted) = self.__calculate(db, commit)

            if user_approved_nfiles == user_total_nfiles:
                category = ""
            else:
                category = " user"
        else:
            category = ""

        if self.__type == ApprovalColumn.APPROVED:
            return "approval" + category
        else:
            return "total" + category

    def heading(self, target):
        """Write the column heading text into `target`."""
        if self.__type == ApprovalColumn.APPROVED:
            target.text("Pending")
        else:
            target.text("Total")

    def render(self, db, commit, target, overrides={}):
        """Render the cell: a remaining-work percentage (APPROVED) or a
        '-deleted/+inserted' line count (TOTAL)."""
        (total_nfiles, total_deleted, total_inserted,
         approved_nfiles, approved_deleted, approved_inserted,
         user_total_nfiles, user_total_deleted, user_total_inserted,
         user_approved_nfiles, user_approved_deleted, user_approved_inserted) = self.__calculate(db, commit)

        if self.__type == ApprovalColumn.APPROVED:
            if user_approved_nfiles == user_total_nfiles:
                if approved_nfiles == total_nfiles:
                    target.text()
                elif approved_deleted == total_deleted and approved_inserted == total_inserted:
                    target.span().text("?? %")
                else:
                    # NOTE(review): assumes total_deleted + total_inserted > 0
                    # here; the elif above appears to catch the all-zero case
                    # when approved counts never exceed totals — confirm.
                    target.span().text("%d %%" % int(100.0 * ((total_deleted + total_inserted) - (approved_deleted + approved_inserted)) / (total_deleted + total_inserted)))
            elif user_approved_deleted == user_total_deleted and user_approved_inserted == user_total_inserted:
                target.span().text("?? %")
            else:
                target.span().text("%d %%" % int(100.0 * ((user_total_deleted + user_total_inserted) - (user_approved_deleted + user_approved_inserted)) / (user_total_deleted + user_total_inserted)))
        else:
            if user_approved_deleted == user_total_deleted and user_approved_inserted == user_total_inserted:
                target.span().text("-%d/+%d" % (total_deleted, total_inserted))
            else:
                target.span().text("-%d/+%d" % (user_total_deleted, user_total_inserted))
def notModified(req, db, user, review):
    """Return True when the client's If-None-Match header matches the
    review's current ETag, i.e. the cached page is still valid."""
    client_etag = req.getRequestHeader("If-None-Match")
    return review.getETag(db, user) == client_etag
def renderShowReview(req, db, user):
profiler = profiling.Profiler()
cursor = db.cursor()
if user.getPreference(db, "commit.diff.compactMode"): default_compact = "yes"
else: default_compact = "no"
compact = req.getParameter("compact", default_compact) == "yes"
highlight = req.getParameter("highlight", None)
review_id = req.getParameter("id", filter=int)
review = dbutils.Review.fromId(db, review_id, profiler=profiler)
profiler.check("create review")
if not review:
raise page.utils.DisplayMessage("Invalid Review ID", "%d is not a valid review ID." % review_id)
if review.getETag(db, user) == req.getRequestHeader("If-None-Match"):
raise page.utils.NotModified
profiler.check("ETag")
repository = review.repository
prefetch_commits = {}
cursor.execute("""SELECT DISTINCT sha1, child
FROM changesets
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
JOIN commits ON (commits.id=changesets.child)
WHERE review=%s""",
(review.id,))
prefetch_commits.update(dict(cursor))
profiler.check("commits (query)")
cursor.execute("""SELECT old_head, old_head_commit.sha1,
new_head, new_head_commit.sha1,
new_upstream, new_upstream_commit.sha1,
equivalent_merge, equivalent_merge_commit.sha1,
replayed_rebase, replayed_rebase_commit.sha1
FROM reviewrebases
LEFT OUTER JOIN commits AS old_head_commit ON (old_head_commit.id=old_head)
LEFT OUTER JOIN commits AS new_head_commit ON (new_head_commit.id=new_head)
LEFT OUTER JOIN commits AS new_upstream_commit ON (new_upstream_commit.id=new_upstream)
LEFT OUTER JOIN commits AS equivalent_merge_commit ON (equivalent_merge_commit.id=equivalent_merge)
LEFT OUTER JOIN commits AS replayed_rebase_commit ON (replayed_rebase_commit.id=replayed_rebase)
WHERE review=%s""",
(review.id,))
rebases = cursor.fetchall()
if rebases:
has_finished_rebases = False
for (old_head_id, old_head_sha1,
new_head_id, new_head_sha1,
new_upstream_id, new_upstream_sha1,
equivalent_merge_id, equivalent_merge_sha1,
replayed_rebase_id, replayed_rebase_sha1) in rebases:
if old_head_id:
prefetch_commits[old_head_sha1] = old_head_id
if new_head_id:
prefetch_commits[new_head_sha1] = new_head_id
has_finished_rebases = True
if new_upstream_id:
prefetch_commits[new_upstream_sha1] = new_upstream_id
if equivalent_merge_id:
prefetch_commits[equivalent_merge_sha1] = equivalent_merge_id
if replayed_rebase_id:
prefetch_commits[replayed_rebase_sha1] = replayed_rebase_id
profiler.check("auxiliary commits (query)")
if has_finished_rebases:
cursor.execute("""SELECT commits.sha1, commits.id
FROM commits
JOIN reachable ON (reachable.commit=commits.id)
WHERE branch=%s""",
(review.branch.id,))
prefetch_commits.update(dict(cursor))
profiler.check("actual commits (query)")
prefetch_commits = gitutils.FetchCommits(repository, prefetch_commits)
document = htmlutils.Document(req)
html = document.html()
head = html.head()
body = html.body(onunload="void(0);")
def flush(target=None):
return document.render(stop=target, pretty=not compact)
def renderHeaderItems(target):
has_draft_items = review_utils.renderDraftItems(db, user, review, target)
target = target.div("buttons")
if not has_draft_items:
if review.state == "open":
if review.accepted(db):
target.button(id="closeReview", onclick="closeReview();").text("Close Review")
else:
if user in review.owners or user.getPreference(db, "review.pingAnyReview"):
target.button(id="pingReview", onclick="pingReview();").text("Ping Review")
if user in review.owners or user.getPreference(db, "review.dropAnyReview"):
target.button(id="dropReview", onclick="dropReview();").text("Drop Review")
if user in review.owners and not review.description:
target.button(id="writeDescription", onclick="editDescription();").text("Write Description")
else:
target.button(id="reopenReview", onclick="reopenReview();").text("Reopen Review")
target.span("buttonscope buttonscope-global")
profiler.check("prologue")
page.utils.generateHeader(body, db, user, renderHeaderItems, profiler=profiler)
cursor.execute("SELECT 1 FROM fullreviewuserfiles WHERE review=%s AND state='pending' AND assignee=%s", (review.id, user.id))
hasPendingChanges = bool(cursor.fetchone())
if hasPendingChanges:
head.setLink("next", "showcommit?review=%d&filter=pending" % review.id)
profiler.check("header")
document.addExternalStylesheet("resource/showreview.css")
document.addExternalStylesheet("resource/review.css")
document.addExternalStylesheet("resource/comment.css")
document.addExternalScript("resource/showreview.js")
document.addExternalScript("resource/review.js")
document.addExternalScript("resource/comment.js")
document.addExternalScript("resource/reviewfilters.js")
document.addExternalScript("resource/autocomplete.js")
document.addInternalScript(user.getJS())
document.addInternalScript("var isReviewFrontpage = true;")
document.addInternalScript("var owners = [ %s ];" % ", ".join(owner.getJSConstructor() for owner in review.owners))
document.addInternalScript("var updateCheckInterval = %d;" % user.getPreference(db, "review.updateCheckInterval"));
log.html.addResources(document)
document.addInternalScript(review.getJS())
target = body.div("main")
basic = target.table('paleyellow basic', align='center')
basic.col(width='10%')
basic.col(width='60%')
basic.col(width='30%')
h1 = basic.tr().td('h1', colspan=3).h1()
h1.text("r/%d: " % review.id)
h1.span(id="summary").text("%s" % review.summary, linkify=linkify.Context(db=db, review=review))
h1.a("edit", href="javascript:editSummary();").text("[edit]")
def linkToCommit(commit):
cursor.execute("SELECT 1 FROM commits JOIN changesets ON (child=commits.id) JOIN reviewchangesets ON (changeset=changesets.id) WHERE sha1=%s AND review=%s", (commit.sha1, review.id))
if cursor.fetchone():
return "%s/%s?review=%d" % (review.repository.name, commit.sha1, review.id)
return "%s/%s" % (review.repository.name, commit.sha1)
def row(heading, value, help, right=None, linkify=False, cellId=None):
main_row = basic.tr('line')
main_row.td('heading').text("%s:" % heading)
if right is False: colspan = 2
else: colspan = None
if callable(value): value(main_row.td('value', id=cellId, colspan=colspan).preformatted())
else: main_row.td('value', id=cellId, colspan=colspan).preformatted().text(value, linkify=linkify, repository=review.repository)
if right is False: pass
elif callable(right): right(main_row.td('right', valign='bottom'))
else: main_row.td('right').text()
if help: basic.tr('help').td('help', colspan=3).text(help)
def renderBranchName(target):
classes = "branch inset"
if review.branch.archived:
classes += " archived"
target.code(classes).text(review.branch.name, linkify=linkify.Context())
if repository.name != user.getPreference(db, "defaultRepository"):
target.text(" in ")
target.code("repository inset").text(repository.getURL(db, user))
buttons = target.div("buttons")
cursor.execute("""SELECT id, remote, remote_name, disabled, previous
FROM trackedbranches
WHERE repository=%s
AND local_name=%s""",
(repository.id, review.branch.name))
row = cursor.fetchone()
if row:
trackedbranch_id, remote, remote_name, disabled, previous = row
target.p("tracking disabled" if disabled else "tracking").text("tracking")
target.code("branch inset").text(remote_name, linkify=linkify.Context(remote=remote))
target.text(" in ")
target.code("repository inset").text(remote, linkify=linkify.Context())
if previous:
target.span("lastupdate").script(type="text/javascript").text("document.write('(last fetched: ' + shortDate(new Date(%d)) + ')');" % (calendar.timegm(previous.utctimetuple()) * 1000))
if user in review.owners or user.hasRole(db, "administrator"):
if review.state == "open":
if disabled:
button = buttons.button("enabletracking",
onclick=("enableTracking(%d, %s, %s);"
% (trackedbranch_id,
htmlutils.jsify(remote),
htmlutils.jsify(remote_name))))
button.text("Enable Tracking")
else:
buttons.button("disabletracking", onclick="triggerUpdate(%d);" % trackedbranch_id).text("Update Now")
buttons.button("disabletracking", onclick="disableTracking(%d);" % trackedbranch_id).text("Disable Tracking")
buttons.button("rebasereview", onclick="location.assign('/rebasetrackingreview?review=%d');" % review.id).text("Rebase Review")
if review.state != "open" and review.branch.archived:
buttons.button("resurrect").text("Resurrect Branch")
def renderPeople(target, list):
for index, person in enumerate(list):
if index != 0: target.text(", ")
span = target.span("user %s" % person.status)
span.span("name").text(person.fullname)
if person.status == 'absent':
span.span("status").text(" (%s)" % person.getAbsence(db))
elif person.status == 'retired':
span.span("status").text(" (retired)")
def renderOwners(target):
renderPeople(target, review.owners)
def renderReviewers(target):
if review.reviewers:
renderPeople(target, review.reviewers)
else:
target.i().text("No reviewers.")
cursor.execute("""SELECT reviewfilters.id, reviewfilters.uid, reviewfilters.path
FROM reviewfilters
JOIN users ON (reviewfilters.uid=users.id)
WHERE reviewfilters.review=%s
AND reviewfilters.type='reviewer'
AND users.status!='retired'""",
(review.id,))
rows = cursor.fetchall()
reviewer_filters_hidden = []
if rows:
table = target.table("reviewfilters reviewers")
row = table.thead().tr("h1")
row.th("h1", colspan=4).text("Custom filters:")
filter_data = {}
reviewfilters = {}
for filter_id, user_id, path | |
<reponame>duc90/marvin
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: <NAME>
# @Date: 2017-08-21 17:11:22
# @Last modified by: <NAME>
# @Last Modified time: 2018-02-26 13:46:30
from __future__ import print_function, division, absolute_import
from marvin import config
from marvin.utils.datamodel.dap import datamodel
from marvin.core.exceptions import MarvinUserWarning
from marvin.utils.datamodel.query.base import QueryParameter
from marvin.utils.datamodel.dap.base import Property
from marvin.utils.general import invalidArgs, isCallableWithArgs
from matplotlib.gridspec import GridSpec
from collections import defaultdict, OrderedDict
from astropy.visualization import hist as ahist
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import six
import pandas as pd
import itertools
import warnings
try:
import mpl_scatter_density as msd
except ImportError as e:
msd = None
msderr = ('mpl-scatter-density is required to plot large results and was not found. '
'To use this feature, please install the python package!')
def compute_stats(data):
    ''' Compute some statistics given a data array

    Computes basic statistics for a data array while ignoring NaN values:
    mean, standard deviation, median, and the 10th, 25th, 75th, and 90th
    percentiles.

    Parameters:
        data (list|ndarray):
            A list or Numpy array of data

    Returns:
        A dictionary of statistics values
    '''
    # The result is deliberately not named "stats": that would shadow the
    # module-level "scipy.stats as stats" alias used elsewhere in this file.
    results = {'mean': np.nanmean(data), 'std': np.nanstd(data),
               'median': np.nanmedian(data)}
    for per in (10, 25, 75, 90):
        results['per{0}'.format(per)] = np.nanpercentile(data, per)
    return results
def _make_masked(data, mask=None):
''' Makes a masked array '''
arr_data = data
if not isinstance(data, np.ma.MaskedArray):
# mask out NaN values if a mask not provided
warnings.warn("Masking out NaN values!", MarvinUserWarning)
mask = mask if mask else np.isnan(data)
# create array
arr_data = np.ma.MaskedArray(data, mask=mask)
return arr_data
def _create_figure(hist=None, hist_axes_visible=None, use_density=None):
    ''' Create a generic figure and axis

    Builds a Matplotlib figure with a main scatter axis and, optionally,
    attached histogram axes.

    Parameters:
        hist (bool|str):
            True for both x and y histograms, 'x' or 'y' for a single one,
            falsy for none.
        hist_axes_visible (bool):
            Whether the histogram tick labels are shown.
        use_density (bool):
            When True, the main axis uses the mpl-scatter-density projection.

    Returns:
        (fig, ax_scat, ax_hist_x, ax_hist_y); the histogram axes are None
        when not requested.
    '''
    # use a scatter density projection or not
    projection = 'scatter_density' if use_density else None
    # BUG FIX: mpl-scatter-density is only needed when a density projection
    # is requested; previously the ImportError was raised unconditionally,
    # breaking ordinary scatter/hexbin figures when the package was missing.
    if use_density and not msd:
        raise ImportError(msderr)
    # create the figure
    fig = plt.figure()
    ax_hist_x = None
    ax_hist_y = None
    # create axes with or without histogram
    if hist:
        if hist is True:
            gs = GridSpec(4, 4)
            ax_scat = fig.add_subplot(gs[1:4, 0:3], projection=projection)
            ax_hist_x = fig.add_subplot(gs[0, 0:3])
            ax_hist_y = fig.add_subplot(gs[1:4, 3])
        elif hist == 'x':
            gs = GridSpec(2, 1, height_ratios=[1, 2])
            ax_scat = fig.add_subplot(gs[1], projection=projection)
            ax_hist_x = fig.add_subplot(gs[0])
        elif hist == 'y':
            gs = GridSpec(1, 2, width_ratios=[2, 1])
            ax_scat = fig.add_subplot(gs[0], projection=projection)
            ax_hist_y = fig.add_subplot(gs[1])
        # NOTE(review): a truthy hist other than True/'x'/'y' leaves ax_scat
        # unbound (also true of the original) — callers only pass those values.
    else:
        ax_scat = fig.add_subplot(1, 1, 1, projection=projection)
    # turn off histogram tick labels when requested
    if ax_hist_x:
        plt.setp(ax_hist_x.get_xticklabels(), visible=hist_axes_visible)
    if ax_hist_y:
        plt.setp(ax_hist_y.get_yticklabels(), visible=hist_axes_visible)
    return fig, ax_scat, ax_hist_x, ax_hist_y
def _create_hist_title(data):
    ''' Build a histogram title showing the data's mean and std deviation '''
    data_stats = compute_stats(data)
    return 'Stats: $\\mu={mean:.3f}, \\sigma={std:.3f}$'.format(**data_stats)
def _get_dap_datamodel_property_label(quantity):
''' Format a DAP datamodel property string label '''
return '{0} [{1}]'.format(quantity.to_string('latex'), quantity.unit.to_string('latex'))
def _get_axis_label(column, axis=''):
    ''' Create an axis label from a column

    QueryParameters use their DAP property label when available (otherwise
    their display name), Properties use their DAP label, plain strings are
    used verbatim, and anything else yields an empty label.
    '''
    if isinstance(column, QueryParameter):
        has_property = hasattr(column, 'property') and column.property
        if has_property:
            return _get_dap_datamodel_property_label(column.property)
        return column.display
    if isinstance(column, Property):
        return _get_dap_datamodel_property_label(column)
    if isinstance(column, six.string_types):
        return column
    return ''
def _set_options():
    ''' Apply the default Matplotlib grid/axes styling used by these plots '''
    defaults = {'axes.axisbelow': True,
                'grid.color': 'gray',
                'grid.linestyle': 'dashed',
                'grid.alpha': 0.8}
    mpl.rcParams.update(defaults)
def _set_limits(column, lim=None, sigma_cutoff=50, percent_clip=1):
''' Set an axis limit
Determines whether to apply percentile clipping or not if any data
has a zscore value above the sigma_cutoff value. Applies percentile clipping
centered around the mean.
Parameters:
column:
The array of data to get limits of
lim (list|tuple):
A user provided range
sigma_cutoff (int):
The number of sigma away from the mean to cutoff
percent_clip (int|tuple):
The percent to clip off the data array. Input values are taken as percentages.
Can either be integer value (halved for lo,hi) or a tuple specifying lo,hi values.
Default is 1%.
Returns:
A list of axis range values to use
'''
if lim is not None:
assert len(lim) == 2, 'range must be a list or tuple of 2'
else:
# get percent clips
if isinstance(percent_clip, (list, tuple)):
lo, hi = percent_clip
else:
lo = percent_clip / 2.
hi = 100 - lo
zscore = stats.zscore(column)
# use percentile limits if the max zscore is > 50 sigma away from mean/stdev
if np.max(zscore) > sigma_cutoff:
lim = [np.percentile(column, lo), np.percentile(column, hi)]
else:
pass
return lim
def _check_input_data(coldim, col, data=None):
''' Check the input data
Parameters:
coldim (str):
Name of the dimension
col (str|array):
The list or array of values. If data keyword is specified, col is a string name
data (Pandas.DataFrame)
A Pandas dataframe
Returns:
The column of data
'''
# check data
assert col is not None, 'Must provide an {0} column'.format(coldim)
if data is not None:
assert isinstance(col, str), '{0} must be a string name if Dataframe provided'.format(coldim)
assert isinstance(data, pd.core.frame.DataFrame), 'data must be Pandas dataframe'
assert col in data.columns, '{0} must be a specified column name in Pandas dataframe'.format(coldim)
col = data[col]
else:
assert isinstance(col, (list, np.ndarray, pd.core.series.Series)), '{0} data must be a list, Pandas Series, or Numpy array'.format(coldim)
return col
def _format_hist_kwargs(axis, **kwargs):
''' Format the histogram kwargs from plot '''
kwargs['color'] = kwargs.get('hist_color', 'lightblue')
if axis == 'x':
kwargs['ylabel'] = kwargs.get('xhist_label', 'Counts')
kwargs['title'] = kwargs.get('xhist_title', None)
elif axis == 'y':
kwargs['ylabel'] = kwargs.get('yhist_label', 'Counts')
kwargs['title'] = kwargs.get('yhist_title', None)
kwargs['color'] = kwargs.get('hist_color', 'lightblue')
kwargs['edgecolor'] = kwargs.get('edgecolors', None)
return kwargs
def _prep_func_kwargs(func, kwargs):
    ''' Prepare the keyword arguments for the proper function input

    Filters an input dictionary down to the keyword arguments accepted by
    the given function.

    Parameters:
        func:
            The function to check keywords against
        kwargs (dict):
            A dictionary of keyword arguments to test

    Returns:
        A new dictionary of usable keyword arguments
    '''
    rejected = invalidArgs(func, kwargs)
    usable_kwargs = {key: value for key, value in kwargs.items() if key not in rejected}
    if not isCallableWithArgs(func, usable_kwargs):
        raise MarvinUserWarning('Cannot call func {0} with current kwargs {1}. Check your inputs'.format(func, usable_kwargs))
    return usable_kwargs
def plot(x, y, **kwargs):
''' Create a scatter plot given two columns of data
Creates a Matplotlib plot using two input arrays of data. Creates either a Matplotlib scatter
plot, hexbin plot, or scatter density plot depending on the size of the input data.
For data with < 1000 values, creates a scatter plot. For data with values between
1000 and 500,000, creates a hexbin plot. For data with > 500,000 values, creates
a scatter density plot.
By default, will also create and display histograms for the x and y data. This can be disabled
setting the "with_hist" keyword to False, or "x", or "y" for displaying only that column.
Accepts all the same keyword arguments as matplotlib scatter, hexbin, and hist methods.
See `scatter-density <https://github.com/astrofrog/mpl-scatter-density>`_
See `matplotlib.pyplot.scatter <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter>`_
See `matplotlib.pyplot.hexbin <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`_
Parameters:
x (str|list|ndarray):
The x array of data
y (str|list|ndarray):
The y array of data
data (Pandas dataframe):
Optional Pandas Dataframe. x, y specify string column names in the dataframe
xmask (ndarray):
A mask to apply to the x-array of data
ymask (ndarray):
A mask to apply to the y-array of data
with_hist (bool|str):
If True, creates the plot with both x,y histograms. False, disables it. If 'x' or 'y',
only creates that histogram. Default is True.
hist_axes_visible (bool):
If True, disables the x-axis ticks for each histogram. Default is True.
xlim (tuple):
A tuple limited the range of the x-axis
ylim (tuple):
A tuple limited the range of the y-axis
xlabel (str|Marvin column):
The x axis label or a Marvin DataModel Property or QueryParameter to use for display
ylabel (str|Marvin column):
The y axis label or a Marvin DataModel Property or QueryParameter to use for display
bins (int|tuple):
A number or tuple specifying the number of bins to use in the histogram. Default is 50. An integer
number is adopted for both x and y bins. A tuple is used to customize per axis.
return_figure (bool):
If True, return the figure and axis object. Default is True.
kwargs (dict):
Any other keyword arguments to be passed to `matplotlib.pyplot.scatter <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter>`_
or `matplotlib.pyplot.hist <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist>`_ or
`matplotlib.pyplot.hexbin <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin>`_.
Returns:
A tuple of the matplotlib figure, axes, and histogram data (if returned)
Example:
>>> # create a | |
693 695
1 695 0 0
1 696 1 0 693
1 697 1 0 690
1 698 1 0 687
1 699 1 0 684
1 700 1 0 681
1 701 1 0 678
1 702 1 0 675
1 703 1 0 672
1 704 1 0 669
1 705 1 0 666
1 706 1 0 663
1 707 1 0 660
1 708 1 0 657
1 709 1 0 654
1 710 1 0 651
1 711 1 0 648
1 712 1 0 645
1 713 1 0 642
1 714 1 0 639
1 715 1 0 636
1 716 1 0 633
1 717 1 0 630
1 718 1 0 627
1 719 1 0 624
1 720 1 0 621
1 721 1 0 618
1 722 1 0 615
1 723 1 0 612
1 724 1 0 609
1 725 1 0 606
1 726 1 0 603
1 727 1 0 600
1 728 1 0 597
1 729 1 0 594
1 730 1 0 591
1 731 1 0 588
1 732 1 0 585
1 733 1 0 582
1 734 1 0 579
1 735 1 0 576
1 696 1 0 573
1 697 1 0 570
1 698 1 0 567
1 699 1 0 564
1 700 1 0 561
1 701 1 0 558
1 702 1 0 555
1 703 1 0 552
1 704 1 0 549
1 705 1 0 546
1 706 1 0 543
1 707 1 0 540
1 708 1 0 537
1 709 1 0 534
1 710 1 0 531
1 711 1 0 528
1 712 1 0 525
1 713 1 0 522
1 714 1 0 519
1 715 1 0 516
1 716 1 0 513
1 717 1 0 510
1 718 1 0 507
1 719 1 0 504
1 720 1 0 501
1 721 1 0 498
1 722 1 0 495
1 723 1 0 492
1 724 1 0 489
1 725 1 0 486
1 726 1 0 483
1 727 1 0 480
1 728 1 0 477
1 729 1 0 474
1 730 1 0 471
1 731 1 0 468
1 732 1 0 465
1 733 1 0 462
1 734 1 0 459
1 735 1 0 456
1 696 1 0 453
1 697 1 0 450
1 698 1 0 447
1 699 1 0 444
1 700 1 0 441
1 701 1 0 438
1 702 1 0 435
1 703 1 0 432
1 704 1 0 429
1 705 1 0 426
1 706 1 0 423
1 707 1 0 420
1 708 1 0 417
1 709 1 0 414
1 710 1 0 411
1 711 1 0 408
1 712 1 0 405
1 713 1 0 402
1 714 1 0 399
1 715 1 0 396
1 716 1 0 393
1 717 1 0 390
1 718 1 0 387
1 719 1 0 384
1 720 1 0 381
1 721 1 0 378
1 722 1 0 375
1 723 1 0 372
1 724 1 0 369
1 725 1 0 366
1 726 1 0 363
1 727 1 0 360
1 728 1 0 357
1 729 1 0 354
1 730 1 0 351
1 731 1 0 348
1 732 1 0 345
1 733 1 0 342
1 734 1 0 339
1 735 1 0 336
1 1 1 1 735
1 1 1 1 734
1 1 1 1 733
1 1 1 1 732
1 1 1 1 731
1 1 1 1 730
1 1 1 1 729
1 1 1 1 728
1 1 1 1 727
1 1 1 1 726
1 1 1 1 725
1 1 1 1 724
1 1 1 1 723
1 1 1 1 722
1 1 1 1 721
1 1 1 1 720
1 1 1 1 719
1 1 1 1 718
1 1 1 1 717
1 1 1 1 716
1 1 1 1 715
1 1 1 1 714
1 1 1 1 713
1 1 1 1 712
1 1 1 1 711
1 1 1 1 710
1 1 1 1 709
1 1 1 1 708
1 1 1 1 707
1 1 1 1 706
1 1 1 1 705
1 1 1 1 704
1 1 1 1 703
1 1 1 1 702
1 1 1 1 701
1 1 1 1 700
1 1 1 1 699
1 1 1 1 698
1 1 1 1 697
1 1 1 1 696
1 1 2 0 693 573
1 1 2 0 693 453
1 1 2 0 690 570
1 1 2 0 690 450
1 1 2 0 687 567
1 1 2 0 687 447
1 1 2 0 684 564
1 1 2 0 684 444
1 1 2 0 681 561
1 1 2 0 681 441
1 1 2 0 678 558
1 1 2 0 678 438
1 1 2 0 675 555
1 1 2 0 675 435
1 1 2 0 672 552
1 1 2 0 672 432
1 1 2 0 669 549
1 1 2 0 669 429
1 1 2 0 666 546
1 1 2 0 666 426
1 1 2 0 663 543
1 1 2 0 663 423
1 1 2 0 660 540
1 1 2 0 660 420
1 1 2 0 657 537
1 1 2 0 657 417
1 1 2 0 654 534
1 1 2 0 654 414
1 1 2 0 651 531
1 1 2 0 651 411
1 1 2 0 648 528
1 1 2 0 648 408
1 1 2 0 645 525
1 1 2 0 645 405
1 1 2 0 642 522
1 1 2 0 642 402
1 1 2 0 639 519
1 1 2 0 639 399
1 1 2 0 636 516
1 1 2 0 636 396
1 1 2 0 633 513
1 1 2 0 633 393
1 1 2 0 630 510
1 1 2 0 630 390
1 1 2 0 627 507
1 1 2 0 627 387
1 1 2 0 624 504
1 1 2 0 624 384
1 1 2 0 621 501
1 1 2 0 621 381
1 1 2 0 618 498
1 1 2 0 618 378
1 1 2 0 615 495
1 1 2 0 615 375
1 1 2 0 612 492
1 1 2 0 612 372
1 1 2 0 609 489
1 1 2 0 609 369
1 1 2 0 606 486
1 1 2 0 606 366
1 1 2 0 603 483
1 1 2 0 603 363
1 1 2 0 600 480
1 1 2 0 600 360
1 1 2 0 597 477
1 1 2 0 597 357
1 1 2 0 594 474
1 1 2 0 594 354
1 1 2 0 591 471
1 1 2 0 591 351
1 1 2 0 588 468
1 1 2 0 588 348
1 1 2 0 585 465
1 1 2 0 585 345
1 1 2 0 582 462
1 1 2 0 582 342
1 1 2 0 579 459
1 1 2 0 579 339
1 1 2 0 576 456
1 1 2 0 576 336
1 1 2 0 573 693
1 1 2 0 573 453
1 1 2 0 570 690
1 1 2 0 570 450
1 1 2 0 567 687
1 1 2 0 567 447
1 1 2 0 564 684
1 1 2 0 564 444
1 1 2 0 561 681
1 1 2 0 561 441
1 1 2 0 558 678
1 1 2 0 558 438
1 1 2 0 555 675
1 1 2 0 555 435
1 1 2 0 552 672
1 1 2 0 552 432
1 1 2 0 549 669
1 1 2 0 549 429
1 1 2 0 546 666
1 1 2 0 546 426
1 1 2 0 543 663
1 1 2 0 543 423
1 1 2 0 540 660
1 1 2 0 540 420
1 1 2 0 537 657
1 1 2 0 537 417
1 1 2 0 534 654
1 1 2 0 534 414
1 1 2 0 531 651
1 1 2 0 531 411
1 1 2 0 528 648
1 1 2 0 528 408
1 1 2 0 525 645
1 1 2 0 525 405
1 1 2 0 522 642
1 1 2 0 522 402
1 1 2 0 519 639
1 1 2 0 519 399
1 1 2 0 516 636
1 1 2 0 516 396
1 1 2 0 513 633
1 1 2 0 513 393
1 1 2 0 510 630
1 1 2 0 510 390
1 1 2 0 507 627
1 1 2 0 507 387
1 1 2 0 504 624
1 | |
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
    """Poll a workload until a public endpoint appears; return its URL.

    Refreshes the workload by uuid every half second and, once
    ``publicEndpoints`` is published, builds ``http://<address>:<port>``
    from the first endpoint.

    :param p_client: project client used to refresh the workload
    :param workload: workload object to poll
    :param timeout: seconds to wait before giving up
    :raises AssertionError: if no endpoint shows up within *timeout*
    """
    deadline = time.time() + timeout
    while True:
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for endpoint to be available")
        time.sleep(.5)
        refreshed = p_client.list_workload(uuid=workload.uuid).data
        assert len(refreshed) == 1
        workload = refreshed[0]
        if hasattr(workload, 'publicEndpoints'):
            assert len(workload.publicEndpoints) > 0
            endpoint = workload.publicEndpoints[0]
            return "http://{0}:{1}".format(endpoint["addresses"][0],
                                           str(endpoint["port"]))
def wait_until_lb_is_active(url, timeout=300):
    """Block until the load balancer at *url* starts accepting requests.

    Polls via check_for_no_access every half second; raises once
    *timeout* seconds have elapsed without access.
    """
    deadline = time.time() + timeout
    while check_for_no_access(url):
        time.sleep(.5)
        print("No access yet")
        if time.time() > deadline:
            raise Exception('Timed out waiting for LB to become active')
def check_for_no_access(url):
    """Return True when a GET to *url* fails with a connection error."""
    try:
        requests.get(url)
    except requests.ConnectionError:
        print("Connection Error - " + url)
        return True
    return False
def validate_http_response(cmd, target_name_list, client_pod=None):
    """Hit *cmd* repeatedly and verify every expected backend responds.

    Issues up to ``5 * len(target_name_list)`` requests (curl locally,
    or wget from *client_pod* when given) and checks that every name in
    *target_name_list* is returned at least once, i.e. the endpoint
    load-balances across all backends.

    :param cmd: URL (plus any options) passed to curl/wget
    :param target_name_list: backend names the responses must cover
    :param client_pod: optional pod to issue the requests from
    :raises AssertionError: on an unexpected response body, or if some
        target was never hit within the request budget
    """
    target_hit_list = target_name_list[:]
    count = 5 * len(target_name_list)
    # bug fix: range(1, count) only made count-1 attempts; use the full
    # budget of five tries per target
    for _ in range(count):
        if len(target_hit_list) == 0:
            break
        if client_pod is None:
            result = run_command("curl " + cmd)
        else:
            result = kubectl_pod_exec(client_pod, "wget -qO- " + cmd)
            result = result.decode()
        result = result.rstrip()
        print("cmd: \t" + cmd)
        print("result: \t" + result)
        assert result in target_name_list
        if result in target_hit_list:
            target_hit_list.remove(result)
    print("After removing all, the rest is: ", target_hit_list)
    assert len(target_hit_list) == 0
def validate_cluster(client, cluster, intermediate_state="provisioning",
                     check_intermediate_state=True, skipIngresscheck=True,
                     nodes_not_in_active_state=None, k8s_version=""):
    """End-to-end validation of a freshly (re)provisioned cluster.

    Waits for the cluster to settle (optionally via the expected
    intermediate state), checks the k8s version and component health
    where applicable, then deploys a daemonset workload — and, unless
    *skipIngresscheck*, an ingress pointing at it — to prove the
    cluster schedules pods and serves traffic.

    :param nodes_not_in_active_state: hostnames allowed to be inactive;
        defaults to none
    :return: the refreshed cluster object
    """
    # bug fix: a mutable default argument ([]) is shared across calls;
    # default to None and build a fresh list per invocation
    if nodes_not_in_active_state is None:
        nodes_not_in_active_state = []
    cluster = validate_cluster_state(
        client, cluster,
        check_intermediate_state=check_intermediate_state,
        intermediate_state=intermediate_state,
        nodes_not_in_active_state=nodes_not_in_active_state)
    # Create Daemon set workload and have an Ingress with Workload
    # rule pointing to this daemonset
    create_kubeconfig(cluster)
    if k8s_version != "":
        check_cluster_version(cluster, k8s_version)
    if hasattr(cluster, 'rancherKubernetesEngineConfig'):
        check_cluster_state(len(get_role_nodes(cluster, "etcd")))
    project, ns = create_project_and_ns(ADMIN_TOKEN, cluster)
    p_client = get_project_client_for_token(project, ADMIN_TOKEN)
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    name = random_test_name("default")
    workload = p_client.create_workload(name=name,
                                        containers=con,
                                        namespaceId=ns.id,
                                        daemonSetConfig={})
    # a daemonset should land one pod on every schedulable node
    validate_workload(p_client, workload, "daemonSet", ns.name,
                      len(get_schedulable_nodes(cluster)))
    if not skipIngresscheck:
        host = "test" + str(random_int(10000, 99999)) + ".com"
        path = "/name.html"
        rule = {"host": host,
                "paths":
                [{"workloadIds": [workload.id], "targetPort": "80"}]}
        ingress = p_client.create_ingress(name=name,
                                          namespaceId=ns.id,
                                          rules=[rule])
        wait_for_ingress_to_active(p_client, ingress)
        validate_ingress(p_client, cluster, [workload], host, path)
    return cluster
def check_cluster_version(cluster, version):
    """Assert Rancher's applied spec and kubectl both report *version*.

    The Rancher version string looks like "v1.x.y-rancherN"; kubectl
    reports only the part before the dash.
    """
    applied_version = cluster.appliedSpec[
        "rancherKubernetesEngineConfig"]["kubernetesVersion"]
    assert applied_version == version, \
        "cluster_k8s_version: " + applied_version + \
        " Expected: " + version
    expected_version = version[:version.find("-")]
    reported = execute_kubectl_cmd("version")["serverVersion"]["gitVersion"]
    assert reported == expected_version, \
        "kubectl version: " + reported + \
        " Expected: " + expected_version
def check_cluster_state(etcd_count):
    """Verify componentstatuses health for the cluster.

    scheduler, controller-manager and one etcd-N entry per etcd node
    must all be present exactly once and report Healthy/True.
    """
    expected = ["scheduler", "controller-manager"]
    expected.extend("etcd-" + str(i) for i in range(etcd_count))
    print("components to check - " + str(expected))
    for status in execute_kubectl_cmd("get cs")["items"]:
        component_name = status["metadata"]["name"]
        assert component_name in expected
        expected.remove(component_name)
        assert status["conditions"][0]["status"] == "True"
        assert status["conditions"][0]["type"] == "Healthy"
    assert len(expected) == 0
def validate_dns_record(pod, record, expected):
    """Resolve the cluster-local service name for *record* from *pod*.

    Requires a pod with `dig` available - TEST_IMAGE.
    """
    fqdn = "{0}.{1}.svc.cluster.local".format(record["name"],
                                              record["namespaceId"])
    validate_dns_entry(pod, fqdn, expected)
def validate_dns_entry(pod, host, expected):
    """Check that *host* pings and resolves to the *expected* values.

    Runs ping and dig inside *pod*; the ping output must mention at
    least one expected value with 0% loss, and the dig output must
    contain every expected value.

    Requires a pod with `dig` available - TEST_IMAGE.
    """
    ping_text = str(kubectl_pod_exec(pod, 'ping -c 1 -W 1 {0}'.format(host)))
    assert any(value in ping_text for value in expected) is True
    assert " 0% packet loss" in ping_text
    dig_text = str(kubectl_pod_exec(pod, 'dig {0} +short'.format(host)))
    for value in expected:
        assert value in dig_text
def wait_for_nodes_to_become_active(client, cluster, exception_list=None,
                                    retry_count=0):
    """Wait until every cluster node (minus *exception_list*) is active.

    Nodes that get auto-deleted while we wait (nodepool replacement)
    force a recursive re-evaluation of the node list, up to 5 retries.

    :param exception_list: requestedHostnames to skip; defaults to none
    """
    # bug fix: a mutable default argument ([]) is shared across calls
    if exception_list is None:
        exception_list = []
    nodes = client.list_node(clusterId=cluster.id).data
    node_auto_deleted = False
    for node in nodes:
        if node.requestedHostname not in exception_list:
            node = wait_for_node_status(client, node, "active")
            if node is None:
                print("Need to re-evalauate new node list")
                node_auto_deleted = True
                retry_count += 1
                print("Retry Count:" + str(retry_count))
    if node_auto_deleted and retry_count < 5:
        wait_for_nodes_to_become_active(client, cluster, exception_list,
                                        retry_count)
def wait_for_node_status(client, node, state):
    """Wait until *node* (matched by uuid) reaches *state*.

    :return: the original node object on success, or None when the node
        no longer exists (e.g. auto-deleted as part of a nodepool)
    :raises AssertionError: after MACHINE_TIMEOUT seconds
    """
    uuid = node.uuid
    start = time.time()
    # Handle the case of nodes getting auto deleted when they are part of
    # nodepools: a vanished node is reported as None, not an error.
    while True:
        nodes = client.list_node(uuid=uuid).data
        if len(nodes) != 1:
            print("Node does not exist anymore -" + uuid)
            return None
        if nodes[0].state == state:
            return node
        if time.time() - start > MACHINE_TIMEOUT:
            # bug fix: the old message always said "active" regardless of
            # the state actually being waited for
            raise AssertionError(
                "Timed out waiting for state to get to " + state)
        time.sleep(5)
def wait_for_node_to_be_deleted(client, node, timeout=300):
    """Block until *node* (matched by uuid) no longer exists.

    :raises AssertionError: if the node is still listed after *timeout*
        seconds
    """
    uuid = node.uuid
    start = time.time()
    nodes = client.list_node(uuid=uuid).data
    node_count = len(nodes)
    while node_count != 0:
        if time.time() - start > timeout:
            # bug fix: the old message claimed we were waiting for the
            # "active" state; we are waiting for deletion
            raise AssertionError(
                "Timed out waiting for node to be deleted")
        time.sleep(.5)
        nodes = client.list_node(uuid=uuid).data
        node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
                                timeout=300):
    """Block until the cluster lists exactly *expected_node_count* nodes.

    :raises AssertionError: if the count is still wrong after *timeout*
        seconds
    """
    start = time.time()
    nodes = client.list_node(clusterId=cluster.id).data
    node_count = len(nodes)
    while node_count != expected_node_count:
        if time.time() - start > timeout:
            # bug fix: the old message claimed we were waiting for the
            # "active" state; we are waiting for a node count
            raise AssertionError(
                "Timed out waiting for expected cluster node count")
        time.sleep(.5)
        nodes = client.list_node(clusterId=cluster.id).data
        node_count = len(nodes)
def get_custom_host_registration_cmd(client, cluster, roles, node):
    """Build the docker command that registers *node* into *cluster*.

    Reuses an existing cluster registration token when available,
    otherwise creates one, then appends the role flags and the node's
    public/internal addresses.

    :param roles: subset of ["etcd", "worker", "controlplane"]
    :raises AssertionError: on an unknown role name
    """
    allowed_roles = ["etcd", "worker", "controlplane"]
    tokens = client.list_cluster_registration_token(
        clusterId=cluster.id).data
    if len(tokens) > 0:
        token = tokens[0]
    else:
        token = create_custom_host_registration_token(client, cluster)
    cmd = token.nodeCommand
    for role in roles:
        assert role in allowed_roles
        cmd += " --" + role
    cmd += (" --address " + node.public_ip_address +
            " --internal-address " + node.private_ip_address)
    return cmd
def create_custom_host_registration_token(client, cluster):
    """Create a cluster registration token and wait until it is active."""
    token = client.wait_success(
        client.create_cluster_registration_token(clusterId=cluster.id))
    assert token.state == 'active'
    return token
def get_cluster_type(client, cluster):
    """Classify *cluster* by the provider config keys it carries.

    :return: "Custom" for an RKE cluster whose first node has no node
        template, the matching provider config key for hosted/RKE
        clusters, or "Imported" when no known config is present.
    """
    provider_configs = (
        "amazonElasticContainerServiceConfig",
        "azureKubernetesServiceConfig",
        "googleKubernetesEngineConfig",
        "rancherKubernetesEngineConfig",
    )
    if "rancherKubernetesEngineConfig" in cluster:
        nodes = client.list_node(clusterId=cluster.id).data
        if len(nodes) > 0 and nodes[0].nodeTemplateId is None:
            return "Custom"
    for config_key in provider_configs:
        if config_key in cluster:
            return config_key
    return "Imported"
def delete_cluster(client, cluster):
    """Delete *cluster* in Rancher and, for Imported/Custom clusters,
    also terminate the backing AWS instances.

    AWS instances are located by a test name-tag pattern combined with
    the public IPs recorded on the cluster's nodes.
    """
    nodes = client.list_node(clusterId=cluster.id).data
    # Delete Cluster
    client.delete(cluster)
    # Delete nodes(in cluster) from AWS for Imported and Custom Cluster
    if len(nodes) > 0:
        cluster_type = get_cluster_type(client, cluster)
        print(cluster_type)
        # bug fix: reuse the type computed above instead of querying again
        if cluster_type in ["Imported", "Custom"]:
            nodes = client.list_node(clusterId=cluster.id).data
            filters = [
                {'Name': 'tag:Name',
                 'Values': ['testcustom*', 'teststess*']}]
            ip_list = []
            for node in nodes:
                ip_list.append(node.externalIpAddress)
            # bug fix: the old assert checked the filter dict (always
            # non-empty); the intent is to require at least one IP
            assert len(ip_list) > 0
            ip_filter = {
                'Name':
                    'network-interface.addresses.association.public-ip',
                'Values': ip_list,
            }
            filters.append(ip_filter)
            print(ip_filter)
            aws_nodes = AmazonWebServices().get_nodes(filters)
            for node in aws_nodes:
                print(node.public_ip_address)
            AmazonWebServices().delete_nodes(aws_nodes)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
                                         workload2, allow_connectivity=True):
    """Ping every pod of *workload2* from every pod of *workload1* and
    assert the result matches *allow_connectivity*."""
    source_pods = p_client1.list_pod(workloadId=workload1.id).data
    dest_pods = p_client2.list_pod(workloadId=workload2.id).data
    for source_pod in source_pods:
        for dest_pod in dest_pods:
            check_connectivity_between_pods(source_pod, dest_pod,
                                            allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
    """Assert full pod-to-pod connectivity within a single workload
    (every pod pings every pod, including itself)."""
    pods = p_client.list_pod(workloadId=workload.id).data
    for source_pod in pods:
        for dest_pod in pods:
            check_connectivity_between_pods(source_pod, dest_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
    """Ping *pod2*'s IP from inside *pod1* and assert the outcome.

    With allow_connectivity=True the ping must report 0% loss;
    otherwise it must report 100% loss.
    """
    target_ip = pod2.status.podIp
    response = str(kubectl_pod_exec(pod1, "ping -c 1 -W 1 " + target_ip))
    print("Actual ping Response from " + pod1.name + ":" + response)
    expected_loss = (" 0% packet loss" if allow_connectivity
                     else " 100% packet loss")
    assert target_ip in response and expected_loss in response
def kubectl_pod_exec(pod, cmd):
    """Run *cmd* inside *pod* via `kubectl exec` and return raw output."""
    command = "exec {0} -n {1} -- {2}".format(pod.name, pod.namespaceId, cmd)
    return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password):
    """Run *cmd* as root over SSH on ip:port and return stdout lines.

    NOTE(review): the SSH connection is never closed and unknown host
    keys are auto-accepted — acceptable for test tooling only.
    """
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(ip, username="root", password=password, port=port)
    _stdin, stdout, _stderr = ssh_client.exec_command(cmd)
    return stdout.readlines()
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
    """Poll namespace *ns* (by uuid) until its state is "active".

    :return: the refreshed namespace object
    :raises AssertionError: on a lookup mismatch or after *timeout*
        seconds
    """
    deadline = time.time() + timeout
    time.sleep(2)
    def _refresh():
        found = client.list_namespace(uuid=ns.uuid).data
        assert len(found) == 1
        return found[0]
    ns = _refresh()
    while ns.state != "active":
        if time.time() > deadline:
            raise AssertionError(
                "Timed out waiting for state to get to active")
        time.sleep(.5)
        ns = _refresh()
    return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
                        timeout=DEFAULT_TIMEOUT):
    """Wait until every pod of *workload* runs *expectedimage*.

    :param numofpods: number of pods expected in the workload
    :raises AssertionError: if any pod still has the wrong image after
        *timeout* seconds
    """
    start = time.time()
    # NOTE(review): only the last workload label ends up in the pod
    # selector; presumably a single label uniquely selects the pods —
    # verify if workloads ever carry multiple labels
    for key, value in workload.workloadLabels.items():
        label = key + "=" + value
    get_pods = "get pods -l" + label + " -n " + ns_name
    pods = execute_kubectl_cmd(get_pods)
    # bug fix: range(0, numofpods - 1) skipped the last pod
    for x in range(0, numofpods):
        pod = pods["items"][x]
        podimage = pod["spec"]["containers"][0]["image"]
        while podimage != expectedimage:
            if time.time() - start > timeout:
                raise AssertionError(
                    "Timed out waiting for correct pod images")
            time.sleep(.5)
            pods = execute_kubectl_cmd(get_pods)
            pod = pods["items"][x]
            podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
                              timeout=DEFAULT_TIMEOUT):
    """Poll until *workload* has exactly *pod_count* pods; return them.

    :raises AssertionError: if the count is still wrong after *timeout*
        seconds
    """
    start = time.time()
    pods = p_client.list_pod(workloadId=workload.id).data
    while len(pods) != pod_count:
        if time.time() - start > timeout:
            # bug fix: the old message claimed we were waiting for the
            # "active" state; we are waiting for a pod count
            raise AssertionError(
                "Timed out waiting for expected number of pods in workload")
        time.sleep(.5)
        pods = p_client.list_pod(workloadId=workload.id).data
    return pods
def get_admin_client_and_cluster():
    """Return (admin client, target cluster).

    Uses CLUSTER_NAME when configured, otherwise picks the first
    cluster the admin client can see.
    """
    client = get_admin_client()
    if CLUSTER_NAME == "":
        candidates = client.list_cluster().data
    else:
        candidates = client.list_cluster(name=CLUSTER_NAME).data
    assert len(candidates) > 0
    return client, candidates[0]
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[]):
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=MACHINE_TIMEOUT)
assert cluster.state == intermediate_state
cluster = | |
<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ManagedSslCertificateArgs', 'ManagedSslCertificate']
# NOTE(review): tfgen-generated input-type class (see file header warning);
# prefer regenerating over hand-editing.
@pulumi.input_type
class ManagedSslCertificateArgs:
    """Constructor argument bag for a ManagedSslCertificate resource.

    Every field is an optional ``pulumi.Input``; only explicitly supplied
    values are recorded on the instance (via ``pulumi.set`` below).
    """
    def __init__(__self__, *,
                 certificate_id: Optional[pulumi.Input[int]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 managed: Optional[pulumi.Input['ManagedSslCertificateManagedArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ManagedSslCertificate resource.
        :param pulumi.Input[int] certificate_id: The unique identifier for the resource.
        :param pulumi.Input[str] description: An optional description of this resource.
        :param pulumi.Input['ManagedSslCertificateManagedArgs'] managed: Properties relevant to a managed certificate. These will be used if the
               certificate is managed (as indicated by a value of `MANAGED` in `type`).
               Structure is documented below.
        :param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is
               created. The name must be 1-63 characters long, and comply with
               RFC1035. Specifically, the name must be 1-63 characters long and match
               the regular expression `a-z?` which means the
               first character must be a lowercase letter, and all following
               characters must be a dash, lowercase letter, or digit, except the last
               character, which cannot be a dash.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] type: Enum field whose value is always `MANAGED` - used to signal to the API
               which type this is.
               Default value is `MANAGED`.
               Possible values are `MANAGED`.
        """
        # Only record fields the caller actually supplied.
        if certificate_id is not None:
            pulumi.set(__self__, "certificate_id", certificate_id)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if managed is not None:
            pulumi.set(__self__, "managed", managed)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if type is not None:
            pulumi.set(__self__, "type", type)
    # Each field below is exposed as a getter/setter pair backed by
    # pulumi.get/pulumi.set so the engine can track input values.
    @property
    @pulumi.getter(name="certificateId")
    def certificate_id(self) -> Optional[pulumi.Input[int]]:
        """
        The unique identifier for the resource.
        """
        return pulumi.get(self, "certificate_id")
    @certificate_id.setter
    def certificate_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "certificate_id", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional description of this resource.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def managed(self) -> Optional[pulumi.Input['ManagedSslCertificateManagedArgs']]:
        """
        Properties relevant to a managed certificate. These will be used if the
        certificate is managed (as indicated by a value of `MANAGED` in `type`).
        Structure is documented below.
        """
        return pulumi.get(self, "managed")
    @managed.setter
    def managed(self, value: Optional[pulumi.Input['ManagedSslCertificateManagedArgs']]):
        pulumi.set(self, "managed", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource. Provided by the client when the resource is
        created. The name must be 1-63 characters long, and comply with
        RFC1035. Specifically, the name must be 1-63 characters long and match
        the regular expression `a-z?` which means the
        first character must be a lowercase letter, and all following
        characters must be a dash, lowercase letter, or digit, except the last
        character, which cannot be a dash.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Enum field whose value is always `MANAGED` - used to signal to the API
        which type this is.
        Default value is `MANAGED`.
        Possible values are `MANAGED`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class _ManagedSslCertificateState:
def __init__(__self__, *,
certificate_id: Optional[pulumi.Input[int]] = None,
creation_timestamp: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
expire_time: Optional[pulumi.Input[str]] = None,
managed: Optional[pulumi.Input['ManagedSslCertificateManagedArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
subject_alternative_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ManagedSslCertificate resources.
:param pulumi.Input[int] certificate_id: The unique identifier for the resource.
:param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format.
:param pulumi.Input[str] description: An optional description of this resource.
:param pulumi.Input[str] expire_time: Expire time of the certificate.
:param pulumi.Input['ManagedSslCertificateManagedArgs'] managed: Properties relevant to a managed certificate. These will be used if the
certificate is managed (as indicated by a value of `MANAGED` in `type`).
Structure is documented below.
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match
the regular expression `a-z?` which means the
first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] self_link: The URI of the created resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alternative_names: Domains associated with the certificate via Subject Alternative Name.
:param pulumi.Input[str] type: Enum field whose value is always `MANAGED` - used to signal to the API
which type this is.
Default value is `MANAGED`.
Possible values are `MANAGED`.
"""
if certificate_id is not None:
pulumi.set(__self__, "certificate_id", certificate_id)
if creation_timestamp is not None:
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description is not None:
pulumi.set(__self__, "description", description)
if expire_time is not None:
pulumi.set(__self__, "expire_time", expire_time)
if managed is not None:
pulumi.set(__self__, "managed", managed)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if self_link is not None:
pulumi.set(__self__, "self_link", self_link)
if subject_alternative_names is not None:
pulumi.set(__self__, "subject_alternative_names", subject_alternative_names)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="certificateId")
def certificate_id(self) -> Optional[pulumi.Input[int]]:
"""
The unique identifier for the resource.
"""
return pulumi.get(self, "certificate_id")
@certificate_id.setter
def certificate_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "certificate_id", value)
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@creation_timestamp.setter
def creation_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "creation_timestamp", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="expireTime")
def expire_time(self) -> Optional[pulumi.Input[str]]:
"""
Expire time of the certificate.
"""
return pulumi.get(self, "expire_time")
@expire_time.setter
def expire_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expire_time", value)
@property
@pulumi.getter
def managed(self) -> Optional[pulumi.Input['ManagedSslCertificateManagedArgs']]:
"""
Properties relevant to a managed certificate. These will be used if the
certificate is managed (as indicated by a value of `MANAGED` in `type`).
Structure is documented below.
"""
return pulumi.get(self, "managed")
@managed.setter
def managed(self, value: Optional[pulumi.Input['ManagedSslCertificateManagedArgs']]):
pulumi.set(self, "managed", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match
the regular expression `a-z?` which means the
first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> Optional[pulumi.Input[str]]:
"""
The URI of the created resource.
"""
return pulumi.get(self, "self_link")
@self_link.setter
def self_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "self_link", value)
@property
@pulumi.getter(name="subjectAlternativeNames")
def subject_alternative_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Domains associated with the certificate via Subject Alternative Name.
"""
return pulumi.get(self, "subject_alternative_names")
@subject_alternative_names.setter
def subject_alternative_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "subject_alternative_names", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Enum field whose value is always `MANAGED` - used to signal to the API
which type | |
<reponame>starofrainnight/ncstyler<gh_stars>0
#!/usr/bin/env python
import argparse
import CppHeaderParser
import re
import sys
import yaml
import copy
import six
import os.path
import traceback
class CppDefine(dict):
    """Parsed representation of a C/C++ ``#define`` directive.

    Keys: ``name`` (macro identifier), ``parameters`` (list of
    parameter dicts for function-like macros) and ``line_number``.
    """

    def __init__(self):
        super(CppDefine, self).__init__()
        self.update(name=None, parameters=[], line_number=-1)
class CppDefineParameter(dict):
    """Single parameter of a function-like macro (``name``,
    ``line_number``)."""

    def __init__(self):
        super(CppDefineParameter, self).__init__()
        self.update(name=None, line_number=-1)
class CppNamespace(dict):
    """Parsed C++ namespace record (``name``, ``line_number``)."""

    def __init__(self):
        super(CppNamespace, self).__init__()
        self.update(name=None, line_number=-1)
class CppFileName(dict):
    """File-name record checked against naming rules (``name``,
    ``line_number``)."""

    def __init__(self):
        super(CppFileName, self).__init__()
        self.update(name=None, line_number=-1)
class Application(object):
def __init__(self):
    """Parse the command line and load the YAML naming-rule configuration.

    Defaults the output path to the input path when ``-o`` is omitted
    (in-place styling), then merges the user's ``_base_`` rule section
    over the built-in defaults so partial overrides keep the default
    regex / error message.
    """
    description = '''A styler just target to naming conventions of source
    code'''
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-c", "--config",
                        help="Configuration file path (In YAML format)",
                        required=True)
    parser.add_argument("-o", "--output", help="Output file path")
    parser.add_argument("-d", "--debug", action='store_true',
                        help="Print trace stack")
    parser.add_argument("file_path", help="Source file path")
    self.__args = parser.parse_args()

    # If user does not specific output path, we default it to input file
    # path
    if self.__args.output is None:
        self.__args.output = self.__args.file_path

    # BUG FIX: the original `yaml.load(open(...))` leaked the file handle
    # and used yaml.load's unsafe default loader; safe_load never
    # constructs arbitrary Python objects from the config file.
    with open(self.__args.config) as config_file:
        self.__config = yaml.safe_load(config_file)

    # BUG FIX: use .get() so a config without a "_base_" section does not
    # raise KeyError; defaults below are then used as-is.
    old_base = self.__config.get("_base_", {})
    self.__config["_base_"] = {
        "re": "[a-zA-Z0-9_]+",
        "error": "",
    }
    self.__config["_base_"].update(old_base)
def parse_define(self, adefine):
    """Parse a ``#define`` line into a CppDefine record.

    Captures the macro identifier and, for function-like macros, the
    comma-separated parameter names inside the parentheses.
    """
    matched = re.match(r"[^\w]*(\w+)(?:\(([^\)]*)\)|\s*).*", adefine)
    macro_name = matched.group(1)
    raw_params = matched.group(2)

    parameters = []
    if raw_params is not None:
        for raw_name in raw_params.split(','):
            param = CppDefineParameter()
            param["name"] = raw_name.strip()
            parameters.append(param)

    define = CppDefine()
    define["name"] = macro_name
    define["parameters"] = parameters
    return define
def _is_special_method(self, amethod):
    """Return True if ``amethod`` is a C++ operator overload.

    :param amethod: either a method name string, or a CppHeaderParser
        method dict carrying "name" and a "debug" source snippet.
    """
    if isinstance(amethod, six.string_types):
        amethod_name = amethod
    else:
        amethod_name = amethod["name"]

    founded = re.findall(r"(?:^|[^\w]+)operator[^\w]+", amethod_name)
    if len(founded) > 0:
        return True

    # Fall back to the raw declaration text.  BUG FIX: the original
    # indexed amethod["debug"] unconditionally, which raised TypeError
    # ("string indices must be integers") when a plain name string with
    # no operator match was passed in.
    if not isinstance(amethod, six.string_types):
        if re.match(r"(?:^|.*\W)operator\W.*", amethod["debug"]) is not None:
            return True
    return False
def _get_argument_name(self, an_argument):
    """Best-effort extraction of an argument's identifier.

    Accepts either a plain string (returned unchanged) or a
    CppHeaderParser argument dict; when the dict's "name" is empty the
    identifier is recovered from the "type" text for function-pointer
    style parameters.  Returns "" when no name can be determined.
    """
    if isinstance(an_argument, six.string_types):
        return an_argument
    if len(an_argument["name"]) > 0:
        return an_argument["name"]
    # Member-function-pointer style: "ret (Class::*name)(...)"
    matched = re.match(r"^\w+\s*\(\w*::\*(\w+)\)\(.*$", an_argument["type"])
    if matched is not None:
        # BUG FIX: the original fell through without returning here, so a
        # successful match yielded None instead of the captured name.
        return matched.group(1)
    # Plain function-pointer style: "ret (*name)(...)" and similar
    matched = re.match(r"[^\(]*\([^\)]*\W(\w+)\W.*\).*", an_argument["type"])
    if matched is None:
        return ""
    return matched.group(1)
def _get_config(self, name):
override_table = {
"class": "_base_",
"function": "_base_",
"variant": "_base_",
"namespace": "_base_",
"define": "_base_",
"filename": "_base_", # Special config use to define filename rule
"argument": "variant",
"static_variant": "variant",
"global_variant": "variant",
"function_argument": "argument",
"class_method_argument": "function_argument",
"struct_method_argument": "class_method_argument",
"define_function_argument": "function_argument",
"define_function": "function",
"class_method": "function",
"struct_method": "class_method",
"class_variant": "variant",
"struct_variant": "class_variant",
"typedef": "class",
"struct": "class",
"enum": "class",
"enum_value": "define",
"union": "struct",
}
my_config = dict()
if name in override_table:
base_name = override_table[name]
my_config.update(self._get_config(base_name))
if name in self.__config:
my_config.update(self.__config[name])
return my_config
def _is_valid_variable(self, cpp_variable):
if cpp_variable["type"] == "return":
return False
if len(cpp_variable["type"]) <= 0:
return False
return True
def _get_cpp_method_re(self, name):
prefix = "operator"
if not name.startswith(prefix):
return re.escape(name)
# Operator methods
chars = []
for achar in name[len(prefix):]:
chars.append("\\s*")
if achar.isalnum():
chars.append(achar)
else:
chars.append("\\")
chars.append(achar)
return "operator%s" % ''.join(chars)
def _validate_codes_of_cpp_method(self, cpp_method):
    """Scan the body of ``cpp_method`` in the raw source text and validate
    every assignment target found there against the "variant" rule.

    Locates the method name from its reported line number, finds the
    parameter list, then brace-matches the body while skipping // and
    /* */ comments.  Relies on ``self._source_lines`` having been filled
    before this is called.
    """
    start_line_index = cpp_method["line_number"] - 1
    # Extract cpp method codes
    rest_lines = self._source_lines[start_line_index:]
    content = '\n'.join(rest_lines)
    code_lines = []  # NOTE(review): assigned but never used below.
    name_re = self._get_cpp_method_re(cpp_method["name"])
    name_start_pos = re.search(name_re, content).span()[0]
    parameters_start_pos = content.index('(', name_start_pos)
    parameters_stop_pos = content.index(')', parameters_start_pos)
    stack = []
    try:
        # First '{' after the parameter list opens the body.
        i = content.index('{', parameters_stop_pos + 1)
    except ValueError:
        # No body at all (pure declaration) - nothing to validate.
        return;
    try:
        # A ';' before the '{' means this is only a declaration; the '{'
        # belongs to something else further down.
        semicolonPos = content.index(';', parameters_stop_pos + 1)
        if semicolonPos <= i:
            return;
    except ValueError:
        # Not found a semicolon, just ignored.
        pass
    # NOTE(review): computed but never used.
    skipped_lines = cpp_method["line_number"] + content.count("\n", 0, i) - 2
    stack.append(i)
    i += 1
    first_i = i
    last_i = 0
    is_finding_block_comment = False
    is_finding_single_comment = False
    # Brace-match until the opening '{' is balanced, ignoring braces that
    # appear inside comments.  NOTE(review): content[i + 1] can index one
    # past the end for a '/' or '*' as the very last character - assumes
    # well-formed input; confirm against caller.
    while (len(stack) > 0) and (i < len(content)):
        c = content[i]
        if is_finding_block_comment:
            # If finding block comment, then skip all other searching
            if (c == "*") and (content[i + 1] == "/"):
                is_finding_block_comment = False
        elif (c == "/") and (content[i + 1] == "*"):
            is_finding_block_comment = True
        elif is_finding_single_comment:
            # If finding single comment, then skip all other searching
            if c == "\n":
                is_finding_single_comment = False
        elif (c == "/") and (content[i + 1] == "/"):
            is_finding_single_comment = True
        elif c == "{":
            stack.append(i)
        elif c == "}":
            last_i = i
            del stack[len(stack) - 1]
        i += 1
    if len(stack) <= 0:
        # Body fully extracted: validate every "<type> <name> = ..."
        # assignment target as a local variant name.  All findings are
        # reported at the method's line number, not the assignment's.
        content = content[first_i:last_i]
        founded = re.findall(r"\w+\W+(\w+)\s*=[^=]", content)
        for aname in founded:
            avariant = dict()
            avariant["name"] = aname
            avariant["line_number"] = cpp_method["line_number"]
            self._validate_name(avariant, "variant")
def _validate_name(self, cpp_object, name_re):
    """Check one name against the configured rule ``name_re``.

    :param cpp_object: a plain name string, or a dict with "name",
        "line_number" and optionally "debug" (raw declaration text).
    :param str name_re: rule key looked up via ``_get_config()``.
    :raises SyntaxError: when the name does not match the rule's regex;
        the message is formatted "file:line:error: ...".
    Returns silently (no check) for names that cannot be validated:
    template-mangled names without a recoverable identifier, varargs
    ("...") and empty names.
    """
    cpp_object_name = ""
    if isinstance(cpp_object, six.string_types):
        # Normalize a bare string into the dict shape used below.
        cpp_object_name = cpp_object
        cpp_object = dict()
        cpp_object["name"] = cpp_object_name
        cpp_object["line_number"] = -1
    elif "name" in cpp_object:
        cpp_object_name = cpp_object["name"]
        # Template names ("Foo<T>") are unreliable; recover the trailing
        # identifier from the raw declaration text instead.
        if ('<' in cpp_object_name) and ("debug" in cpp_object):
            matched = re.match(r".*?(\w+)\W+$", cpp_object["debug"])
            if matched is not None:
                cpp_object_name = matched.group(1)
            else:
                return

    # Parse union like names
    splitted = cpp_object_name.split()
    if len(splitted) > 1:
        cpp_object_name = splitted[-1]

    if '...' in cpp_object_name:
        # Does not have valid name, we must not check it .
        return

    if len(cpp_object_name) <= 0:
        # Does not have valid name, we must not check it .
        return

    matched = re.match(self._get_config(name_re)["re"], cpp_object_name)
    if matched is None:
        filename = os.path.basename(self.__args.file_path)
        error_message = self._get_config(name_re)["error"]
        if len(error_message) > 0:
            # Prefix the rule name, e.g. "class_method" -> "Class Method".
            error_message = "%s %s" % (
                ' '.join([rule_name.capitalize() for rule_name in name_re.split("_")]),
                error_message)
        if self.__args.debug:
            traceback.print_stack()
        raise SyntaxError("%s:%s:error: Name '%s' isn't matched with rule : %s! %s" % (
            filename,
            cpp_object["line_number"],
            cpp_object_name,
            name_re,
            error_message))
def _get_class_realname(self, class_name):
return re.match(r"(\w+).*", class_name).group(1)
def _validate_cpp_object(self, cpp_object):
    """Dispatch one parsed C++ entity to the appropriate naming rule.

    Handles our own record types (CppDefine, CppNamespace, CppFileName)
    and the CppHeaderParser node types (CppClass, CppStruct, CppEnum,
    CppVariable, CppMethod, CppUnion).  Recurses into nested structs and
    enums of a class.  Raises SyntaxError (from ``_validate_name``) on
    the first violation.
    """
    cpp_object_type = type(cpp_object)
    if cpp_object_type == CppDefine:
        if len(cpp_object["parameters"]) <= 0:
            # Normal Define Name
            self._validate_name(cpp_object, "define")
        else:
            # Function Liked Define Name
            self._validate_name(cpp_object, "define_function")
            for aparameter in cpp_object["parameters"]:
                self._validate_name(aparameter, "define_function_argument")
    elif cpp_object_type == CppHeaderParser.CppClass:
        # Struct and class share logic but use separate rule keys.
        if "struct" in cpp_object["declaration_method"]:
            class_re = "struct"
            class_method_re = "struct_method"
            class_method_argument_re = "struct_method_argument"
            class_variant_re = "struct_variant"
        else:
            class_re = "class"
            class_method_re = "class_method"
            class_method_argument_re = "class_method_argument"
            class_variant_re = "class_variant"
        self._validate_name(cpp_object, class_re)
        for amethod in cpp_object.get_all_methods():
            # A member that is really a function-pointer typedef is
            # validated under the "typedef" rule instead (else branch).
            matched = re.match(r".*typedef\W[^\(]*\([^\)]*\W(\w+)\W.*\).*", amethod["debug"])
            if matched is None:
                self._validate_codes_of_cpp_method(amethod)
                if not self._is_special_method(amethod):
                    # Constructors/destructors share the class name and
                    # are exempt from the method naming rule.
                    if ((amethod["name"] != self._get_class_realname(cpp_object["name"]))
                            and (not amethod.get("constructor", False))
                            and (not amethod.get("destructor", False))):
                        try:
                            self._validate_name(amethod, class_method_re)
                        except SyntaxError:
                            # Fallback: a method may legitimately follow
                            # the macro-style rule (e.g. generated code);
                            # re-raise only if both rules fail.
                            is_need_reraise = True
                            try:
                                self._validate_name(amethod, "define_function")
                                is_need_reraise = False
                            except SyntaxError:
                                pass
                            if is_need_reraise:
                                raise
                    for aparameter in amethod["parameters"]:
                        an_object = dict()
                        an_object["line_number"] = aparameter["line_number"]
                        if (aparameter["type"].endswith("::*")
                                and (")" in aparameter["name"])):
                            # Member-function-pointer parameter: the real
                            # identifier leads the "name" text.
                            an_object["name"] = re.match(r"(\w+).*", aparameter["name"]).group(1)
                            try:
                                self._validate_name(an_object,
                                                    class_method_re)
                            except SyntaxError:
                                is_need_reraise = True
                                try:
                                    # NOTE(review): validates `amethod`
                                    # here, not `an_object` - looks like a
                                    # copy/paste slip; confirm intent.
                                    self._validate_name(amethod, "define_function")
                                    is_need_reraise = False
                                except SyntaxError:
                                    pass
                                if is_need_reraise:
                                    raise
                        else:
                            an_object["name"] = self._get_argument_name(aparameter)
                            self._validate_name(an_object,
                                                class_method_argument_re)
            else:
                self._validate_name(
                    {"name": matched.group(1), "line_number": amethod["line_number"]},
                    "typedef")
        for access_specifier in CppHeaderParser.supportedAccessSpecifier:
            for amember in cpp_object["properties"][access_specifier]:
                # Forward declarations ("class", "struct", "union" as the
                # member type) are not real members - skip them.
                is_skip_validate = False
                if ("type" in amember) and (amember["type"] is not None):
                    internal_predeclares = ["class", "struct", "union"]
                    if amember["type"] in internal_predeclares:
                        is_skip_validate = True
                if not is_skip_validate:
                    if amember["static"]:
                        self._validate_name(amember, "static_variant")
                    else:
                        self._validate_name(amember, class_variant_re)
            for amember in cpp_object["structs"][access_specifier]:
                self._validate_cpp_object(amember)
            for amember in cpp_object["enums"][access_specifier]:
                self._validate_cpp_object(amember)
    elif cpp_object_type == CppHeaderParser.CppStruct:
        self._validate_name(cpp_object, "struct")
    elif cpp_object_type == CppHeaderParser.CppEnum:
        self._validate_name(cpp_object, "enum")
        line_number = -1
        if "line_number" in cpp_object:
            line_number = cpp_object["line_number"]
        for amember in cpp_object["values"]:
            # Use parent line number if enum value does not have it's line
            # number
            if "line_number" not in amember:
                amember["line_number"] = line_number
            self._validate_name(amember, "enum_value")
    elif cpp_object_type == CppHeaderParser.CppVariable:
        if cpp_object["type"] != "return":
            if cpp_object["static"]:
                self._validate_name(cpp_object, "static_variant")
            elif cpp_object["type"] not in ["class", "struct", "union"]:
                if not cpp_object["type"].endswith("::"):
                    # Don't parse variable that implemented outside of
                    # template class. It's already be parsed when parsing
                    # the class.
                    self._validate_name(cpp_object, "global_variant")
    elif cpp_object_type == CppHeaderParser.CppMethod:
        # Exclude "main" function while parsing global function
        # (while True / break is used as a structured goto).
        while True:
            # FIXME: Parse special case : "struct RArraySize <T ( & ) [ N ]> {"
            if "debug" in cpp_object:
                if re.match(r".*\>\s*{$", cpp_object["debug"]) is not None:
                    break
            self._validate_codes_of_cpp_method(cpp_object)
            if cpp_object["name"] == "main":
                break
            if self._is_special_method(cpp_object):
                break
            if (cpp_object["class"] is None) or (len(cpp_object["class"]) <= 0):
                # Free function (no owning class recorded).
                if ">" in cpp_object["name"]:
                    # Template method implemented outside its class:
                    # recover "Class::name<...>" from the raw text.
                    regex = r"^[^<:]*?(?:(\w+)::)?(\w+)\s*<"
                    matched = re.search(regex, cpp_object["debug"])
                    if matched.group(1) is not None:
                        cpp_object["class"] = matched.group(1)
                        cpp_object["name"] = matched.group(2)
                        self._validate_name(cpp_object, "class_method")
                elif len(cpp_object["returns"]) > 0:
                    # If a function does not have return value(at least
                    # "void"), it maybe macro invokes.
                    # FIXME: We just ignored this situation:
                    # Code Snippets: static RSignal<void(int)> sReceived;
                    if "<" not in cpp_object["name"]:
                        self._validate_name(cpp_object, "function")
                break
            if self._get_class_realname(cpp_object["class"]) == cpp_object["name"]:
                # Constructor / Destructor will the same with class name
                break
            self._validate_name(cpp_object, "class_method")
            break
    elif cpp_object_type == CppHeaderParser.CppUnion:
        self._validate_name(cpp_object, "union")
    elif cpp_object_type == CppNamespace:
        self._validate_name(cpp_object, "namespace")
    elif cpp_object_type == CppFileName:
        self._validate_name(cpp_object, "filename")
def exec_(self):
try:
with open(self.__args.file_path, "r") as source_file:
# For later parse by _validate_codes_of_cpp_method()
self._source_lines = source_file.readlines()
parsed_info = CppHeaderParser.CppHeader(self.__args.file_path)
# Verify File Names
filename = os.path.basename(self.__args.file_path)
cpp_object = CppFileName()
cpp_object["name"] = filename
self._validate_cpp_object(cpp_object)
# Verify Define Names
for define_text in parsed_info.defines:
| |
flag
for block in self.blocks.values():
block.behave_as_preset = val
def apply_preset(self, preset, voxel=(0,0,0), block_run=False, presetfile=''):
    '''
    Given a 'preset' dataset object (an actual dataset object that may
    or may not have data in it depending on whether it was saved as a
    preset file or dataset file), we extract the parameter settings for:

    - the user_prior object
    - each processing block and apply them to the current dataset
    - we ensure that the data dimensionality between blocks is properly
      maintained (e.g. zerofilling).
    - Finally, we run each block.process() method

    Things to know about Presets

    Each object in the presets blocks list (raw, prep, spectral, fit) is
    compared to the object class name in this dataset. If the names match
    the Settings object is copied over. If the class names do not match,
    no settings are copied over.

    The 'spectral' object has a few extra values copied over, like the
    phases and shift_frequencies. Both the 'spectral' and 'fit' objects
    also need some run-time values recalculated after the settings are
    copied over.

    NOTE(review): the `voxel` and `block_run` parameters are not used in
    this body - confirm whether callers rely on them.
    '''
    # Settings are copied only when this dataset's block class matches
    # the preset's block class exactly.
    if self.blocks['raw'].__class__.__name__ == preset.blocks['raw'].__class__.__name__:
        self.blocks['raw'].set = copy.deepcopy(preset.blocks['raw'].set)

    if self.blocks['prep'].__class__.__name__ == preset.blocks['prep'].__class__.__name__:
        # Identity prep blocks carry no settings worth copying.
        if not preset.blocks['prep'].is_identity:
            block = self.blocks['prep']
            block.set = copy.deepcopy(preset.blocks['prep'].set)
            block._reset_dimensional_data(self)

    if self.blocks['spectral'].__class__.__name__ == preset.blocks['spectral'].__class__.__name__:
        block = self.blocks['spectral']
        # We do a deep copy of all the settings from the preset dataset
        # into the current dataset, and check the result array dimensions
        block.set = copy.deepcopy(preset.blocks['spectral'].set)
        block._phase_0 = copy.deepcopy(preset.blocks['spectral']._phase_0)
        block._phase_1 = copy.deepcopy(preset.blocks['spectral']._phase_1)
        block._frequency_shift = copy.deepcopy(preset.blocks['spectral']._frequency_shift)
        block.frequency_shift_lock = preset.blocks['spectral'].frequency_shift_lock
        block.phase_lock = preset.blocks['spectral'].phase_lock
        block.phase_1_lock_at_zero = preset.blocks['spectral'].phase_1_lock_at_zero
        # block.left_shift_correct = preset.blocks['spectral'].left_shift_correct
        block._reset_dimensional_data(self)

    if not preset.blocks['fit'].is_identity:
        # create fit object if it does not exist
        if isinstance(preset.blocks['fit'], block_fit_voigt.BlockFitVoigt):
            self.add_voigt(force=True)
        elif isinstance(preset.blocks['fit'], block_fit_giso.BlockFitGiso):
            self.add_giso(force=True)

        # copy preset values into fit block and recalc as needed
        block = self.blocks['fit']
        block.set = copy.deepcopy(preset.blocks['fit'].set)
        prior = block.set.prior
        # The basis set depends on this dataset's acquisition parameters,
        # so it must be recalculated rather than copied.
        if isinstance(preset.blocks['fit'], block_fit_voigt.BlockFitVoigt):
            prior.calculate_full_basis_set(block.set.prior_ppm_start, block.set.prior_ppm_end, self)
        elif isinstance(preset.blocks['fit'], block_fit_giso.BlockFitGiso):
            prior.calculate_full_basis_set(None, None, self)
        block._reset_dimensional_data(self)

    if not preset.blocks['quant'].is_identity:
        # create 'block_quant_watref' object if it does not exist
        self.add_watref(force=True)
        # copy preset values into watref block
        block = self.blocks['quant']
        block.set = copy.deepcopy(preset.blocks['quant'].set)

    self.user_prior = copy.deepcopy(preset.user_prior)
    self.user_prior.basis.update(self)  # parent dataset may have different points/dwell

    self.preset_filename = presetfile
def get_combo_results(self, voxel, quant=False):
    """Return ``[name, value]`` pairs for metabolite combinations.

    For each configured combination "A+B" whose two metabolites are both
    in ``self.prior_list_unique``, the combined value is the sum of the
    two individual results at ``voxel``.  Reads fit results by default,
    or water-reference quantitation results when ``quant`` is True.
    Returns None when the block is identity or no combination applies.
    """
    source = self.blocks["quant"] if quant else self.blocks["fit"]
    if source.is_identity:
        return None

    x, y, z = voxel[0], voxel[1], voxel[2]
    if quant:
        res = source.watref_results[:, x, y, z]
    else:
        res = source.fit_results[:, x, y, z]

    metabolites = self.prior_list_unique
    combos = []
    for combo_name in constants.FitPriorCalculateCombinations.choices:
        met1, met2 = combo_name.split('+')
        if met1 in metabolites and met2 in metabolites:
            total = res[metabolites.index(met1)] + res[metabolites.index(met2)]
            combos.append([combo_name, total])

    return combos if combos else None
def fit_results_as_html(self, voxel, lw=0.0, lwmin=0.0, lwmax=0.0,
                        data_source="", image=None):
    """
    Given a voxel, linewidth params, and a data source (often a filename),
    returns HTML-formatted results for that voxel. The HTML is appropriate
    for the wx.Html control (which understand limited HTML) as well as for
    writing to a file.

    If the image param is populated, it should be a tuple of
    (mime_type, image_data). The former should be a string like "image/png".
    The latter should be base64-encoded image data.
    """
    fit = self.blocks["fit"]

    # First we assemble the data we need.
    nmet = len(fit.set.prior_list)
    names = fit.set.prior_list
    # Result arrays are indexed [parameter, x, y, z]; slice out this voxel.
    res = fit.fit_results[:,voxel[0],voxel[1],voxel[2]]
    crao = fit.cramer_rao[:,voxel[0],voxel[1],voxel[2]]
    conf = fit.confidence[:,voxel[0],voxel[1],voxel[2]]
    stats = fit.fit_stats[:,voxel[0],voxel[1],voxel[2]]

    # both cramer-rao and confidence intervals may be off/on
    if len(crao) != len(res):
        crao = res * 0
    if len(conf) != len(res):
        conf = res * 0

    # Layout of `res` (as used below): [0:nmet] areas, [nmet:2*nmet] ppms,
    # then Ta, Tb, Phase0, Phase1, and (with a macromolecule basis)
    # MMol area / MMol ppm at 2*nmet+4 / 2*nmet+5.
    table1 = [['Area Results', 'Area', ' CrRao[%]', ' CnfInt[%]']]
    for i, item in enumerate(names):
        table1.append([item, res[i], crao[i], conf[i]])
    if fit.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
        table1.append(['MMol', res[nmet*2+4], 0.0, 0.0])
    if fit.set.prior_calculate_combinations:
        combo = self.get_combo_results(voxel)
        if combo is not None:
            for item in combo:
                table1.append([item[0], item[1], 0.0, 0.0])
    table1 = _pretty_space_table(table1, places=4)

    table2 = [['PPM Results', 'PPM', ' CrRao[ppm]', ' CnfInt[ppm]']]
    for i,item in enumerate(names):
        table2.append([item, res[i+nmet], crao[i+nmet], conf[i+nmet]])
    if fit.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
        table2.append(['MMol', res[nmet*2+5], 0.0, 0.0])
    table2 = _pretty_space_table(table2, places=4)

    table3 = [['Global Results', 'Value', ' CrRao[delta]', ' CnfInt[%]']]
    table3.append(['Ta', res[nmet*2+0], crao[nmet*2+0], conf[nmet*2+0] ])
    table3.append(['Tb', res[nmet*2+1], crao[nmet*2+1], conf[nmet*2+1] ])
    table3.append(['Phase0', res[nmet*2+2], crao[nmet*2+2], conf[nmet*2+2] ])
    table3.append(['Phase1', res[nmet*2+3], crao[nmet*2+3], conf[nmet*2+3] ])
    table3 = _pretty_space_table(table3, places=5)

    table4 = [['Calculation Results', ' Value', ' Max LW', ' Min LW']]
    table4.append(['Linewidth', lw, lwmax, lwmin])
    table4.append(['ChiSquare', stats[0], ' ', ' '])
    table4.append(['Weighted ChiSquare', stats[1], ' ', ' '])
    matherr = str(stats[2] != 0)
    table4.append(['Math Finite Error', matherr, ' ', ' '])
    table4 = _pretty_space_table(table4, places=5)

    # Now that the data is assembled, we HTML-ify it.
    html = ElementTree.Element("html")
    head = ElementTree.SubElement(html, "head")
    style = util_xml.TextSubElement(head, "style", _CSS)
    style.set("type", "text/css")
    body = ElementTree.SubElement(html, "body")
    util_xml.TextSubElement(body, "h2", "Analysis Voigt Results")
    e_div = ElementTree.SubElement(body, "div")
    if data_source:
        e_tt = util_xml.TextSubElement(e_div, "tt", "Data Source: ")
        util_xml.TextSubElement(e_tt, "small", data_source)
        ElementTree.SubElement(e_div, "br")
    # Display voxel coordinates 1-based for humans.
    voxel = [x + 1 for x in voxel]
    util_xml.TextSubElement(e_div, "tt", 'Voxel: (%d,%d,%d)' % tuple(voxel))
    if image:
        # If there's image data, we assume that this will be written to
        # a file for display in a proper browser, so we can use slightly
        # fancier HTML.
        mime_type, image_data = image
        e_div = ElementTree.SubElement(body, "div",
                                       { "id" : "image",
                                         "style" : "float: right; width: 50%",
                                       }
                                       )
        e_div.append(ElementTree.Comment(_IE_INCAPABLE_MSG))
        # In order to keep the HTML + image in one file, we use the
        # little-known "data" scheme.
        # ref: http://en.wikipedia.org/wiki/Data_URI_scheme
        src = "data:%s;base64,%s" % (mime_type, image_data)
        ElementTree.SubElement(e_div, "img",
                               {"style" : "width: 90%",
                                "src" : src
                               })
    e_div = ElementTree.SubElement(body, "div", {"id" : "table"})
    tables = (table1, table2, table3, table4)
    for table in tables:
        title = table[0]
        e_pre = ElementTree.SubElement(e_div, "pre")
        e_u = ElementTree.SubElement(e_pre, "u")
        util_xml.TextSubElement(e_u, "b", title)
        e = util_xml.TextSubElement(e_div, "pre", '\n'.join(table[1:]))
    # Keep in mind that HTML is whitespace sensitive, and if you call
    # util_xml.indent() on the HTML, it will change the formatting.
    return ElementTree.tostring(html)
def fit_results_as_csv(self, voxel, lw=0.0, lwmin=0.0, lwmax=0.0, source="", dsetname="", nzfill=2, decor1=False):
    """
    Given a voxel, linewidth params, and a data source (often a filename),
    returns CSV-formatted (comma separated variables) values and header
    string descriptions for each column, as the tuple (values, headers).

    if decor1 is set, metabolite abbreviations are added into the CrRao
    and CnfInt header strings.
    """
    fit = self.blocks["fit"]
    hdr = []
    val = []

    hdr.append('Filename')
    source = source.replace(",","_")        # some users have commas in filenames
    val.append(source)

    hdr.append('Dataset Name')
    dsetname = dsetname.replace(",","_")    # some users have commas in filenames
    val.append(dsetname)

    hdr.append('Voxel')
    val.append(str(voxel[0]).zfill(nzfill)+' '+str(voxel[1]).zfill(nzfill)+' '+str(voxel[2]).zfill(nzfill))

    nmet = len(fit.set.prior_list)
    names = fit.set.prior_list
    # Result arrays are indexed [parameter, x, y, z]; slice this voxel.
    # Layout of `res`: [0:nmet] areas, [nmet:2*nmet] ppms, then Ta, Tb,
    # Phase0, Phase1, and (with a macromolecule basis) MMol area / MMol
    # ppm at 2*nmet+4 / 2*nmet+5.
    res = fit.fit_results[:,voxel[0],voxel[1],voxel[2]]
    crao = fit.cramer_rao[ :,voxel[0],voxel[1],voxel[2]]
    conf = fit.confidence[ :,voxel[0],voxel[1],voxel[2]]
    stats = fit.fit_stats[ :,voxel[0],voxel[1],voxel[2]]

    # both cramer-rao and confidence intervals may be off/on
    if len(crao) != len(res):
        crao = res * 0
    if len(conf) != len(res):
        conf = res * 0

    # --- Area results ---
    for i, item in enumerate(names):
        addstr = '' if not decor1 else ' '+item
        hdr.append('Area '+item)
        hdr.append('CrRao[%]'+addstr)
        hdr.append('CnfInt[%]'+addstr)
        val.append(str(res[i]))
        val.append(str(crao[i]))
        val.append(str(conf[i]))

    if fit.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
        addstr = '' if not decor1 else ' MMol'
        hdr.append('Area MMol')
        hdr.append('CrRao[%]'+addstr)
        hdr.append('CnfInt[%]'+addstr)
        val.append(str(res[nmet*2+4]))
        val.append(str(0.0))
        val.append(str(0.0))

    if fit.set.prior_calculate_combinations:
        combo = self.get_combo_results(voxel)
        if combo is not None:
            for item in combo:
                addstr = '' if not decor1 else ' '+item[0]
                hdr.append('Area '+item[0])
                hdr.append('CrRao[%]'+addstr)
                hdr.append('CnfInt[%]'+addstr)
                val.append(str(item[1]))
                val.append(str(0.0))
                val.append(str(0.0))

    # --- PPM results ---
    for i,item in enumerate(names):
        addstr = '' if not decor1 else ' '+item
        hdr.append('PPM '+item)
        hdr.append('CrRao[%]'+addstr)
        hdr.append('CnfInt[%]'+addstr)
        val.append(str(res[i+nmet]))
        val.append(str(crao[i+nmet]))
        val.append(str(conf[i+nmet]))

    if fit.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
        addstr = '' if not decor1 else ' MMol'
        hdr.append('PPM MMol')
        hdr.append('CrRao[%]'+addstr)
        hdr.append('CnfInt[%]'+addstr)
        # BUG FIX: the MMol *ppm* lives at index nmet*2+5; the original
        # repeated the MMol area index (nmet*2+4) here, which disagrees
        # with fit_results_as_html's PPM table.
        val.append(str(res[nmet*2+5]))
        val.append(str(0.0))
        val.append(str(0.0))

    # --- Global results ---
    hdr.append('Ta ')
    hdr.append('CrRao[%]')
    hdr.append('CnfInt[%]')
    val.append(str(res[nmet*2+0]))
    val.append(str(crao[nmet*2+0]))
    val.append(str(conf[nmet*2+0]))

    hdr.append('Tb ')
    hdr.append('CrRao[%]')
    hdr.append('CnfInt[%]')
    val.append(str(res[nmet*2+1]))
    val.append(str(crao[nmet*2+1]))
    val.append(str(conf[nmet*2+1]))

    hdr.append('Phase0 ')
    hdr.append('CrRao[%]')
    hdr.append('CnfInt[%]')
    val.append(str(res[nmet*2+2]))
    val.append(str(crao[nmet*2+2]))
    val.append(str(conf[nmet*2+2]))

    hdr.append('Phase1 ')
    hdr.append('CrRao[%]')
    hdr.append('CnfInt[%]')
    val.append(str(res[nmet*2+3]))
    val.append(str(crao[nmet*2+3]))
    val.append(str(conf[nmet*2+3]))

    # --- Calculation results ---
    hdr.append('Linewidth ')
    hdr.append('Max LW')
    hdr.append('Min LW')
    val.append(str(lw))
    val.append(str(lwmax))
    val.append(str(lwmin))

    hdr.append('ChiSquare ')
    val.append(str(stats[0]))

    hdr.append('WtChiSquare ')
    val.append(str(stats[1]))

    hdr.append('Math Finite Error ')
    matherr = str(stats[2] != 0)
    val.append(str(matherr))

    return val, hdr
def fit_results_in_table(self, voxel, lw=0.0, lwmin=0.0, lwmax=0.0,
nozeros=False, noppm=False, fixphase=False,
no_conf=False, places=5, pad=2,
short_form=False, format_float=False):
"""
Given a voxel, linewidth params, and a data source (often a filename),
returns formatted results for that voxel.
"""
fit = self.blocks["fit"]
# First we assemble the data | |
# Source repository: asanchezyali/optrees (extraction artifact converted to comment)
from __future__ import annotations
import numpy as np
class Vertex:
    """A graph vertex holding its neighbors and incident edges.

    ``edges`` and ``loops`` are keyed by *edge* label (e.g. "a-b"),
    ``neighbors`` by the neighboring vertex's label.
    """

    def __init__(self, label: str):
        self.__label = label
        self.__neighbors = dict()   # neighbor label -> Vertex
        self.__edges = dict()       # edge label -> Edge
        self.__loops = dict()       # edge label -> Edge (self-edges only)

    def __del__(self):
        """
        Delete the vertex.
        """
        print(f'Vertex {self.label} is deleted.')

    @property
    def label(self) -> str:
        """
        Returns the label of the vertex.
        """
        return self.__label

    @property
    def neighbors(self) -> dict:
        """
        Return the neighbors of the vertex.
        """
        return self.__neighbors

    def add_neighbor(self, vertex: Vertex, weight: float = None):
        """
        Add a neighbor to the vertex, creating the connecting edge.

        :raises ValueError: when `vertex` is this vertex itself.
        """
        if self is vertex:
            raise ValueError('It is the same vertex.')
        self.__neighbors[vertex.label] = vertex
        # BUG FIX: Edge.__init__ already registers the new edge with both
        # endpoints, so the original extra self.add_edge(edge) call always
        # raised "This edge or loop already exists".
        edge = Edge(label='{}-{}'.format(self.label, vertex.label),
                    lvertex=self, rvertex=vertex)
        edge.weight = weight

    def delete_neighbor(self, vertex_label: str):
        """
        Delete a neighbor from the vertex, along with all incident edges.

        :raises KeyError: when no neighbor with that label exists.
        """
        try:
            del self.__neighbors[vertex_label]
        except KeyError:
            raise KeyError(f'Vertex {vertex_label} does not exist.')
        # BUG FIX: iterate over a snapshot - deleting from a dict while
        # iterating its live view raises RuntimeError; and only loops are
        # present in __loops, so use pop(..., None) instead of del.
        for edge in list(self.__edges.values()):
            if edge.is_vertex(vertex_label):
                del self.__edges[edge.label]
                self.__loops.pop(edge.label, None)

    def neighbor(self, label: str) -> Vertex:
        """
        Return the neighbor vertex of the vertex (None if absent).
        """
        return self.neighbors.get(label)

    def remove_edge(self, edge: Edge):
        """
        Remove an edge from the vertex.

        :raises ValueError: when the edge is not registered here.
        """
        if edge.label in self.__edges.keys():
            del self.__edges[edge.label]
        else:
            raise ValueError('This edge does not exist.')

    def get_weight(self, vertex) -> float:
        """
        Return the weight of the edge joining this vertex and `vertex`.

        :param vertex: a Vertex or a vertex label string.
        :returns: the edge weight, or None when no such edge exists.
        """
        label = vertex if isinstance(vertex, str) else vertex.label
        # BUG FIX: __edges is keyed by *edge* label ("a-b"), not by the
        # neighbor's label, so the original dict lookup always missed.
        for edge in self.__edges.values():
            if edge.is_vertex(label):
                return edge.weight
        return None

    @property
    def edges(self) -> dict:
        """
        Return the edges of the vertex.
        """
        return self.__edges

    def add_edge(self, edge: Edge):
        """
        Add an edge to the vertex; loops are indexed in both tables.

        Re-adding the same loop is a silent no-op (Edge.__init__ calls
        this once per endpoint, and both endpoints of a loop coincide).

        :raises ValueError: when the edge/loop is already registered.
        """
        if not edge.loop and edge.label not in self.__edges.keys() and edge not in self.__edges.values():
            self.__edges[edge.label] = edge
        elif edge.loop and edge.label not in self.__edges.keys() and edge not in self.__edges.values():
            self.__loops[edge.label] = edge
            self.__edges[edge.label] = edge
        elif edge.loop and edge.label in self.__edges.keys() and edge in self.__edges.values():
            pass
        else:
            raise ValueError('This edge or loop already exists')

    def edge(self, label: str) -> str:
        """
        Return the edge of the vertex (None if absent).
        """
        return self.edges.get(label)

    @property
    def loops(self) -> dict:
        """
        Return the loops of the vertex.
        """
        return self.__loops

    def loop(self, label: str) -> str:
        """
        Return the loop of the vertex (None if absent).
        """
        return self.loops.get(label)

    def is_isolated(self) -> bool:
        """
        Return True if the vertex has no neighbors.
        """
        return len(self.neighbors) == 0
class Edge:
    """An optionally weighted, optionally oriented edge between two vertices.

    On construction the edge registers itself with both endpoints via
    ``Vertex.add_edge``.  ``orientation`` may be 'lr' (left-to-right),
    'rl' (right-to-left) or anything else for an undirected edge.
    """

    def __init__(self, label: str, lvertex: Vertex, rvertex: Vertex, weight: float = None, orientation: str = None):
        self.__label = label
        self.__lvertex = lvertex
        self.__rvertex = rvertex
        self.__weight = weight
        self.__loop = lvertex == rvertex
        if orientation == 'lr':
            self.__start, self.__end = lvertex, rvertex
        elif orientation == 'rl':
            self.__start, self.__end = rvertex, lvertex
        else:
            # Undirected (or unrecognized orientation): no start/end.
            self.__start = None
            self.__end = None
        lvertex.add_edge(self)
        rvertex.add_edge(self)

    def __del__(self):
        """Announce deletion of the edge."""
        print(f'Edge {self.label} is deleted.')

    @property
    def label(self) -> str:
        """The label of the edge."""
        return self.__label

    @property
    def lvertex(self) -> Vertex:
        """The left vertex of the edge."""
        return self.__lvertex

    @property
    def rvertex(self) -> Vertex:
        """The right vertex of the edge."""
        return self.__rvertex

    @property
    def weight(self) -> float:
        """The weight of the edge (None when unweighted)."""
        return self.__weight

    @weight.setter
    def weight(self, weight: float):
        """Set the weight of the edge."""
        self.__weight = weight

    @property
    def start(self) -> Vertex:
        """The start vertex of an oriented edge (None when undirected)."""
        return self.__start

    @property
    def end(self) -> Vertex:
        """The end vertex of an oriented edge (None when undirected)."""
        return self.__end

    @property
    def loop(self) -> bool:
        """True when both endpoints are the same vertex."""
        return self.__loop

    def is_vertex(self, vertex_label: str) -> bool:
        """True when `vertex_label` names either endpoint of this edge."""
        return vertex_label in (self.lvertex.label, self.rvertex.label)
class Graph:
    """A labeled graph; vertices and edges are stored keyed by label."""

    def __init__(self, name: str):
        self.__name = name
        self.__vertices = dict()   # vertex label -> Vertex
        self.__edges = dict()      # edge label -> Edge

    @property
    def name(self) -> str:
        """
        Return the name of the graph.
        """
        return self.__name

    @property
    def vertices(self) -> dict:
        """
        Return the vertices of the graph.
        """
        return self.__vertices

    @property
    def edges(self) -> dict:
        """
        Return the edges of the graph.
        """
        return self.__edges

    def add_edge(self, edge: Edge):
        """
        Add an edge (and its two endpoints) to the graph.

        :raises ValueError: when an edge with that label already exists.
        """
        if edge.label in self.__edges.keys():
            raise ValueError('This edge already exists')
        self.__edges[edge.label] = edge
        self.__vertices[edge.lvertex.label] = edge.lvertex
        self.__vertices[edge.rvertex.label] = edge.rvertex

    def add_vertex(self, vertex: Vertex):
        """
        Add a vertex to the graph (overwrites any same-labeled vertex).
        """
        self.__vertices[vertex.label] = vertex

    def add_edges(self, edges: list[Edge]):
        """
        Add a list of edges to the graph.
        """
        for edge in edges:
            self.add_edge(edge)

    def build(self, edges: list[(str, str, str, float, str)]):
        """
        Build a graph from tuples (label, lvertex, rvertex[, weight[, orientation]]).
        """
        label, lvertex, rvertex, weight, orientation = range(5)  # ids of the elements in the list
        for edge in edges:
            if edge[label] in self.__edges.keys():
                raise ValueError('This edge already exists')
            left_vertex = Vertex(edge[lvertex])
            right_vertex = Vertex(edge[rvertex])
            edge = Edge(
                label=edge[label],
                lvertex=left_vertex,
                rvertex=right_vertex,
                weight=edge[weight] if len(edge) > 3 else None,
                orientation=edge[orientation] if len(edge) > 4 else None
            )
            self.add_edge(edge)
            self.add_vertex(left_vertex)
            self.add_vertex(right_vertex)

    def is_vertex(self, vertex_label: str) -> bool:
        """
        Return True if the vertex is in the graph.
        """
        return vertex_label in self.__vertices.keys()

    def is_edge(self, edge_label: str) -> bool:
        """
        Return True if the edge is in the graph.
        """
        return edge_label in self.__edges.keys()

    def get_vertex(self, vertex_label: str) -> Vertex:
        """
        Return the vertex of the graph (None if absent).
        """
        return self.__vertices.get(vertex_label)

    def get_edge(self, edge_label: str) -> Edge:
        """
        Return the edge of the graph (None if absent).
        """
        return self.__edges.get(edge_label)

    def remove_vertex(self, vertex_label: str):
        """
        Remove the vertex and ALL its incident edges from the graph.

        :raises ValueError: when the vertex is not in the graph.
        """
        if not self.is_vertex(vertex_label):
            raise ValueError('This vertex does not exist')
        del self.__vertices[vertex_label]
        # BUG FIX: the original deleted entries from self.__edges while
        # iterating it (RuntimeError) and stopped at the first incident
        # edge; a vertex may have several.
        incident = [e for e in self.__edges.values() if e.is_vertex(vertex_label)]
        for edge in incident:
            del self.__edges[edge.label]
            edge.lvertex.remove_edge(edge)
            edge.rvertex.remove_edge(edge)

    def remove_edge(self, edge_label: str):
        """
        Remove the edge from the graph and detach it from its endpoints.

        :raises ValueError: when the edge is not in the graph.
        """
        if not self.is_edge(edge_label):
            raise ValueError('This edge does not exist')
        # BUG FIX: the original deleted the entry first and then searched
        # the *remaining* edges for it, so the endpoints were never
        # detached.
        edge = self.__edges.pop(edge_label)
        edge.lvertex.remove_edge(edge)
        edge.rvertex.remove_edge(edge)

    def get_isolate_vertices(self) -> list[Vertex]:
        """
        Return the list of vertices that are isolated.
        """
        return [vertex for vertex in self.__vertices.values() if vertex.is_isolated()]

    def adjacency_matrix(self) -> tuple:
        """
        Return (matrix, labels): a 0/1 numpy adjacency matrix whose rows
        and columns follow the vertex insertion order given by `labels`.
        """
        vertices = list(self.__vertices.values())
        size = len(vertices)
        matrix = np.zeros((size, size))
        for i, row_vertex in enumerate(vertices):
            for j, col_vertex in enumerate(vertices):
                # BUG FIX: `neighbors` is a property (not callable) and
                # vertices carry `label`, not `id`.
                if col_vertex.label in row_vertex.neighbors:
                    matrix[i, j] = 1
        indices = self.vertices.keys()
        return matrix, list(indices)

    def weight_matrix(self) -> tuple:
        """
        Return (matrix, labels): a numpy matrix of edge weights, with
        numpy.inf where no edge connects the pair of vertices.
        """
        vertices = self.vertices.values()
        matrix = list()
        for row_vertex in vertices:
            row = list()
            for col_vertex in vertices:
                # BUG FIX: `neighbors` is a property, and get_weight
                # declares a Vertex parameter - pass the vertex itself
                # rather than its label.
                if col_vertex.label in row_vertex.neighbors:
                    row.append(row_vertex.get_weight(col_vertex))
                else:
                    row.append(np.inf)
            matrix.append(row)
        indices = self.vertices.keys()
        return np.matrix(matrix), list(indices)
class GraphReader:
    """
    A class for reading a graph from a file.
    """
    def __init__(self):
        self.__graph = Graph()
    def read(self, file_path: str):
        """
        Reads a graph from a file.

        Each line must hold five whitespace-separated fields:
        ``label lvertex rvertex weight orientation`` (weight parsed as float).
        """
        parsed_edges = []
        with open(file_path, 'r') as file:
            for line in file:
                fields = line.split()
                parsed_edges.append((fields[0], fields[1], fields[2], float(fields[3]), fields[4]))
        self.__graph.build(parsed_edges)
    def get_graph(self) -> Graph:
        """
        Return the graph.
        """
        return self.__graph
class GraphWriter:
    """
    A class for writing a graph to a file.
    """
    def __init__(self, graph: Graph):
        self.__graph = graph
    def write(self, file_path: str):
        """
        Writes a graph to a file, one edge per line in the format
        ``label lvertex rvertex weight orientation``.
        """
        with open(file_path, 'w') as file:
            for edge in self.__graph.edges:
                record = ' '.join((edge.label, edge.lvertex.label, edge.rvertex.label, str(edge.weight), edge.orientation))
                file.write(record + '\n')
    def get_graph(self) -> Graph:
        """
        Return the graph.
        """
        return self.__graph
class GraphUtils:
"""
A class for performing graph operations.
"""
@staticmethod
def read_graph(file_path: str) -> Graph:
"""
Reads a graph from a file.
"""
reader = GraphReader()
reader.read(file_path)
return reader.get_graph()
@staticmethod
def write_graph(graph: Graph, file_path: str):
"""
Writes a graph to a file.
"""
writer = GraphWriter()
writer.write(file_path)
return writer.get_graph()
@staticmethod
def get_isolate_vertices(graph: Graph) -> list[Vertex]:
"""
Return the list of vertices that are isolated.
"""
return graph.get_isolate_vertices()
@staticmethod
def get_weight_matrix(graph: Graph) -> tuple:
"""
| |
<reponame>SyyBach/slybot
#!/usr/bin/python3
import discord
from discord.ext import commands
import asyncio
#from concurrent.futures import CancelledError
#from json import load as jsonloadfile
#from gc import collect as gc_collect
#from os import makedirs
#from os.path import exists as path_exists, dirname
from random import randrange
#from math import exp
import logging
loc_log = logging.getLogger('main.rpg')
import re
# Regex fragment matching a signed or unsigned decimal number ("3", "-1.5", "+.25").
# All patterns below use raw strings: the original plain strings relied on
# invalid escape sequences like "\s" and "\." (DeprecationWarning today, a
# SyntaxError in future Python versions); the resulting patterns are identical.
float_str = r"[+-]?(?:[0-9]*\.)?[0-9]+"
# Optional roll modifier: '<'/'>' (threshold mode) or '<<'/'>>' (counting mode),
# an optional '=' making the comparison inclusive, then the threshold value.
rollMod_str = r"(?:(?P<modifier>(?:<|>){1,2})(?P<inclusive>=)?\s*(?P<modValue>[+-]?(?:[0-9]*\.)?[0-9]+))?"
# +/- space number space d space number
diceParser = re.compile(r"\s*(?P<operation>[+-])?\s*(?:(?P<diceNumber>[0-9]*)\s*(?P<isDice>d)\s*)?(?P<value>[0-9]+)", re.I)
diceExp = re.compile(r"(?P<diceExp>(?:\s*[+-]?\s*(?:[0-9]*\s*d\s*)?[0-9]+)+)\s*" + rollMod_str + r"\s*", re.I)
inlineTester = re.compile(r"(?<![a-z])" + r"(?P<diceExp>(?:\s*[+-]?\s*(?:[0-9]*\s*d\s*)?[0-9]+)*\s*" + r"[+-]?\s*[0-9]*\s*d\s*[0-9]+" + r"(?:\s*[+-]?\s*(?:[0-9]*\s*d\s*)?[0-9]+)*)\s*" + rollMod_str, re.I)
#This guy should be put in some separate file, it is currently a copy-paste of chat.CooldownManager
class CooldownManager:
    """Tracks named cooldown timers backed by :class:`asyncio.Event` objects.

    Ideally, this guy should be either a singleton, or a static instance in the
    context it gets instanciated.  All state lives in class-level attributes,
    so every instance shares the same cooldown table.

    Modernized from the original generator-based coroutines:
    ``@asyncio.coroutine``/``yield from`` were removed in Python 3.11 and
    ``asyncio.async`` has been a syntax error since 3.7 (``async`` became a
    keyword); behavior is unchanged.  A corrupt scrub artifact (``<EMAIL>``)
    that sat in decorator position above ``is_on_cd`` was dropped.
    """
    #TODO use "secure" centralization of clear/set methods? Current implementation bypasses the need to go through _dict
    # Maps caller_key -> {resource_key: asyncio.Event}.  Only countermeasure to
    # messing around with stuff if singleton-ness not respected.
    _dict = {}
    dict_lock = asyncio.Lock()
    # Seconds to wait for key creation before assuming the lock logic broke.
    timeout = 5

    def _cd_exists(self, resource_key, caller_key=None):
        """Internal stuff to check on existence of keys. Read only, so no lock logic necessary."""
        return CooldownManager._dict.get(caller_key) and CooldownManager._dict[caller_key].get(resource_key)

    def _get_cd(self, resource_key, caller_key=None):
        """Internal stuff without fail checks."""
        return CooldownManager._dict[caller_key][resource_key]

    async def _prepare_key(self, resource_key, caller_key=None):
        """...internal stuff, didn't you guess? Ensures existence of the CD items, aka the event object.
        Returns False if the asyncio.Event object could not be created, including if it already existed."""
        await CooldownManager.dict_lock.acquire()
        #need to invert the logic on asyncio.Event's flag... We want people to wait/block when our "custom flag" is set
        d = CooldownManager._dict.get(caller_key)
        if not d:
            cd = asyncio.Event()
            cd.set()
            CooldownManager._dict[caller_key] = {resource_key: cd}
            CooldownManager.dict_lock.release()
            return True
        resource = d.get(resource_key)
        if not resource:
            cd = asyncio.Event()
            cd.set()
            d[resource_key] = cd
            CooldownManager.dict_lock.release()
            return True
        CooldownManager.dict_lock.release()
        return False  #The cd already exists

    def is_on_cd(self, resource_key, caller_key=None):
        """Returns true only if the specific <resource_key>,<caller_key> was asked to be put on cooldown, and timer didn't complete."""
        #If the specified timer doesn't exist, raise a CooldownManager.CooldownKeyError"""
        if not self._cd_exists(resource_key, caller_key):
            #raise CooldownManager.CooldownKeyError(resource_key,caller_key)
            return False
        #need to invert the logic on asyncio.Event's flag... We want people to wait/block when our "custom flag" is set
        return not self._get_cd(resource_key, caller_key).is_set()

    async def put_on_cd(self, duration, resource_key, caller_key=None):
        """Starts a cooldown countdown for <duration> seconds. Cooldown can be checked on via Boolean only through
        the .is_on_cd command. Each cooldown is identified by a <resource_key>, and an optional <caller_key> may
        be provided if it is necessary to use several cooldowns on the same resource.
        If <caller_key> is <None>, the cooldown will be put in the common pool of cooldowns.
        If the cooldown specified by the <resource_key>,<caller_key> pair is already on cooldown,
        this method will raise a CooldownManager.OnCooldownError"""
        #make sure the CD exists
        if not self._cd_exists(resource_key, caller_key):
            #if a timeout occurs here, either pi is overloaded or there was a bug in the programmation of the lock's logic
            tmp = await asyncio.wait_for(self._prepare_key(resource_key, caller_key), CooldownManager.timeout)
            if not tmp:
                #key preparation failed
                raise CooldownManager.KeyCreationError(resource_key, caller_key)
        #if already on CD, raise an error
        if self.is_on_cd(resource_key, caller_key):
            raise CooldownManager.OnCooldownError(resource_key, caller_key)
        #at this point, the key should be valid and point to an asyncio.Event object that is SET
        #reminder: need to invert the logic on asyncio.Event's flag... We want people to wait/block when our "custom flag" is set
        asyncio.ensure_future(self.cd_task(duration, self._get_cd(resource_key, caller_key)))  #delegate the wait task
        return

    async def cd_task(self, duration, cd):
        """Background task: clears the event for <duration> seconds, then sets it back."""
        await CooldownManager.dict_lock.acquire()
        cd.clear()  #clear flag to make people wait
        CooldownManager.dict_lock.release()
        await asyncio.sleep(duration)  #sleep for given duration
        await CooldownManager.dict_lock.acquire()
        cd.set()  #set flag back to unblock people
        CooldownManager.dict_lock.release()
        return

    class OnCooldownError(Exception):
        def __init__(self, resource_key, caller_key=None):
            msg = "The specified resource (resource_key={}, caller_key={}) is already on cooldown!".format(resource_key, caller_key)
            super().__init__(self, msg, resource_key, caller_key)

    #class CooldownKeyError(Exception):
    #    def __init__(self, resource_key, caller_key=None):
    #        msg="The specified cooldown (resource_key={}, caller_key={}) does not exist!".format(resource_key,caller_key)
    #        super().__init__(self,msg,resource_key,caller_key)

    class KeyCreationError(Exception):
        def __init__(self, resource_key, caller_key=None):
            msg = "The cooldown creation for (resource_key={}, caller_key={}) failed! (You're in trouble)".format(resource_key, caller_key)
            super().__init__(self, msg, resource_key, caller_key)
def diceRoll(maxInt):
    """Roll one fair die: return a uniformly random integer in the closed range [1, maxInt]."""
    return randrange(1, maxInt + 1)
def parseAndRollDice(expression):
    """Parse a dice expression (e.g. "2d6 + 3 - 1d4") and roll it.

    :returns: a tuple (res, minVal, maxVal) where res is the rolled total and
        minVal/maxVal are the theoretical minimum and maximum of the expression.
    """
    #init
    res = 0
    minVal = 0
    maxVal = 0
    #main loop
    for match in diceParser.finditer(expression):
        #retrieve relevant dice info
        value = int(match.group("value"))
        #check if is a dice or constant
        if match.group("isDice"):
            tmp = match.group("diceNumber")
            if tmp:
                diceNumber = int(tmp)
            else:
                diceNumber = 1
            #compute dice values
            tmp = 0
            for x in range(diceNumber):
                tmp += diceRoll(value)
        else:
            #trickery to not change what is below: a constant C accounts
            #like C dice that always roll 1
            tmp = value
            diceNumber = value
            value = 1
        #perform add/substract operation
        #Bug fix: str.format returns a new string; the original discarded the
        #result, so the logged message kept a literal '{}' placeholder.
        logMsg = "parseAndRollDice: Performed a series of roll for '{}' which resulted in ".format(match.group())
        if match.group("operation") == "-":  #if "operation" didn't produce a match, default to 'add'
            res -= tmp
            minVal -= diceNumber * value
            maxVal -= diceNumber
            logMsg += "res-={}, minVal-={}, maxVal-={}.".format(tmp, (diceNumber * value), diceNumber)
        else:
            res += tmp
            minVal += diceNumber
            maxVal += diceNumber * value
            logMsg += "res+={}, minVal+={}, maxVal+={}.".format(tmp, diceNumber, (diceNumber * value))
        #logMsg
        loc_log.debug(logMsg)
    return res, minVal, maxVal
def compareThresh(value, threshold, mode, inclusive, *, default=False):
    """Returns a boolean only if value satisfies the threshold test. In case of failure of any sort, returns the default value (which defaults to 'False').
    Accepted mode values are '<', '>', '<=' and '>='."""
    # The inclusive flag upgrades a strict comparison to its inclusive variant.
    if inclusive and mode in ("<", ">"):
        mode += "="
    if mode == "<":
        return value < threshold
    if mode == ">":
        return value > threshold
    if mode == "<=":
        return value <= threshold
    if mode == ">=":
        return value >= threshold
    # Unrecognized mode: fall back to the caller-supplied default.
    return default
def thresholdRoll(expression, threshold, mode, *, inclusive=False):
    """Performs rolls according to 'expression' and tests if the resulting value passes the threshold test. Returns a tuple (success,value)."""
    rolled, _minVal, _maxVal = parseAndRollDice(expression)
    return compareThresh(rolled, threshold, mode, inclusive), rolled
def successRoll(expression, threshold, mode, *, inclusive=False):
    """Counts how many rolls are below/above threshold value, and returns a tuple of (successCount,tries) .
    All dice expression should be of the "+" kind. If any "-" expression or constant is present, returns None.
    If mode is not '<<' or '>>', returns None."""
    #mode check: the doubled operator is the counting-mode marker; normalize it
    #to the single-char form understood by compareThresh
    if mode == "<<":
        mode = "<"
    elif mode == ">>":
        mode = ">"
    else:
        loc_log.debug("successRoll: Invalid mode '{}', returning None.".format(mode))
        return None
    #init
    count = 0
    tries = 0
    #main loop
    for match in diceParser.finditer(expression):
        #check if valid expression for counting
        if not match.group("isDice") or match.group("operation") == "-":
            loc_log.debug("successRoll: Invalid dice expression '{}', returning None.".format(match.group()))
            return None
        #retrieve relevant dice info
        value = int(match.group("value"))
        tmp = match.group("diceNumber")
        if tmp:
            diceNumber = int(tmp)
        else:
            diceNumber = 1
        #roll dice
        for x in range(diceNumber):
            tmp = diceRoll(value)
            if compareThresh(tmp, threshold, mode, inclusive):
                count += 1
        tries += diceNumber
        #perform logging
        #Bug fix: str.format returns a new string; the original discarded the
        #result, so the logged message kept a literal '{}' placeholder.
        logMsg = "successRoll: Performed a series of roll for '{}', current status is ".format(match.group())
        logMsg += "count={}, tries={}.".format(count, tries)
        loc_log.debug(logMsg)
    return count, tries
def diceExpressionRoll(expression):
    """Filters out invalid expression, and delegate to the appropriate rolling function depending on detected mode.
    See those functions for possible return Values. Should always returns 'None' in case of failure."""
    loc_log.debug("diceExpressionRoll: Invoked with argument '{}'.".format(expression))
    parsed = diceExp.fullmatch(expression)
    if not parsed:
        loc_log.debug("diceExpressionRoll: Invalid expression, returning None.")
        return None
    mod = parsed.group("modifier")
    if not mod:
        # No modifier present: plain roll.
        loc_log.debug("diceExpressionRoll: Delegating to parseAndRollDice (default mode).")
        return parseAndRollDice(parsed.group("diceExp"))
    inclusive = bool(parsed.group("inclusive"))  # force conversion to bool
    threshold = float(parsed.group("modValue"))
    if len(mod) == 2:  # counting mode ('<<' or '>>')
        loc_log.debug("diceExpressionRoll: Delegating to successRoll.")
        return successRoll(parsed.group("diceExp"), threshold, mod, inclusive=inclusive)
    # Single-character modifier: threshold mode.
    loc_log.debug("diceExpressionRoll: Delegating to thresholdRoll.")
    return thresholdRoll(parsed.group("diceExp"), threshold, mod, inclusive=inclusive)
def _internalInlineRollChecker(inline_expression):
    """Function to extract the appropriate dice expression (modifier included) from an inline string.

    :returns: the result of :func:`diceExpressionRoll` on the first inline dice
        expression found, or None when the string contains none.
    """
    m = inlineTester.search(inline_expression)  #returns FIRST match
    if m:
        #Typo fix in the log message ("inernal" -> "internal").
        loc_log.debug("_internalInlineRollChecker: Received an inline roll\n")
        return diceExpressionRoll(m.group())
    else:
        return None
def makeRollAnswerStr( roll_res, mention_str ):
"""Formats an answer string depending on the roll result. If provided with an invalid roll result, returns 'None'."""
answer = None
if roll_res == None:
answer = "Invalid dice expression !"
elif len(roll_res)==2: #either threshold or success roll
res,aux = roll_res
if isinstance(res,bool): #threshold roll
#care, bool apparently extand from int in python
if res:
answer = "{} succeeded ! (Roll value was: `{}`)".format(mention_str,aux)
else:
answer = "{} failed ! (Roll value was: `{}`)".format(mention_str,aux)
elif isinstance(res,int): #success roll
answer = "{} succeeded `{}` times ! (Number of attempts: `{}`)".format(mention_str,res,aux)
elif len(roll_res)==3: #default roll
res,minVal,maxVal = roll_res
answer | |
= ctx.clean_prefix
correct_usage = f"{p}automod whitelist add/remove @role/#channel"
example = f"{p}automod whitelist add @boosters"
you_idiot = error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Correct Usage: `{correct_usage}`\nExample: `{example}`"
)
if choice is None or setting is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=you_idiot)
final = await am_whitelist_func(ctx, choice, setting)
if final:
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Success!",
f"Users {'with' if isinstance(setting, discord.Role) else 'in'} {setting.mention} will {'no longer' if choice else 'now'} trigger automod."
))
else:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Failure!",
f"{setting.mention} is {'already' if choice else 'not'} a whitelisted {'role' if isinstance(setting, discord.Role) else 'channel'}."
))
@commands.command(help="Configure Mikuni antialts system.", aliases=['antiraid', 'antialt'])
@commands.cooldown(3, 30, commands.BucketType.user)
@commands.bot_has_guild_permissions(administrator=True)
@commands.has_permissions(administrator=True)
async def antialts(self, ctx, config=None, setting: Union[discord.TextChannel, discord.Role, int, str] = None):
# yes i know this is messy
# and i dont care
prefix = ctx.clean_prefix
g = await self.client.get_guild_config(ctx.guild.id)
aa = g['antialts']
enabled = False if not aa else True
info_embed = success_embed(
f"{BADGE_EMOJIS['bot_mod']} Alt protection",
f"""
Alt protection is current **{'Enabled' if enabled else 'Disabled'}**.
**Level:** `{'0' if not enabled else aa['level']}`
**Log channel:** {'None' if not enabled else '<#'+str(aa['log_channel'])+'>'}
**Minimum account age:** {'None' if not enabled else format_timespan(aa['min_account_age']*24*60*60)}
**Restricted Role:** {'None' if not enabled else '<@&'+str(aa['restricted_role'])+'>'}
"""
).add_field(
name="🔹 Level 01",
value="The bot will restrict the suspect from sending messages in the server and log their info.",
inline=True
).add_field(
name="💎 Level 02",
value="The bot will kick the suspect and log their info, they will be banned if they try to join again.",
inline=True
).add_field(
name="<a:diamond:862594390256910367> Level 03",
value="The bot will ban the suspect and log their info.",
inline=True
).add_field(
name="Commands:",
value=f"""
- `{prefix}antialt enable/disable` - To enable/disable alt protection.
- `{prefix}antialt minage <time>` - To set the minimum age.
- `{prefix}antialt level <number>` - To change the protection level.
- `{prefix}antialt channel #channel` - To change the log channel.
- `{prefix}antialt role @role` - To change the restricted role.
- `{prefix}kickalts` - Kicks all the users with the restricted role.
- `{prefix}banalts` - Bans all the users with the restricted role.
- `{prefix}grantaccess` - Grants server access to a restricted user.
"""
)
if config is None:
return await ctx.reply(embed=info_embed)
if config.lower() == 'enable':
if enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Already enabled.", "Alt protection is already enabled."))
log_channel = None
min_account_age = None
restricted_role = None
view = AntiAltsSelectionView(context=ctx)
msg = await ctx.reply(f"""
**Antialts setup**
- {EMOJIS['idle']} Level.
- {EMOJIS['dnd']} Log channel.
- {EMOJIS['dnd']} Minimum account age.
- {EMOJIS['dnd']} Restricted role.
Please select a protection level.
""", view=view)
await view.wait()
if view.cancelled:
return await msg.edit(
content="",
embed=discord.Embed(title=f"{EMOJIS['tick_no']} Cancelled", color=RED_COLOR),
view=None
)
await msg.edit(f"""
**Antialts setup**
- {EMOJIS['online']} Level: `{view.level}`
- {EMOJIS['idle']} Log channel.
- {EMOJIS['dnd']} Minimum account age.
- {EMOJIS['dnd']} Restricted role.
Please enter a log channel.
Type `create` to automatically create a channel.
Type `cancel` to cancel the command.
""", view=None)
m = await wait_for_msg(ctx, 60, msg)
if m == 'pain':
return
if m.content.lower() == 'create':
overwrites = {
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.guild.me: discord.PermissionOverwrite(read_messages=True)
}
created_channel = await ctx.guild.create_text_channel('alt-logs', overwrites=overwrites)
log_channel = created_channel.id
else:
try:
lul_channel = await commands.TextChannelConverter().convert(ctx=ctx, argument=m.content)
log_channel = lul_channel.id
except commands.ChannelNotFound:
return await msg.reply(content="", embed=error_embed(
f"{EMOJIS['tick_no']} Not found!",
f"I wasn't able to find the channel {m.content}, please try again."
), view=None)
await msg.edit(f"""
**Antialts setup**
- {EMOJIS['online']} Level: `{view.level}`
- {EMOJIS['online']} Log channel: <#{log_channel}>
- {EMOJIS['idle']} Minimum account age.
- {EMOJIS['dnd']} Restricted role.
Please enter the minimum account age requirement (in days).
Type `none` to have the default value (7 days).
Type `cancel` to cancel the setup.
""", view=None)
m = await wait_for_msg(ctx, 60, msg)
if m == 'pain':
return
try:
if m.content.lower() != 'none':
temp_acc_age = int(m.content)
if temp_acc_age <= 0:
return await msg.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} Positive values only!",
"Account age can only be a positive number."
))
min_account_age = temp_acc_age
else:
min_account_age = 7
except Exception:
return await msg.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} Integer values only!",
"Please enter an integer next time."
))
await msg.edit(f"""
**Antialts setup**
- {EMOJIS['online']} Level: `{view.level}`
- {EMOJIS['online']} Log channel: <#{log_channel}>
- {EMOJIS['online']} Minimum account age: {min_account_age} days.
- {EMOJIS['idle']} Restricted role.
Please enter a restricted role.
Type `create` to create one automatically.
Type `cancel` to cancel the setup.
""")
m = await wait_for_msg(ctx, 60, msg)
if m == 'pain':
return
if m.content.lower() != 'create':
try:
r_role = await commands.RoleConverter().convert(ctx=ctx, argument=m.content)
except Exception:
return await msg.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} Not found!",
f"I wasn't able to find the role {m.content}\nPlease re-run the command."
))
restricted_role = r_role.id
else:
await msg.edit(f"Creating the role, this may take a while... {EMOJIS['loading']}")
r_role = await ctx.guild.create_role(name="Restricted", color=0x818386)
for channel in ctx.guild.channels:
try:
await channel.set_permissions(
r_role,
speak=False,
send_messages=False,
add_reactions=False
)
except Exception as e:
print(e)
restricted_role = r_role.id
await msg.edit(f"""
**Setup complete**
Here are you settings:
- {EMOJIS['online']} Level: `{view.level}`
- {EMOJIS['online']} Log channel: <#{log_channel}>
- {EMOJIS['online']} Minimum account age: {min_account_age} days.
- {EMOJIS['online']} Restricted role: <@&{restricted_role}>
""")
g.update({"antialts": {
"level": int(view.level),
"log_channel": log_channel,
"min_account_age": min_account_age,
"restricted_role": restricted_role
}})
return
if config.lower() == 'disable':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Already disabled.", "Alt protection is already disabled."))
g.update({"antialts": False})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Disabled",
"Alt protection has now been disabled."
))
if config.lower() == 'minage':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not enabled.", f"Please enable alt protection system first.\nUsage: `{prefix}antialts enable`"))
if config is None:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid Usage", f"Please use `{prefix}antialts minage <number>`"))
if not isinstance(setting, int):
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Intergers only!", "The minimum age number should be an integer only!"))
if setting <= 0:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Positive integers only!", "The minimum account age number can only be positive."))
aa.update({"min_account_age": setting})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Updated!",
f"The minimum account age has been updated to `{setting}` day(s)."
))
if config.lower() == 'level':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not enabled.", f"Please enable alt protection system first.\nUsage: `{prefix}antialts enable`"))
if config is None:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid Usage", f"Please use `{prefix}antialts level <number>`"))
if not isinstance(setting, int):
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Intergers only!", "The level number should be an integer between 1 and 3 only!"))
if not 1 <= setting <= 3:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid level value!", "The level number should be between and 1 and 3 only!"))
aa.update({"level": setting})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Updated!",
f"The alt protection level has been updated to level `{setting}`"
))
if config.lower() == 'channel':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not enabled.", f"Please enable alt protection system first.\nUsage: `{prefix}antialts enable`"))
if config is None:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid Usage", f"Please use `{prefix}antialts channel #channel`"))
if not isinstance(setting, discord.TextChannel):
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not found!", f"I wasn't able to find channel {setting}, please try again."))
aa.update({"log_channel": setting.id})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Updated!",
f"The log channel has been updated to {setting.mention}"
))
if config.lower() == 'role':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not enabled.", f"Please enable alt protection system first.\nUsage: `{prefix}antialts enable`"))
if config is None:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid Usage", f"Please use `{prefix}antialts role @role`"))
if not isinstance(setting, discord.Role):
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not found!", f"I wasn't able to find the role {setting}, please try again."))
aa.update({"restricted_role": setting.id})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Updated!",
f"The restricted role has been updated to {setting.mention}"
))
else:
return await ctx.reply(embed=info_embed)
@commands.command(help="Kick all resitricted users by alt protection system.", aliases=['kickrestricted'])
@commands.has_permissions(kick_members=True)
@commands.bot_has_permissions(kick_members=True)
@commands.cooldown(3, 30, commands.BucketType.user)
async def kickalts(self, ctx: commands.Context):
prefix = ctx.clean_prefix
g = await self.client.get_guild_config(ctx.guild.id)
aa = g['antialts']
enabled = False if not aa else True
if not enabled:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Alt protection not enabled!",
f"You can only use this command if alt protection is enabled.\nPlease use `{prefix}antialts enable` to enable it."
))
role = ctx.guild.get_role(aa['restricted_role'])
if role is None:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Restricted role not found!",
"Looks like the restricted role has been deleted."
))
m = await ctx.reply(f"Working on it... {EMOJIS['loading']}")
kids = role.members
if len(kids) == 0:
return await m.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} No restricted users found!",
f"There are no users having the role {role.mention} in this server."
))
kicked_count = 0
for kid in kids:
try:
await kid.kick(reason=f"Action done by user: {ctx.author} ({ctx.author.id})")
kicked_count += 1
await asyncio.sleep(0.5)
except Exception:
pass
await m.edit(f"I have kicked `{kicked_count}` restricted users out of `{len(kids)}`.")
log_channel = self.client.get_channel(aa['log_channel'])
if log_channel is None:
return
await log_channel.send(embed=success_embed(
"Alts kicked!",
f"**{kicked_count}** alts | |
# encoding: utf-8
from __future__ import unicode_literals
class Commands(object):
    """Holds the textual command keywords for a given bot language.

    The keyword set is currently identical for both supported languages: the
    original's "cz" and "en" branches were byte-for-byte duplicates, so they
    are merged here (behavior unchanged).

    :raises LangError: if ``lang`` is not a supported language code.
    """
    def __init__(self, lang):
        if lang in ("cz", "en"):
            self.add = "add"
            self.answer = "answer"
            self.ask = "ask"
            self.clear = "clear"
            self.comment = "comment"
            self.delete = "delete"
            self.faq = "faq"
            self.help = "help"
            self.joke = "joke"
            self.list = "list"
            self.media = "media"
            self.rooms = "rooms"
        else:
            raise LangError("ERROR: Unsupported language - {0}.".format(lang))
class Answers(object):
def __init__(self, lang):
self.languages = (
"cz",
"en"
)
self.mention = "<@personEmail:{0}> "
if lang == "cz":
self.yes = "ANO"
self.no = "NE"
self.time = "%d.%m. v %H:%M"
self.unauthorized = "Lituji, ale nejste autorizován k používání tohoto příkazu."
self.inactive = "Lituji, ale do této místnosti již nemohu nic přidávat."
self.fallback = "Promiňte, ale nerozumím.\n\nZkuste {0} pro seznam příkazů."
self.welcome_direct = (
"Dobrý den **{0}**, já jsem bot moderátor.\n\nNapište **{1}** pro seznam příkazů."
)
self.welcome_group = (
"Dobrý den, já jsem bot moderátor této místnosti.\n\nNapište **{0}** pro seznam příkazů.\n\n"
)
self.room_name = "Osobní místnost s {0}"
self.sql_error = "Ajaj, interní chyba. Administrátor upozorněn. Hned to opravím."
self.help_all = (
"Zdravím Vás, **{0}**, rád byste se zeptal či zasmál?\n\n"
"Seznam příkazů:\n"
"- **{1} <dotaz>** - pro položení dotazu\n"
"- **{2} <číslo dotazu> <komentář>** - pro komentování daného dotazu\n"
"- **{3} [číslo dotazu]** - pro seznam dotazů a odpovědí v této místnosti\n"
"- **{4}** - pro zobrazení této nápovědy\n"
"- **{5}** - pro vtip ze světa IT\n"
"- **{6}** - pro zobrazení prezentací a jiných dokumentů\n\n"
"Vysvětlivky:\n"
"- **<argument>** - povinný argument\n"
"- **[argument]** - volitelný argument\n\n"
"Příklady příkazů:\n"
"- **{1} _Proč jsme tady_?** - položí dotaz a upozorní moderátory diskuze\n"
"- **{3}** - zobrazí seznam všech FAQ v dané místnosti\n"
"- **{3} 1** - zobrazí specifické FAQ vlákno včetně odpovědi a komentářů\n"
"- **{2} 1 _Abychom trpěli._** - okomentuje první FAQ vlákno a upozorní moderátory diskuze\n"
"- **{6}** - zobrazí seznam všech přednášek a dalších užitečných odkazů\n"
)
self.help_admin = (
"Toto je moderátorská místnost, jste autorizováni k následujícím příkazům:\n"
"- **{0} <objekt> [id místnosti] <data>** - pro vložení objektu\n"
"- **{1} <id dotazu> <odpověď>** - pro odpověď na dotaz\n"
"- **{2} <id místnosti>** - pro smazání všech generovaných zpráv z místnosti\n"
"- **{3} <id dotazu> <komentář>** - pro komentování daného dotazu\n"
"- **{4} <objekt> <id objektu>** - pro smazání objektu\n"
"- **{5} [id dotazu]** - pro zobrazení dané otázky a odpovědí\n"
"- **{6}** - pro zobrazení této nápovědy\n"
"- **{7}** - pro vtip ze světa IT\n"
"- **{8} <objekt> [id místnosti]** - pro zobrazení všech objektů obecně či pro danou místnost\n"
"- **{9}** - pro zobrazení všech místností\n\n"
"Typy objektů:\n"
"- **{5}** - FAQ\n"
"- **{7}** - vtip\n"
"- **{10}** - prezentace a jiné dokumenty\n\n"
"Vysvětlivky:\n"
"- **<argument>** - povinný argument\n"
"- **[argument]** - volitelný argument\n\n"
"Příklady příkazů:\n"
"- **{0} {10} 1 _https://example.com Example presentation_** "
"- vloží odkaz na prezentaci a odešle notifikaci do místnosti 1\n"
"- **{0} {5} 2 _Proč jsme tak drazí?_** - manuálně přidá FAQ vlákno do místnosti 2\n"
"- **{8} {5}** - zobrazí seznam všech FAQ vláken napříč všemi místnostmi\n"
"- **{1} 4 _Poskytujeme řešení._** - zodpoví otázku FAQ vlákna s číslem 4\n"
"- **delete faq 1** - odstraní celé FAQ vlákno včetně odpovědi, komentářů a generovaných zpráv\n"
)
self.help_group = (
"**Mějte na paměti, že toto je skupinová místnost. Reaguji pouze, když jsem zmíněn pomocí '@', "
"např. <@<EMAIL>:{0}> {1}**\n\nNeváhejte mě však kontaktovat i privátně ve vlastní místnosti."
)
self.add_empty = "Nelze vložit prázdný objekt."
self.add_help = "Zadejte prosím správně argumenty příkazu, nápověda: **{0}{1} <objekt> <data>**"
self.answer_bad = "Nelze vložit prázdnou odpověď."
self.answer_empty = "Je mi líto, ale na tento dotaz zatím neznám odpověď.\n\n"
self.answer_help = (
"Zadejte prosím správně argumenty příkazu, nápověda: "
"**{0}{1} <id dotazu> <odpověď>**"
)
self.answer_success = "Odpověď úspěšně zaznamenána, upozornění uživateli **{0}** zasláno."
self.answer_group = (
"Uživatel {0} odpověděl na dotaz od <@personEmail:{1}>.\n\n"
"- **Otázka:** \"{2}\"\n- **Odpověď:** \"{3}\""
)
self.answer_personal = (
"Uživatel {0} odpověděl na Váš dotaz.\n\n- **Otázka:** \"{1}\"\n- **Odpověď:** \"{2}\""
)
self.answer_text = "Otázka:\n\n{0}\nOdpověď"
self.ask_empty = "Nelze vložit prázdný dotaz."
self.ask_help = (
"Pro vložení dotazu z moderátorské místnosti napište **{0}{1} {2} <id místnosti> Text dotazu**"
)
self.ask_success = "Dotaz úspěšně zaznamenán, budete upozorněn při odpovědi na Váš dotaz."
self.ask_posted = (
"Uživatel **{0}** právě položil dotaz:\n\n**\"{1}\"**\n\n"
"Pro odpověď napište **<@personEmail:{2}> {3} {4} Text odpovědi**"
)
self.clear_help = (
"Pro smazaní všech generovaných zpráv z dané místnosti napište "
"**{0}{1} <id místnosti>**"
)
self.clear_success = "Zprávy úspěšně smazány."
self.comment_empty = "Nelze vložit prázdný komentář."
self.comment_header = "Komentáře:\n\n"
self.comment_help = (
"Zadejte prosím správně argumenty příkazu, nápověda: "
"**{0}{1} <číslo dotazu> <comment>**"
)
self.comment_success = "Komentář úspěšně zaznamenán, budete upozorněn při dalších komentářích."
self.comment_group = (
"Uživatel **{0}** právě okomentoval dotaz od **{1}**.\n\n"
"- **Otázka:** \"{2}\"\n- **Komentář:** \"{3}\"\n\n"
"Pro další komentář napište **{4}{5} {6} Text komentáře**"
)
self.comment_personal = (
"Uživatel **{0}** právě okomentoval Váš dotaz.\n\n- **Otázka:** \"{1}\"\n- **Komentář:** \"{2}\"\n\n"
"Pro další komentář napište **{3} {4} Text komentáře**"
)
self.comment_moderator = (
"Uživatel **{0}** právě okomentoval dotaz od **{1}**.\n\n"
"- **Otázka:** \"{2}\"\n- **Komentář:** \"{3}\"\n\n"
"Pro další komentář napište **{4}{5} {6} Text komentáře**"
)
self.delete_help = (
"Zadejte prosím správně argumenty příkazu, nápověda: "
"**{0}{1} <objekt> <id objektu>**"
)
self.faq_bad = "Zadejte prosím platné číslo dotazu, pro seznam všech dotazů napište **{0}{1}**"
self.faq_deleted = "Vlákno dotazu včetně komentářů úspěšně smazáno."
self.faq_empty = "Je mi líto, ale nebyly zatím položeny žádné dotazy."
self.faq_line = "{0} od uživatele **{1}**: \"{2}\" - Zodpovězena: {3}\n"
self.faq_thread = "Otázka:\n\n{0}\nOdpověď:\n\n{1}\n"
self.joke_bad = "Zadejte prosím platné ID vtipu, pro seznam všech vtipů napište **{0}{1} {2}**"
self.joke_deleted = "Vtip úspěšně smazán."
self.joke_empty = "Je mi líto, ale zatím neznám žádné vtipy."
self.joke_success = "Vtip úspěšně uložen, snad se každý pobaví."
self.list_help = (
"Zadejte prosím správně argumenty příkazu, nápověda: "
"**{0}{1} <objekt> [id místnosti]**"
)
self.list_empty = "Je mi líto, ale nebyly nalezeny žádné objekty."
self.media_bad = "Zadejte prosím platné ID dokumentu, pro seznam všech dokumentů napište **{0}{1} {2}**"
self.media_deleted = "Dokument úspěšně smazán."
self.media_empty = "Je mi líto, ale zatím nebyly publikovány žádné prezentace či jiné dokumenty."
self.media_posted = "Uživatel **{0}** právě přidal nový dokument - **[{1}]({2})**"
self.media_success = "Dokument úspěšně zaznamenán, uživatelé budou upozorněni"
self.room_bad = "Zadejte prosím platné ID místnosti, pro seznam všech místností napište **{0}{1}**"
self.room_deleted = "Bot úspěšně odebrán z místnosti úspěšně."
self.room_direct = "Nelze smazat privátní místnost s uživatelem."
self.room_empty = "Je mi líto, ale v této místnosti nebyly zatím generovány žádné zprávy."
elif lang == "en":
self.yes = "YES"
self.no = "NO"
self.time = "%m/%d at %H:%M"
self.unauthorized = "I am sorry, but you are not authorized to use this command."
self.inactive = "I am sorry, but I cannot interact with this room anymore."
self.fallback = "I am sorry, I do not understand.\n\nTry {0} for list of commands."
self.welcome_direct = (
"Hello <@personEmail:{0}>, I am the room moderator bot.\n\nType **{1}** for list of commands."
)
self.welcome_group = "Hello, I am the room moderator bot.\n\nType **{0}** for list of commands.\n\n"
self.room_name = "Personal room with {0}"
self.sql_error = "Oops, internal error. Admin notified. I will fix it right away."
self.help_all = (
"Welcome, **{0}**, would you like to ask or laugh?\n\n"
"List of commands:\n"
"- **{1} <question>** - to ask a question\n"
"- **{2} <faq number> <comment>** - to comment the given FAQ thread\n"
"- **{3} [faq number]** - to list all FAQs or given FAQ thread\n"
"- **{4}** - to show this help\n"
"- | |
sequence
"""
if start < 0:
raise ValueError(f'cannot pad DNA with negative start, but start = {start}')
elif start >= length:
raise ValueError(f'cannot pad DNA with start >= length, but start = {start} and '
f'length = {length}')
if len(sequence) > length:
sequence = sequence[start:start + length]
elif len(sequence) < length:
prefix = DNA_base_wildcard * start
suffix = DNA_base_wildcard * (length - len(sequence) - start)
sequence = prefix + sequence + suffix
return sequence
def _string_merge_wildcard(s1: str, s2: str, wildcard: str) -> str:
"""Takes a "union" of two equal-length strings `s1` and `s2`.
Whenever one has a symbol `wildcard` and the other does not, the result has the non-wildcard symbol.
Raises :py:class:`ValueError` if `s1` and `s2` are not the same length or do not agree on non-wildcard
symbols at any position."""
if len(s1) != len(s2):
raise ValueError(f'\ns1={s1} and\ns2={s2}\nare not the same length.')
union_builder = []
for i in range(len(s1)):
c1, c2 = s1[i], s2[i]
if c1 == wildcard:
union_builder.append(c2)
elif c2 == wildcard:
union_builder.append(c1)
elif c1 != c2:
raise ValueError(f's1={s1} and s2={s2} have unequal symbols {c1} and {c2} at position {i}.')
elif c1 == c2:
union_builder.append(c1)
else:
raise AssertionError('should be unreachable')
return ''.join(union_builder)
class IllegalDNADesignError(ValueError):
    """Raised when some aspect of the :any:`DNADesign` object is illegal."""

    def __init__(self, the_cause: str):
        # remember the reason so __str__ can render it
        self.cause = the_cause

    def __str__(self):
        """Printing the exception shows the repr of the stored cause."""
        return repr(self.cause)
class StrandError(IllegalDNADesignError):
    """Indicates that the :any:`DNADesign` is illegal due to some specific :any:`Strand`.

    Information about the :any:`Strand` is embedded in the error message when this exception is
    raised that helps to identify which :any:`Strand` caused the problem."""

    def __init__(self, strand: Strand, the_cause: str):
        first_domain = strand.first_bound_domain()
        last_domain = strand.last_bound_domain()
        # One diagnostic per line.  (Bug fix: the "DNA sequence" line previously
        # lacked a trailing '\n', so it ran together with the "strand 5' helix"
        # line in the rendered message.)
        msg = (f'{the_cause}\n'
               f'strand length = {strand.dna_length()}\n'
               f'DNA length = {len(strand.dna_sequence) if strand.dna_sequence else "N/A"}\n'
               f'DNA sequence = {strand.dna_sequence}\n'
               f"strand 5' helix = {first_domain.helix if first_domain else 'N/A'}\n"
               f"strand 5' end offset = {first_domain.offset_5p() if first_domain else 'N/A'}\n"
               f"strand 3' helix = {last_domain.helix if last_domain else 'N/A'}\n"
               f"strand 3' end offset = {last_domain.offset_3p() if last_domain else 'N/A'}\n")
        super().__init__(msg)
def _plates(idt_strands):
plates = set()
for strand in idt_strands:
if strand.idt is not None and strand.idt.plate is not None:
plates.add(strand.idt.plate)
return list(plates)
# Row labels (A-H) and 1-based column numbers (1-12) of a standard 96-well plate.
_96WELL_PLATE_ROWS: List[str] = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
_96WELL_PLATE_COLS: List[int] = list(range(1, 13))
# Row labels (A-P) and 1-based column numbers (1-24) of a standard 384-well plate.
_384WELL_PLATE_ROWS: List[str] = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
                                  'O',
                                  'P']
_384WELL_PLATE_COLS: List[int] = list(range(1, 25))
@enum.unique
class PlateType(int, enum.Enum):
    """Represents two different types of plates in which DNA sequences can be ordered."""

    wells96 = 96
    """96-well plate."""

    wells384 = 384
    """384-well plate."""

    def rows(self) -> List[str]:
        """Row labels appropriate for this plate size."""
        if self is PlateType.wells96:
            return _96WELL_PLATE_ROWS
        return _384WELL_PLATE_ROWS

    def cols(self) -> List[int]:
        """Column numbers appropriate for this plate size."""
        if self is PlateType.wells96:
            return _96WELL_PLATE_COLS
        return _384WELL_PLATE_COLS
class _PlateCoordinate:
    """Mutable cursor over well positions: advances down the rows of a column,
    then across columns, then on to a fresh plate."""

    def __init__(self, plate_type: PlateType):
        self._plate_type = plate_type
        self._plate: int = 1      # plates are numbered from 1
        self._row_idx: int = 0
        self._col_idx: int = 0

    def increment(self):
        """Move to the next well, wrapping rows -> columns -> plates."""
        self._row_idx += 1
        if self._row_idx < len(self._plate_type.rows()):
            return
        self._row_idx = 0
        self._col_idx += 1
        if self._col_idx < len(self._plate_type.cols()):
            return
        self._col_idx = 0
        self._plate += 1

    def plate(self) -> int:
        """Current plate number (1-based)."""
        return self._plate

    def row(self) -> str:
        """Current row label, e.g. 'A'."""
        return self._plate_type.rows()[self._row_idx]

    def col(self) -> int:
        """Current column number, e.g. 1."""
        return self._plate_type.cols()[self._col_idx]

    def well(self) -> str:
        """Current well name, e.g. 'A1'."""
        return f'{self.row()}{self.col()}'
def remove_helix_idxs_if_default(helices: List[Dict]):
    # Removes the per-helix index entry from every helix dict, but only when the
    # indices are "default", i.e. each helix's stored index equals its position
    # in the list (order of appearance).  If any helix deviates, nothing is removed.
    default = True
    for expected_idx, helix in enumerate(helices):
        idx = helix[idx_on_helix_key]
        if idx != expected_idx:
            # found a non-default index; keep all index entries
            default = False
            break
    if default:
        for helix in helices:
            del helix[idx_on_helix_key]
def add_quotes(string: str) -> str:
    """Return `string` wrapped in double quotes."""
    return '"' + string + '"'
def mandatory_field(ret_type: Type, json_map: dict, main_key: str, *legacy_keys: str):
    """Fetch `main_key` (or, failing that, each legacy key in order) from `json_map`.

    Should be called from a function whose return type is the type being
    constructed from JSON (e.g. DNADesign or Strand), given by `ret_type`;
    that type name makes the error message useful when no key is found."""
    for candidate in (main_key,) + legacy_keys:
        if candidate in json_map:
            return json_map[candidate]
    ret_type_name = ret_type.__name__
    msg_about_keys = f'the key "{main_key}"'
    if legacy_keys:
        msg_about_keys += f" (or any of the following legacy keys: {', '.join(map(add_quotes, legacy_keys))})"
    msg = f'I was looking for {msg_about_keys} in the JSON encoding of a {ret_type_name}, ' \
          f'but I did not find it.' \
          f'\n\nThis occurred when reading this JSON object:\n{json_map}'
    raise IllegalDNADesignError(msg)
def optional_field(default_value, json_map: dict, main_key: str, *legacy_keys: str):
    """Like ``dict.get``, except it consults `main_key` and then each legacy key
    in order, returning `default_value` when none of them is present."""
    candidates = (main_key,) + legacy_keys
    for candidate in candidates:
        if candidate in json_map:
            return json_map[candidate]
    return default_value
@dataclass
class Geometry(_JSONSerializable):
    """Parameters controlling some geometric visualization/physical aspects of Design."""

    rise_per_base_pair: float = 0.332
    """Distance in nanometers between two adjacent base pairs along the length of a DNA double helix."""

    helix_radius: float = 1.0
    """Radius of a DNA helix in nanometers."""

    bases_per_turn: float = 10.5
    """Number of DNA base pairs in a full turn of DNA."""

    minor_groove_angle: float = 150.0
    """Minor groove angle in degrees."""

    inter_helix_gap: float = 0.5
    """Gap between helices in nanometers (due to electrostatic repulsion; needed to display to scale)."""

    def is_default(self):
        """True when every field equals the library-default geometry."""
        return self == _default_geometry

    @staticmethod
    def from_json(json_map: dict) -> 'Geometry':  # remove quotes when Python 3.6 support dropped
        """Build a :any:`Geometry` from a JSON dict, falling back to the default
        value for any key that is absent."""
        geometry = Geometry()
        geometry.rise_per_base_pair = optional_field(
            _default_geometry.rise_per_base_pair, json_map,
            rise_per_base_pair_key, *legacy_rise_per_base_pair_keys)
        geometry.helix_radius = optional_field(
            _default_geometry.helix_radius, json_map, helix_radius_key)
        geometry.bases_per_turn = optional_field(
            _default_geometry.bases_per_turn, json_map, bases_per_turn_key)
        geometry.minor_groove_angle = optional_field(
            _default_geometry.minor_groove_angle, json_map, minor_groove_angle_key)
        geometry.inter_helix_gap = optional_field(
            _default_geometry.inter_helix_gap, json_map, inter_helix_gap_key)
        return geometry

    @staticmethod
    def keys() -> List[str]:
        """JSON key names, in the same order as :py:meth:`Geometry.values`."""
        return [rise_per_base_pair_key, helix_radius_key, bases_per_turn_key,
                minor_groove_angle_key, inter_helix_gap_key]

    def values(self) -> List[float]:
        """Field values, in the same order as :py:meth:`Geometry.keys`."""
        return [self.rise_per_base_pair, self.helix_radius, self.bases_per_turn,
                self.minor_groove_angle, self.inter_helix_gap]

    @staticmethod
    def default_values() -> List[float]:
        """Values of the default geometry, in key order."""
        return _default_geometry.values()

    def to_json_serializable(self, suppress_indent: bool = True):
        """Serialize only the fields that differ from their defaults."""
        dct = OrderedDict()
        for name, val, default in zip(Geometry.keys(), self.values(), Geometry.default_values()):
            if val != default:
                dct[name] = val
        return dct
# Module-level singleton holding the library-default geometry; Geometry.is_default()
# compares against it and Geometry.from_json() uses it for fallback values.
_default_geometry = Geometry()
@dataclass
class DNADesign(_JSONSerializable):
"""Object representing the entire design of the DNA structure."""
strands: List[Strand]
"""All of the :any:`Strand`'s in this :any:`DNADesign`.
Required field."""
helices: Dict[int, Helix] = None
"""All of the :any:`Helix`'s in this :any:`DNADesign`.
This is a dictionary mapping index to the :any:`Helix` with that index; if helices have indices
0, 1, ..., num_helices-1, then this can be used as a list of Helices.
Optional field. If not specified, then the number of helices will be just large enough to store the
largest index :py:data:`Domain.helix`
stored in any :any:`Domain`
in :py:data:`DNADesign.strands`."""
grid: Grid = Grid.square
"""Common choices for how to arrange helices relative to each other.
Optional field."""
major_tick_distance: int = -1
"""Distance between major ticks (bold) delimiting boundaries between bases.
Optional field.
If not specified, default value is 8 unless overridden by :py:data:`DNADesign.grid`.
If 0 then no major ticks are drawn.
If negative then the default value is assumed, but `major_tick_distance` is not stored in the JSON file
when serialized.
If :any:`DNADesign.grid` = :any:`Grid.square` then the default value is 8.
If :any:`DNADesign.grid` = :any:`Grid.hex` or :any:`Grid.honeycomb` then the default value is 7."""
helices_view_order: List[int] = None
"""A list of the order in which the helix should be displayed in the main view of scadnano.
This list must be a permutation containing each integer 0, 1, 2, ..., len(helices)-1 exactly once.
Optional field. If not specified, it will be set to the identity permutation [0, ..., len(helices)-1].
"""
geometry: Geometry = field(default_factory=lambda: Geometry())
"""Controls some geometric/physical aspects of this :any:`DNADesign`."""
automatically_assign_color: bool = field(repr=False, default=True)
"""If `automatically_assign_color` = ``False``, then for any :any:`Strand` such that
`Strand.color` = ``None``, do not automatically assign a :any:`Color` to it.
In this case color will be set to its default of ``None`` and will not be
written to the JSON with :py:meth:`DNADesign.write_scadnano_file` or :py:meth:`DNADesign.to_json`."""
color_cycler: ColorCycler = field(default_factory=lambda: ColorCycler(), init=False)
def __init__(self, *,
helices: Optional[Union[List[Helix], Dict[int, Helix]]] = None,
strands: List[Strand] = None,
helix_template: Optional[Helix] = None,
num_helices: Optional[int] = None,
grid: Grid = Grid.none,
major_tick_distance: int = -1,
helices_view_order: List[int] = None,
geometry: Geometry = None):
"""
:param helices: List of :any:`Helix`'s; if missing, set based on either `helix_template` and
`num_helices`, or based on `strands`.
Mutually exlusive with `helix_template` and `num_helices`
:param strands: List of :any:`Strand`'s. If missing, will be empty.
:param helix_template: | |
# Essentials
import numpy
import pandas
import datetime
import random
import time
# Plots
import seaborn
import matplotlib.pyplot as plt
# Models
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, BaggingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.svm import SVR
from mlxtend.regressor import StackingCVRegressor
# import lightgbm as lgb
# from lightgbm import LGBMRegressor
from xgboost import XGBRegressor
# Stats
from scipy.stats import skew, norm
from scipy.special import boxcox1p
from scipy.stats import boxcox_normmax
# Misc
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.decomposition import PCA
from utils import utilities
# Show every column when printing DataFrames (this dataset has many features).
pandas.set_option('display.max_columns', None)
# Ignore useless warnings
import warnings
warnings.filterwarnings(action="ignore")
pandas.options.display.max_seq_items = 8000
pandas.options.display.max_rows = 8000
# Taken from: https://www.kaggle.com/lavanyashukla01/how-i-made-top-0-3-on-a-kaggle-competition
# Local file-system locations of the Kaggle "House Prices" inputs and the two
# submission files this script produces.
TRAIN_FILE = '/Users/fpena/Datasets/house-prices/train.csv'
TEST_FILE = '/Users/fpena/Datasets/house-prices/test.csv'
SAMPLE_SUBMISSION_FILE = '/Users/fpena/Datasets/house-prices/sample_submission.csv'
SUBMISSION_FILE_1 = '/Users/fpena/Datasets/house-prices/submission_regression1.csv'
SUBMISSION_FILE_2 = '/Users/fpena/Datasets/house-prices/submission_regression2.csv'
# start = time.time()
# Read in the dataset as a dataframe
def load_data(nrows=60):
    """Read the train/test CSVs into DataFrames.

    :param nrows: number of leading rows to keep from each file.  Defaults to
        60 (the sample size that was previously hard-coded); pass ``None`` to
        load the full dataset.
    :returns: tuple ``(train, test)`` of DataFrames.
    """
    train = pandas.read_csv(TRAIN_FILE)
    test = pandas.read_csv(TEST_FILE)
    if nrows is not None:
        train = train[:nrows]
        test = test[:nrows]
    return train, test
def rescale_sale_price(train):
    """Replace SalePrice with log(1 + SalePrice), in place, and return the frame."""
    log_prices = numpy.log1p(train["SalePrice"])
    train["SalePrice"] = log_prices
    return train
# Remove outliers
def remove_outliers(train):
    """Drop two known outlier groups, in place, then reset the index.

    The drops are performed sequentially (second mask computed after the first
    drop), matching the original row-removal order.
    """
    low_quality_high_price = train[(train['OverallQual'] < 5) & (train['SalePrice'] > 200000)].index
    train.drop(low_quality_high_price, inplace=True)
    huge_area_low_price = train[(train['GrLivArea'] > 4500) & (train['SalePrice'] < 300000)].index
    train.drop(huge_area_low_price, inplace=True)
    train.reset_index(drop=True, inplace=True)
    return train
def split_features(train, test):
    """Separate labels from features and stack train+test features together so
    the feature-transformation pipeline can run over the whole dataset at once.

    :returns: tuple ``(all_features, train_labels)``.
    """
    train_labels = train['SalePrice'].reset_index(drop=True)
    train_features = train.drop(['SalePrice'], axis=1)
    test_features = test
    all_features = pandas.concat([train_features, test_features]).reset_index(drop=True)
    # Some of the non-numeric predictors are stored as numbers; convert them to
    # strings so they are treated as categoricals downstream.
    for col in ('MSSubClass',):
        all_features[col] = all_features[col].apply(str)
    for col in ('YrSold', 'MoSold'):
        all_features[col] = all_features[col].astype(str)
    all_features = handle_missing(all_features)
    return all_features, train_labels
# determine the threshold for missing values
def percent_missing(df):
    """Return ``{column: percent of missing values}`` for `df`, rounded to 2 decimals.

    Accepts anything ``pandas.DataFrame`` can wrap (DataFrame, dict of columns, ...).
    """
    data = pandas.DataFrame(df)
    # dict comprehension replaces the original index-based loop + dict.update
    return {col: round(data[col].isnull().mean() * 100, 2) for col in data.columns}
def handle_missing(features):
    """Fill every missing value in `features` using column-specific rules, then
    blanket-fill remaining categoricals with 'None' and remaining numerics with 0.

    Mutates and returns `features`.
    """
    # the data description states that NA refers to typical ('Typ') values
    features['Functional'] = features['Functional'].fillna('Typ')
    # these two have well-known modes, filled with literals
    features['Electrical'] = features['Electrical'].fillna("SBrkr")
    features['KitchenQual'] = features['KitchenQual'].fillna("TA")
    # fill each of these with its own mode
    for col in ('Exterior1st', 'Exterior2nd', 'SaleType'):
        features[col] = features[col].fillna(features[col].mode()[0])
    # zoning correlates with dwelling class, so fill with the per-class mode
    features['MSZoning'] = features.groupby('MSSubClass')['MSZoning'].transform(
        lambda x: x.fillna(x.mode()[0]))
    # the data description states that NA refers to "No Pool"
    features["PoolQC"] = features["PoolQC"].fillna("None")
    # no garage => no cars/area/year in garage
    for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
        features[col] = features[col].fillna(0)
    for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
        features[col] = features[col].fillna('None')
    # NaN in these categorical basement features means there's no basement
    for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
        features[col] = features[col].fillna('None')
    # fill LotFrontage with the median of the same neighborhood
    features['LotFrontage'] = features.groupby('Neighborhood')['LotFrontage'].transform(
        lambda x: x.fillna(x.median()))
    # no particular intuition for the remaining categoricals: fill with 'None'
    object_cols = [c for c in features.columns if features[c].dtype == object]
    features.update(features[object_cols].fillna('None'))
    # ...and the remaining numerics with 0
    numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    numeric_cols = [c for c in features.columns if features[c].dtype in numeric_dtypes]
    features.update(features[numeric_cols].fillna(0))
    return features
def normalize_numeric_features(all_features):
    """Normalize skewed numeric columns and engineer derived features.

    Steps: Box-Cox-transform numeric columns with skew > 0.5; add indicator and
    aggregate columns; replace non-positive values of selected area columns with
    fixed positive fill-ins (so later log transforms are well-defined); append
    log- and squared versions of chosen columns via the sibling `logs`/`squares`
    helpers; finally one-hot encode categoricals and drop duplicate column names.

    :param all_features: combined train+test feature DataFrame (mutated).
    :returns: the transformed, dummy-encoded DataFrame.
    """
    # Fetch all numeric features
    numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    numeric = [col for col in all_features.columns if all_features[col].dtype in numeric_dtypes]
    # Find skewed numerical features
    skew_features = all_features[numeric].apply(lambda x: skew(x)).sort_values(ascending=False)
    high_skew = skew_features[skew_features > 0.5]
    skew_index = high_skew.index
    print("There are {} numerical features with Skew > 0.5 :".format(high_skew.shape[0]))
    # Normalize skewed features.  (An unused 'skewness' DataFrame that was built
    # here previously has been removed.)
    for i in skew_index:
        all_features[i] = boxcox1p(all_features[i], boxcox_normmax(all_features[i] + 1))
    # Indicator features (note: the Has* flags mark a *zero* value of the column)
    all_features['BsmtFinType1_Unf'] = 1 * (all_features['BsmtFinType1'] == 'Unf')
    all_features['HasWoodDeck'] = (all_features['WoodDeckSF'] == 0) * 1
    all_features['HasOpenPorch'] = (all_features['OpenPorchSF'] == 0) * 1
    all_features['HasEnclosedPorch'] = (all_features['EnclosedPorch'] == 0) * 1
    all_features['Has3SsnPorch'] = (all_features['3SsnPorch'] == 0) * 1
    all_features['HasScreenPorch'] = (all_features['ScreenPorch'] == 0) * 1
    all_features['YearsSinceRemodel'] = all_features['YrSold'].astype(int) - all_features['YearRemodAdd'].astype(int)
    all_features['Total_Home_Quality'] = all_features['OverallQual'] + all_features['OverallCond']
    all_features = all_features.drop(['Utilities', 'Street', 'PoolQC', ], axis=1)
    # Aggregate square-footage / bathroom / porch features
    all_features['TotalSF'] = all_features['TotalBsmtSF'] + all_features['1stFlrSF'] + all_features['2ndFlrSF']
    all_features['YrBltAndRemod'] = all_features['YearBuilt'] + all_features['YearRemodAdd']
    all_features['Total_sqr_footage'] = (all_features['BsmtFinSF1'] + all_features['BsmtFinSF2'] +
                                         all_features['1stFlrSF'] + all_features['2ndFlrSF'])
    all_features['Total_Bathrooms'] = (all_features['FullBath'] + (0.5 * all_features['HalfBath']) +
                                       all_features['BsmtFullBath'] + (0.5 * all_features['BsmtHalfBath']))
    all_features['Total_porch_sf'] = (all_features['OpenPorchSF'] + all_features['3SsnPorch'] +
                                      all_features['EnclosedPorch'] + all_features['ScreenPorch'] +
                                      all_features['WoodDeckSF'])
    # Replace non-positive values with fixed positive fill-ins
    all_features['TotalBsmtSF'] = all_features['TotalBsmtSF'].apply(lambda x: numpy.exp(6) if x <= 0.0 else x)
    all_features['2ndFlrSF'] = all_features['2ndFlrSF'].apply(lambda x: numpy.exp(6.5) if x <= 0.0 else x)
    all_features['GarageArea'] = all_features['GarageArea'].apply(lambda x: numpy.exp(6) if x <= 0.0 else x)
    all_features['GarageCars'] = all_features['GarageCars'].apply(lambda x: 0 if x <= 0.0 else x)
    all_features['LotFrontage'] = all_features['LotFrontage'].apply(lambda x: numpy.exp(4.2) if x <= 0.0 else x)
    all_features['MasVnrArea'] = all_features['MasVnrArea'].apply(lambda x: numpy.exp(4) if x <= 0.0 else x)
    all_features['BsmtFinSF1'] = all_features['BsmtFinSF1'].apply(lambda x: numpy.exp(6.5) if x <= 0.0 else x)
    # "has feature" flags (1 when the area/count is positive)
    all_features['haspool'] = all_features['PoolArea'].apply(lambda x: 1 if x > 0 else 0)
    all_features['has2ndfloor'] = all_features['2ndFlrSF'].apply(lambda x: 1 if x > 0 else 0)
    all_features['hasgarage'] = all_features['GarageArea'].apply(lambda x: 1 if x > 0 else 0)
    all_features['hasbsmt'] = all_features['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)
    all_features['hasfireplace'] = all_features['Fireplaces'].apply(lambda x: 1 if x > 0 else 0)
    log_features = ['LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF',
                    'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'LowQualFinSF', 'GrLivArea',
                    'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr',
                    'TotRmsAbvGrd', 'Fireplaces', 'GarageCars', 'GarageArea', 'WoodDeckSF', 'OpenPorchSF',
                    'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'MiscVal', 'YearRemodAdd', 'TotalSF']
    all_features = logs(all_features, log_features)
    squared_features = ['YearRemodAdd', 'LotFrontage_log',
                        'TotalBsmtSF_log', '1stFlrSF_log', '2ndFlrSF_log', 'GrLivArea_log',
                        'GarageCars_log', 'GarageArea_log']
    all_features = squares(all_features, squared_features)
    all_features = pandas.get_dummies(all_features).reset_index(drop=True)
    # Remove any duplicated column names
    all_features = all_features.loc[:, ~all_features.columns.duplicated()]
    return all_features
def logs(res, ls):
    """Return a copy of `res` with a ``<col>_log`` column (``log(1.01 + col)``)
    appended, in order, for every column name in `ls`.

    The input frame is not modified.  (Replaces a fragile pattern that appended
    a temporary 'newcol' column and then renamed it by mutating
    ``res.columns.values`` in place.)
    """
    for col in ls:
        res = res.assign(**{col + '_log': numpy.log(1.01 + res[col])})
    return res
def squares(res, ls):
    """Return a copy of `res` with a ``<col>_sq`` column (the column squared)
    appended, in order, for every column name in `ls`.

    The input frame is not modified.  (Replaces a fragile pattern that appended
    a temporary 'newcol' column and then renamed it by mutating
    ``res.columns.values`` in place.)
    """
    for col in ls:
        res = res.assign(**{col + '_sq': res[col] * res[col]})
    return res
def create_x_sets(all_features, train_labels):
    """Split the combined feature frame back into train (X) and test (X_test)
    portions, using the number of labels as the boundary row.

    NOTE(review): the original also built a list of numeric column names here
    but never used it; that dead code has been removed.
    """
    X = all_features.iloc[:len(train_labels), :]
    X_test = all_features.iloc[len(train_labels):, :]
    return X, X_test
###############
# Train model #
###############
# Define error metrics
def rmsle(y, y_pred):
    """Root mean squared error of predictions (labels are already log-scaled
    upstream, hence the 'rmsle' name)."""
    mse = mean_squared_error(y, y_pred)
    return numpy.sqrt(mse)
def cv_rmse(model, X, train_labels):
    """Return the 12-fold cross-validated RMSE scores of `model` on (X, train_labels)."""
    folds = KFold(n_splits=12, random_state=42, shuffle=True)
    neg_mse = cross_val_score(model, X, train_labels,
                              scoring="neg_mean_squared_error", cv=folds)
    return numpy.sqrt(-neg_mse)
def train_models(X, train_labels):
# Light Gradient Boosting Regressor
# lightgbm = LGBMRegressor(objective='regression',
# num_leaves=6,
# learning_rate=0.01,
# n_estimators=7000,
# max_bin=200,
# bagging_fraction=0.8,
# bagging_freq=4,
# bagging_seed=8,
# feature_fraction=0.2,
# feature_fraction_seed=8,
# min_sum_hessian_in_leaf = 11,
# verbose=-1,
# random_state=42)
# XGBoost Regressor
xgboost = XGBRegressor(learning_rate=0.01,
n_estimators=6000,
max_depth=4,
min_child_weight=0,
gamma=0.6,
subsample=0.7,
colsample_bytree=0.7,
objective='reg:linear',
nthread=-1,
scale_pos_weight=1,
seed=27,
reg_alpha=0.00006,
random_state=42)
# Ridge Regressor
ridge_alphas = [1e-15, 1e-10, 1e-8, 9e-4, 7e-4, 5e-4, 3e-4, 1e-4, 1e-3, 5e-2, 1e-2, 0.1, 0.3, 1, 3, 5, 10, 15, 18, 20,
30, 50, 75, 100]
kfold = KFold(n_splits=12, random_state=42, shuffle=True)
ridge = make_pipeline(RobustScaler(), RidgeCV(alphas=ridge_alphas, cv=kfold))
# Support Vector Regressor
svr = make_pipeline(RobustScaler(), SVR(C=20, epsilon=0.008, gamma=0.0003))
# Gradient Boosting Regressor
gbr = GradientBoostingRegressor(n_estimators=6000,
learning_rate=0.01,
max_depth=4,
max_features='sqrt',
min_samples_leaf=15,
min_samples_split=10,
loss='huber',
random_state=42)
# Random Forest Regressor
rf = RandomForestRegressor(n_estimators=1200,
max_depth=15,
min_samples_split=5,
min_samples_leaf=5,
max_features=None,
oob_score=True,
random_state=42)
# Stack up all the models above, optimized using xgboost
stack_gen = StackingCVRegressor(regressors=(xgboost, svr, ridge, gbr, rf),
meta_regressor=xgboost,
use_features_in_secondary=True)
scores = {}
# score = cv_rmse(lightgbm)
# print("lightgbm: {:.4f} ({:.4f})".format(score.mean(), score.std()))
# scores['lgb'] = (score.mean(), score.std())
score = cv_rmse(xgboost, X, train_labels)
print("xgboost: {:.4f} ({:.4f})".format(score.mean(), score.std()))
scores['xgb'] = (score.mean(), score.std())
score = cv_rmse(svr, X, train_labels)
print("SVR: {:.4f} ({:.4f})".format(score.mean(), score.std()))
scores['svr'] = (score.mean(), score.std())
score = cv_rmse(ridge, X, train_labels)
print("ridge: {:.4f} ({:.4f})".format(score.mean(), score.std()))
scores['ridge'] = (score.mean(), score.std())
score = cv_rmse(rf, X, train_labels)
print("rf: {:.4f} ({:.4f})".format(score.mean(), score.std()))
scores['rf'] = (score.mean(), score.std())
score = cv_rmse(gbr, X, train_labels)
print("gbr: {:.4f} ({:.4f})".format(score.mean(), score.std()))
scores['gbr'] = (score.mean(), score.std())
print('stack_gen')
stack_gen_model = stack_gen.fit(numpy.array(X), numpy.array(train_labels))
# print('lightgbm')
# lgb_model_full_data = lightgbm.fit(X, train_labels)
print('xgboost')
xgb_model_full_data = xgboost.fit(X, train_labels)
| |
_, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 3
expected = [{'coordinates': [-122.0, 37.5], 'type': 'Point'},
{'coordinates': [-121.8, 37.7], 'type': 'Point'},
{'coordinates': [-121.6, 37.9], 'type': 'Point'}]
for r in records:
assert r['loc'].unwrap() in expected
def test_geospatial_dumps_positive(self):
"""
Perform a positive dumps. Verify using str
"""
geo_object = get_geo_object()
geojson_str = geo_object.dumps()
assert isinstance(geojson_str, str)
obj = aerospike.geojson(geojson_str)
assert obj.unwrap() == geo_object.unwrap()
def test_geojson_str(self):
"""
verify that str representation of geojson object is correct
"""
geo_object = get_geo_object()
geojson_str = str(geo_object)
assert isinstance(geojson_str, str)
obj = aerospike.geojson(geojson_str)
assert obj.unwrap() == geo_object.unwrap()
def test_geospatial_repr_positive(self):
"""
Perform a positive repr. Verify using eval()
"""
geo_object = get_geo_object()
geojson_str = eval(repr(geo_object))
assert isinstance(geojson_str, str)
obj = aerospike.geojson(geojson_str)
assert obj.unwrap() == geo_object.unwrap()
def test_geospatial_put_get_positive_with_geodata(self):
"""
Perform a get and put with multiple bins including geospatial bin
using geodata method
"""
key = ('test', 'demo', 'single_geo_put')
geo_object_single = aerospike.geodata(
{"type": "Point", "coordinates": [42.34, 58.62]})
geo_object_dict = aerospike.geodata(
{"type": "Point", "coordinates": [56.34, 69.62]})
self.as_connection.put(key, {
"loc": geo_object_single,
"int_bin": 2,
"string_bin": "str",
"dict_bin": {
"a": 1, "b": 2,
"geo": geo_object_dict
}
})
key, _, bins = self.as_connection.get(key)
expected = {'loc': {'coordinates': [42.34, 58.62], 'type': 'Point'},
"int_bin": 2, "string_bin": "str",
"dict_bin": {"a": 1, "b": 2,
"geo": {'coordinates': [56.34, 69.62], 'type':
'Point'}}}
for b in bins:
assert b in expected
self.as_connection.remove(key)
def test_geospatial_put_get_positive_with_geojson(self):
"""
Perform a get and put with multiple bins including geospatial bin
using geodata method
"""
key = ('test', 'demo', 'single_geo_put')
geo_object_single = aerospike.geojson(
'{"type": "Point", "coordinates": [42.34, 58.62] }')
geo_object_dict = aerospike.geojson(
'{"type": "Point", "coordinates": [56.34, 69.62] }')
self.as_connection.put(key, {"loc": geo_object_single, "int_bin": 2,
"string_bin": "str",
"dict_bin": {"a": 1, "b": 2, "geo":
geo_object_dict}})
key, _, bins = self.as_connection.get(key)
expected = {'loc': {'coordinates': [42.34, 58.62], 'type': 'Point'},
"int_bin": 2, "string_bin": "str",
"dict_bin": {"a": 1, "b": 2,
"geo": {'coordinates': [56.34, 69.62], 'type':
'Point'}}}
for b in bins:
assert b in expected
self.as_connection.remove(key)
def test_geospatial_positive_query_with_geodata(self):
"""
Perform a positive geospatial query for a polygon with geodata
"""
records = []
query = self.as_connection.query("test", "demo")
geo_object2 = aerospike.geodata({"type": "Polygon",
"coordinates": [[
[-122.500000, 37.000000],
[-121.000000, 37.000000],
[-121.000000, 38.080000],
[-122.500000, 38.080000],
[-122.500000, 37.000000]]]})
query.where(p.geo_within_geojson_region("loc", geo_object2.dumps()))
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 3
expected = [{'coordinates': [-122.0, 37.5], 'type': 'Point'},
{'coordinates': [-121.8, 37.7], 'type': 'Point'},
{'coordinates': [-121.6, 37.9], 'type': 'Point'}]
for r in records:
assert r['loc'].unwrap() in expected
def test_geospatial_positive_query_with_geojson(self):
"""
Perform a positive geospatial query for a polygon with geojson
"""
records = []
query = self.as_connection.query("test", "demo")
geo_object2 = aerospike.geojson(
'{"type": "Polygon", "coordinates": [[[-122.500000, 37.000000], \
[-121.000000, 37.000000], [-121.000000, 38.080000],\
[-122.500000, 38.080000], [-122.500000, 37.000000]]]}')
query.where(p.geo_within_geojson_region("loc", geo_object2.dumps()))
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 3
expected = [{'coordinates': [-122.0, 37.5], 'type': 'Point'},
{'coordinates': [-121.8, 37.7], 'type': 'Point'},
{'coordinates': [-121.6, 37.9], 'type': 'Point'}]
for r in records:
assert r['loc'].unwrap() in expected
def test_geospatial_2dindex_positive(self):
"""
Perform a positive 2d index creation
"""
try:
status = self.as_connection.index_remove('test', 'loc_index')
time.sleep(2)
except:
pass
status = self.as_connection.index_geo2dsphere_create(
"test", "demo", "loc", "loc_index")
assert status == 0
def test_geospatial_2dindex_positive_with_policy(self):
"""
Perform a positive 2d index creation with policy
"""
try:
status = self.as_connection.index_remove('test', 'loc_index')
time.sleep(2)
except:
pass
status = self.as_connection.index_geo2dsphere_create(
"test", "demo", "loc", "loc_index", {"timeout": 2000})
assert status == 0
def test_geospatial_positive_query_with_point(self):
"""
Perform a positive geospatial query for a point
"""
records = []
query = self.as_connection.query("test", "demo")
geo_object2 = aerospike.GeoJSON({"type": "Point", "coordinates":
[-121.700000, 37.200000]})
query.where(
p.geo_contains_geojson_point("loc_polygon", geo_object2.dumps()))
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 1
expected = [{'coordinates': [[[-122.500000, 37.000000],
[-121.000000, 37.000000],
[-121.000000, 38.080000],
[-122.500000, 38.080000],
[-122.500000, 37.000000]]],
'type': 'Polygon'}]
for r in records:
assert r['loc_polygon'].unwrap() in expected
def test_geospatial_positive_query_with_point_outside_polygon(self):
"""
Perform a positive geospatial query for a point outside polygon
"""
records = []
query = self.as_connection.query("test", "demo")
geo_object2 = aerospike.GeoJSON({"type": "Point", "coordinates":
[-123.700000, 37.200000]})
query.where(
p.geo_contains_geojson_point("loc_polygon", geo_object2.dumps()))
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 0
def test_geospatial_positive_query_with_point_in_aerocircle(self):
"""
Perform a positive geospatial query for a point in aerocircle
"""
if TestGeospatial.skip_old_server is True:
pytest.skip(
"Server does not support apply on AeroCircle for GeoJSON")
records = []
query = self.as_connection.query("test", "demo")
geo_object2 = aerospike.GeoJSON({"type": "Point", "coordinates":
[-122.000000, 37.000000]})
query.where(
p.geo_contains_geojson_point("loc_circle", geo_object2.dumps()))
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 1
expected = [
{'coordinates': [[-122.0, 37.0], 250.2], 'type': 'AeroCircle'}]
for r in records:
assert r['loc_circle'].unwrap() in expected
def test_geospatial_positive_query_with_point_in_aerocircle_int(self):
"""
Perform a positive geospatial query for a point in aerocircle
"""
if TestGeospatial.skip_old_server is True:
pytest.skip(
"Server does not support apply on AeroCircle for GeoJSON")
records = []
query = self.as_connection.query("test", "demo")
geo_object2 = aerospike.GeoJSON({"type": "Point", "coordinates":
[-122, 37]})
query.where(
p.geo_contains_geojson_point("loc_circle", geo_object2.dumps()))
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 1
expected = [
{'coordinates': [[-122.0, 37.0], 250.2], 'type': 'AeroCircle'}]
for r in records:
assert r['loc_circle'].unwrap() in expected
def test_geospatial_positive_query_with_point_outside_aerocircle(self):
"""
Perform a positive geospatial query for a point in aerocircle
"""
if TestGeospatial.skip_old_server is True:
pytest.skip(
"Server does not support apply on AeroCircle for GeoJSON")
records = []
query = self.as_connection.query("test", "demo")
geo_object2 = aerospike.GeoJSON({"type": "Point", "coordinates":
[-122.0, 48.0]})
query.where(
p.geo_contains_geojson_point("loc_circle", geo_object2.dumps()))
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 0
def test_geospatial_positive_query_with_point_helper_method(self):
"""
Perform a positive geospatial query for a point with helper method
"""
records = []
query = self.as_connection.query("test", "demo")
query.where(p.geo_contains_point("loc_polygon", -121.7, 37.2))
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 1
expected = [{'coordinates': [[[-122.500000, 37.000000],
[-121.000000, 37.000000],
[-121.000000, 38.080000],
[-122.500000, 38.080000],
[-122.500000, 37.000000]]],
'type': 'Polygon'}]
for r in records:
assert r['loc_polygon'].unwrap() in expected
@pytest.mark.parametrize(
"bin_name, idx_type",
(
('geo_list', aerospike.INDEX_TYPE_LIST),
('geo_map_keys', aerospike.INDEX_TYPE_MAPKEYS),
('geo_map_vals', aerospike.INDEX_TYPE_MAPVALUES)
)
)
def test_geospatial_within_radius_pred(self, bin_name, idx_type):
records = []
query = self.as_connection.query("test", "demo")
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
predicate = p.geo_within_radius(
bin_name, -122.0, 37.5, 250.2, idx_type)
query.where(predicate)
query.foreach(callback)
assert len(records) == 1
@pytest.mark.parametrize(
"bin_name, idx_type",
(
('geo_list', aerospike.INDEX_TYPE_LIST),
('geo_map_keys', aerospike.INDEX_TYPE_MAPKEYS),
('geo_map_vals', aerospike.INDEX_TYPE_MAPVALUES)
)
)
def test_geospatial_within_geojson_region_pred(self, bin_name, idx_type):
records = []
query = self.as_connection.query("test", "demo")
geo_object2 = aerospike.geodata({"type": "Polygon",
"coordinates": [[
[-122.500000, 37.000000],
[-121.000000, 37.000000],
[-121.000000, 38.080000],
[-122.500000, 38.080000],
[-122.500000, 37.000000]]]})
predicate = p.geo_within_geojson_region(
bin_name, geo_object2.dumps(), idx_type)
query.where(predicate)
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.foreach(callback)
assert len(records) == 3
def test_store_multipolygon(self):
polygons = [
[[
[-124.500000, 37.000000],
[-125.000000, 37.000000],
[-121.000000, 38.080000],
[-122.500000, 38.080000],
[-124.500000, 37.000000]
]],
[[
[-24.500000, 37.000000],
[-25.000000, 37.000000],
[-21.000000, 38.080000],
[-22.500000, 38.080000],
[-24.500000, 37.000000]
]]
]
geo_object = aerospike.GeoJSON(
{
"type": "MultiPolygon",
"coordinates": polygons
}
)
key = ('test', 'demo', 'multipoly')
self.as_connection.put(key, {'multi': geo_object})
_, _, bins = self.as_connection.get(key)
geo_returned = bins['multi'].unwrap()
assert geo_returned['type'] == 'MultiPolygon'
assert geo_returned['coordinates'] == polygons
self.as_connection.remove(key)
@pytest.mark.parametrize(
"bin_name, idx_type",
(
('geo_loc_list', aerospike.INDEX_TYPE_LIST),
('geo_loc_mk', aerospike.INDEX_TYPE_MAPKEYS),
('geo_loc_mv', aerospike.INDEX_TYPE_MAPVALUES)
)
)
def test_geospatial_contains_point_pred(self, bin_name, idx_type):
records = []
query = self.as_connection.query("test", "demo")
lat = -122.45
lon = 37.5
predicate = p.geo_contains_point(
bin_name, lat, lon, idx_type)
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.where(predicate)
query.foreach(callback)
assert len(records) == 1
@pytest.mark.parametrize(
"bin_name, idx_type",
(
('geo_loc_list', aerospike.INDEX_TYPE_LIST),
('geo_loc_mk', aerospike.INDEX_TYPE_MAPKEYS),
('geo_loc_mv', aerospike.INDEX_TYPE_MAPVALUES)
)
)
def test_geospatial_contains_json_point_pred(self, bin_name, idx_type):
records = []
query = self.as_connection.query("test", "demo")
lat = -122.45
lon = 37.5
point_list = [lat, lon]
point = aerospike.GeoJSON({'type': "Point",
'coordinates': point_list})
predicate = p.geo_contains_geojson_point(
bin_name, point.dumps(), idx_type)
def callback(input_tuple):
_, _, record = input_tuple
records.append(record)
query.where(predicate)
query.foreach(callback)
assert len(records) == 1
def test_geospatial_object_not_dict_or_string(self):
"""
The geospatial object is not a dictionary or string
"""
with pytest.raises(e.ParamError) as err_info:
aerospike.GeoJSON(1)
err_code = err_info.value.code
assert err_code == AerospikeStatus.AEROSPIKE_ERR_PARAM
def test_geospatial_object_non_json_serializable_string(self):
"""
The geospatial object is not a json serializable string
"""
with pytest.raises(e.ClientError) as err_info:
aerospike.GeoJSON("abc")
err_code = err_info.value.code
assert err_code == AerospikeStatus.AEROSPIKE_ERR_CLIENT
def test_geospatial_object_wrap_non_dict(self):
"""
The geospatial object provided to wrap() is not a dictionary
"""
geo_object = get_geo_object()
with pytest.raises(e.ParamError) as err_info:
geo_object.wrap("abc")
err_code = err_info.value.code
assert err_code == AerospikeStatus.AEROSPIKE_ERR_PARAM
def test_geospatial_object_loads_non_dict(self):
"""
The geospatial object provided to loads() is not a dictionary
"""
geo_object = get_geo_object()
with pytest.raises(e.ClientError) as err_info:
geo_object.loads('{"abc"}')
err_code = | |
"""tictac.py
This module defines objects for use in experimenting with reinforcement learning
as well as hand-coded bots for playing tic-tac-toe.
If you want to change what kind of player your bot is
playing against, you need to change some lines
at the very end of the program -- talk to me if you're not
sure what to do.
"""
from collections import OrderedDict
import pygame
from pygame.locals import *
class Agent():
    """Base class for tic-tac-toe players.

    Board positions are a 10-element list of '-', 'X', 'O'; index 0 is an
    unused placeholder so squares are numbered 1-9.
    """

    def __init__(self, piece="X", mover="human", playbook=None):
        self.piece = piece
        self.move_history = {}
        self.human = mover == "human"
        # Avoid a shared mutable default for the playbook dict.
        if not playbook:
            self.playbook = {}
        else:
            self.playbook = playbook

    def random_move(self, positions):
        """Return a random legal move: an index 1-9 whose square is empty.

        Skips the first open index, which is assumed to be the dummy
        slot 0 (always '-') -- TODO confirm against the game loop.
        """
        from random import choice
        not_filled = [i for i, x in enumerate(positions) if x == "-"]
        return choice(not_filled[1:])

    def quit(self):
        """End the game by raising; callers are expected to catch this.

        Fix: the original had unreachable code after the raise
        (import sys / print / sys.exit) which could never run; removed.
        """
        raise Exception("User quit")

    def update(self, win=None):
        """Dummy update method for non-learning agents"""
        pass
class RandomAgent(Agent):
    """Agent that always plays a uniformly random legal move."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Bind the base class's random chooser as this agent's mover.
        self.mover = super().random_move
class Human(Agent):
    """Terminal-input human player."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def mover(self, positions):
        """Prompt for a move; entering 'Q' resigns via quit().

        Returns the raw input string (the caller is responsible for
        validating/converting it).
        """
        filled = sum(1 if square != "-" else 0 for square in positions)
        move = input("Move #" + str(filled + 1) + ':')
        if move != "Q":
            return move
        self.quit()
        return 0
class Pyg(Agent):
    """Human player that selects moves by clicking on the pygame board.

    Expects ``self.board`` (a Board instance providing ``boardPos``) to be
    attached externally before ``mover`` is called -- TODO confirm against
    the game-loop code.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def mover(self, positions):
        """Block until the user clicks a square; return its number (1-9).

        A window-close (QUIT) event exits the loop and implicitly returns
        None. Note: there is no check that the clicked square is empty --
        the caller is assumed to reject or re-prompt on illegal moves.

        Fixes: use ``==`` instead of ``is`` to compare event types
        (pygame event types are plain ints, so identity comparison is not
        guaranteed), and the large block of commented-out dead code was
        removed.
        """
        running = 1
        while (running == 1):
            for event in pygame.event.get():
                if event.type == QUIT:
                    running = 0
                elif event.type == MOUSEBUTTONDOWN:
                    (mouseX, mouseY) = pygame.mouse.get_pos()
                    # Map the pixel coordinates to a (row, col) cell, then
                    # to the 1-based square number.
                    (row, col) = self.board.boardPos (mouseX, mouseY)
                    move = 3*row+col+1
                    return move
class Menace(Agent):
    """MENACE-style matchbox reinforcement learner.

    ``playbook`` maps a board-position tuple to an OrderedDict of
    {square-index: weight}. ``mover`` samples a move with probability
    proportional to the weights; ``update`` adjusts the weights after
    each game.
    """

    def __init__(self, **kwargs):
        super().__init__(mover="nonhuman", **kwargs)

    def position_frequency(self):
        """Computes frequency of each position seen in the playbook"""
        # Build [position, total weight] pairs, sorted most-seen first.
        self.frequency = [[pos, sum([self.playbook[pos][x] for x in self.playbook[pos]])]
                          for pos in self.playbook]
        self.frequency.sort(key=lambda x: x[1], reverse=True)

    def display_playbook(self, position):
        """Print a 3x3 grid: occupied squares show their piece; empty squares
        show the current playbook weight (or "1" if the position is unknown).
        """
        if position not in self.playbook:
            new_positions = ['-'] + \
                ["1" if piece == '-' \
                 else piece for i, piece in enumerate(position[1:])]
        else:
            # i+1 converts the 0-based enumeration over position[1:] back
            # to the 1-based square numbers used as playbook keys.
            new_positions = ['-'] + \
                [str(self.playbook[position][i+1]) \
                 if piece == '-' \
                 else piece for i, piece in enumerate(position[1:])]
        for i in range(3):
            print("\t".join(new_positions[1 + i * 3 : 1 + i * 3 + 3]))

    def mover(self, positions):
        """Move method for menace, allows the use of a playbook to select a move.
        """
        from random import choices
        self.display_playbook(tuple(positions))
        tup_positions = tuple(positions)
        if tup_positions not in self.playbook:
            # Unknown position: fall back to a uniformly random legal move.
            move = self.random_move(positions)
            return move
        else:
            # Weighted draw over the recorded move weights.
            move = choices(list(self.playbook[tup_positions].keys()),
                           weights = list(self.playbook[tup_positions].values()),
                           k=1)[0]
            return move

    def update(self, win):
        """Reinforcement update method for menace.

        ``win`` is falsy for a tie; otherwise ``win[1]`` is the winning
        piece. Ties add +1 to each played move, wins add +3, losses
        subtract 1 (known entries are never reduced below 1).

        NOTE(review): a winning move in a previously unseen position ends
        at weight 3 (init 1, then +=2) while an already-seen one gains +3;
        possibly intentional, possibly a bug -- confirm with the author.
        """
        if not win: # it was a tie
            for position in self.move_history:
                if position in self.playbook:
                    self.playbook[position][self.move_history[position]] += 1
                else:
                    # First time seeing this position: seed all legal moves
                    # with weight 1, then reinforce the move actually played.
                    not_filled = [i for i, x in enumerate(position) if x == "-"][1:]
                    self.playbook[position] = OrderedDict([[x, 1] for x in not_filled])
                    self.playbook[position][self.move_history[position]] += 1
        elif win[1] == self.piece: # it was a win, reinforce all the moves
            for position in self.move_history:
                if position in self.playbook:
                    self.playbook[position][self.move_history[position]] += 3
                else:
                    not_filled = [i for i, x in enumerate(position) if x == "-"][1:]
                    self.playbook[position] = OrderedDict([[x, 1] for x in not_filled])
                    self.playbook[position][self.move_history[position]] += 2
        elif win[1] != self.piece: # it was a loss, prune once
            for position in self.move_history:
                if position in self.playbook:
                    if self.playbook[position][self.move_history[position]] > 1:
                        self.playbook[position][self.move_history[position]] -= 1
                else:
                    # Unseen losing position: seed with 2 so the played move
                    # ends at 1 after the decrement below.
                    not_filled = [i for i, x in enumerate(position) if x == "-"][1:]
                    self.playbook[position] = OrderedDict([[x, 2] for x in not_filled])
                    self.playbook[position][self.move_history[position]] -= 1
class MyAgent(Agent):
    """Student-implemented agent scaffold; ``mover`` is the part to fill in."""

    def __init__(self, **kwargs):
        super().__init__(mover="nonhuman", **kwargs)

    def mover(self, positions):
        """This is the function for you to work on -- the last line
        of the function should be a "return" that sends back a valid
        move number.  The body of the function should use the tup_positions
        of pieces to decide where to move next.
        Some things to think about:
        1. If you select a space that is already filled or isn't
        in the numbers 1-9, your program will crash because it
        will keep making the same bad choice over and over again.
        2. Think about how to simplify the problem with your initial
        move choices so it is easier to think about what moves
        your opponent might make.
        3. Please talk to Mark if you're getting stuck.
        """
        not_filled = [i for i, x in enumerate(positions) if x == "-"]
        # Move counter (1 = first move); currently unused by the
        # placeholder strategy below -- a hint for students.
        turn = 11-len (not_filled)
        # Placeholder: take the first open square (index 0 is the dummy slot).
        return not_filled[1]

    def update(self, win):
        """Reinforce every move played this game by +1, regardless of outcome."""
        for position in self.move_history:
            if position in self.playbook:
                self.playbook[position][self.move_history[position]] += 1
            else:
                # Seed unseen positions with weight 0 for all legal moves,
                # then credit the move actually played.
                not_filled = [i for i, x in enumerate(position) if x == "-"][1:]
                self.playbook[position] = OrderedDict([[x, 0] for x in not_filled])
                self.playbook[position][self.move_history[position]] += 1
class Board:
"""Board handles the display of the tic-tac-toe board state.
"""
    def __init__(self, mode="text_display"):
        """Select the display backend; pygame is initialised only when needed.

        NOTE(review): the indentation of the trailing ``previousPositions``
        assignment was ambiguous in the source; placed at method level here,
        which is safe since only ``pyg_display`` reads it -- confirm.
        """
        if mode == "text_display":
            self.display = self.text_display
        else:
            self.display = self.pyg_display
            pygame.init()
            self.ttt = pygame.display.set_mode ((300, 325))
            pygame.display.set_caption ('Tic-Tac-Toe')
            self.board = self.initBoard ()
            self.showBoard()
        # Last-rendered state; used by pyg_display to detect the new move.
        self.previousPositions = ['-']*10
    def text_display(self, positions, humanAgent=True):
        """Clear the terminal and print the 3x3 grid (squares 1-9).

        ``humanAgent`` is currently unused. Relies on the POSIX ``clear``
        command, so this is not portable to Windows.
        """
        from os import system
        system("clear")
        for i in range(3):
            print("\t".join(positions[1 + i * 3 : 1 + i * 3 + 3]))
    def pyg_display(self, positions):
        """Draw any newly placed piece on the pygame board.

        Diffs ``positions`` against the previously rendered state; squares
        are numbered 1-9, hence the ``- 1`` when converting an index to
        row/column. Only the first changed square is drawn per call.
        """
        new_move = [(i, x) for i,x in enumerate(positions)
                    if x != self.previousPositions[i]]
        if len(new_move) != 0:
            boardRow = (new_move[0][0]-1) // 3
            boardCol = (new_move[0][0] - 1) % 3
            piece = new_move[0][1]
            # Remember what is now on screen (copy, not alias).
            self.previousPositions = positions[:]
            self.drawMove(self.board, boardRow, boardCol, piece)
            self.showBoard()
    def drawMove (self, board, boardRow, boardCol, Piece):
        """Draw an X or O on the board surface at (boardRow, boardCol).

        :param board: the game board surface
        :param boardRow: 0-based row in which to draw the piece
        :param boardCol: 0-based column in which to draw the piece
        :param Piece: 'X' or 'O'
        """
        # determine the center of the square (each square is 100px wide)
        centerX = ((boardCol) * 100) + 50
        centerY = ((boardRow) * 100) + 50
        # draw the appropriate piece
        if (Piece == 'O'):
            pygame.draw.circle (board, (0,0,0), (centerX, centerY), 44, 2)
        else:
            # Two crossing lines form the X.
            pygame.draw.line (board, (0,0,0), (centerX - 22, centerY - 22), \
                              (centerX + 22, centerY + 22), 2)
            pygame.draw.line (board, (0,0,0), (centerX + 22, centerY - 22), \
                              (centerX - 22, centerY + 22), 2)
        # mark the space as used
        #grid [boardRow][boardCol] = Piece
    def initBoard(self):
        """Create and return the background surface with the 3x3 grid drawn.

        Requires ``self.ttt`` (the pygame display surface) to exist already.
        """
        # set up the background surface
        background = pygame.Surface (self.ttt.get_size())
        background = background.convert()
        background.fill ((250, 250, 250))
        # draw the grid lines
        # vertical lines...
        pygame.draw.line (background, (0,0,0), (100, 0), (100, 300), 2)
        pygame.draw.line (background, (0,0,0), (200, 0), (200, 300), 2)
        # horizontal lines...
        pygame.draw.line (background, (0,0,0), (0, 100), (300, 100), 2)
        pygame.draw.line (background, (0,0,0), (0, 200), (300, 200), 2)
        # return the board
        return background
def drawStatus (self, board):
# draw the status (i.e., player turn, etc) at the bottom of the board
# ---------------------------------------------------------------
# board : the initialized game board surface where the status will
# be drawn
# gain access to global variables
#global XO, winner
# determine the status message
#if (winner is None):
# message = XO + "'s turn"
#else:
# message = winner + " won!"
message = "Choose a move"
# render the status message
font = pygame.font.Font(None, 24)
| |
{mac} route announced by other EVPN proxy {originator_id.value} with different VTEP: {event.nexthop}" )
WithdrawRoute( state, mac_vrf, cur['vtep'], mac, cur['ip'] )
cur['vtep'] = "tbd" # Mark as withdrawn
else:
logging.warning( "TODO: Compare/update mobility sequence number, even if same VTEP nexthop?" )
else:
logging.info( "Not multicast and no VNI -> ignoring" )
# Never remove EVPN VTEP from list, assume once EVPN = always EVPN
except Exception as ex:
tb_str = ''.join(traceback.format_tb(ex.__traceback__))
logging.error( f"Exception in best_path_change_handler: {ex} ~ {tb_str}" )
def peer_up_handler(router_id, remote_as):
    """BGP peer-up callback: log the event and lazily start one ARP
    listener green-thread per configured VXLAN interface (only once)."""
    logging.warning( f'Peer UP: {router_id} {remote_as}' )
    if state.params['vxlan_interfaces'] != [] and not hasattr(state, 'arp_threads'):
        logging.info( "Starting ARP listener thread(s)..." )
        state.arp_threads = {}
        for intf in state.params['vxlan_interfaces']:
            state.arp_threads[intf] = {
                'thread': hub.spawn( ARP_receiver_thread, state, intf, evpn_vteps )
            }
def peer_down_handler(router_id, remote_as):
    """BGP peer-down callback: just log the event."""
    logging.warning( f'Peer DOWN: {router_id} {remote_as}' )
# need to create socket on localhost on a non-default port, not port 179
# Need to connect from loopback IP, not 127.0.0.x
# Router ID is used as tunnel endpoint in BGP UPDATEs
# => Code updated to allow any tunnel endpoint IP
# Wait for gNMI socket to exist
# while not os.path.exists('/opt/srlinux/var/run/sr_gnmi_server'):
#   logging.info("Waiting for gNMI unix socket to be created...")
#   eventlet.sleep(1)
# During system startup, wait for netns to be created
while not os.path.exists('/var/run/netns/srbase-default'):
    logging.info("Waiting for srbase-default netns to be created...")
    eventlet.sleep(1)
logging.info("Starting BGP thread in srbase-default netns...")
# Requires root permissions
# Ryu modified to support net_ns parameter, needed for reconnections
# with netns.NetNS(nsname="srbase-default"):
logging.info("Starting BGPSpeaker in netns...")
# Listen on a non-privileged port inside the srbase-default netns; the
# loopback IP doubles as router ID (and thus VXLAN tunnel endpoint).
state.speaker = BGPSpeaker(bgp_server_hosts=[LOCAL_LOOPBACK],
                           bgp_server_port=1179,
                           net_ns="srbase-default", # custom addition
                           as_number=state.params['local_as'],
                           local_pref=state.params['local_preference'],
                           router_id=LOCAL_LOOPBACK,
                           best_path_change_handler=best_path_change_handler,
                           peer_up_handler=peer_up_handler,
                           peer_down_handler=peer_down_handler)
# Add any static VTEPs/VNIs, before starting ARP thread
for vni,mac_vrf in state.mac_vrfs.items():
    UpdateMACVRF( state, mac_vrf )
logging.info( f"Connecting to neighbor {NEIGHBOR}..." )
# TODO enable_four_octet_as_number=True, enable_enhanced_refresh=True
state.speaker.neighbor_add( NEIGHBOR,
                            remote_as=state.params['peer_as'],
                            local_as=state.params['local_as'],
                            enable_ipv4=False, enable_evpn=True,
                            connect_mode='active') # iBGP with SRL
# After connecting to BGP peer, start ARP thread (in different netns)
eventlet.sleep(10) # Wait for peer_up event using peer_up_handler
# hub.spawn( ARP_receiver_thread, speaker, params, evpn_vteps )
# Keep this green-thread alive; all real work happens in the BGP
# callbacks and the ARP listener threads.
while True:
    logging.info( "eventlet sleep loop..." )
    eventlet.sleep(30) # every 30s wake up
def AutoRouteDistinguisher( vtep_ip, mac_vrf ):
    """Build the RD as '<static VTEP IP>:<EVI>'.

    Uses the static VTEP's own IP -- the same RD it would advertise if it
    spoke EVPN itself -- which implies one local VRF per static VTEP.
    """
    evi = mac_vrf['evi']
    return f"{vtep_ip}:{evi}"
def AutoRouteTarget( state, mac_vrf ):
    """Build the RT as '<local AS>:<EVI>'."""
    local_as = state.params['local_as']
    return f"{local_as}:{mac_vrf['evi']}"
def Add_Static_VTEP( state, mac_vrf, remote_ip, dynamic=False ):
    """Provision a BGP VRF for a static (non-EVPN) VTEP and announce an
    EVPN multicast route on its behalf.

    :param state: global agent state (holds the BGP speaker and bgp_vrfs map)
    :param mac_vrf: mac-vrf config dict (uses 'name', 'evi' and 'vni')
    :param remote_ip: IP address of the static VTEP being proxied
    :param dynamic: True when the VTEP was auto-discovered (e.g. from ARP)
    :returns: True (always)
    """
    rd = AutoRouteDistinguisher( remote_ip, mac_vrf )
    if rd not in state.bgp_vrfs:
        rt = AutoRouteTarget(state,mac_vrf)
        logging.info(f"Add_Static_VTEP: Adding VRF...RD={rd} RT={rt}")
        state.speaker.vrf_add(route_dist=rd,import_rts=[rt],export_rts=[rt],route_family=RF_L2_EVPN)
        state.bgp_vrfs[ rd ] = remote_ip
    else:
        logging.info(f"Add_Static_VTEP: Assuming VRF for RD={rd} exists...")
    # Reflect this VTEP in telemetry state, stamped with the update time.
    js_path = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{remote_ip}"}}'
    # NOTE(review): datetime.now() is local time but the format string
    # appends a literal 'Z' (UTC designator) -- confirm intended.
    now_ts = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
    data = {
      'last_update' : { "value" : now_ts },
    }
    if dynamic:
        data['dynamic'] = { "value" : True }
    js_path2 = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{remote_ip}"}}.mac_vrf{{.name=="{mac_vrf["name"]}"}}'
    data2 = { 'evi': { 'value': mac_vrf['evi'] }, 'vni': { 'value': mac_vrf['vni'] } }
    Add_Telemetry( [(js_path, data),(js_path2,data2)] )
    logging.info("Adding EVPN multicast route...")
    #
    # For RD use the static VTEP's IP, just like it would do if it was
    # EVPN enabled itself. That way, any proxy will announce the same
    # route
    #
    AnnounceMulticastRoute( state, rd, remote_ip, mac_vrf['vni'] )
    return True
def Remove_Static_VTEP( state, mac_vrf, remote_ip, clear_macs=True ):
    """Withdraw the proxied EVPN routes for a static VTEP and delete its VRF.

    :param clear_macs: also forget the VTEP's entry in the mac-vrf's
        vxlan_vteps table
    :returns: False if no BGP VRF exists for the VTEP's RD, else True
    """
    rd = AutoRouteDistinguisher( remote_ip, mac_vrf )
    if rd not in state.bgp_vrfs:
        logging.warning( f"Remove_Static_VTEP: BGP MAC VRF does not exists: {rd}" )
        return False
    logging.info(f"Remove_Static_VTEP: Removing VRF...RD={rd}")
    # Deleting the VRF should withdraw all routes too? Doesn't look like it
    # -- so withdraw the multicast route explicitly first.
    WithdrawMulticastRoute(state,rd,remote_ip)
    state.speaker.vrf_del(route_dist=rd)
    # This isn't sufficient
    js_path = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{remote_ip}"}}'
    js_path2 = f'.vxlan_proxy.static_vtep{{.vtep_ip=="{remote_ip}"}}.mac_vrf{{.name=="{mac_vrf["name"]}"}}'
    Remove_Telemetry( [js_path,js_path2] )
    if clear_macs:
        del mac_vrf['vxlan_vteps'][ remote_ip ]
    del state.bgp_vrfs[ rd ]
    return True
def HandleTCPTimestamps( ipHeaders, tcpHeaders, ancdata ):
    """Log the kernel receive timestamp (SO_TIMESTAMP ancillary data)
    alongside the packet's TCP timestamp option values.

    NOTE(review): SO_TIMESTAMP delivers a struct timeval (seconds and
    *micro*seconds), yet the second field is stored in ``ts_ns``; the
    "iiii" unpack also assumes a specific native layout -- confirm on
    the target platform.
    """
    ts_sec = 0
    ts_ns = 0
    if ( len(ancdata)>0 ):
        for i in ancdata:
            logging.info(f'HandleTCPTimestamps ancdata: cmsg_level={i[0]}, cmsg_type={i[1]}, cmsg_data({len(i[2])})={i[2]})');
            if (i[0]!=socket.SOL_SOCKET or i[1]!=SO_TIMESTAMP): # Removed 'NS'
                continue
            tmp = (struct.unpack("iiii",i[2]))
            ts_sec = tmp[0]
            ts_ns = tmp[2]
            break
    # Collect (TSval, TSecr) pairs from the TCP timestamp option, if present.
    ts = [ (o.ts_val,o.ts_ecr) for o in tcpHeaders.option if o.kind == tcp.TCP_OPTION_KIND_TIMESTAMPS ]
    logging.info( f"HandleTCPTimestamps: {ts_sec}.{ts_ns}={ts} IP {ipHeaders.src}>{ipHeaders.dst}" )
def ARP_receiver_thread( state, vxlan_intf, evpn_vteps ):
logging.info( f"Starting ARP listener on interface={vxlan_intf} params {state.params}" )
# initialize BPF - load source code from filter-vxlan-arp.c
_self = state.arp_threads[vxlan_intf]
_self['bpf'] = bpf = BPF(src_file = "filter-vxlan-arp.c",debug = 0)
#load eBPF program http_filter of type SOCKET_FILTER into the kernel eBPF vm
#more info about eBPF program types
#http://man7.org/linux/man-pages/man2/bpf.2.html
function_arp_filter = bpf.load_func("vxlan_arp_filter", BPF.SOCKET_FILTER)
#create raw socket, bind it to interface
#attach bpf program to socket created
with netns.NetNS(nsname="srbase"):
BPF.attach_raw_socket(function_arp_filter, vxlan_intf)
socket_fd = function_arp_filter.sock
sock = socket.fromfd(socket_fd,socket.PF_PACKET,socket.SOCK_RAW,socket.IPPROTO_IP)
# sock.setsockopt(socket.SOL_SOCKET, SO_TIMESTAMP, 1) # Not NS
sock.setblocking(True)
# To make sendto work?
# sock.bind((vxlan_intf, 0x0800))
_self['socket'] = sock # Used for close()
try:
while 1:
packet_str = os.read(socket_fd,2048)
packet_bytearray = bytearray(packet_str)
try:
# or recvmmsg for multiple?
# raw_data, ancdata, flags, address = sock.recvmsg(65535, 1024)
# packet_bytearray = bytearray(raw_data)
pkt = packet.Packet( packet_bytearray )
#
# 6 layers:
# 0: ethernet
# 1: IP -> VTEP IP (other side, local VTEP)
# 2: UDP
# 3: VXLAN -> VNI
# 4: ethernet (inner)
# 5: ARP -> MAC, IP
#
for p in pkt:
logging.debug( f"ARP packet:{p.protocol_name}={p}" )
if p.protocol_name == 'vlan':
logging.debug( f'vlan id = {p.vid}' )
elif p.protocol_name == 'vxlan':
logging.info( f'vni = {p.vni}' )
_ip = pkt.get_protocol( ipv4.ipv4 )
#_tcp = pkt.get_protocol( tcp.tcp )
#if _tcp:
# HandleTCPTimestamps( _ip, _tcp, ancdata )
# continue
_vxlan = pkt.get_protocol( vxlan.vxlan )
_arp = pkt.get_protocol( arp.arp )
vni = _vxlan.vni
if vni not in state.mac_vrfs:
logging.info( f"VNI not enabled for proxy EVPN: {vni}" )
continue;
mac_vrf = state.mac_vrfs[ vni ]
# To compensate for lack of VXLAN flow hashing, we vary the src IP
# Correct it by removing the added entropy (IP ID) in 2nd octet
# if _arp.opcode == 24:
# digits = [ int(i) for i in _ip.src.split('.') ]
# digits[1] ^= _ip.identification % 256
# _ip.src = ".".join( map(str,digits) )
if _ip.src in evpn_vteps:
if (state.params['ecmp_path_probes'] and _ip.dst in evpn_vteps
and _arp.opcode==24): # Ignore regular responses
ReplyARPProbe( state, sock, pkt, _ip.src, _ip.dst, _arp.opcode, mac_vrf )
else:
logging.info( f"ARP({'req' if _arp.opcode==1 else 'res'}) from EVPN VTEP {_ip.src} -> ignoring" )
continue
elif _ip.dst in evpn_vteps: # typically == us, always? not when routing VXLAN to other VTEPs
static_vtep = _ip.src
mac = _arp.src_mac # Same field in both request and response packets
ip = _arp.src_ip
logging.info( f"ARP({'req' if _arp.opcode==1 else 'res'}) from static VTEP: {mac} {ip}" )
else:
logging.info( f"ARP packet:neither src={_ip.src} nor dst={_ip.dst} is EVPN vtep? {evpn_vteps}" )
continue;
# Check that the static VTEP is configured. Could dynamically add VTEPs
# upon discovery (but requires ARP snooping)
if static_vtep not in mac_vrf['vxlan_vteps']:
if not state.params[ "auto_discover_static_vteps" ]:
logging.info( f"VTEP {static_vtep} not configured in mac-vrf and auto-discovery disabled" )
continue
else:
logging.info( f"Dynamically adding auto-discovered VTEP {static_vtep}" )
Add_Static_VTEP( state, mac_vrf, static_vtep, dynamic=True )
mac_vrf['vxlan_vteps'][ static_vtep ] = "dynamic-from-arp"
# Announce EVPN route(s)
mobility_seq = None # First time: no attribute
if mac in mac_vrf['macs']:
cur = mac_vrf['macs'][ mac ]
logging.info( f"MAC {mac} already announced: {cur}, checking for MAC move" )
# TODO various cases: different IP, different VTEP, ...
if cur['vtep'] == static_vtep:
logging.info( f"VNI {vni}: MAC {mac} already announced with VTEP {static_vtep}" )
# If IP remains the same, do nothing
if cur['ip'] == ip:
continue
# Could also opt to keep both routes: MAC -> [ip],
# Spec says: "If there are multiple IP addresses associated with a MAC address,
# then multiple MAC/IP Advertisement routes MUST be generated, one for
# each IP address. For instance, this may be the case when there are
# both an IPv4 and an IPv6 address associated with the same MAC address
# for dual-IP-stack scenarios. When the IP address is dissociated with
# the MAC address, then the MAC/IP Advertisement route with that
# particular IP address MUST be withdrawn."
#
# For the purpose of this EVPN proxy | |
t: ErrorType) -> Type:
return t
    def visit_any(self, t: AnyType) -> Type:
        # Leaf type with no component types: translated to itself.
        return t

    def visit_void(self, t: Void) -> Type:
        return t

    def visit_none_type(self, t: NoneTyp) -> Type:
        return t

    def visit_uninhabited_type(self, t: UninhabitedType) -> Type:
        return t

    def visit_erased_type(self, t: ErasedType) -> Type:
        return t

    def visit_deleted_type(self, t: DeletedType) -> Type:
        return t
    def visit_instance(self, t: Instance) -> Type:
        # Rebuild the Instance with translated type arguments, keeping the
        # original type info and line number.
        return Instance(t.type, self.translate_types(t.args), t.line)
    def visit_type_var(self, t: TypeVarType) -> Type:
        # Left untouched by default; subclasses may override to substitute.
        return t

    def visit_partial_type(self, t: PartialType) -> Type:
        return t
    def visit_callable_type(self, t: CallableType) -> Type:
        # Translate argument and return types; type variables go through
        # the (default no-op) translate_variables hook.
        return t.copy_modified(arg_types=self.translate_types(t.arg_types),
                               ret_type=t.ret_type.accept(self),
                               variables=self.translate_variables(t.variables))
    def visit_tuple_type(self, t: TupleType) -> Type:
        # Translate item types; the fallback instance is translated too and
        # cast because accept() is declared to return a generic Type.
        return TupleType(self.translate_types(t.items),
                         cast(Any, t.fallback.accept(self)),
                         t.line)
    def visit_star_type(self, t: StarType) -> Type:
        # Translate the wrapped type, preserving the line number.
        return StarType(t.type.accept(self), t.line)

    def visit_union_type(self, t: UnionType) -> Type:
        return UnionType(self.translate_types(t.items), t.line)

    def visit_ellipsis_type(self, t: EllipsisType) -> Type:
        return t
def translate_types(self, types: List[Type]) -> List[Type]:
return [t.accept(self) for t in types]
    def translate_variables(self,
                            variables: List[TypeVarDef]) -> List[TypeVarDef]:
        # Default: leave type variable definitions untouched; subclasses
        # may override to rename or substitute them.
        return variables
def visit_overloaded(self, t: Overloaded) -> Type:
items = [] # type: List[CallableType]
for item in t.items():
new = item.accept(self)
if isinstance(new, CallableType):
items.append(new)
else:
raise RuntimeError('CallableType expectected, but got {}'.format(type(new)))
return Overloaded(items=items)
    def visit_type_type(self, t: TypeType) -> Type:
        # Translate the item type, preserving the line number.
        return TypeType(t.item.accept(self), line=t.line)
class TypeStrVisitor(TypeVisitor[str]):
    """Visitor for pretty-printing types into strings.

    This is mostly for debugging/testing.

    Do not preserve original formatting.

    Notes:
     - Represent unbound types as Foo? or Foo?[...].
     - Represent the NoneTyp type as None.
    """

    def visit_unbound_type(self, t: UnboundType) -> str:
        # '?' marks a type not yet bound by semantic analysis.
        s = t.name + '?'
        if t.args != []:
            s += '[{}]'.format(self.list_str(t.args))
        return s

    def visit_type_list(self, t: TypeList) -> str:
        return '<TypeList {}>'.format(self.list_str(t.items))

    def visit_error_type(self, t: ErrorType) -> str:
        return '<ERROR>'

    def visit_any(self, t: AnyType) -> str:
        return 'Any'

    def visit_void(self, t: Void) -> str:
        return 'void'

    def visit_none_type(self, t: NoneTyp) -> str:
        # Fully qualify to make this distinct from the None value.
        return "builtins.None"

    def visit_uninhabited_type(self, t: UninhabitedType) -> str:
        return "<uninhabited>"

    def visit_erased_type(self, t: ErasedType) -> str:
        return "<Erased>"

    def visit_deleted_type(self, t: DeletedType) -> str:
        if t.source is None:
            return "<Deleted>"
        else:
            return "<Deleted '{}'>".format(t.source)

    def visit_instance(self, t: Instance) -> str:
        s = t.type.fullname() if t.type is not None else '<?>'
        if t.erased:
            # '*' marks an erased instance type.
            s += '*'
        if t.args != []:
            s += '[{}]'.format(self.list_str(t.args))
        return s

    def visit_type_var(self, t: TypeVarType) -> str:
        if t.name is None:
            # Anonymous type variable type (only numeric id).
            return '`{}'.format(t.id)
        else:
            # Named type variable type.
            return '{}`{}'.format(t.name, t.id)

    def visit_callable_type(self, t: CallableType) -> str:
        # Render as "def (args) -> ret" with '*'/'**' markers, a one-time
        # bare '*' before the first keyword-only argument, and a trailing
        # ' =' on arguments that have defaults.
        s = ''
        bare_asterisk = False
        for i in range(len(t.arg_types)):
            if s != '':
                s += ', '
            if t.arg_kinds[i] == mypy.nodes.ARG_NAMED and not bare_asterisk:
                s += '*, '
                bare_asterisk = True
            if t.arg_kinds[i] == mypy.nodes.ARG_STAR:
                s += '*'
            if t.arg_kinds[i] == mypy.nodes.ARG_STAR2:
                s += '**'
            if t.arg_names[i]:
                s += t.arg_names[i] + ': '
            s += str(t.arg_types[i])
            if t.arg_kinds[i] == mypy.nodes.ARG_OPT:
                s += ' ='
        s = '({})'.format(s)
        if not isinstance(t.ret_type, Void):
            s += ' -> {}'.format(t.ret_type)
        if t.variables:
            s = '{} {}'.format(t.variables, s)
        return 'def {}'.format(s)

    def visit_overloaded(self, t: Overloaded) -> str:
        a = []
        for i in t.items():
            a.append(i.accept(self))
        return 'Overload({})'.format(', '.join(a))

    def visit_tuple_type(self, t: TupleType) -> str:
        s = self.list_str(t.items)
        if t.fallback and t.fallback.type:
            fallback_name = t.fallback.type.fullname()
            if fallback_name != 'builtins.tuple':
                # Only show a non-default fallback, to keep output short.
                return 'Tuple[{}, fallback={}]'.format(s, t.fallback.accept(self))
        return 'Tuple[{}]'.format(s)

    def visit_star_type(self, t: StarType) -> str:
        s = t.type.accept(self)
        return '*{}'.format(s)

    def visit_union_type(self, t: UnionType) -> str:
        s = self.list_str(t.items)
        return 'Union[{}]'.format(s)

    def visit_partial_type(self, t: PartialType) -> str:
        if t.type is None:
            return '<partial None>'
        else:
            # Type arguments are unknown at this point, hence the '?'s.
            return '<partial {}[{}]>'.format(t.type.name(),
                                             ', '.join(['?'] * len(t.type.type_vars)))

    def visit_ellipsis_type(self, t: EllipsisType) -> str:
        return '...'

    def visit_type_type(self, t: TypeType) -> str:
        return 'Type[{}]'.format(t.item.accept(self))

    def list_str(self, a: List[Type]) -> str:
        """Convert items of an array to strings (pretty-print types)
        and join the results with commas.
        """
        res = []
        for t in a:
            if isinstance(t, Type):
                res.append(t.accept(self))
            else:
                res.append(str(t))
        return ', '.join(res)
# These constants define the method used by TypeQuery to combine multiple
# query results, e.g. for tuple types. The strategy is not used for empty
# result lists; in that case the default value takes precedence.
ANY_TYPE_STRATEGY = 0  # Return True if any of the results are True.
ALL_TYPES_STRATEGY = 1  # Return True if all of the results are True.
class TypeQuery(TypeVisitor[bool]):
    """Visitor for performing simple boolean queries of types.

    This class allows defining the default value for leafs to simplify the
    implementation of many queries.
    """

    default = False  # Default result for leaf types
    strategy = 0  # Strategy for combining multiple values (ANY_TYPE_STRATEGY or ALL_TYPES_...).

    def __init__(self, default: bool, strategy: int) -> None:
        """Construct a query visitor.

        Use the given default result and strategy for combining
        multiple results. The strategy must be either
        ANY_TYPE_STRATEGY or ALL_TYPES_STRATEGY.
        """
        self.default = default
        self.strategy = strategy

    # Leaf types (no component types) all return the configured default.

    def visit_unbound_type(self, t: UnboundType) -> bool:
        return self.default

    def visit_type_list(self, t: TypeList) -> bool:
        return self.default

    def visit_error_type(self, t: ErrorType) -> bool:
        return self.default

    def visit_any(self, t: AnyType) -> bool:
        return self.default

    def visit_void(self, t: Void) -> bool:
        return self.default

    def visit_uninhabited_type(self, t: UninhabitedType) -> bool:
        return self.default

    def visit_none_type(self, t: NoneTyp) -> bool:
        return self.default

    def visit_erased_type(self, t: ErasedType) -> bool:
        return self.default

    def visit_deleted_type(self, t: DeletedType) -> bool:
        return self.default

    def visit_type_var(self, t: TypeVarType) -> bool:
        return self.default

    def visit_partial_type(self, t: PartialType) -> bool:
        return self.default

    # Composite types combine the results of their component types.

    def visit_instance(self, t: Instance) -> bool:
        return self.query_types(t.args)

    def visit_callable_type(self, t: CallableType) -> bool:
        # FIX generics
        return self.query_types(t.arg_types + [t.ret_type])

    def visit_tuple_type(self, t: TupleType) -> bool:
        return self.query_types(t.items)

    def visit_star_type(self, t: StarType) -> bool:
        return t.type.accept(self)

    def visit_union_type(self, t: UnionType) -> bool:
        return self.query_types(t.items)

    def visit_overloaded(self, t: Overloaded) -> bool:
        return self.query_types(t.items())

    def visit_type_type(self, t: TypeType) -> bool:
        return t.item.accept(self)

    def query_types(self, types: Sequence[Type]) -> bool:
        """Perform a query for a list of types.

        Use the strategy constant to combine the results.
        """
        if not types:
            # Use default result for empty list.
            return self.default
        if self.strategy == ANY_TYPE_STRATEGY:
            # Return True if at least one component is true.
            res = False
            for t in types:
                res = res or t.accept(self)
                if res:
                    # Short-circuit: no need to visit remaining types.
                    break
            return res
        else:
            # Return True if all components are true.
            res = True
            for t in types:
                res = res and t.accept(self)
                if not res:
                    break
            return res
def strip_type(typ: Type) -> Type:
    """Return a copy of a type with 'debugging info' (function name) removed."""
    if isinstance(typ, CallableType):
        return typ.copy_modified(name=None)
    if isinstance(typ, Overloaded):
        # Strip each overload item individually.
        stripped = [cast(CallableType, strip_type(item)) for item in typ.items()]
        return Overloaded(stripped)
    return typ
def replace_leading_arg_type(t: CallableType, self_type: Type) -> CallableType:
    """Return a copy of a callable type with a different self argument type.

    Assume that the callable is the signature of a method.
    """
    new_arg_types = [self_type] + t.arg_types[1:]
    return t.copy_modified(arg_types=new_arg_types)
def is_named_instance(t: Type, fullname: str) -> bool:
    """Check whether t is an Instance whose type has the given full name."""
    if not isinstance(t, Instance):
        return False
    return t.type is not None and t.type.fullname() == fullname
def copy_type(t: Type) -> Type:
    """Return a shallow copy of the type.

    Used so the copy can be mutated with truthiness information without
    touching the original.
    """
    return copy.copy(t)
def true_only(t: Type) -> Type:
    """Restricted version of t with only True-ish values."""
    if not t.can_be_true:
        # Every value of t is False-ish: the restriction is uninhabited.
        return UninhabitedType(line=t.line)
    if not t.can_be_false:
        # Already entirely True-ish; the operation is idempotent here.
        return t
    if isinstance(t, UnionType):
        # Restrict each union member and re-simplify.
        restricted_items = [true_only(item) for item in t.items]
        return UnionType.make_simplified_union(restricted_items, line=t.line)
    restricted = copy_type(t)
    restricted.can_be_false = False
    return restricted
def false_only(t: Type) -> Type:
"""
Restricted version of t with only False-ish values
"""
if not t.can_be_false:
# All values of t are True-ish, so there are no false values in it
return UninhabitedType(line=t.line)
elif not t.can_be_true:
# All values of t are already False-ish, so false_only is idempotent in this case
return t
elif isinstance(t, UnionType):
# The false version of a union type is the union of the false versions of its components
new_items = [false_only(item) for item in | |
+ self.highlight_offset[0]
Yc = self.logo_offset[1] + self.highlight_offset[1]
Color(self.highlight_color[0],self.highlight_color[1],self.highlight_color[2],self.highlight_color[3])
for j in range(len(self.radius_list)) :
xc = self.dx_list[j]*self.logo_size + Xc
yc = self.dy_list[j]*self.logo_size + Yc
rc = self.radius_list[j]*self.logo_size
Line(circle=(xc,yc,rc),close=True,width=self.logo_thickness)
Xc = Xc - self.highlight_offset[0]
Yc = Yc - self.highlight_offset[1]
Color(self.logo_color[0],self.logo_color[1],self.logo_color[2],self.logo_color[3])
for j in range(len(self.radius_list)) :
xc = self.dx_list[j]*self.logo_size + Xc
yc = self.dy_list[j]*self.logo_size + Yc
rc = self.radius_list[j]*self.logo_size
Line(circle=(xc,yc,rc),close=True,width=self.logo_thickness)
def resize(self,widget,newsize) :
self.redraw_background()
class SpecificationCategory(BoxLayout) :
    # One labelled grouping of specification rows (layout presumably defined
    # in the accompanying kv file -- TODO confirm).
    title = StringProperty("")  # Heading text shown for this category.
    padding = [dp(20),dp(0),dp(20),dp(20)]  # left, top, right, bottom (dp).
    content_height = NumericProperty(0)  # Height of the contained rows; set externally -- confirm against kv.
class SpecificationItem(BoxLayout) :
    # A single "name: value" row displayed inside a SpecificationCategory.
    name = StringProperty("")   # Label of the quantity (e.g. "Stations").
    value = StringProperty("")  # Pre-formatted display string for the value.
class SpecificationsPage(BoxLayout) :
    # Summary page of array cost/performance figures.  All properties below
    # are recomputed by generate_specs() from module-level globals
    # (_statdict, _datadict, _ngeht_diameter, ...); the literals are only
    # placeholders shown before the first recomputation.
    # Cost estimates (display strings).
    est_cost = StringProperty("$200M")
    est_capex = StringProperty("$200M")
    est_opex = StringProperty("$0.75M/yr")
    est_datex = StringProperty("$0.75M/yr")
    # Station counts for the currently selected array.
    stations = NumericProperty(0)
    new_stations = NumericProperty(0)
    ngeht_stations = NumericProperty(0)
    # Recording parameters; bandwidth presumably in GHz and data_rate in
    # Tbps (see get_data_rate) -- TODO confirm units.
    bandwidth = NumericProperty(8)
    data_rate = NumericProperty(10)
    # Baseline/visibility statistics: total, within the selected time range,
    # and surviving the S/N cut (see get_data_statistics).
    number_of_baselines_total = NumericProperty(0)
    number_of_baselines_in_timerange = NumericProperty(0)
    number_of_baselines_above_snrcut = NumericProperty(0)
    number_of_visibilities_total = NumericProperty(0)
    number_of_visibilities_in_timerange = NumericProperty(0)
    number_of_visibilities_above_snrcut = NumericProperty(0)
    # Performance estimates (display strings, see estimate_performance).
    est_baseline_sensitivity = StringProperty("0 mJy")
    est_point_source_sensitivity = StringProperty("0 mJy")
    est_angular_resolution = StringProperty("0 uas")
    est_field_of_view = StringProperty("0 mas")
    est_image_dynamic_range = StringProperty("0")
    est_snapshot_dynamic_range = StringProperty("0")
    # Mirrors of the module-level analysis settings (see get_array_parameters).
    ngeht_diameter = NumericProperty(_ngeht_diameter)
    time_range = ListProperty(_time_range,size=2)
    snr_cut = NumericProperty(_snr_cut)
    source_RA = StringProperty("--")
    source_Dec = StringProperty("--")
def generate_specs(self) :
self.get_station_counts()
self.get_data_rate()
self.get_data_statistics()
self.get_array_parameters()
#
self.estimate_cost()
#
self.estimate_performance()
def estimate_cost(self) :
capex,opex = ngeht_array.cost_model(_statdict,_ngeht_diameter,opex_exclude=list(_stationdicts['EHT 2022'].keys()))
tot = capex + opex*10
self.est_cost = "$%.1fM"%(int(tot*10+0.5)/10.0)
self.est_capex = "$%.1fM"%(int(capex*10+0.5)/10.0)
self.est_opex = "$%.1fM/yr"%(int(opex*10.0+0.5)/10.0)
self.est_datex = "TBD"
# self.est_datex = "$1M/yr"
def get_station_counts(self) :
n = 0
nnew = 0
nngeht = 0
for s in _statdict.keys() :
# print("Station:",s)
if (_statdict[s]['on']) :
n += 1
if (not s in _existing_station_list) :
nnew += 1
# print("-> new station?",s)
# if (not s in ['GB']) :
# nngeht += 1
# print("--> ngEHT station?",s)
self.stations = n
self.new_stations = nnew
self.ngeht_stations = nngeht
    def get_data_rate(self) :
        """Set data_rate from bandwidth and station count.

        Factors: 2 polarizations x Nyquist sampling (2 x bandwidth) x 2 bits
        per sample x number of stations; bandwidth is taken to be in GHz and
        the result in Tbps (the 1e9 / 1e12 scalings) -- TODO confirm units.
        """
        # 2 pol * nyquist * 2 bit * n stations
        self.data_rate = 2 * 2*self.bandwidth*1e9 * 2 * self.stations / 1e12
    def get_data_statistics(self) :
        """Recompute baseline and visibility counts for the current selection.

        Reads the module-level _datadict (station pairs s1/s2, visibilities V,
        times t, errors err, ...) and _statdict (per-station settings), and
        updates three (baselines, visibilities) property pairs: total, within
        the selected time range, and above the S/N cut.
        """
        # Exclude stations not in array
        stations = list(np.unique(np.array(list(_statdict.keys()))))
        keep = np.array([ (_datadict['s1'][j] in stations) and (_datadict['s2'][j] in stations) for j in range(len(_datadict['s1'])) ])
        ddtmp = {}
        for key in ['u','v','V','s1','s2','t','err'] :
            ddtmp[key] = _datadict[key][keep]
        # Also drop data where either station is switched off.
        keep = np.array([ _statdict[ddtmp['s1'][j]]['on'] and _statdict[ddtmp['s2'][j]]['on'] for j in range(len(ddtmp['s1'])) ])
        for key in ['u','v','V','s1','s2','t','err'] :
            ddtmp[key] = ddtmp[key][keep]
        # Get the number of unique baselines
        self.number_of_baselines_total = 0
        #stations = list(np.unique(np.append(_datadict['s1'],_datadict['s2'])))
        for k,s1 in enumerate(stations) :
            for s2 in stations[(k+1):] :
                # print("Baseline count:",self.number_of_baselines_total,s1,s2,np.any((ddtmp['s1']==s1)*(ddtmp['s2']==s2)))
                if ( np.any((ddtmp['s1']==s1)*(ddtmp['s2']==s2)) ) :
                    self.number_of_baselines_total += 1
        # NOTE(review): the //2 presumably accounts for visibilities being
        # stored in conjugate pairs -- confirm against how _datadict is built.
        self.number_of_visibilities_total = ddtmp['V'].size//2
        # Keep only data inside the selected observation time range.
        keep = (ddtmp['t']>=_time_range[0])*(ddtmp['t']<_time_range[1])
        ddtmp2 = {'u':np.array([]),'v':np.array([]),'V':np.array([]),'s1':np.array([]),'s2':np.array([]),'t':np.array([]),'err':np.array([])}
        for key in ['u','v','V','s1','s2','t','err'] :
            ddtmp2[key] = ddtmp[key][keep]
        # Get the number of unique baselines
        self.number_of_baselines_in_timerange = 0
        #stations = list(np.unique(np.append(_datadict['s1'],_datadict['s2'])))
        for k,s1 in enumerate(stations) :
            for s2 in stations[(k+1):] :
                # print(" time -- Baseline count:",self.number_of_baselines_in_timerange,s1,s2,np.any((ddtmp['s1']==s1)*(ddtmp['s2']==s2)))
                if ( np.any((ddtmp2['s1']==s1)*(ddtmp2['s2']==s2)) ) :
                    self.number_of_baselines_in_timerange += 1
        self.number_of_visibilities_in_timerange = ddtmp2['V'].size//2
        # Cut points with S/N less than the specified minimum value
        if ((_snr_cut is None) or (_snr_cut==0)) :
            # No cut requested: reuse the time-range counts.
            self.number_of_baselines_above_snrcut = self.number_of_baselines_in_timerange
            self.number_of_visibilities_above_snrcut = self.number_of_visibilities_in_timerange
        else :
            # Get a list of error adjustments based on stations
            # NOTE(review): for planned (non-existing) stations the thermal
            # error appears to be rescaled by diameter relative to the chosen
            # ngEHT dish size -- confirm the error ~ 1/diameter assumption.
            diameter_correction_factor = {}
            for s in stations :
                if (_statdict[s]['exists']) :
                    diameter_correction_factor[s] = 1.0
                else :
                    diameter_correction_factor[s] = _statdict[s]['diameter']/_ngeht_diameter
            keep = np.array([ np.abs(ddtmp2['V'][j])/(ddtmp2['err'][j].real * diameter_correction_factor[ddtmp2['s1'][j]] * diameter_correction_factor[ddtmp2['s2'][j]]) > _snr_cut for j in range(len(ddtmp2['s1'])) ])
            ddtmp = {'u':np.array([]),'v':np.array([]),'V':np.array([]),'s1':np.array([]),'s2':np.array([]),'t':np.array([]),'err':np.array([])}
            for key in ['u','v','V','s1','s2','t','err'] :
                ddtmp[key] = ddtmp2[key][keep]
            # Get the number of unique baselines
            self.number_of_baselines_above_snrcut = 0
            # NOTE(review): unlike the two loops above, this recomputes the
            # station list from the full data dictionary instead of reusing
            # `stations` -- looks inconsistent; confirm it is intentional.
            stations = list(np.unique(np.append(_datadict['s1'],_datadict['s2'])))
            for k,s1 in enumerate(stations) :
                for s2 in stations[(k+1):] :
                    # print(" snrcut -- Baseline count:",self.number_of_baselines_above_snrcut,s1,s2,np.any((ddtmp['s1']==s1)*(ddtmp['s2']==s2)))
                    if ( np.any((ddtmp['s1']==s1)*(ddtmp['s2']==s2)) ) :
                        self.number_of_baselines_above_snrcut += 1
            self.number_of_visibilities_above_snrcut = ddtmp['V'].size//2
def get_array_parameters(self) :
self.ngeht_diameter = _ngeht_diameter
self.time_range = _time_range
self.snr_cut = float(_snr_cut)
self.source_RA = self.hr_to_str(_source_RA)
self.source_Dec = self.deg_to_str(_source_Dec)
def sig_fig(self,val,n) :
mag = 10**(int(np.log10(val)-n+1))
return int(val/mag+0.5)*mag
def estimate_performance(self) :
### Generate multi-wavelength data
# Cut points with S/N less than the specified minimum value
stations = list(np.unique(np.array(list(_statdict.keys()))))
if (not _snr_cut is None) and _snr_cut>0 :
# Get a list of error adjustments based on stations
diameter_correction_factor = {}
for s in stations :
if (_statdict[s]['exists']) :
diameter_correction_factor[s] = 1.0
else :
diameter_correction_factor[s] = _statdict[s]['diameter']/_ngeht_diameter
else :
diameter_correction_factor = {}
for s in stations :
diameter_correction_factor[s] = 1.0
# Get the max sensitivities on various baselines
on_station_list = []
ngEHT_station_list = []
for s in _statdict.keys() :
if (_statdict[s]['on']) :
on_station_list.append(s)
if (_statdict[s]['exists']==False) :
ngEHT_station_list.append(s)
err_list_all = []
err_list_ngeht = []
for j in range(len(_datadict['s1'])) :
if ( (_datadict['s1'][j] in on_station_list) and (_datadict['s2'][j] in on_station_list) ) :
err_list_all.append( _datadict['err'][j].real * diameter_correction_factor[_datadict['s1'][j]] * diameter_correction_factor[_datadict['s2'][j]] )
if ( (_datadict['s1'][j] in on_station_list) and (_datadict['s2'][j] in ngEHT_station_list) ) :
err_list_ngeht.append( _datadict['err'][j].real * diameter_correction_factor[_datadict['s1'][j]] * diameter_correction_factor[_datadict['s2'][j]] )
err_list_all = np.array(err_list_all)
err_list_ngeht = np.array(err_list_ngeht)
# Get the sensitivity from the most sensitive element of the current arrary to an ngEHT station
if (len(err_list_ngeht)==0) :
self.est_baseline_sensitivity = "N/A"
else :
err_anchor = np.min(err_list_ngeht)
print("err anchor:",err_anchor)
baseline_sensitivity = 7 * np.max(err_anchor) * 1e3
self.est_baseline_sensitivity = "%2g mJy"%(self.sig_fig(baseline_sensitivity,2))
# Get the sensitivity between two most sensitive elements of the current array
if (len(err_list_all)==0) :
self.est_point_source_sensitivity = "N/A"
else :
err_max = np.min(err_list_all)
print("err anchor:",err_max)
point_source_sensitivity = 7 * np.max(err_max) * 1e3
self.est_point_source_sensitivity = "%2g mJy"%(self.sig_fig(point_source_sensitivity,2))
# # Get the sensitivity from the most sensitive element of the current arrary to an ngEHT station
# on_station_list = []
# on_station_sefd = []
# for s in _statdict.keys() :
# if (_statdict[s]['on']) :
# on_station_list.append(s)
# on_station_sefd.append(_statdict[s]['sefd'][0] * diameter_correction_factor[s])
# on_station_list = np.array(on_station_list)
# on_station_sefd = np.array(on_station_sefd)
# on_station_list = on_station_list[np.argsort(on_station_sefd)]
# on_station_sefd = on_station_sefd[np.argsort(on_station_sefd)]
# anchor_station = on_station_list[0]
# prototype_station = 'BA'
# print("Anchor station:",anchor_station)
# if ( not prototype_station in _statdict.keys() ) : # not an ngEHT array
# self.est_baseline_sensitivity = "N/A"
# else :
# err_anchor = _datadict['err'][(_datadict['s1']==anchor_station)*(_datadict['s2']==prototype_station)].real * diameter_correction_factor[anchor_station]*diameter_correction_factor[prototype_station]
# print("err anchor:",err_anchor)
# baseline_sensitivity = 7 * np.max(err_anchor) * 1e3
# self.est_baseline_sensitivity = "%2g mJy"%(self.sig_fig(baseline_sensitivity,2))
# # Get the sensitivity between two most sensitive elements of the current array
# err_max = _datadict['err'][(_datadict['s1']==anchor_station)*(_datadict['s2']==on_station_list[1])].real * diameter_correction_factor[anchor_station]*diameter_correction_factor[prototype_station]
# point_source_sensitivity = 7 * np.max(err_max) * 1e3
# self.est_point_source_sensitivity = "%2g mJy"%(self.sig_fig(point_source_sensitivity,2))
# Exclude stations not in current array
keep = np.array([ (_datadict['s1'][j] in stations) and (_datadict['s2'][j] in stations) for j in range(len(_datadict['s1'])) ])
ddtmp = {}
for key in ['u','v','V','s1','s2','t','err'] :
ddtmp[key] = _datadict[key][keep]
keep = np.array([ _statdict[ddtmp['s1'][j]]['on'] and _statdict[ddtmp['s2'][j]]['on'] for j in range(len(ddtmp['s1'])) ])
for key in ['u','v','V','s1','s2','t','err'] :
ddtmp[key] = ddtmp[key][keep]
# Cut points with S/N less than the specified minimum value
if (not _snr_cut is None) and _snr_cut>0 :
ddtmp2 = copy.deepcopy(ddtmp)
# Baseline-by-baseline filtering
# keep = np.array([ np.abs(ddtmp2['V'][j])/(ddtmp2['err'][j].real * diameter_correction_factor[ddtmp2['s1'][j]] * diameter_correction_factor[ddtmp2['s2'][j]]) > _snr_cut for j in range(len(ddtmp2['s1'])) ])
# Ad hoc phasing
keep = np.array([True]*len(ddtmp2['s1']))
jtot = np.arange(ddtmp2['t'].size)
for tscan in np.unique(ddtmp2['t']) :
inscan = (ddtmp2['t']==tscan)
s1_scan = ddtmp2['s1'][inscan]
s2_scan = ddtmp2['s2'][inscan]
snr_scan = np.array([ ddtmp2['V'][inscan][j]/( ddtmp2['err'][inscan][j] * diameter_correction_factor[s1_scan[j]] * diameter_correction_factor[s2_scan[j]] ) for j in range(len(s1_scan)) ])
detection_station_list = []
for ss in np.unique(np.append(s1_scan,s2_scan)) :
snr_scan_ss = np.append(snr_scan[s1_scan==ss],snr_scan[s2_scan==ss])
if np.any(snr_scan_ss > _snr_cut ) :
detection_station_list.append(ss)
keep[jtot[inscan]] = np.array([ (s1_scan[k] in detection_station_list) and (s2_scan[k] in detection_station_list) for k in range(len(s1_scan)) ])
ddtmp = {'u':np.array([]),'v':np.array([]),'V':np.array([]),'s1':np.array([]),'s2':np.array([]),'t':np.array([]),'err':np.array([])}
for key in ['u','v','V','s1','s2','t','err'] :
ddtmp[key] = ddtmp2[key][keep]
# Get the angular resolution
uvmax = np.sqrt(np.max( ddtmp['u']**2 + ddtmp['v']**2 ))*1e9
angular_resolution = 1.0/uvmax * 180.*3600e6/np.pi
# Get the fov (shortest non-intrasite baseline)
ddtmp2 = {'u':np.array([]),'v':np.array([]),'V':np.array([]),'s1':np.array([]),'s2':np.array([]),'t':np.array([]),'err':np.array([])}
keep = np.array([True]*ddtmp['V'].size)
for baseline in [['AA','AP'],['SM','JC']] :
print("Removing baseline",baseline[0],baseline[1])
isbaseline = (ddtmp['s1']==baseline[0])*(ddtmp['s2']==baseline[1]) + (ddtmp['s2']==baseline[0])*(ddtmp['s1']==baseline[1])
keep = keep*(isbaseline==False)
# for j in np.arange(ddtmp['V'].size) :
# print(ddtmp['s1'][j],ddtmp['s2'][j],keep[j])
for key in ['u','v','V','s1','s2','t','err'] :
ddtmp2[key] = ddtmp[key][keep]
uvmin = np.sqrt(np.min( ddtmp2['u']**2 + ddtmp2['v']**2 )) * 1e9
imin = np.argmin( ddtmp2['u']**2 + ddtmp2['v']**2 )
field_of_view = 1.0/uvmin * 180.*3600e3/np.pi
print("angular resolution:",angular_resolution)
print("field of view:",field_of_view,ddtmp2['s1'][imin],ddtmp2['s2'][imin])
# self.est_angular_resolution = "%0.1f uas"%(int(angular_resolution*10+0.5)/10.0)
# self.est_field_of_view = "%0.1f mas"%(int(field_of_view*10+0.5)/10.0)
self.est_angular_resolution = "%g \u03BCas"%(self.sig_fig(angular_resolution,1))
if (field_of_view>=1) :
self.est_field_of_view = "%g mas"%(self.sig_fig(field_of_view,1))
else :
self.est_field_of_view = | |
"""
.. module:: sparse_rep
.. moduleauthor:: <NAME>
.. moduleauthor:: <NAME>
The original SparsePZ code to be found at https://github.com/mgckind/SparsePz
This module reorganizes it for usage by DESC within qp, and is python3 compliant.
"""
__author__ = '<NAME>'
import numpy as np
from scipy.special import voigt_profile
from scipy import linalg as sla
from scipy import integrate as sciint
def shapes2pdf(wa, ma, sa, ga, meta, cut=1.e-5):
    """Return a pdf evaluated at the meta['xvals'] values for the
    given set of Voigt parameters.

    :param wa: weights of the Voigt components for a single object
    :param ma: centers (means) of the components
    :param sa: gaussian sigmas of the components
    :param ga: lorentzian gammas of the components
    :param dict meta: metadata dictionary; only 'xvals' is used here
    :param float cut: values below this threshold are zeroed
    :return: the pdf normalized to unit trapezoidal integral over xvals
    """
    x = meta['xvals']
    pdf = np.zeros_like(x)
    for w, m, s, g in zip(wa, ma, sa, ga):
        pdft = voigt_profile(x - m, s, g)
        pdft = np.where(pdft >= cut, pdft, 0.)
        # Each component is L2-normalized before weighting, matching the
        # unit-norm columns of the Voigt basis dictionary.
        pdft = w * pdft / sla.norm(pdft)
        pdf += pdft
    pdf = np.where(pdf >= cut, pdf, 0.)
    # Bug fix: scipy.integrate.trapz was removed in SciPy 1.14; trapezoid is
    # the drop-in replacement (available since SciPy 1.6).
    return pdf / sciint.trapezoid(pdf, x)
def create_basis(metadata, cut=1.e-5):
    """Construct the Voigt basis matrix described by a metadata dictionary.

    The dictionary must provide the evaluation grid ('xvals'), the mean and
    sigma ranges ('mu', 'sig') and the basis dimensions
    ('dims' = [Nmu, Nsigma, Nv, ...]).
    """
    Nmu, Nsigma, Nv = metadata['dims'][:3]
    return create_voigt_basis(metadata['xvals'], metadata['mu'], Nmu,
                              metadata['sig'], Nsigma, Nv, cut=cut)
def create_voigt_basis(xvals, mu, Nmu, sigma, Nsigma, Nv, cut=1.e-5):
    """
    Creates a gaussian-voigt dictionary at the same resolution as the original PDF.

    :param float xvals: the x-axis point values for the PDF
    :param float mu: [min_mu, max_mu], range of mean for gaussian
    :param int Nmu: Number of values between min_mu and max_mu
    :param float sigma: [min_sigma, max_sigma], range of variance for gaussian
    :param int Nsigma: Number of values between min_sigma and max_sigma
    :param Nv: Number of Voigt profiles per gaussian at given position mu and sigma
    :param float cut: Lower cut for gaussians
    :return: Dictionary as numpy array with shape (len(xvals), Nmu*Nsigma*Nv)
    :rtype: float
    """
    mean_grid = np.linspace(mu[0], mu[1], Nmu)
    sigma_grid = np.linspace(sigma[0], sigma[1], Nsigma)
    gamma_grid = np.linspace(0, 0.5, Nv)
    basis = np.zeros((len(xvals), Nmu * Nsigma * Nv))
    # Columns enumerate (mu, sigma, gamma) in row-major order.
    column = 0
    for m in mean_grid:
        for s in sigma_grid:
            for g in gamma_grid:
                profile = voigt_profile(xvals - m, s, g)
                profile = np.where(profile >= cut, profile, 0.)
                # Each basis column carries unit euclidean norm.
                basis[:, column] = profile / sla.norm(profile)
                column += 1
    return basis
def sparse_basis(dictionary, query_vec, n_basis, tolerance=None):
    """
    Compute sparse representation of a vector given Dictionary (basis)
    for a given tolerance or number of basis. It uses Cholesky decomposition to speed the process and to
    solve the linear operations adapted from <NAME>., <NAME>. and <NAME>., Technical Report - CS
    Technion, April 2008

    :param float dictionary: Array with all basis on each column, must has shape (len(vector), total basis) and each column must have euclidean l-2 norm equal to 1
    :param float query_vec: vector of which a sparse representation is desired
    :param int n_basis: number of desired basis
    :param float tolerance: tolerance desired if n_basis is not needed to be fixed, must input a large number for n_basis to assure achieving tolerance
    :return: indices, values (2 arrays one with the position and the second with the coefficients)

    .. warning:: `dictionary` is modified in place (active columns are
       swapped to the front) and the local reference is deleted at the end;
       pass a copy if the caller needs the matrix intact.
    """
    a_n = np.zeros(dictionary.shape[1])
    machine_eps = np.finfo(dictionary.dtype).eps
    # Initial correlations of every atom with the query vector.
    alpha = np.dot(dictionary.T, query_vec)
    res = query_vec
    idxs = np.arange(dictionary.shape[1])  # keeping track of swapping
    # Incrementally grown Cholesky factor of the active atoms' Gram matrix.
    L = np.zeros((n_basis, n_basis), dtype=dictionary.dtype)
    L[0, 0] = 1.
    for n_active in range(n_basis):
        # Pick the atom most correlated with the current residual.
        lam = np.argmax(abs(np.dot(dictionary.T, res)))
        if lam < n_active or alpha[lam] ** 2 < machine_eps: #pragma: no cover
            n_active -= 1
            break
        if n_active > 0: #pragma: no cover
            # Updates the Cholesky decomposition of dictionary
            L[n_active, :n_active] = np.dot(dictionary[:, :n_active].T, dictionary[:, lam])
            sla.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], lower=True, overwrite_b=True)
            v = sla.norm(L[n_active, :n_active]) ** 2
            if 1 - v <= machine_eps:
                print("Selected basis are dependent or normed are not unity")
                break
            L[n_active, n_active] = np.sqrt(1 - v)
        # Swap the chosen atom into the active block; columns, correlations
        # and the index bookkeeping all move together.
        dictionary[:, [n_active, lam]] = dictionary[:, [lam, n_active]]
        alpha[[n_active, lam]] = alpha[[lam, n_active]]
        idxs[[n_active, lam]] = idxs[[lam, n_active]]
        # solves LL'x = query_vec as a composition of two triangular systems
        gamma = sla.cho_solve((L[:n_active + 1, :n_active + 1], True), alpha[:n_active + 1], overwrite_b=False)
        res = query_vec - np.dot(dictionary[:, :n_active + 1], gamma)
        if tolerance is not None and sla.norm(res) ** 2 <= tolerance:
            break
    # NOTE(review): if the loop breaks on its very first iteration (the
    # alpha[lam]**2 < machine_eps branch), `gamma` is never assigned and the
    # lines below raise NameError -- confirm inputs make that impossible.
    # `a_n` (the dense coefficient vector) is filled but no longer returned;
    # only the sparse (indices, values) pair is.
    a_n[idxs[:n_active + 1]] = gamma
    del dictionary
    #return a_n
    return idxs[:n_active + 1], gamma
def combine_int(Ncoef, Nbase):
    """Pack a coefficient and a basis index into a single 32-bit integer.

    The coefficient occupies the high half-word and the basis index the low
    half-word; get_N performs the inverse operation.

    :param int Ncoef: quantized coefficient value (signed, fits in 16 bits)
    :param int Nbase: basis index (unsigned, fits in 16 bits, so up to
        roughly 62500 bases are representable)
    :return: the packed 32-bit integer
    """
    return (Ncoef << 16) | Nbase
def get_N(longN):
    """Split a packed 32-bit integer back into its two half-words.

    Inverse of combine_int: the high half-word holds the coefficient and
    the low half-word the basis index.

    :param int longN: input 32 bits integer
    :return: Ncoef, Nbase both 16 bits integer
    """
    return longN >> 16, longN & 0xFFFF
def decode_sparse_indices(indices, Ncoef=32001):
    """Decode packed sparse indices into basis indices and a weight array.

    :param indices: array of packed 32-bit integers (one row per object), as
        produced by build_sparse_representation
    :param int Ncoef: number of quantization levels used for the weights at
        encoding time; generalized from a previously hard-coded constant,
        the default matches build_sparse_representation
    :return: (basis_indices, weights) pair of arrays
    """
    sp_ind = np.array(list(map(get_N, indices)))
    spi = sp_ind[:, 0, :]
    # Quantization step of the encoded weights.
    dVals = 1./(Ncoef - 1)
    vals = spi * dVals
    # The first coefficient is stored as the absolute leading value; by
    # construction its relative weight is exactly 1.
    vals[:, 0] = 1.
    return sp_ind[:, 1, :], vals
def indices2shapes(sparse_indices, meta):
    """Compute the Voigt shape parameters from the sparse index.

    Parameters
    ----------
    sparse_indices: `np.array`
        1D Array of indices for each object in the ensemble
    meta: `dict`
        Dictionary of metadata to decode the sparse indices

    Returns
    -------
    (weights, means, sigmas, gammas), one row per object.
    """
    Nmu, Nsigma, Nv, Ncoef = meta['dims'][:4]
    means_grid = np.linspace(meta['mu'][0], meta['mu'][1], Nmu)
    sig_grid = np.linspace(meta['sig'][0], meta['sig'][1], Nsigma)
    gam_grid = np.linspace(0, 0.5, Nv)
    # Unpack each packed integer into a (quantized weight, basis index) pair.
    unpacked = np.array(list(map(get_N, sparse_indices)))
    weights = unpacked[:, 0, :] * (1./(Ncoef - 1))
    # The leading coefficient is stored in absolute form; its weight is 1.
    weights[:, 0] = 1.
    basis_idx = unpacked[:, 1, :]
    # Basis columns enumerate (mu, sigma, gamma) in row-major order, so the
    # grid coordinates are recovered by integer division / modulo.
    means = means_grid[basis_idx // (Nsigma * Nv)]
    sigmas = sig_grid[(basis_idx % (Nsigma * Nv)) // Nv]
    gammas = gam_grid[(basis_idx % (Nsigma * Nv)) % Nv]
    return weights, means, sigmas, gammas
def build_sparse_representation(x, P, mu=None, Nmu=None, sig=None, Nsig=None, Nv=3, Nsparse=20, tol=1.e-10, verbose=True):
    """compute the sparse representation of a set of pdfs evaluated on a common x array

    :param x: common evaluation grid for all PDFs
    :param P: sequence of PDFs, each evaluated on x
    :param mu: [min, max] range of gaussian means (default [min(x), max(x)])
    :param Nmu: number of mean values (default len(x))
    :param sig: [min, max] range of gaussian sigmas (default derived from the grid spacing)
    :param Nsig: number of sigma values (default derived from the grid spacing)
    :param Nv: number of Voigt profiles per (mu, sigma) pair
    :param Nsparse: maximum number of basis functions kept per PDF
    :param tol: residual tolerance passed to sparse_basis
    :param verbose: print progress information
    :return: (Sparse_Array, bigD, A) -- packed integer representation,
        metadata dictionary, and the Voigt basis matrix
    """
    #Note : the range for gamma is fixed to [0, 0.5] in create_voigt_basis
    Ntot = len(P)
    if verbose:
        print("Total Galaxies = ", Ntot)
    dx = x[1] - x[0]
    if mu is None:
        mu = [min(x), max(x)]
    if Nmu is None:
        Nmu = len(x)
    if sig is None:
        max_sig = (max(x) - min(x)) / 12.
        min_sig = dx / 6.
        sig = [min_sig, max_sig]
    # NOTE(review): if `sig` is supplied but `Nsig` is not, max_sig/min_sig
    # below are undefined and this raises NameError -- confirm callers always
    # pass both or neither.
    if Nsig is None:
        Nsig = int(np.ceil(2. * (max_sig - min_sig) / dx))
    if verbose:
        print('dx = ', dx)
        print('Nmu, Nsig, Nv = ', '[', Nmu, ',', Nsig, ',', Nv, ']')
        print('Total bases in dictionary', Nmu * Nsig * Nv)
        print('Nsparse (number of bases) = ', Nsparse)
    #Create dictionary
    # NOTE(review): this print ignores the `verbose` flag, unlike the others.
    print('Creating Dictionary...')
    A = create_voigt_basis(x, mu, Nmu, sig, Nsig, Nv)
    bigD = {}
    # Coefficients are quantized onto Ncoef levels in [0, 1]; Da is the step.
    Ncoef = 32001
    AA = np.linspace(0, 1, Ncoef)
    Da = AA[1] - AA[0]
    bigD['xvals'] = x
    bigD['mu'] = mu
    bigD['sig'] = sig
    bigD['dims'] = [Nmu, Nsig, Nv, Ncoef, Nsparse]
    bigD['Ntot'] = Ntot
    if verbose:
        print('Creating Sparse representation...')
    Sparse_Array = np.zeros((Ntot, Nsparse), dtype='int')
    for k in range(Ntot):
        pdf0 = P[k]
        Dind, Dval = sparse_basis(A, pdf0, Nsparse, tolerance=tol)
        if len(Dind) < 1:#pragma: no cover
            continue
        #bigD[k]['sparse'] = [Dind, Dval]
        if max(Dval) > 0:
            dval0 = Dval[0]
            # Weights are stored relative to the largest coefficient, except
            # slot 0, which keeps the absolute leading coefficient so the
            # decoded PDF can be renormalized.
            Dvalm = Dval / np.max(Dval)
            index = np.array(list(map(round, (Dvalm / Da))), dtype='int')
            index0 = int(round(dval0/Da))
            index[0] = index0
        else:
            index = np.zeros(len(Dind), dtype='int') #pragma: no cover
        sparse_ind = np.array(list(map(combine_int, index, Dind)))
        Sparse_Array[k, 0:len(sparse_ind)] = sparse_ind
        #swap back columns
        # NOTE(review): sparse_basis swaps the selected columns of A to the
        # front; this line appears to undo that by position.  Confirm it
        # restores A exactly when the swap sequence is not a simple
        # permutation.
        A[:, [Dind]] = A[:, [np.arange(len(Dind))]]
    if verbose:
        print('done')
    return Sparse_Array, bigD, A
def pdf_from_sparse(sparse_indices, A, xvals, cut=1.e-5):
"""return the array of evaluations at xvals from the sparse indices
"""
indices, vals = decode_sparse_indices(sparse_indices)
pdf_y = (A[:, indices]*vals).sum(axis=-1)
pdf_y | |
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
# This node performs detection and classification inference on multiple input video files and publishes results to topics multi_detection and multi_classification respectively
# Required ROS2 imports
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from vision_msgs.msg import Classification2D, ObjectHypothesis, ObjectHypothesisWithPose, BoundingBox2D, Detection2D, Detection2DArray
import os
import sys
sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
import platform
import configparser
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
import pyds
sys.path.insert(0, './src/ros2_deepstream')
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from common.FPS import GETFPS
from ctypes import *
import time
import math
import numpy as np
import cv2
import os
# Per-stream bookkeeping dictionaries, keyed by stream/pad index strings
# (filled elsewhere; fps_streams holds GETFPS instances).
fps_streams = {}
frame_count = {}
saved_count = {}
MAX_DISPLAY_LEN=64
# Stream-muxer output resolution and batch timeout (microseconds).
MUXER_OUTPUT_WIDTH=1920
MUXER_OUTPUT_HEIGHT=1080
MUXER_BATCH_TIMEOUT_USEC=4000000
# Tiler output resolution.
TILED_OUTPUT_WIDTH=1920
TILED_OUTPUT_HEIGHT=1080
GST_CAPS_FEATURES_NVMM="memory:NVMM"
# Display names for the four primary-detector (pgie) classes; the order must
# match the PGIE_CLASS_ID_* constants below.
pgie_classes_str= ["Vehicle", "TwoWheeler", "Person","RoadSign"]
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
# Label files: each holds one semicolon-separated line of class names.
# NOTE(review): assumes the process is launched from the workspace root so
# that os.getcwd() resolves correctly; the file handles are never closed.
location = os.getcwd() + "/src/ros2_deepstream/config_files/"
class_obj = (open(location+'object_labels.txt').readline().rstrip('\n')).split(';')
class_color = (open(location+'color_labels.txt').readline().rstrip('\n')).split(';')
class_make = (open(location+'make_labels.txt').readline().rstrip('\n')).split(';')
class_type = (open(location+'type_labels.txt').readline().rstrip('\n')).split(';')
class InferencePublisher(Node):
# tiler_sink_pad_buffer_probe will extract metadata received on tiler src pad
# and update params for drawing rectangle, object information etc.
def tiler_sink_pad_buffer_probe(self,pad,info,u_data):
frame_number=0
num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
l_obj=frame_meta.obj_meta_list
num_rects = frame_meta.num_obj_meta
is_first_obj = True
save_image = False
obj_counter = {
PGIE_CLASS_ID_VEHICLE:0,
PGIE_CLASS_ID_BICYCLE:0,
PGIE_CLASS_ID_PERSON:0,
PGIE_CLASS_ID_ROADSIGN:0
}
# Message for output of detection inference
msg = Detection2DArray()
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
l_classifier = obj_meta.classifier_meta_list
# If object is a car (class ID 0), perform attribute classification
if obj_meta.class_id == 0 and l_classifier is not None:
# Creating and publishing message with output of classification inference
msg2 = Classification2D()
while l_classifier is not None:
result = ObjectHypothesis()
try:
classifier_meta = pyds.glist_get_nvds_classifier_meta(l_classifier.data)
except StopIteration:
print('Could not parse MetaData: ')
break
classifier_id = classifier_meta.unique_component_id
l_label = classifier_meta.label_info_list
label_info = pyds.glist_get_nvds_label_info(l_label.data)
classifier_class = label_info.result_class_id
if classifier_id == 2:
result.id = class_color[classifier_class]
elif classifier_id == 3:
result.id = class_make[classifier_class]
else:
result.id = class_type[classifier_class]
result.score = label_info.result_prob
msg2.results.append(result)
l_classifier = l_classifier.next
self.publisher_classification.publish(msg2)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
# Creating message for output of detection inference
result = ObjectHypothesisWithPose()
result.id = str(class_obj[obj_meta.class_id])
result.score = obj_meta.confidence
left = obj_meta.rect_params.left
top = obj_meta.rect_params.top
width = obj_meta.rect_params.width
height = obj_meta.rect_params.height
bounding_box = BoundingBox2D()
bounding_box.center.x = float(left + (width/2))
bounding_box.center.y = float(top - (height/2))
bounding_box.size_x = width
bounding_box.size_y = height
detection = Detection2D()
detection.results.append(result)
detection.bbox = bounding_box
msg.detections.append(detection)
# Periodically check for objects with borderline confidence value that may be false positive detections.
# If such detections are found, annotate the frame with bboxes and confidence value.
# Save the annotated frame to file.
if((saved_count["stream_"+str(frame_meta.pad_index)]%30==0) and (obj_meta.confidence>0.3 and obj_meta.confidence<0.31)):
if is_first_obj:
is_first_obj = False
# Getting Image data using nvbufsurface
# the input should be address of buffer and batch_id
n_frame=pyds.get_nvds_buf_surface(hash(gst_buffer),frame_meta.batch_id)
#convert python array into numy array format.
frame_image=np.array(n_frame,copy=True,order='C')
#covert the array into cv2 default color format
frame_image=cv2.cvtColor(frame_image,cv2.COLOR_RGBA2BGRA)
save_image = True
frame_image=draw_bounding_boxes(frame_image,obj_meta,obj_meta.confidence)
try:
l_obj=l_obj.next
except StopIteration:
break
# Get frame rate through this probe
fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
# Publishing message with output of detection inference
self.publisher_detection.publish(msg)
if save_image:
cv2.imwrite(folder_name+"/stream_"+str(frame_meta.pad_index)+"/frame_"+str(frame_number)+".jpg",frame_image)
saved_count["stream_"+str(frame_meta.pad_index)]+=1
try:
l_frame=l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
def draw_bounding_boxes(image, obj_meta, confidence):
    """Draw the object's bounding box and a 'name,C=confidence' label on the
    frame and return the annotated image."""
    conf_text = format(confidence, '.2f')
    rect = obj_meta.rect_params
    x0, y0 = int(rect.left), int(rect.top)
    box_w, box_h = int(rect.width), int(rect.height)
    label = pgie_classes_str[obj_meta.class_id]
    image = cv2.rectangle(image, (x0, y0), (x0 + box_w, y0 + box_h), (0, 0, 255, 0), 2)
    # Note that on some systems cv2.putText erroneously draws horizontal lines across the image
    image = cv2.putText(image, label + ',C=' + str(conf_text), (x0 - 10, y0 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255, 0), 2)
    return image
    def cb_newpad(self, decodebin, decoder_src_pad,data):
        """'pad-added' handler for the uridecodebin.

        Retargets the source bin's ghost "src" pad to the decoder's newly
        created src pad, but only for video pads whose caps carry NVMM
        (NVIDIA device) memory features; otherwise an error is reported.

        :param decodebin: the uridecodebin that emitted the signal
        :param decoder_src_pad: the newly created src pad
        :param data: the enclosing source GstBin (passed at connect time)
        """
        print("In cb_newpad\n")
        caps=decoder_src_pad.get_current_caps()
        gststruct=caps.get_structure(0)
        gstname=gststruct.get_name()
        source_bin=data
        features=caps.get_features(0)
        # Need to check if the pad created by the decodebin is for video and not
        # audio.
        if(gstname.find("video")!=-1):
            # Link the decodebin pad only if decodebin has picked nvidia
            # decoder plugin nvdec_*. We do this by checking if the pad caps contain
            # NVMM memory features.
            if features.contains("memory:NVMM"):
                # Get the source bin ghost pad and point it at the real decoder pad.
                bin_ghost_pad=source_bin.get_static_pad("src")
                if not bin_ghost_pad.set_target(decoder_src_pad):
                    sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
            else:
                sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")
def decodebin_child_added(self,child_proxy,Object,name,user_data):
print("Decodebin child added:", name, "\n")
if(name.find("decodebin") != -1):
Object.connect("child-added",decodebin_child_added,user_data)
if(is_aarch64() and name.find("nvv4l2decoder") != -1):
print("Seting bufapi_version\n")
Object.set_property("bufapi-version",True)
def create_source_bin(self,index,uri):
print("Creating source bin")
# Create a source GstBin to abstract this bin's content from the rest of the
# pipeline
bin_name="source-bin-%02d" %index
print(bin_name)
nbin=Gst.Bin.new(bin_name)
if not nbin:
sys.stderr.write(" Unable to create source bin \n")
# Source element for reading from the uri.
# We will use decodebin and let it figure out the container format of the
# stream and the codec and plug the appropriate demux and decode plugins.
uri_decode_bin=Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
if not uri_decode_bin:
sys.stderr.write(" Unable to create uri decode bin \n")
# We set the input uri to the source element
uri_decode_bin.set_property("uri",uri)
# Connect to the "pad-added" signal of the decodebin which generates a
# callback once a new pad for raw data has beed created by the decodebin
uri_decode_bin.connect("pad-added",self.cb_newpad,nbin)
uri_decode_bin.connect("child-added",self.decodebin_child_added,nbin)
# We need to create a ghost pad for the source bin which will act as a proxy
# for the video decoder src pad. The ghost pad will not have a target right
# now. Once the decode bin creates the video decoder and generates the
# cb_newpad callback, we will set the ghost pad target to the video decoder
# src pad.
Gst.Bin.add(nbin,uri_decode_bin)
bin_pad=nbin.add_pad(Gst.GhostPad.new_no_target("src",Gst.PadDirection.SRC))
if not bin_pad:
sys.stderr.write(" Failed to add ghost pad in source bin \n")
return None
return nbin
def __init__(self):
super().__init__('inference_publisher')
self.declare_parameter('input_sources')
input_sources = self.get_parameter('input_sources').value
number_sources = len(input_sources)
for i in range(number_sources):
fps_streams["stream{0}".format(i)]=GETFPS(i)
self.publisher_detection = self.create_publisher(Detection2DArray, 'multi_detection', 10)
self.publisher_classification = self.create_publisher(Classification2D, 'multi_classification', 10)
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
self.pipeline = Gst.Pipeline()
if not self.pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
print("Creating streamux \n ")
# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
self.pipeline.add(streammux)
for i in range(number_sources):
frame_count["stream_"+str(i)]=0
saved_count["stream_"+str(i)]=0
print("Creating source_bin ",i," \n ")
uri_name=input_sources[i]
if uri_name.find("rtsp://") == 0 :
is_live = True
source_bin=self.create_source_bin(i, uri_name)
if not source_bin:
sys.stderr.write("Unable to create source bin \n")
self.pipeline.add(source_bin)
padname="sink_%u" %i
sinkpad= streammux.get_request_pad(padname)
if not sinkpad:
sys.stderr.write("Unable to create sink pad bin \n")
srcpad=source_bin.get_static_pad("src")
if not srcpad:
sys.stderr.write("Unable to create src pad bin \n")
srcpad.link(sinkpad)
# Use nvinfer to run inferencing on decoder's output,
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference1")
if not pgie:
sys.stderr.write(" Unable to create pgie1 \n")
tracker = Gst.ElementFactory.make("nvtracker", "tracker")
if not tracker:
sys.stderr.write(" Unable to create tracker \n")
sgie1 = Gst.ElementFactory.make("nvinfer", "secondary1-nvinference-engine")
if not sgie1:
sys.stderr.write(" Unable to make sgie1 \n")
sgie2 = Gst.ElementFactory.make("nvinfer", "secondary2-nvinference-engine")
if not sgie1:
| |
<NAME>, <NAME>, and <NAME>, "Consistent Weighted Sampling", Unpublished technical report, 2010.
Parameters
-----------
repeat: int, default: 1
the number of repeating the algorithm as the part of the seed of the random number generator
Returns
-----------
fingerprints_k: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
fingerprints_y: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
elapsed: float
time of hashing data matrix
Notes
----------
The operations of seeking "active indices" and computing hashing values are implemented by C++
due to low efficiency of Python. The operations cannot be vectorized in Python so that it would be
very slow.
"""
fingerprints_k = np.zeros((self.instance_num, self.dimension_num))
fingerprints_y = np.zeros((self.instance_num, self.dimension_num))
start = time.time()
for j_sample in range(0, self.instance_num):
feature_id = sparse.find(self.weighted_set[:, j_sample] > 0)[0]
feature_id_num = feature_id.shape[0]
fingerprints = CDLL('./cpluspluslib/cws_fingerprints.so')
fingerprints.GenerateFingerprintOfInstance.argtypes = [c_int,
np.ctypeslib.ndpointer(dtype=c_double, ndim=1,
flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(dtype=c_int, ndim=1,
flags="C_CONTIGUOUS"),
c_int, c_int,
np.ctypeslib.ndpointer(dtype=c_double, ndim=1,
flags="C_CONTIGUOUS"),
np.ctypeslib.ndpointer(dtype=c_double, ndim=1,
flags="C_CONTIGUOUS")]
fingerprints.GenerateFingerprintOfInstance.restype = None
weights = np.array(self.weighted_set[feature_id, j_sample].todense())[:, 0]
fingerprint_k = np.zeros((1, self.dimension_num))[0]
fingerprint_y = np.zeros((1, self.dimension_num))[0]
fingerprints.GenerateFingerprintOfInstance(self.dimension_num,
weights, feature_id, feature_id_num, self.seed * repeat,
fingerprint_k, fingerprint_y)
fingerprints_k[j_sample, :] = fingerprint_k
fingerprints_y[j_sample, :] = fingerprint_y
elapsed = time.time() - start
return fingerprints_k, fingerprints_y, elapsed
def icws(self, repeat=1):
"""The Improved Consistent Weighted Sampling (ICWS) algorithm, directly samples the two special "active indices",
$y_k$ and $z_k$.
<NAME>, "Improved Consistent Weighted Sampling, Weighted Minhash and L1 Sketching",
in ICDM, 2010, pp. 246-255.
Parameters
----------
repeat: int, default: 1
the number of repeating the algorithm as the part of the seed of the random number generator
Returns
-----------
fingerprints_k: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
fingerprints_y: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
elapsed: float
time of hashing data matrix
"""
fingerprints_k = np.zeros((self.instance_num, self.dimension_num))
fingerprints_y = np.zeros((self.instance_num, self.dimension_num))
np.random.seed(self.seed * np.power(2, repeat - 1))
start = time.time()
beta = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
v1 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
v2 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u1 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u2 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
for j_sample in range(0, self.instance_num):
feature_id = sparse.find(self.weighted_set[:, j_sample] > 0)[0]
gamma = - np.log(np.multiply(u1[feature_id, :], u2[feature_id, :]))
t_matrix = np.floor(np.divide(
np.matlib.repmat(np.log(self.weighted_set[feature_id, j_sample].todense()), 1, self.dimension_num),
gamma) + beta[feature_id, :])
y_matrix = np.exp(np.multiply(gamma, t_matrix - beta[feature_id, :]))
a_matrix = np.divide(np.multiply(-np.log(np.multiply(v1[feature_id, :], v2[feature_id, :])),
np.multiply(u1[feature_id, :], u2[feature_id, :])), y_matrix)
min_position = np.argmin(a_matrix, axis=0)
fingerprints_k[j_sample, :] = feature_id[min_position]
fingerprints_y[j_sample, :] = y_matrix[min_position, np.arange(a_matrix.shape[1])]
elapsed = time.time() - start
return fingerprints_k, fingerprints_y, elapsed
def licws(self, repeat=1):
"""The 0-bit Consistent Weighted Sampling (0-bit CWS) algorithm generates the original hash code $(k, y_k)$
by running ICWS, but finally adopts only $k$ to constitute the fingerprint.
<NAME>, "0-bit Consistent Weighted Sampling", in KDD, 2015, pp. 665-674.
Parameters
----------
repeat: int, default: 1
the number of repeating the algorithm as the part of the seed of the random number generator
Returns
----------
fingerprints: ndarray, shape (n_instances, dimension_num)
hash codes for data matrix, where row represents a data instance
elapsed: float
time of hashing data matrix
"""
fingerprints = np.zeros((self.instance_num, self.dimension_num))
np.random.seed(self.seed * np.power(2, repeat - 1))
start = time.time()
beta = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
v1 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
v2 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u1 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u2 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
for j_sample in range(0, self.instance_num):
feature_id = sparse.find(self.weighted_set[:, j_sample] > 0)[0]
gamma = - np.log(np.multiply(u1[feature_id, :], u2[feature_id, :]))
t_matrix = np.floor(np.divide(
np.matlib.repmat(np.log(self.weighted_set[feature_id, j_sample].todense()), 1, self.dimension_num),
gamma) + beta[feature_id, :])
y_matrix = np.exp(np.multiply(gamma, t_matrix - beta[feature_id, :]))
a_matrix = np.divide(np.multiply(-np.log(np.multiply(v1[feature_id, :], v2[feature_id, :])),
np.multiply(u1[feature_id, :], u2[feature_id, :])), y_matrix)
min_position = np.argmin(a_matrix, axis=0)
fingerprints[j_sample, :] = feature_id[min_position]
elapsed = time.time() - start
return fingerprints, elapsed
def pcws(self, repeat=1):
"""The Practical Consistent Weighted Sampling (PCWS) algorithm improves the efficiency of ICWS
by simplifying the mathematical expressions.
<NAME>, <NAME>, <NAME>, and <NAME>, "Consistent Weighted Sampling Made More Practical",
in WWW, 2017, pp. 1035-1043.
Parameters
----------
repeat: int, default: 1
the number of repeating the algorithm as the part of the seed of the random number generator
Returns
-----------
fingerprints_k: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
fingerprints_y: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
elapsed: float
time of hashing data matrix
"""
fingerprints_k = np.zeros((self.instance_num, self.dimension_num))
fingerprints_y = np.zeros((self.instance_num, self.dimension_num))
np.random.seed(self.seed * np.power(2, repeat - 1))
start = time.time()
beta = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
x = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u1 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u2 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
for j_sample in range(0, self.instance_num):
feature_id = sparse.find(self.weighted_set[:, j_sample] > 0)[0]
gamma = - np.log(np.multiply(u1[feature_id, :], u2[feature_id, :]))
t_matrix = np.floor(np.divide(
np.matlib.repmat(np.log(self.weighted_set[feature_id, j_sample].todense()), 1, self.dimension_num),
gamma) + beta[feature_id, :])
y_matrix = np.exp(np.multiply(gamma, t_matrix - beta[feature_id, :]))
a_matrix = np.divide(-np.log(x[feature_id, :]), np.divide(y_matrix, u1[feature_id, :]))
min_position = np.argmin(a_matrix, axis=0)
fingerprints_k[j_sample, :] = feature_id[min_position]
fingerprints_y[j_sample, :] = y_matrix[min_position, np.arange(a_matrix.shape[1])]
elapsed = time.time() - start
return fingerprints_k, fingerprints_y, elapsed
def ccws(self, repeat=1, scale=1):
"""The Canonical Consistent Weighted Sampling (CCWS) algorithm directly uniformly discretizes the original weight
instead of uniformly discretizing the logarithm of the weight as ICWS.
<NAME>, <NAME>, <NAME>, and <NAME>, "Canonical Consistent Weighted Sampling for Real-Value Weighetd Min-Hash",
in ICDM, 2016, pp. 1287-1292.
Parameters
----------
repeat: int, default: 1
the number of repeating the algorithm as the part of the seed of the random number generator
scale: int
a constant to adapt the weight
Returns
-----------
fingerprints_k: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
fingerprints_y: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
elapsed: float
time of hashing data matrix
"""
fingerprints_k = np.zeros((self.instance_num, self.dimension_num))
fingerprints_y = np.zeros((self.instance_num, self.dimension_num))
np.random.seed(self.seed * np.power(2, repeat - 1))
start = time.time()
beta = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
gamma = np.random.beta(2, 1, (self.feature_num, self.dimension_num))
c = np.random.gamma(2, 1, (self.feature_num, self.dimension_num))
for j_sample in range(0, self.instance_num):
feature_id = sparse.find(self.weighted_set[:, j_sample] > 0)[0]
t_matrix = np.floor(scale * np.divide(np.matlib.repmat(self.weighted_set[feature_id, j_sample].todense(), 1,
self.dimension_num),
gamma[feature_id, :]) + beta[feature_id, :])
y_matrix = np.multiply(gamma[feature_id, :], (t_matrix - beta[feature_id, :]))
a_matrix = np.divide(c[feature_id, :], y_matrix) - 2 * np.multiply(gamma[feature_id, :], c[feature_id, :])
min_position = np.argmin(a_matrix, axis=0)
fingerprints_k[j_sample, :] = feature_id[min_position]
fingerprints_y[j_sample, :] = y_matrix[min_position, np.arange(a_matrix.shape[1])]
elapsed = time.time() - start
return fingerprints_k, fingerprints_y, elapsed
def i2cws(self, repeat=1):
"""The Improved Improved Consistent Weighted Sampling (I$^2$CWS) algorithm, samples the two special
"active indices", $y_k$ and $z_k$, independently by avoiding the equation of $y_k$ and $z_k$ in ICWS.
<NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "Improved Consistent Weighted Sampling Revisited",
DOI: 10.1109/TKDE.2018.2876250, 2018.
Parameters
----------
repeat: int, default: 1
the number of repeating the algorithm as the part of the seed of the random number generator
Returns
-----------
fingerprints_k: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
fingerprints_y: ndarray, shape (n_instances, dimension_num)
one component of hash codes $(k, y_k)$ for data matrix, where row represents a data instance
elapsed: float
time of hashing data matrix
"""
fingerprints_k = np.zeros((self.instance_num, self.dimension_num))
fingerprints_y = np.zeros((self.instance_num, self.dimension_num))
np.random.seed(self.seed * np.power(2, repeat - 1))
start = time.time()
beta1 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
beta2 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u1 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u2 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u3 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
u4 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
v1 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
v2 = np.random.uniform(0, 1, (self.feature_num, self.dimension_num))
for j_sample in range(0, self.instance_num):
feature_id = sparse.find(self.weighted_set[:, j_sample] > 0)[0]
r2 = - np.log(np.multiply(u3[feature_id, :], u4[feature_id, :]))
| |
import sys, itertools, os
from collections import deque
# Dictionary keys used throughout the formal-definition dicts (Portuguese,
# matching the input file format).
ACCEPT = 'aceita'          # list of accept states
STATES = 'estados'         # list of all states
INITIAL = 'inicial'        # the initial state
TRANSITIONS = 'transicoes' # mapping: state -> {symbol: [next states]}
# Recognized command-line flags, one per supported operation.
operations = ['-u','-i','-d','-s','-c','-m']
OP_UNION = operations[0]          # union of two automata
OP_INTERSECTION = operations[1]   # intersection of two automata
OP_DFACONVERSION = operations[2]  # NFA -> DFA conversion
OP_STAR = operations[3]           # Kleene star
OP_COMPLEMENT = operations[4]     # complement
OP_MINIMIZATION = operations[5]   # minimization
# Template of a formal definition; per-automaton dicts are built fresh in
# readAutomataFile.
formalDef = {STATES: [], INITIAL: '', ACCEPT: [], TRANSITIONS: {}}
# NOTE(review): this global appears unused — handleInput builds its own local
# `automatas` list; confirm before removing.
automatas = []
# _getNotFinalStates returns the states that are not accept states
def _getNotFinalStates(formalDef):
    """Return the automaton's states that are not accept states.

    :param formalDef: formal definition dict (STATES/ACCEPT/... keys)
    :return: list of non-accepting states, in STATES order
    """
    # Idiomatic rewrite of the original append loop; `state not in` replaces
    # the awkward `not state in`.
    return [state for state in formalDef[STATES] if state not in formalDef[ACCEPT]]
def _getNthEquivalentStates(formalDef):
    """Group states by their distance (in transitions) from an accept state.

    Returns a list of groups: the accept states first, then, per iteration,
    the set of states found to reach the current 'final' frontier; states
    that never reach an accept state are appended as the last group.
    """
    notFinalStates = _getNotFinalStates(formalDef)
    finalStates = formalDef[ACCEPT][:]
    remainingStates = notFinalStates[:]
    alphabet = getAlphabet(formalDef)
    alphabet.add('e')
    transitions = formalDef[TRANSITIONS]
    result = [finalStates[:]]
    while remainingStates != []:
        nextFinal = set()
        previousRemainingStates = remainingStates[:]
        #finding nth-distance from final state
        for state in remainingStates:
            for symbol in alphabet:
                # NOTE(review): `transitions[state]` is a dict keyed by symbol
                # (see readAutomataFile), so iterating it yields symbol strings
                # and `transition[symbol]` indexes a *string* with a string —
                # this looks like it always raises and is swallowed below.
                # Probably `transitions[state][symbol]` was intended; confirm.
                for transition in transitions[state]:
                    try:
                        nextState = transition[symbol]
                        if nextState in finalStates:
                            nextFinal.add(state)
                    # NOTE(review): bare except hides the error above; if the
                    # lookup is fixed, narrow this to KeyError/TypeError.
                    except:
                        pass
        #remove from remaining states and append to result
        for state in nextFinal:
            if state in remainingStates:
                remainingStates.remove(state)
            finalStates.append(state)
        if nextFinal!=set():
            result.append(nextFinal)
        # Guard against non-terminating cases: if no progress was made, dump
        # the unreachable states as one final group and stop.
        if previousRemainingStates == remainingStates:
            result.append(remainingStates)
            break
    return result
# _minimalStates calculates the minimal states for the automaton given
def _minimalStates(formalDef):
    """Collapse each equivalence group into one concatenated state name."""
    return [''.join(group) for group in _getNthEquivalentStates(formalDef)]
# _getInitialState calculates the new initial state for the minimal automaton
def _getInitialState(formalDef, minimalStates):
    """Return the merged state containing the original initial state ('' if none)."""
    initial = formalDef[INITIAL]
    return next((candidate for candidate in minimalStates if initial in candidate), '')
# _getAcceptStates calculates the new accept states for the minimal automaton
def _getAcceptStates(formalDef, minimalStates):
    """Return merged states that contain at least one original accept state."""
    accept = {state
              for state in minimalStates
              for finalState in formalDef[ACCEPT]
              if finalState in state}
    return list(accept)
# _getTransitions calculates the transitions for the new automaton
# with its states changed to be the minimal ones.
def _getTransitions(formalDef, minimalStates):
    """Rebuild the transition table in terms of the merged (minimal) state names.

    :param formalDef: original formal definition dict
    :param minimalStates: merged state names (concatenations of original names)
    :return: dict mapping merged state -> {symbol: [merged next states]}
    """
    transitions = {}
    alphabet = getAlphabet(formalDef)
    for state in minimalStates:
        transitions[state] = {}
        # BUG FIX: the original also did `transitions[symbol] = []` here,
        # polluting the result with stray symbol keys; removed.
        for symbol in alphabet:
            nextStates = []
            # Merged names are concatenations of single-char original states.
            for s in state:
                if s in formalDef[TRANSITIONS]:
                    # .get guards states with no transition on this symbol
                    # (the original raised KeyError in that case).
                    for b in formalDef[TRANSITIONS][s].get(symbol, []):
                        for nextState in minimalStates:
                            if b in nextState:
                                nextStates.append(nextState)
            transitions[state][symbol] = list(set(nextStates))
    return transitions
# minimization generates a new automaton with the minimal states and transitions
# necessaries to do the same computation as the larger received does
def minimization(formalDef):
    """Return the formal definition of the minimized automaton."""
    minimalStates = _minimalStates(formalDef)
    return {
        STATES: minimalStates,
        INITIAL: _getInitialState(formalDef, minimalStates),
        ACCEPT: _getAcceptStates(formalDef, minimalStates),
        TRANSITIONS: _getTransitions(formalDef, minimalStates),
    }
# handleInput read input commands and redirect to responsible function
def handleInput():
    """Parse sys.argv, load the automata files and dispatch the requested operation."""
    files = [arg for arg in sys.argv[1:] if arg not in operations and os.path.isfile(arg)]
    operation = next((arg for arg in sys.argv[1:] if arg in operations), '')
    automatas = [readAutomataFile(path) for path in files]
    result = ''
    # Operations taking two automata vs. one.
    binary_ops = {OP_UNION: union, OP_INTERSECTION: intersection}
    unary_ops = {OP_DFACONVERSION: nfaToDfa,
                 OP_STAR: starOperation,
                 OP_COMPLEMENT: generateComplement,
                 OP_MINIMIZATION: minimization}
    if operation in binary_ops:
        result = binary_ops[operation](automatas[0], automatas[1])
    elif operation in unary_ops:
        result = unary_ops[operation](automatas[0])
    else:
        # No operation flag given: run the word simulation instead.
        simulate(automatas[0])
    if operation != '':
        writeOutputFile(result)
# readAutomataFile reads a text file and specifies the automaton,
# requires the path to file
def readAutomataFile(filePath):
    """Parse an automaton description file into its formal definition dict.

    Header lines start with a known key (estados/inicial/aceita/transicoes);
    any other line is a transition triple: "currentState nextState symbol".

    :param filePath: path to the automaton description file
    :return: formal definition dict (STATES/INITIAL/ACCEPT/TRANSITIONS)
    """
    formalDef = {STATES: [], INITIAL: '', ACCEPT: [], TRANSITIONS: {}}
    # BUG FIX: use a context manager so the file is closed even if a malformed
    # line raises while parsing (the original leaked the handle on error).
    with open(filePath, 'r') as inputFile:
        for line in inputFile:
            args = line.strip().split(' ', 1)
            if args[0] in formalDef:
                # Header line: "key v1, v2, ..."
                formalDef[args[0]] = args[1].split(', ')
            else:
                # Transition line: "currentState nextState symbol"
                currentState, nextState, symbol = line.strip().split(' ')
                stateTransitions = formalDef[TRANSITIONS].setdefault(currentState, {})
                stateTransitions.setdefault(symbol, []).append(nextState)
    # The file lists one initial state; unwrap it from the parsed list.
    formalDef[INITIAL] = formalDef[INITIAL][0]
    return formalDef
# simulate process the automaton specified,
# when it reads the word given it returns a list
# with the states where the automaton stops at the end of the word
# requires the formal definition of the automaton
def simulate(formalDef):
    """Run the automaton on the word from sys.argv[2], printing each step."""
    word = sys.argv[2]
    currentStates = [formalDef[INITIAL]]
    print('{0:<60} {1:<}'.format('ESTADO','PALAVRA'))
    print('{0:<60} {1:<}'.format(formalDef[INITIAL], word))
    while word:
        # Consume one symbol and advance every active state.
        symbol, word = word[0], word[1:]
        currentStates = nfaTraverse(formalDef, currentStates, symbol)
        print('{0:<60} {1:<}'.format(', '.join(str(state) for state in currentStates),
                                     word if word else 'e'))
    showVeredict(formalDef, currentStates)
# showVeredict receives a list with the states the automaton stops
# at the end of a word and verifies if one of them is an accept state
# and shows a message according to it.
# requires the formal definition of the automaton and list of states
def showVeredict(formalDef, states):
    """Print whether the word was accepted (any final state is accepting)."""
    accepted = any(state in formalDef[ACCEPT] for state in states)
    print('\nA palavra %sfoi aceita' % ('nao ' if not accepted else ''))
# generateComplement generate the complement automaton for the
# automaton received in the param.
# it returns a formal definition for the complement automaton.
# requires the formal definition of the automaton
def generateComplement(formalDef):
    """Return the complement automaton: convert to DFA, then invert accept states."""
    dfa = nfaToDfa(formalDef)
    return {
        INITIAL: dfa[INITIAL],
        TRANSITIONS: dfa[TRANSITIONS].copy(),
        STATES: dfa[STATES][:],
        # Accepting states become exactly those that were not accepting.
        ACCEPT: list(set(dfa[STATES]) - set(dfa[ACCEPT])),
    }
# writeOutputFile writes an automaton to the default output
# requires the formal definition of the automaton
def writeOutputFile(formalDef):
    """Print the automaton in the same text format readAutomataFile parses.

    :param formalDef: formal definition dict (STATES/INITIAL/ACCEPT/TRANSITIONS)
    """
    # Build the parts in a list and join once, instead of the original
    # quadratic string `+=` accumulation.
    parts = [STATES + ' ' + ', '.join(str(state) for state in formalDef[STATES]) + '\n',
             INITIAL + ' ' + formalDef[INITIAL] + '\n',
             ACCEPT + ' ' + ', '.join(str(state) for state in formalDef[ACCEPT]) + '\n']
    for state, symbols in formalDef[TRANSITIONS].items():
        for symbol, nextStates in symbols.items():
            for nextState in nextStates:
                # One "currentState nextState symbol" line per transition.
                parts.append(state + ' ' + nextState + ' ' + symbol + '\n')
    print(''.join(parts))
# traverse returns where the automaton goes from a single (state, symbol)
# pair; empty list when no such transition exists.
def traverse(formalDef, state, symbol):
    """Look up the transition for (state, symbol); [] if absent."""
    return formalDef[TRANSITIONS].get(state, {}).get(symbol, [])
# traverseMultipleStates does the same as traverse but starting from several
# states at once, concatenating every reachable next state.
def traverseMultipleStates(formalDef, states, symbol):
    """Return all states reachable from `states` on `symbol` (with repeats)."""
    reached = []
    transitions = formalDef[TRANSITIONS]
    for state in states:
        reached.extend(transitions.get(state, {}).get(symbol, []))
    return reached
# traverseIndefinitely keeps applying one symbol until the reachable set
# stops growing (used to compute epsilon-closures).
def traverseIndefinitely(formalDef, states, symbol):
    """Return the fixed point of repeatedly traversing `symbol` from `states`."""
    previous = None
    while True:
        states = set(states) | set(traverseMultipleStates(formalDef, states, symbol))
        if states == previous:
            return states
        previous = states
# nfaTraverse performs one NFA step: epsilon-closure, consume the symbol,
# epsilon-closure again.
def nfaTraverse(formalDef, states, symbol):
    """Return the NFA state set after consuming `symbol` from `states`."""
    closure = traverseIndefinitely(formalDef, states, 'e')
    moved = traverseMultipleStates(formalDef, closure, symbol)
    return traverseIndefinitely(formalDef, moved, 'e')
# findStateFromStateCombinaton is an auxiliary helper that builds the
# underscore-joined state string for a given combination of states.
def findStateFromStateCombinaton(stateCombinations, combination):
    """Return the "_"-joined name of the combination matching `combination` ('' if none)."""
    for candidate in stateCombinations:
        if set(candidate) == combination:
            return "_".join(str(state) for state in candidate)
    return ""
# getAlphabet(formalDef) collects every symbol used by the automaton's
# transitions, excluding the epsilon marker 'e'.
def getAlphabet(formalDef):
    """Return the set of non-epsilon symbols appearing in any transition."""
    alphabet = {symbol
                for stateTransitions in formalDef[TRANSITIONS].values()
                for symbol in stateTransitions}
    alphabet.discard('e')
    return alphabet
# nfaToDfa build a dfa from a nfa
# requires the formal definition of the automaton
def nfaToDfa(formalDef):
newFormalDef = {STATES: [], INITIAL: '', ACCEPT: [], TRANSITIONS: {}}
for s in formalDef[STATES]:
if "_" in s:
print("Não foi possível converter o automato para nfa pois o automato utiliza '_' em sua nomenclatura de estados")
return formalDef
# CREATE STATE COMBINATIONS (from each state)
states = formalDef[STATES]
stateCombinations = []
newStates = []
for i in range(1,len(states)+1):
combinations = set(itertools.combinations(states,i))
for comb in combinations:
stateCombinations.append(comb)
for states in stateCombinations:
newStates.append("_".join(str(state) for state in states))
newStates.append("_")
newFormalDef[STATES] = newStates
# SET ACCEPTING STATES (for every state accept the ones that has at least one accepting state)
accepting_states = formalDef[ACCEPT]
newAccepting_states = []
for states in stateCombinations:
for accept_state in accepting_states:
if accept_state in states:
newAccepting_states.append("_".join(str(state) for state in states))
newAccepting_states = set(newAccepting_states)
newFormalDef[ACCEPT] = newAccepting_states
# SET INITIAL STATE (from the initial state traverse all 'e' symbols until you can't you
# get the most states. The initial state is a combination of every single starting state)
initial = formalDef[INITIAL]
newInitial = traverseIndefinitely(formalDef,initial,'e')
newFormalDef[INITIAL] = findStateFromStateCombinaton(stateCombinations,newInitial)
# | |
<filename>scripts/scriptmaker.py
#! /bin/python3
## imports
import sys, os, getopt
import re, json
import shutil
import copy
from collections import defaultdict, namedtuple
from datetime import datetime
import pandas as pd
#########################################################################################################################
## scriptmaker classes
class ConfigException(Exception):
    """Raised for errors found in the config file."""
    pass
class FileOptions:
    """Per-file options/markers for a generated bash script.

    The three marker strings tag echoed lines so downstream tooling can
    distinguish parameter info, parameter values, and program output.
    """
    def __init__(self):
        # Start from the defaults; reset() is also called on file open/close.
        self.reset()
    def reset(self):
        """Restore every option to its default value."""
        # Whether the '#!' line has already been written for this file.
        self.Shebang = False
        # Line-prefix markers (kept stable: consumers parse them).
        self.ParamInfo = "@##"
        self.ParamValue = "@@#"
        self.ProgramOutput = "@@@"
class ScriptMaker:
    """Write a plain bash script line by line.

    Supports the 'with ... as ...' protocol: entering opens the target file
    for writing and exiting closes it; FileOptions is reset on both.
    """
    def __init__(self, name=None):
        # Default output file name when none is given.
        self._name = 'runscript.sh' if name is None else name
        self._entered = False
        self.FileOptions = FileOptions()
    def _checkfile(self):
        # Writing is only valid between __enter__ and __exit__.
        if not self._entered:
            raise IOError("No file opened")
    def __enter__(self):
        self._file = open(self._name, 'w')
        self._entered = True
        self.FileOptions.reset()
        return self
    def __exit__(self, type, value, tb):
        self._file.close()
        self._entered = False
        self.FileOptions.reset()
    def writeLine(self, arg):
        """Append one line (newline added) to the open script file."""
        self._checkfile()
        self._file.write(arg + "\n")
    def writeDirective(self, directive, value):
        # Plain bash has no scheduler directives; subclasses override this.
        pass
    def writeTask(self, line, options=None):
        """Write a task command line (no wrapping for plain bash)."""
        self.writeLine(line)
    def writeJob(self, line, options=None):
        """Write a job submission line (no wrapping for plain bash)."""
        self.writeLine(line)
    def echoParamInfo(self, arg):
        """Emit an echo of `arg` tagged with the ParamInfo marker."""
        self.writeLine('echo "{} {}"'.format(self.FileOptions.ParamInfo, arg))
    def echoParamValue(self, arg):
        """Emit an echo of `arg` tagged with the ParamValue marker."""
        self.writeLine('echo "{} {}"'.format(self.FileOptions.ParamValue, arg))
    def shebang(self, path="/bin/bash"):
        """Write the '#!' line once; subsequent calls are no-ops."""
        if not self.FileOptions.Shebang:
            self.writeLine("#! {}".format(path))
            self.FileOptions.Shebang = True
class SBatchScriptMaker(ScriptMaker):
    """Script maker emitting SLURM #SBATCH directives, srun task lines and
    sbatch job submissions."""
    def __init__(self, name=None, options=None):
        # NOTE(review): `options` is accepted but currently unused — confirm intent.
        super(SBatchScriptMaker, self).__init__(name=name)
    def shebang(self, path="/bin/bash"):
        # SLURM scripts are run through a login shell.
        super(SBatchScriptMaker, self).shebang(path + ' -l')
    def writeDirective(self, directive, value):
        """Write one '#SBATCH --directive=value' line."""
        self.writeLine("#SBATCH --{}={}".format(directive, value))
    def writeTask(self, line, options=''):
        """Write an srun task line, taking the CPU count from options.threads.

        Defaults to a single CPU when `options` has no `threads` attribute
        (the original wrapped this in a bare try/except that swallowed every
        error; getattr with a default keeps the same behavior narrowly).
        """
        parsed = '-c {}'.format(getattr(options, "threads", 1))
        # NOTE(review): the trailing "; p" looks like leftover debug text that
        # would run a command named `p` in the shell — confirm before removing.
        self.writeLine("srun {} {} ; p".format(parsed, line))
    def writeJob(self, line, options=None):
        """Write an 'sbatch <script>' submission line."""
        self.writeLine("sbatch "+line)
class BsubScriptMaker(ScriptMaker):
    """Script maker emitting LSF #BSUB directives and bsub job submissions."""
    def __init__(self, name=None):
        super(BsubScriptMaker, self).__init__(name=name)
    def writeDirective(self, directive, value):
        """Write one '#BSUB -directive value' line."""
        directive_line = "#BSUB -{} {}".format(directive, value)
        self.writeLine(directive_line)
    def writeJob(self, line, options=None):
        """Write a 'bsub < script' submission line."""
        self.writeLine("bsub < " + line)
## end scriptmaker classes
#########################################################################################################################
## parse options
def stripComment(string, comment_prefix='//'):
    """Remove line comments from *string*, leaving comment-prefix sequences
    that occur inside single- or double-quoted strings intact.

    :param string: text to strip.
    :param comment_prefix: token that starts a line comment (default '//').
    :return: the text with comments removed.
    """
    def replacer(match):
        s = match.group(0)
        if s.startswith(comment_prefix):
            return ''   # matched a comment: drop it
        return s        # matched a quoted string: keep it verbatim
    # Match either a comment or a quoted string; quoted strings are matched
    # as whole tokens so a comment prefix inside them is not stripped.
    # Fixes: the single-quoted alternative was missing its closing \' and
    # the prefix is now escaped so regex metacharacters cannot leak in.
    pattern = re.compile(
        re.escape(comment_prefix) + r'.*|"(?:\\.|[^\\"])*"|\'(?:\\.|[^\\\'])*\'',
        re.MULTILINE)
    return re.sub(pattern, replacer, string)
def paramType(param_location):
    """Return type of parameter: none/named/positional.

    None or 'none' -> 'none'; a positive integer (or a string parsing as
    one) -> 'positional'; anything else -> 'named'.
    """
    if param_location is None or str(param_location) == 'none':
        return 'none'
    try:
        if int(param_location) > 0:
            return 'positional'
    except (TypeError, ValueError):
        # not integer-like -> fall through to 'named'
        pass
    return 'named'
def listRange(start, stop, adder):
    """Create a linear range from start up to (including) stop in adder steps."""
    values = []
    current = start
    while current <= stop:
        values.append(current)
        current += adder
    return values
def parseToValueList(item):
    """Parse a range item into a list of values.

    Items of the form 'start:stop[:step]' expand into an inclusive range
    (float if the item contains '.', int otherwise).  Anything that fails
    to parse is returned verbatim as a single-element list.  All values
    are returned as strings.
    """
    if not isinstance(item, str):
        item = str(item)
    try:
        parts = item.split(':')
        cast = float if '.' in item else int
        if len(parts) == 3:
            lo, hi, step = cast(parts[0]), cast(parts[1]), cast(parts[2])
        elif len(parts) == 2:
            lo, hi = cast(parts[0]), cast(parts[1])
            step = cast(1)
        else:
            lo = cast(parts[0])
            # pad the stop for floats so numeric precision cannot drop the
            # single value; ints compare exactly
            hi = lo + 0.1 if cast is float else lo
            step = cast(1)
        expanded = listRange(lo, hi, step)
    except:
        expanded = [item]
    return [str(e) for e in expanded]
def parseParameterValues(values):
    """Expand raw parameter values into flat lists of string values.

    *values* may be a scalar/range item, a flat list of items, or a list of
    lists (parameter tandems, which are expanded position-wise).

    :raises ConfigException: if tandem lists expand to different lengths.
    """
    if not isinstance(values, list):
        return parseToValueList(values)
    if not values:
        # Empty list: nothing to expand (the original raised IndexError on
        # the values[0] probe below).
        return []
    result = []
    if isinstance(values[0], list):
        # Tandem parameters: each inner list expands per position.
        for value in values:
            list2d = [parseToValueList(item) for item in value]
            len1d = len(list2d[0])
            for lst in list2d:
                if len(lst) != len1d:
                    # Diagnostics kept from the original implementation.
                    print("len1d: ", len1d)
                    print("lst: ", lst)
                    print("list2d[0]:", list2d[0])
                    raise ConfigException("Different number of values in parameter-tandem")
            for i in range(len1d):
                result.append([list1d[i] for list1d in list2d])
    else:
        for value in values:
            result.extend(parseToValueList(value))
    return result
def checkParameterValues(key, values):
    """Validate that the nesting of *values* matches the arity of *key*.

    Multi-word keys (tandems) require nested lists of matching length;
    single-word keys must not contain nested lists.

    :raises ConfigException: on a nesting mismatch.
    """
    arity = len(key.split())
    if arity == 1:
        if isinstance(values, list):
            for entry in values:
                if isinstance(entry, list):
                    raise ConfigException("values of parameter '{}' cannot contain nested list".format(key))
        return
    if not isinstance(values, list):
        raise ConfigException("values of parameter '{}' must be nested list".format(key))
    for entry in values:
        if not (isinstance(entry, list) and len(entry) == arity):
            raise ConfigException("values of parameter '{}' must be nested lists of length {}".format(key, arity))
def checkParameterTypes(param_locations, dtype=None, alg=''):
    """Verify that all parameter locations share a single type.

    :param param_locations: mapping of parameter name -> location (or list
        of locations for tandem parameters).
    :param dtype: expected type ('none'/'named'/'positional'); inferred
        from the first location when omitted.
    :param alg: algorithm name used in error messages.
    :return: the (possibly inferred) type.
    :raises ConfigException: if location types are inconsistent.
    """
    if dtype is None:
        if len(param_locations) > 0:
            first = list(param_locations.values())[0]
            if isinstance(first, list):
                # tandem: infer from first entry; an empty list means 'none'
                dtype = paramType(first[0]) if len(first) > 0 else 'none'
            else:
                dtype = paramType(first)
        else:
            dtype = 'none'
    if len(param_locations) > 0:
        for location in param_locations.values():
            # normalize scalar locations to one-element lists
            entries = location if isinstance(location, list) else [location]
            for entry in entries:
                if dtype != paramType(entry):
                    raise ConfigException("parameters_locations of algorithm '{}' ".format(alg)+
                                          "do not have consistent type")
    else:
        if dtype != 'none':
            raise ConfigException("parameters_location of algorithm '{}' ".format(alg)+
                                  "do not have consistent type")
    return dtype
def parseOptions(json_options):
    """Parse the raw json options.

    Parses the parameter values into ranges and adds missing information
    that can be inferred from other values.

    :return: parsed options as dictionary.
    :raises ConfigException: if a parameter key has no values anywhere.
    """
    parsed = dict(json_options)
    if "algorithms" in json_options:
        algorithms = copy.deepcopy(json_options["algorithms"])
    else:
        algorithms = list()
    # Drop algorithms explicitly flagged to be ignored.
    algorithms = [alg for alg in algorithms if alg.get("ignore") != "true"]
    for alg in algorithms:
        # Check for positional/named arguments; infer the type if absent.
        if "parameters_type" in alg:
            checkParameterTypes(alg["parameters_location"], alg["parameters_type"], extractName(alg["executable"]))
        else:
            alg["parameters_type"] = checkParameterTypes(alg["parameters_location"], alg=extractName(alg["executable"]))
        # Resolve each parameter's values, falling back to the shared pool.
        option = dict()
        for k in alg["parameters_location"]:
            if k in alg["parameters_values"]:
                option[k] = parseParameterValues(alg["parameters_values"][k])
            elif "common_parameter_values" in json_options and k in json_options["common_parameter_values"]:
                option[k] = parseParameterValues(json_options["common_parameter_values"][k])
            else:
                raise ConfigException("Key '{}' not found in \"parameters_values\" or \"common_parameters_values\"".format(k))
        alg["parameters_values"] = option
    parsed["algorithms"] = algorithms
    return parsed
## end parse options
#########################################################################################################################
## cartesian product / parameter space
def spanCartesionProduct(algorithm):
    """Span the parameter space of a single algorithm.

    Builds the cartesian product of all parameter value lists and orders
    the resulting columns by parameter location.

    :param algorithm: parsed json configuration of a single algorithm.
    :return: pandas DataFrame ('executable' column plus one column per
        parameter, ordered by location).
    """
    # Build one frame per parameter (tandem keys may span several columns)
    # and join them on a constant key to form the cartesian product.
    product = pd.DataFrame(data=[algorithm["executable"]], columns=['executable'])
    product['temp_key_'] = 0
    for colsName in algorithm["parameters_location"]:
        frame = pd.DataFrame(
            data=algorithm["parameters_values"][colsName], columns=colsName.split()
        )
        frame['temp_key_'] = 0
        product = pd.merge(product, frame, how='outer', on='temp_key_')
    product.drop(columns=['temp_key_'], inplace=True)
    # Map each location back to its parameter name so that the columns can
    # be ordered by location.
    posDict = {}
    for key, location in algorithm["parameters_location"].items():
        if isinstance(location, list):
            for name, pos in zip(key.split(), location):
                posDict[pos] = name
        else:
            posDict[location] = key
    if algorithm["parameters_type"] == "positional":
        # sort numerically, then restore the string keys
        ordered = sorted(int(p) for p in posDict)
        ordered = [str(p) for p in ordered]
    else:
        ordered = sorted(posDict)
    return product[["executable"] + [posDict[p] for p in ordered]]
def spanParameterSpace(options):
    """Span the parameter space as the cartesian product of the options per algorithm.

    :param options: parsed json configuration.
    :return: pandas DataFrame with one row per parameter configuration.
    """
    # DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
    # collect the per-algorithm frames and concatenate them once instead.
    frames = [spanCartesionProduct(alg) for alg in options["algorithms"]]
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True, sort=False)
## end cartesian product / parameter space
#########################################################################################################################
## create single scripts
def extractName(arg):
    """Try to extract a basename from the provided argument.

    Strips the directory part and a known data-file extension, then
    replaces any remaining dots with underscores.
    """
    if not isinstance(arg, str):
        arg = str(arg)
    try:
        name = os.path.basename(arg)
    except:
        name = arg
    for extension in ('el', 'txt', 'dat', 'csv'):
        name = re.sub(r'\.{}$'.format(extension), '', name)
    return re.sub(r'\.', '_', name)
def generateNameSuffix():
    """Create time stamp string (YYYYMMDD_HHMMSS)."""
    return '{:%Y%m%d_%H%M%S}'.format(datetime.now())
def scriptAlgorithmUnit(param_space, options, maker):
"""Write script with all parameter configurations for a single algorithm.
Keyword arguments:
param_space -- pandas DataFrame with all relevant parameter configurations.
options -- The parsed json configuration.
maker -- Script writer. Should inherit from ScriptMaker/adhere to its interface"""
# drop irrelevant columns
#param_space = param_space.dropna(axis='columns', how='all')
namePosPairs = []
for k in options["parameters_location"]:
val = options["parameters_location"][k]
if isinstance(val, list):
keys = k.split()
for i in range(len(val)):
namePosPairs.append( (keys[i], val[i]) )
else:
namePosPairs.append( (k, val) )
posDict = { tp[1]: tp[0] for tp in namePosPairs }
if options["parameters_type"] == "positional":
ckeys = [ int(p) for p in posDict.keys()]
ckeys.sort()
ckeys = [ str(p) for p in ckeys ]
else:
ckeys = list(posDict.keys())
ckeys.sort()
pcols | |
All tests passed:
return True
    def is_in_interfaces(self, s):
        """
        Check whether ports comprised by a selector are in the stored interfaces.

        Parameters
        ----------
        s : str or unicode
            Port selector.

        Returns
        -------
        result : bool
            True if the comprised ports are in any of the stored interfaces.
        """
        try:
            # Pad the expanded selector with blanks to prevent pandas from
            # spurious matches such as mistakenly validating '/foo' as being in
            # an Interface that only contains the ports '/foo[0:2]':
            idx = self.sel.expand(s, self.idx_levels)
            if not isinstance(self.data.index, pd.MultiIndex):
                # Single-level index: unwrap the 1-tuples produced by expand()
                idx = [x[0] for x in idx]
            d = self.data['interface'].loc[idx]
            if isinstance(d, int):
                # Scalar lookup result: the single requested port exists.
                # NOTE(review): presumably interface ids are ints; confirm a
                # scalar .loc result cannot be NaN here.
                return True
            if np.any(d.isnull().tolist()):
                # At least one expanded port has no interface assignment:
                return False
            else:
                return True
        except:
            # Expansion/lookup failed; fall back to a plain selector
            # containment test against the index.
            return self.sel.is_in(s, self.index.tolist())
def out_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to output ports.
Parameters
----------
i : int
Interface identifier. If None, return all output ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all output ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['io'] == 'out']
except:
df = None
else:
try:
df = self.data[(self.data['io'] == 'out') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def port_select(self, f, inplace=False):
"""
Restrict Interface ports with a selection function.
Returns an Interface instance containing only those rows
whose ports are passed by the specified selection function.
Parameters
----------
f : function
Selection function with a single tuple argument containing
the various columns of the Interface instance's MultiIndex.
inplace : bool, default=False
If True, update and return the given Interface instance.
Otherwise, return a new instance.
Returns
-------
i : Interface
Interface instance containing ports selected by `f`.
"""
assert callable(f)
if inplace:
self.data = self.data.select(f)
return self
else:
return Interface.from_df(self.data.select(f))
def spike_ports(self, i=None, tuples=False):
"""
Restrict Interface ports to spiking ports.
Parameters
----------
i : int
Interface identifier. If None, return all spiking ports.
tuples : bool
If True, return a list of tuples; if False, return an
Interface instance.
Returns
-------
interface : Interface or list of tuples
Either an Interface instance containing all spiking ports and
their attributes in the specified interface, or a list of tuples
corresponding to the expanded ports.
"""
if i is None:
try:
df = self.data[self.data['type'] == 'spike']
except:
df = None
else:
try:
df = self.data[(self.data['type'] == 'spike') & \
(self.data['interface'] == i)]
except:
df = None
if tuples:
if df is None:
return []
else:
return df.index.tolist()
else:
if df is None:
return Interface()
else:
return self.from_df(df)
def to_selectors(self, i=None):
"""
Retrieve Interface's port identifiers as list of path-like selectors.
Parameters
----------
i : int
Interface identifier. If set to None, return all port identifiers.
Returns
-------
selectors : list of str
List of selector strings corresponding to each port identifier.
"""
ids = self.to_tuples(i)
result = []
for t in ids:
selector = ''
for s in t:
if isinstance(s, basestring):
selector += '/'+s
else:
selector += '[%s]' % s
result.append(selector)
return result
def to_tuples(self, i=None):
"""
Retrieve Interface's port identifiers as list of tuples.
Parameters
----------
i : int
Interface identifier. If set to None, return all port identifiers.
Returns
-------
result : list of tuple
List of token tuples corresponding to each port identifier.
"""
if i is None:
if isinstance(self.index, pd.MultiIndex):
return self.index.tolist()
else:
return [(t,) for t in self.index]
try:
if isinstance(self.index, pd.MultiIndex):
return self.data[self.data['interface'] == i].index.tolist()
else:
return [(t,) for t in self.data[self.data['interface'] == i].index]
except:
return []
    def which_int(self, s):
        """
        Return the interface containing the identifiers comprised by a selector.

        Parameters
        ----------
        s : str or unicode
            Port selector.

        Returns
        -------
        i : set
            Set of identifiers for interfaces that contain ports comprised by
            the selector.
        """
        try:
            # Expand the selector against the index levels and look up the
            # interface assignment of every resulting port:
            idx = self.sel.expand(s, self.idx_levels)
            if not isinstance(self.data.index, pd.MultiIndex):
                # Single-level index: unwrap the 1-tuples produced by expand()
                idx = [x[0] for x in idx]
            d = self.data['interface'].loc[idx]
            s = set(d)
            # Ports without an interface assignment appear as NaN; drop them:
            s.discard(np.nan)
            return s
        except:
            # Expansion failed; fall back to selector-based indexing.
            try:
                s = set(self[s, 'interface'].values.flatten())
                # Ignore unset entries:
                s.discard(np.nan)
                return s
            except KeyError:
                return set()
    def __copy__(self):
        """
        Make a copy of this object.
        """
        return self.from_df(self.data)
    # Expose __copy__ under the conventional name 'copy' as well, carrying
    # over the docstring so help() shows it for both names.
    copy = __copy__
    copy.__doc__ = __copy__.__doc__
def set_pm(self, t, pm):
"""
Set port mapper associated with a specific port type.
Parameters
----------
t : str or unicode
Port type.
pm : neurokernel.plsel.BasePortMapper
Port mapper to save.
"""
# Ensure that the ports in the specified port mapper are a subset of
# those in the interface associated with the specified type:
assert isinstance(pm, BasePortMapper)
if not self.sel.is_in(pm.index.tolist(),
self.pm[t].index.tolist()):
raise ValueError('cannot set mapper using undefined selectors')
self.pm[t] = pm.copy()
    def equals(self, other):
        """
        Check whether this interface is equivalent to another interface.

        Parameters
        ----------
        other : neurokernel.pattern.Interface
            Interface instance to compare to this Interface.

        Returns
        -------
        result : bool
            True if the interfaces are identical.

        Notes
        -----
        Interfaces containing the same rows in different orders are not
        regarded as equivalent.
        """
        assert isinstance(other, Interface)
        # Delegates to pandas DataFrame.equals (order-sensitive comparison).
        return self.data.equals(other.data)
def __len__(self):
return self.data.__len__()
def __repr__(self):
return 'Interface\n---------\n'+self.data.__repr__()
class Pattern(object):
"""
Connectivity pattern linking sets of interface ports.
This class represents connection mappings between interfaces comprising
sets of ports. Ports are represented using path-like identifiers;
the presence of a row linking the two identifiers in the class' internal
index indicates the presence of a connection. A single data attribute
('conn') associated with defined connections is created by default.
Specific attributes may be accessed by specifying their names after the
port identifiers; if a nonexistent attribute is specified when a sequential
value is assigned, a new column for that attribute is automatically
created: ::
p['/x[0]', '/y[0]', 'conn', 'x'] = [1, 'foo']
The direction of connections between ports in a class instance determines
whether they are input or output ports. Ports may not both receive input or
emit output. Patterns may contain fan-out connections, i.e., one source port
connected to multiple destination ports, but not fan-in connections, i.e.,
multiple source ports connected to a single destination port.
Examples
--------
>>> p = Pattern('/x[0:3]','/y[0:4]')
>>> p['/x[0]', '/y[0:2]'] = 1
>>> p['/y[2]', '/x[1]'] = 1
>>> p['/y[3]', '/x[2]'] = 1
Attributes
----------
data : pandas.DataFrame
Connection attribute data.
index : pandas.MultiIndex
Index of connections.
interface : Interface
Interfaces containing port identifiers and attributes.
Parameters
----------
sel0, sel1, ...: str, unicode, or sequence
Selectors defining the sets of ports potentially connected by the
pattern. These selectors must be disjoint, i.e., no identifier
comprised by one selector may be in any other selector.
columns : sequence of str
Data column names.
See Also
--------
plsel.SelectorMethods
"""
    def __init__(self, *selectors, **kwargs):
        """Create a Pattern over the ports comprised by *selectors*.

        :param selectors: disjoint port selectors, one per interface.
        :param columns: keyword-only; data column names (default ['conn']).
        """
        columns = kwargs.get('columns', ['conn'])
        self.sel = SelectorMethods()
        # Force sets of identifiers to be disjoint so that no identifier can
        # denote a port in more than one set:
        assert self.sel.are_disjoint(*selectors)
        # Collect all of the selectors (Selector instances, selector strings
        # or plain iterables of expanded identifiers are all accepted):
        selector = []
        for s in selectors:
            if isinstance(s, Selector) and len(s) != 0:
                selector.extend(s.expanded)
            elif isinstance(s, basestring):
                selector.extend(self.sel.parse(s))
            elif np.iterable(s):
                selector.extend(s)
            else:
                raise ValueError('invalid selector type')
        # Create Interface instance containing the ports comprised by all of the
        # specified selectors:
        self.interface = Interface(selector)
        # Set the interface identifiers associated with each of the selectors
        # consecutively:
        for i, s in enumerate(selectors):
            self.interface[s, 'interface'] = i
        # Create a MultiIndex that can store mappings between identifiers in the
        # two interfaces:
        self.num_levels = {'from': self.interface.num_levels,
                           'to': self.interface.num_levels}
        names = ['from_%s' % i for i in range(self.num_levels['from'])]+ \
                ['to_%s' %i for i in range(self.num_levels['to'])]
        # Start with an empty index; connections are added via __setitem__.
        levels = [[] for i in range(len(names))]
        labels = [[] for i in range(len(names))]
        idx = pd.MultiIndex(levels=levels, codes=labels, names=names)
        self.data = pd.DataFrame(index=idx, columns=columns, dtype=object)
@property
def from_slice(self):
"""
Slice of | |
# <gh_stars>10-100
#!/usr/bin/env python3
"""
Get read depth over breakpoints.
An SV has 3 breakpoints. Two are on one contig and one is on another.
* DEL: 2 breakpoints on reference, 1 on the local assembly.
* INS: 1 breakpoint on reference, 2 on the local assembly.
For the purposes of this script, the two breakpoints on the same contig are bp_l (left) and bp_r (right), and the
breakpoint on a single contig is bp_s (single).
Breakpoints depths over all 3 are calculated by finding all reads that overlap each breakpoint, choosing the best
alignment for each read (CIGAR string and edit distance), and normalizing over bp_l and bp_r ((bp_l + bp_r) / 2).
"""
import argparse
import gc
import collections
import os
import pandas as pd
import pysam
from scipy.stats import binom
# CIGAR operations (numeric op codes as defined by the SAM/BAM specification)
BAM_CMATCH = 0
BAM_CINS = 1
BAM_CDEL = 2
BAM_CREF_SKIP = 3
BAM_CSOFT_CLIP = 4
BAM_CHARD_CLIP = 5
BAM_CPAD = 6
BAM_CEQUAL = 7
BAM_CDIFF = 8

# Set of CIGAR operations that indicate mis-matched alignments (ignoring operations bwa would not write).
CIGAR_NO_ALIGN = {BAM_CINS, BAM_CDEL, BAM_CSOFT_CLIP, BAM_CHARD_CLIP, BAM_CDIFF}

# Operations that clip the read at either end.
CIGAR_CLIPPED = {BAM_CSOFT_CLIP, BAM_CHARD_CLIP}

# Number of SVs to process before resetting pysam (close and re-open file). Avoids a memory leak in pysam.
PYSAM_RESET_INTERVAL = 1000
class AlignRecord:
    """
    One alignment record with required fields extracted from the pysam segment.

    Fields:
    * qname: Query name.
    * pos: Position.
    * mapq: Mapping quality.
    * clip_l / clip_r / clip_lr: Clipped bases on the left end, right end, and their sum.
    * align_distance: Distance calculated by summing CIGAR string entries of non-matching bases.
    * edit_distance: Number of mismatched bases in the aligned regions of the sequence (None if no NM tag).
    * read_count: Number of reads this record represents.
    """

    def __init__(self, segment):
        """
        Create a new alignment record.

        :param segment: Pysam segment (one BAM entry).
        """
        # Get basic information
        self.qname = segment.query_name
        self.pos = segment.reference_start
        self.mapq = segment.mapping_quality

        # Count number of clipped bases on each end. The bounds checks guard
        # against records whose CIGAR consists entirely of clip operations:
        # the original unguarded loops raised IndexError in that case.
        cigar = segment.cigartuples
        self.clip_l = 0
        self.clip_r = 0

        index = 0
        while index < len(cigar) and cigar[index][0] in CIGAR_CLIPPED:
            self.clip_l += cigar[index][1]
            index += 1

        index = len(cigar) - 1
        while index >= 0 and cigar[index][0] in CIGAR_CLIPPED:
            self.clip_r += cigar[index][1]
            index -= 1

        self.clip_lr = self.clip_l + self.clip_r

        # Get CIGAR distance: total length of non-aligning operations.
        self.align_distance = 0
        for cigar_entry in cigar:
            if cigar_entry[0] in CIGAR_NO_ALIGN:
                self.align_distance += cigar_entry[1]

        # Get edit distance (NM tag); None when the aligner did not set it.
        try:
            self.edit_distance = segment.get_tag('NM')
        except KeyError:
            self.edit_distance = None

        # Read count: Number of breakpoints this record represents. If the same read is mapped over two ends of
        # an event, count it twice initially so that it normalizes properly.
        self.read_count = 1

    def compare(self, other):
        """
        Compare the distance from this record to the reference to the distance of `other` from the reference. The
        mapping quality is analyzed first and returned if they are not the same. The CIGAR operations are analyzed
        second, and if the number of bases differing is equal, then the NM tag is analyzed (if it exists
        in both records).

        The difference between the mapping quality, CIGAR distance, or the edit distance (see above) is returned
        as `self - other` for the first comparison that is non-zero.

        :param other: Other record to compare.

        :return: A negative number if this record is closer to the reference, a positive number if the other record
            is closer to the reference, and 0 if both records align equally well to the reference by CIGAR and NM tag
            edit distance.
        """
        # Compare mapping quality
        record_distance = self.mapq - other.mapq
        if record_distance != 0:
            return record_distance

        # Compare CIGAR difference
        record_distance = self.align_distance - other.align_distance
        if record_distance != 0:
            return record_distance

        # Compare edit distance (NM tag) only when both records carry it
        if self.edit_distance is not None and other.edit_distance is not None:
            record_distance = self.edit_distance - other.edit_distance

        return record_distance

    def __repr__(self):
        """
        Get a string representation of this object.

        :return: String representation of this object.
        """
        return 'AlignRecord[qname={qname}, pos={pos}, dist_cigar={align_distance}, dist_nm={edit_distance}]'.format(
            **self.__dict__
        )
def get_best_alignment(record_list):
    """
    Get the best alignment from a list of alignments. The best alignment has the lowest distance to the reference.
    If more than one alignment has the minimum distance, then the first one in the list is returned.

    :param record_list: List of records.

    :return: Best record in `record_list`.
    """
    best = record_list[0]
    for candidate in record_list[1:]:
        # compare() < 0 means the candidate is strictly closer to the reference
        if candidate.compare(best) < 0:
            best = candidate
    return best
def merge_breakpoint_records(records_l, records_r):
    """
    Merge records found on both the left and the right breakpoints. If a read or its paired end overlaps both
    breakpoints, then choose the lesser of the two alignments to represent the read (lesser by furthest distance
    from the reference or contig it belonged to).

    :param records_l: Left breakpoint records.
    :param records_r: Right breakpoint records.

    :return: Merged left and right breakpoints.
    """
    merged = dict()
    shared = set(records_l).intersection(records_r)
    # A read seen over both breakpoints is represented by its worse
    # alignment (compare >= 0) and counted once per breakpoint.
    for qname in shared:
        left = records_l[qname]
        right = records_r[qname]
        merged[qname] = left if left.compare(right) >= 0 else right
        merged[qname].read_count += 1  # Count both records
    # Records unique to either side are taken as-is.
    for qname in records_l:
        if qname not in shared:
            merged[qname] = records_l[qname]
    for qname in records_r:
        if qname not in shared:
            merged[qname] = records_r[qname]
    return merged
def choose_best_records(records_lr, records_s):
    """
    Get the best records for each set. For all records in both sets, the best is chosen. All records unique to either
    set is also selected and returned.

    :param records_lr: Records across the left and right breakpoints.
    :param records_s: Records across the single breakpoint.

    :return: A tuple of two elements: best records in left-right, and best records in single.
    """
    best_lr = list()
    best_s = list()
    shared = set(records_lr).intersection(records_s)
    # For reads present in both sets, keep the closer-to-reference copy;
    # equal-quality records appearing in both sets are discarded.
    for qname in shared:
        diff = records_lr[qname].compare(records_s[qname])
        if diff < 0:
            best_lr.append(records_lr[qname])
        elif diff > 0:
            best_s.append(records_s[qname])
    # Records unique to either set are kept unconditionally.
    for qname in set(records_lr) - shared:
        best_lr.append(records_lr[qname])
    for qname in set(records_s) - shared:
        best_s.append(records_s[qname])
    return best_lr, best_s
def get_clipped_freq(records_l, records_r, records_s, minclip=4):
    """
    Get the number of reads that are clipped over the breakpoints in a meaningful way. For the left and right
    breakpoints, look for clipping in the SV. For the single breakpoint, look for clipping on either side of the read.

    :param records_l: Records over the left breakpoint.
    :param records_r: Records over the right breakpoint.
    :param records_s: Records over the single breakpoint.
    :param minclip: Minimum number of bases clipped to count the read as clipped.

    :return: A tuple of two elements: Proportion of reads clipped over the left and right breakpoints, and proportion
        of reads clipped over the single breakpoint (in that order).
    """
    total_lr = len(records_l) + len(records_r)
    if total_lr > 0:
        # Left-breakpoint reads clip into the SV on their right side and
        # right-breakpoint reads on their left side.
        n_clipped = sum(1 for rec in records_l.values() if rec.clip_r >= minclip)
        n_clipped += sum(1 for rec in records_r.values() if rec.clip_l >= minclip)
        clipped_lr = n_clipped / total_lr
    else:
        clipped_lr = 0
    if len(records_s) > 0:
        clipped_s = sum(
            1 for rec in records_s.values()
            if rec.clip_l >= minclip or rec.clip_r >= minclip
        ) / len(records_s)
    else:
        clipped_s = 0
    return clipped_lr, clipped_s
def get_bp_depth(sv_record, bam_file, minclip=4, mapq=20):
"""
Get the average depth over reference and alternate contigs.
:param sv_record: SV record from the input BED file.
:param bam_file: Pysam opened BAM file.
:param minclip: The minimum number of bases clipped on one end to count that end of the read as clipped.
:param mapq: Minimum mapping quality of reads to be counted.
:return: A 5-element tuple: Depth over the reference, depth over the alternate contig, number of clipped reads over
over the reference reads counted toward the read depth, number of clipped reads over the alternate contig reads
counted toward the read depth, and proportion of reads clipped on the primary contig.
"""
# Get breakpoint locations
# * bp_l: Left breakpoint (contig if INS, primary if DEL)
# * bp_r: Right breakpoint (contig if INS, primary | |
"<NAME>", "Y<NAME>", "Ai Fairouz"],
],
["TRF", ["YU-KI", "DJ KOO"]],
["Amasia Landscape", ["Akico"]],
["Mother Ninja", ["MOTSU", "Akico"]],
["Do As Infinity", ["<NAME>"]],
["siraph", ["Annabel"]],
["kanon x kanon", ["<NAME>"]],
["Aoi Gakuen 3-nin Musume", ["<NAME>", "Yuki Matsuoka", "Yuka Inokuchi"]],
[
"Tenbi Gakuen Joseito no Mina-san",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Misato",
],
],
[
"Love♥Stay",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["IA", ["Lia"]],
[
"Kokoro ga Sakebitagatterunda Cast",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Ai Kakuma",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Nishinohashi Hero Shoutenger",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["<NAME>", ["<NAME>"]],
[
"Ultra-Souls",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Dark Cherries", ["<NAME>", "<NAME>", "<NAME>"]],
[
"Justice",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["<NAME>ai", ["<NAME>", "<NAME>", "<NAME>"]],
["<NAME>", ["<NAME>", "<NAME>", "<NAME>"]],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"The rest of the fucking school",
],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>*",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Mei Okada",
"<NAME>",
"<NAME>",
],
],
["Merm4id", ["<NAME>", "Mei Okada", "<NAME>ishi"]],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"PNGN 6",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Azusa Kataoka",
],
],
["PNGN 4", ["<NAME>", "<NAME>", "<NAME>", "Azusa Kataoka"]],
["PNGN 3", ["Mariya Ise", "<NAME>", "<NAME>"]],
["BōZ", ["<NAME>", "Kishou Taniyama"]],
["NSD4", ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]],
["Tokyo Performance Doll", ["UL-SAYS"]],
["Sol/Lull BOB", ["<NAME>"]],
["KNIFE OF DAY", ["Yoshimasa Hosoya"]],
["Supercar", ["iLL"]],
["LAMA", ["<NAME>", "iLL"]],
["NITRO", ["TRUE", "<NAME>", "<NAME>", "<NAME>"]],
["TERRASPEX", ["Kouta Matsuyama"]],
[
"Miracle☆StAr",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["day after tomorrow", ["Me"]],
[
"Ushiro kara Haiyori-tai",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Ushiro kara Haiyori-tai N", ["Kana Asumi"]],
[
"MAHO-dou",
[
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["SPLASH", ["<NAME>", "<NAME>", "<NAME>", "<NAME>ai"]],
["v-u-den", ["<NAME>", "<NAME>", "<NAME>ada"]],
["ABCHO", ["<NAME>", "<NAME>"]],
[
"Morning Musume '15",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
["<NAME>", True],
"<NAME>",
],
],
["Gokudolls Niji-gumi", ["<NAME>", "<NAME>", "<NAME>"]],
["Gokudolls Kan-gumi", ["<NAME>", "<NAME>", "<NAME>"]],
[
"LizNoir",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["TRINITYAiLE", ["<NAME>", "<NAME>", "<NAME>"]],
["Tortilla Company", ["<NAME>", "<NAME>", "<NAME>"]],
[
"Microphone soul spinners",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"Angel-tai",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Rune Angel-tai",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Runrun Angel-tai", ["Satomi Akesaka", "Atsuko Enomoto"]],
[
"DOP",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["GAPsCAPs", ["<NAME>", "<NAME>", "<NAME>"]],
["Splasoda°", ["<NAME>", "<NAME>", "<NAME>", "Nozomi Nagumo"]],
["Suzu☆Rena", ["<NAME>", "<NAME>"]],
[
"Galaxy Standard",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Hybrid Thoughts", ["Paranom", "Kasper", "Aztech"]],
[
"Melocure",
[
"meg rock",
"<NAME>",
],
],
[
"Sylvania no Otomodachi",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Sanchoume All Stars",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Anos Fan Union",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Shonen-tai", ["<NAME>", "<NAME>", "<NAME>"]],
["BC Jiyuu Gakuen", ["<NAME>", "<NAME>", "<NAME>"]],
["Chih<NAME>", ["<NAME>"]],
["ORESAMA", ["Pon"]],
["ONE III NOTES", ["Pon", "Foggy-D"]],
[
"Hakumai Koukou Keion-bu",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["LieN", ["<NAME>"]],
["Ulfuls", ["Tortoise Matsumoto"]],
["UNISON SQUARE GARDEN", ["<NAME>"]],
["XIIX", ["<NAME>"]],
["<NAME>", ["<NAME>", "<NAME>", "<NAME>"]],
["Metamorphose", ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]],
[
"Happy Clover",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Wiseman", ["<NAME>", "<NAME>", "<NAME>"]],
["Mini Hams", ["Ai Kago", "<NAME>", "<NAME>", "<NAME>"]],
["Sheki-Dol", ["<NAME>", "<NAME>", "<NAME>", "S<NAME>i"]],
[
"Hyoutei Eternity",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"<NAME>",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"<NAME>",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"<NAME>",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Sea☆A", ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]],
["Godiego", ["<NAME>"]],
["Y<NAME>", ["<NAME>"]],
["No Limit", ["Saya Fukuzumi", "<NAME>", "<NAME>"]],
["Uchuu no Hajimari", ["<NAME>", "<NAME>", "<NAME>"]],
["<NAME>", ["<NAME>", "<NAME>", "<NAME>"]],
["DIAGRAM", ["Tatey<NAME>", "Rui Tanabe", "Say<NAME>"]],
["De<NAME>", ["<NAME>", "<NAME>", "<NAME>"]],
["<NAME>", ["<NAME>", "M・A・O", "Maaya Uchida"]],
["H△G", ["Chiho"]],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Shinjugamine Jogakuen Hoshimori Class",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Y<NAME>chiyama",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"A<NAME>uki",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME> San Koukuu Madou Dai-tai",
["<NAME>", "A bunch of other people"],
],
["<NAME>", ["MOTSU"]],
["Happy Around!", ["<NAME>", "<NAME>", "<NAME>"]],
["Maximum the Hormone", ["Maximum the Ryo-kun", "<NAME>"]],
["Kinniku Shoujo-tai", ["Kenji Ohtsuki"]],
["Tokusatsu", ["Kenji Ohtsuki"]],
["Triomatic", ["Yumi Kikuchi", "<NAME>", "<NAME>"]],
[
"Dai 501 Tougou Sentou Koukuu-dan",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Mizuki",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["<NAME>", ["<NAME>"]],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"Lynn",
"<NAME>",
"Machico",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Egaos",
["<NAME>", "<NAME>", "<NAME>", "<NAME>", "Lynn"],
],
["<NAME>", ["<NAME>o"]],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Kishida Kyoudan &THE Akeboshi Rockets", ["ichigo"]],
["EUROX", ["MNEMO"]],
["StarRing", ["Moe Toyota", "<NAME>", "<NAME>"]],
["Shadow", ["<NAME>", "<NAME>", "<NAME>"]],
["Project Frontier", ["StarRing", "Shadow"]],
[
"THE IDOLM@STER CINDERELLA GIRLS LITTLE STARS!",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["LUI FRONTiC Akabane JAPAN", ["Pour Lui"]],
[
"BiS",
[
"Pour Lui",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"ARCANA PROJECT",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"EMPiRE",
[
"YU-KI EMPiRE",
"YUKA EMPiRE",
"MAYU EMPiRE",
"YUiNA EMPiRE",
"MiDORiKO EMPiRE",
],
],
[
"BiSH",
[
"AiNA THE END",
"CENT CHiHiRO CHiTTiii",
"LiNGLiNG",
"HASHiYASUME ATSUKO",
"MOMOKO GUMi COMPANY",
"AYUNi D",
],
],
[
"Moso Calibration",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Dempagumi.inc",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Niji no Conquistador",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME> Tennis-bu",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["MAGIC OF LiFE", ["Nobuyuki Takatsuto"]],
["Funta", ["UCO"]],
["Obocchaman-kun", ["<NAME>", "<NAME>", "Yuk<NAME>"]],
[
"Dr. Slump All Stars",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["YURIMARI", ["<NAME>", "<NAME>"]],
["Kiroro", ["<NAME>"]],
[
"STARMARIE",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Guardians4", ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]],
["S/mileage", ["<NAME>", "<NAME>", "Saki Ogawa", "Ayaka Wada"]],
["Lilpri", ["<NAME>", "Ayaka Wada", "<NAME>"]],
[
"<NAME>!",
["<NAME>", "Akari Saho", "<NAME>", "Ayaka Wada"],
],
["Buono!", ["<NAME>", "<NAME>", "<NAME>"]],
[
"\u2103-ute",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"<NAME>",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Athena & Robikerottsu",
["Chisato Okai", "S<NAME>", "<NAME>", "<NAME>"],
],
["ONEPIXCEL", ["<NAME>", "<NAME>", "<NAME>"]],
[
"Dorothy Little Happy",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
["Favorite Blue", ["<NAME>"]],
["ribbon", ["<NAME>", "<NAME>", "<NAME>"]],
[
"CoCo",
["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["KUSUKUSU", ["<NAME>"]],
["VisioN", ["<NAME>"]],
["Personz", ["JILL"]],
["JiLL-Decoy association", ["chihiRo"]],
["jillmax", ["HIDE"]],
["Tokyo Shounen", ["<NAME>"]],
["Picasso", ["Tetsuya Tsujihata"]],
["Non Stop Rabbit", ["Tatsuya Taguchi"]],
["RABBIT", ["<NAME>"]],
["Death Rabbits", ["Emi Mochizuki", "Yuzu Ohkawa", "<NAME>", "<NAME>"]],
["+earth☆sky", ["Kyoka Minami", "Kotone Noda", "<NAME>"]],
["Usagi Dash", ["<NAME>"]],
[
"BeForU",
[
"<NAME>",
"Noria",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Les*Fleurirs",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
[
"Ribbon Girl",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
["<NAME>", True],
"Runa",
["Hazuki", True],
],
],
["JET SETS", ["<NAME>"]],
["Key Of Lover -access-", ["KyLa"]],
[
"Orb Hunter 4",
["<NAME>", "<NAME>", "<NAME>", "<NAME>"],
],
["Tegomass", ["<NAME>", "<NAME>"]],
[
"NEWS",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Johnny's WEST",
[
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
"<NAME>",
],
],
[
"Hey! Say! JUMP",
[
| |
<reponame>ammirato/tdid_demo<filename>utils.py
import torch
import torch.nn as nn
from torch.autograd import Variable
import torchvision.models as models
import os
import cv2
import numpy as np
import math
import sys
import h5py
import active_vision_dataset_processing.data_loading.active_vision_dataset as AVD
import active_vision_dataset_processing.data_loading.transforms as AVD_transforms
#TODO check gradient clipping
def get_target_images(target_path, target_names,preload_images=False):
    """
    Returns dict with path to each target image, or loaded image

    Ex) get_target_images('target_path', ['possible','target','names'])

    Input parameters:
        target_path: (str) path that holds directories of all targets.
                     i.e. target_path/target_0/* has one type
                     of target image for each object.
                     target_path/target_1/* has another type
                     of target image for each object.
                     Each type can have multiple images,
                     i.e. target_0/* can have multiple images per object
        target_names: (list) list of str, each element is a target name
        preload_images (optional): (bool) If True, the return dict will have
                                   images(as ndarrays) as values. If False,
                                   the values will be full paths to the images.

    Returns:
        (dict) key=target_name, value=list of lists
               Parent list has one list for each target type.
               Elements in child list are either path to image, or loaded image
    """
    #type of target image can mean different things,
    #probably different type is different view
    target_dirs = sorted(os.listdir(target_path))
    #each target gets a list of lists, one for each type dir
    target_images = {name: [] for name in target_names}
    for type_ind, t_dir in enumerate(target_dirs):
        for fname in os.listdir(os.path.join(target_path, t_dir)):
            #object name is everything before the last '_' (or before 'N')
            n_pos = fname.find('N')
            if n_pos == -1:
                obj_name = fname[:fname.rfind('_')]
            else:
                obj_name = fname[:n_pos - 1]
            #make sure object is valid, and load the image or store path
            if obj_name not in target_images:
                continue
            #grow the per-type lists as needed; the original appended at
            #most one list here, which could raise IndexError if an object
            #had no image in an earlier type directory
            while len(target_images[obj_name]) <= type_ind:
                target_images[obj_name].append([])
            full_path = os.path.join(target_path, t_dir, fname)
            if preload_images:
                target_images[obj_name][type_ind].append(cv2.imread(full_path))
            else:
                target_images[obj_name][type_ind].append(full_path)
    return target_images
def match_and_concat_images_list(img_list, min_size=None):
    """
    Stacks images in a list into a single ndarray

    Input parameters:
        img_list: (list) list of HxWxC ndarrays, images to be stacked. If
                  images are not the same shape, zero padding (bottom/right)
                  will be used to make them the same size.
        min_size (optional): (int) If not None, ensures images are at least
                             min_size x min_size. Default: None

    Returns:
        (ndarray) a single ndarray with first dimension equal to the
        number of elements in the inputted img_list. The input list and
        its images are left unmodified (the original replaced the list's
        elements in place).
    """
    #find size all images will be padded to
    max_rows = max(img.shape[0] for img in img_list)
    max_cols = max(img.shape[1] for img in img_list)
    if min_size is not None:
        max_rows = max(max_rows, min_size)
        max_cols = max(max_cols, min_size)
    #pad into fresh buffers so the caller's list is not mutated
    padded = []
    for img in img_list:
        buf = np.zeros((max_rows, max_cols, img.shape[2]))
        buf[0:img.shape[0], 0:img.shape[1], :] = img
        padded.append(buf)
    return np.stack(padded, axis=0)
def create_illumination_pattern(rows, cols, center_row,center_col,minI=.1,maxI=1,radius=None):
    '''
    Creates a random illumination pattern mask

    Input parameters:
        rows: (int) number of rows in returned pattern
        cols: (int) number of cols in returned pattern
        center_row: (int) row of center of illumination
        center_col: (int) col of center of illumination
        minI (optional): min illumination change. Default: .1
        maxI (optional): (float) max illum change. Default: 1
        radius (optional): (int) radius of illumination falloff. If None
                           a random radius is chosen. Default: None

    Returns:
        (ndarray) rows x cols array to be pixel-wise multiplied with an
        image to change the image's illumination
    '''
    if radius is None:
        radius = float(int(20000 + (30000)*np.random.rand(1)))
    #vectorized gaussian falloff around (center_row, center_col);
    #replaces the original per-pixel Python double loop with numpy
    #broadcasting (same values, O(rows*cols) C-speed)
    dy = np.arange(rows) - center_row
    dx = np.arange(cols) - center_col
    dist2 = dy[:, None]**2 + dx[None, :]**2
    return minI + (maxI - minI) * np.exp(-.5 * dist2 / radius)
def augment_image(img, crop_max=5, rotate_max=30, do_illum=.5):
    '''
    Alters an image with some common data augmentation techniques

    WARNING: the crop step zeroes border regions of the INPUT array in
    place; pass a copy if the caller needs the original pixels.

    Input parameters:
        img: (ndarray) the image (HxWx3)
        crop_max (optional): (int) max length that can be "cropped" from
                             each side. Cropping does not change image shape,
                             but sets "cropped" region to 0. Default: 5
        rotate_max (optional): (int) max degrees for in-plane rotation
                               Default: 30
        do_illum (optional): (float) chance that a random illumination
                             change will be applied. Set to 0 if no
                             illumination change is desired. Default: .5
    Returns:
        (ndarray) the augmented image
    '''
    #crop: draw 4 independent offsets, one per side, and zero the borders
    #(mutates img in place)
    crops = np.random.choice(crop_max,4)
    start_row = 0 + crops[0]
    end_row = img.shape[0] - crops[1]
    start_col = 0 + crops[2]
    end_col = img.shape[1] - crops[3]
    img[0:start_row,:,:] = 0
    img[:,0:start_col,:] = 0
    img[end_row:,:,:] = 0
    img[:,end_col:,:] = 0
    #rotate about the image center by a random angle in [-rotate_max, rotate_max)
    #NOTE(review): rot_angle is a length-1 ndarray, not a scalar — cv2
    #appears to accept it, but confirm on the cv2 version in use
    rot_angle = np.random.choice(rotate_max*2,1) - rotate_max
    M = cv2.getRotationMatrix2D((img.shape[1]/2,img.shape[0]/2),rot_angle,1)
    img = cv2.warpAffine(img,M,(img.shape[1],img.shape[0]))
    #change illumination: multiply by a random gaussian-falloff mask,
    #tiled over the 3 channels
    if np.random.rand() < do_illum:
        max_side = max(img.shape[:2])
        xc,yc = np.random.choice(max_side,2)
        pattern = create_illumination_pattern(max_side,max_side,xc,yc)
        pattern = pattern[0:img.shape[0],0:img.shape[1]]
        img = img* np.tile(np.expand_dims(pattern,2),(1,1,3))
    return img
def check_object_ids(chosen_ids,id_to_name,target_images):
    """
    Picks only chosen ids that have a target object and target image.

    ex) check_object_ids(chosen_ids,id_to_name,target_images)

    Input Parameters:
        chosen_ids: (list) list of ints, each int is a class id
        id_to_name: (dict) key=class_id(int), value=target_name(str)
        target_images: (dict) same as returned from get_target_images function

    Returns:
        (list) ids in chosen ids that exist in id_to_name dict, and
        returns -1 if any id does not have a target image
    """
    #intersection of chosen ids and known ids (the original wrapped the
    #intersection in a redundant second set())
    ids_with_name = list(set(chosen_ids) & set(id_to_name.keys()))
    for cid in ids_with_name:
        if cid == 0:  #skip background
            continue
        name = id_to_name[cid]
        #every valid object needs at least one image of the first type
        if len(target_images[name]) < 1 or len(target_images[name][0]) < 1:
            print('Missing target images for {}!'.format(name))
            return -1
    return ids_with_name
def normalize_image(img,cfg):
    """
    Normalizes an image according to config parameters

    ex) normalize_image(image,config)

    Input Parameters:
        img: (ndarray) numpy array, the image to be normalized
        cfg: (Config) config instance from configs/

    Returns:
        (ndarray) normalized image
    """
    if not cfg.PYTORCH_FEATURE_NET:
        raise NotImplementedError
    #scale to [0,1], then apply the standard torchvision mean/std
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    return ((img / 255.0) - mean) / std
def get_class_id_to_name_dict(root,file_name='instance_id_map.txt'):
    """
    Get dict from integer class id to string name

    Input Parameters:
        root: (str) directory that holds .txt file with class names and ids
        file_name (optional): (str) name of file with class names and ids
                              Default: 'instance_id_map.txt'
                              Format: each line has: target_name id
                              where id is an integer character
    Returns:
        (dict) dict with key=id, value=target_name
    """
    id_to_name_dict = {}
    #with-statement guarantees the handle is closed (the original leaked it)
    with open(os.path.join(root, file_name), 'r') as map_file:
        for line in map_file:
            parts = line.split()
            id_to_name_dict[int(parts[1])] = parts[0]
    return id_to_name_dict
def get_AVD_dataset(root, scene_list, chosen_ids,
                    max_difficulty=4,
                    fraction_of_no_box=.1,
                    instance_fname=None,
                    classification=False,
                    ):
    """
    Returns a loader for the AVD dataset.

    dataset = get_AVD_dataset('/path/to/data',['scene1','scene2'], [chosen_ids])

    Input Parameters:
        root: (str) path to data. Parent of all scene directories
        scene_list: (list) scenes to include
        chosen_ids: (list) list of object ids to keep labels for
                    (other labels discarded)
        max_difficulty (optional): (int) max bbox difficulty to use Default: 4
        instance_fname (optional): (str) name of file with class ids and names
                                   If None, uses default in
                                   get_class_id_to_name_dict. Default: None
        classification (optional): (bool) Whether or not data is for
                                   classification. Default: False
    Returns:
        an instance of the AVD class from the AVD data_loading code
    """
    #label transform: keep only boxes of the chosen classes, up to the
    #requested difficulty
    pick_trans = AVD_transforms.PickInstances(chosen_ids,
                                              max_difficulty=max_difficulty)
    #class-id map, from the default file unless a name was given
    id_to_name_dict = (get_class_id_to_name_dict(root)
                       if instance_fname is None
                       else get_class_id_to_name_dict(root, instance_fname))
    return AVD.AVD(root=root,
                   scene_list=scene_list,
                   target_transform=pick_trans,
                   classification=classification,
                   class_id_to_name=id_to_name_dict,
                   fraction_of_no_box=fraction_of_no_box)
def save_training_meta_data(cfg,net):
    """
    Writes a text file that describes model and parameters.

    ex) save_training_meta_data(cfg,net)

    Input parameters:
        cfg: (Config) a config instance from configs/
        net: (torch Module) a pytorch network

    Returns:
        None
    """
    meta_path = os.path.join(cfg.META_SAVE_DIR,
                             cfg.MODEL_BASE_SAVE_NAME + '.txt')
    #all non-callable, non-dunder attributes of the config are recorded
    config_params = [attr for attr in dir(cfg)
                     if not callable(getattr(cfg, attr))
                     and not attr.startswith("__")]
    #with-statement closes the file even if a write raises (original
    #leaked the handle on exception)
    with open(meta_path, 'w') as meta_fid:
        for param in config_params:
            #these two maps are huge and redundant; skip them
            if param == 'ID_TO_NAME' or param == 'NAME_TO_ID':
                continue
            meta_fid.write('{}: {}\n'.format(param, str(getattr(cfg,param))))
        meta_fid.write(str(net))
def load_pretrained_weights(model_name):
    '''
    Load weights of a pretrained pytorch model for feature extraction

    Example: For Alexnet, a torch.nn.Sequential model with everything
             but the fully connected layers is returned

    Input parameters:
        model_name: name of the model to load. Options:
                        vgg16_bn
                        squeezenet1_1
                        resnet101
                        alexnet
    Returns:
        (torch.nn.Sequential) The first N layers of the pretrained model
        that are useful for feature extraction. N depends on which model.

    Raises:
        NotImplementedError: for an unrecognized model_name
        (the original also had an unreachable sys.exit() after the raise;
        it has been removed)
    '''
    if model_name == 'vgg16_bn':
        fnet = models.vgg16_bn(pretrained=True)
        #drop the final max-pool
        return torch.nn.Sequential(*list(fnet.features.children())[:-1])
    elif model_name == 'squeezenet1_1':
        fnet = models.squeezenet1_1(pretrained=True)
        return torch.nn.Sequential(*list(fnet.features.children())[:-1])
    elif model_name == 'resnet101':
        #drop avgpool and fc
        fnet = models.resnet101(pretrained=True)
        return torch.nn.Sequential(*list(fnet.children())[:-2])
    elif model_name == 'alexnet':
        fnet = models.alexnet(pretrained=True)
        return torch.nn.Sequential(*list(fnet.features.children()))
    else:
        raise NotImplementedError
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import time
class Timer(object):
"""A simple | |
<gh_stars>10-100
import numpy as np
import weakref
import copy
import collections
from weakref import WeakValueDictionary
# TODO
# - composite exception for constructor
# - resources (new class generated as _silk_types['ResourceX'], where X is name of Silk class)
# elsewhere:
# - update bracketlength macro:
# XArray[spam][eggs] => maxshape = (spam, eggs) AND validate/hardform
# XArray[:spam][:eggs] => maxshape = (spam, eggs)
# mixed syntax also allowed
# - xml/json conversion,
# - depsgraph/namespace
# finally: registrar, cell depsgraph
from ..registers import typenames
from . import SilkObject
from .helpers import _prop_setter_any, _prop_setter_json, _set_numpy_ele_prop,\
_get_numpy_ele_prop, _filter_json, datacopy, _update_ptr
class NoneChild(object):
    # sentinel returned by _get_child() when an optional member is absent
    _is_none = True
class Silk(SilkObject):
    # class-level schema attributes; filled in by the type-registration
    # machinery for each concrete Silk type
    _anonymous = None           # bool
    _props = None               # list
    dtype = None                # list
    _positional_args = None     # list
    # __slots__ keeps instances small; __weakref__ is needed because
    # parents hold children in a WeakValueDictionary
    __slots__ = [
        "_parent", "_storage_enum", "_storage_nonjson_children",
        "_data", "_children", "_is_none", "__weakref__"
    ]
    def __init__(self, *args, _mode="any", **kwargs):
        """Construct a Silk object.

        _mode selects the construction path:
          "parent"     - child of an existing object; kwargs must supply
                         parent, storage and data_store
          "from_numpy" - wrap an existing numpy record (data_store kwarg)
          "from_json"  - fresh json-backed object, set from JSON args
          "empty"      - fresh json-backed object, left unpopulated
          "any"        - fresh json-backed object, set from arbitrary args
        """
        self._storage_enum = None
        self._storage_nonjson_children = set()
        self._children = None
        if _mode == "parent":
            self._init(
                kwargs["parent"],
                kwargs["storage"],
                kwargs["data_store"],
            )
        elif _mode == "from_numpy":
            assert "parent" not in kwargs
            self._init(
                None,
                "numpy",
                kwargs["data_store"],
            )
        else:
            # all remaining modes start from empty json storage
            assert "parent" not in kwargs
            assert "storage" not in kwargs
            assert "data_store" not in kwargs
            self._init(None, "json", None)
            if _mode == "any":
                self.set(*args, **kwargs)
            elif _mode == "empty":
                pass
            elif _mode == "from_json":
                self.set(*args, prop_setter=_prop_setter_json, **kwargs)
            else:
                raise ValueError(_mode)
    def _init(self, parent, storage, data_store):
        """(Re)initialize storage; called from __init__ and on storage
        conversions (make_json / make_numpy).

        parent: owning Silk object or None (kept as a callable returning
                the parent, so both hard refs and weakrefs fit one API)
        storage: "json" or "numpy"
        data_store: backing dict (json) or numpy record (numpy)
        """
        from .silkarray import SilkArray
        if parent is not None:
            if storage == "numpy":
                self._parent = lambda: parent # hard ref
            # NOTE(review): this unconditionally overwrites the "hard ref"
            # above, so numpy children also end up with a weakref — looks
            # unintended given the comment; confirm before changing
            self._parent = weakref.ref(parent)
        else:
            self._parent = lambda: None
        self.storage = storage
        self._is_none = False
        self._storage_nonjson_children.clear()
        # detach any children left over from a previous storage life
        if self._children is not None:
            for child in self._children.values():
                child._parent = lambda: None
        if storage == "json":
            self._children = {}
            if data_store is None:
                data_store = {}
        elif storage == "numpy":
            # numpy children are rebuilt on demand (_get_child) and only
            # weakly cached
            self._children = WeakValueDictionary()
            assert data_store is not None
            assert data_store.dtype == np.dtype(self.dtype, align=True)
            assert data_store.shape == ()
            self._data = data_store
            return
        else:
            raise ValueError(storage)
        assert storage == "json"
        # json storage: eagerly build one child object per non-elementary
        # property, sharing the nested dict/list inside data_store
        for pname, p in self._props.items():
            if p["elementary"]:
                continue
            t = self._get_typeclass(pname)
            if pname not in data_store:
                if issubclass(t, SilkArray):
                    data_store[pname] = []
                else:
                    data_store[pname] = {}
            c_data_store = data_store[pname]
            self._children[pname] = t(
                _mode="parent",
                storage="json",
                parent=self,
                data_store=c_data_store,
                len_data_store=None,
            )
        self._data = data_store
def _get_typeclass(self, propname):
p = self._props[propname]
if "typeclass" in p:
t = p["typeclass"]
else:
typename = p["typename"]
t = typenames._silk_types[typename]
return t
    def copy(self, storage="json"):
        """Returns a copy with the storage in the specified format
        ("json" or "numpy")."""
        cls = type(self)
        if storage == "json":
            json = self.json()
            ret = cls.from_json(json)
            # json() omits absent children entirely, so their _is_none
            # flags must be carried over by hand
            for prop in self._props:
                if not self._props[prop]["elementary"]:
                    child = self._children[prop]
                    is_none = child._is_none
                    ret._children[prop]._is_none = is_none
        elif storage == "numpy":
            ret = cls.from_numpy(self.numpy())
        else:
            raise ValueError(storage)
        return ret
@classmethod
def from_json(cls, data):
data = _filter_json(data)
return cls(data, _mode="from_json")
@classmethod
def from_numpy(cls, data, copy=True,validate=True):
"""Constructs from a numpy array singleton "data"
"""
if data.shape != ():
raise TypeError("Data must be a singleton")
if data.dtype != np.dtype(cls.dtype,align=True):
raise TypeError("Data has the wrong dtype")
if copy:
data = datacopy(data)
ret = cls(_mode="from_numpy", data_store=data)
if validate:
ret.validate()
return ret
@classmethod
def empty(cls):
return cls(_mode="empty")
    def _get_child(self, childname, force=False):
        """Return the child Silk object for property *childname*.

        For numpy storage the child is rebuilt as a view on this record's
        field and cached in the WeakValueDictionary; an absent optional
        child yields the NoneChild sentinel unless *force* is True.

        NOTE(review): for non-numpy storage nothing binds `child` before
        the final assignment — json-mode children are pre-built in _init(),
        so this path would raise NameError; confirm the method is only
        reached with numpy storage.
        """
        from .silkarray import SilkArray
        if self.storage == "numpy":
            prop = self._props[childname]
            is_none = False
            if prop["optional"]:
                # optional members carry a companion HAS_<name> flag
                if not self._data["HAS_" + childname]:
                    is_none = True
            if is_none and not force:
                return NoneChild
            t = self._get_typeclass(childname)
            len_data_store = None
            if issubclass(t, SilkArray):
                if prop.get("var_array", False):
                    # variable-length arrays track their length separately
                    len_data_store = self._data["LEN_"+childname]
            child = t (
                _mode = "parent",
                parent = self,
                storage = "numpy",
                data_store = self._data[childname],
                len_data_store = len_data_store
            )
        self._children[childname] = child
        return self._children[childname]
    def set(self, *args, prop_setter=_prop_setter_any, **kwargs):
        """Populate the object from arbitrary input.

        First tries a direct _construct(); on failure, a single positional
        argument is retried as (in order): a numpy record, a dict, a
        parseable string, an iterable of positional values, another
        SilkObject, or any object with a __dict__.
        Passing None (or a none-valued SilkObject) clears the object.
        """
        if len(args) == 1 and len(kwargs) == 0:
            if args[0] is None or isinstance(args[0], SilkObject) and args[0]._is_none:
                self._is_none = True
                self._clear_data()
                return
        # TODO: make a nice composite exception that stores all exceptions
        try:
            self._construct(prop_setter, *args, **kwargs)
        except Exception:
            if len(args) == 1 and len(kwargs) == 0:
                try:
                    a = args[0]
                    try:
                        if isinstance(a, np.void):
                            # numpy record: collect fields, honoring the
                            # HAS_<name> presence flags of optional members
                            d = {}
                            for name in a.dtype.fields:
                                if name.startswith("HAS_"):
                                    continue
                                name2 = "HAS_" + name
                                if name2 in a.dtype.names and not a[name2]:
                                    continue
                                d[name] = a[name]
                            self._construct(prop_setter, **d)
                        else:
                            raise TypeError
                    except Exception:
                        if isinstance(a, dict):
                            self._construct(prop_setter, **a)
                        elif isinstance(a, str):
                            self._parse(a)
                        # NOTE(review): collections.Iterable is removed in
                        # Python 3.10+ (use collections.abc.Iterable)
                        elif isinstance(a, collections.Iterable) or isinstance(a, np.void):
                            self._construct(prop_setter, *a)
                        elif isinstance(a, SilkObject):
                            d = {prop: getattr(a, prop) for prop in dir(a)}
                            self._construct(prop_setter, **d)
                        elif hasattr(a, "__dict__"):
                            self._construct(prop_setter, **a.__dict__)
                        else:
                            raise TypeError(a)
                except Exception:
                    # re-raise unchanged (placeholder for the composite
                    # exception mentioned in the TODO above)
                    raise
            else:
                raise
        self.validate()
        self._is_none = False
    def validate(self):
        """Validate the object's contents; a real implementation is
        injected when the Silk type is registered."""
        pass # overridden during registration
    def json(self):
        """Returns a JSON representation of the Silk object
        (a plain dict; absent/None members are omitted).
        """
        if self.storage == "json":
            # already json-backed: just strip internal keys
            return _filter_json(self._data)
        d = {}
        for attr in self._props:
            p = self._props[attr]
            ele = p["elementary"]
            value = None
            if ele:
                if self.storage == "numpy":
                    value = _get_numpy_ele_prop(self, attr)
                else:
                    value = self._data[attr]
                if value is not None:
                    # wrap in the property's type class
                    t = self._get_typeclass(attr)
                    value = t(value)
            else:
                child = self._get_child(attr)
                if not child._is_none:
                    value = child.json()
            if value is not None:
                d[attr] = value
        return d
    def numpy(self):
        """Returns a numpy representation of the Silk object
        (a copy; the object's own storage is unchanged).
        NOTE: for optional members,
          the entire storage buffer is returned,
          including (zeroed) elements if the data is not present!
          the extra field "HAS_xxx" indicates if the data is present.
        NOTE: for all numpy array members,
          the entire storage buffer is returned,
          including (zeroed) elements if the data is not present!
          the length of each array is stored in the LEN_xxx field
          TODO: document multidimensional length vector, PTR_LEN_xxx
        NOTE: for numpy array members of variable shape,
          an extra field "PTR_xxx" contains a C pointer to the data
          For this, the dimensionality of the array does not matter,
           e.g. both for IntegerArray and IntegerArrayArray,
            the C pointer will be "int *"
           and both for MyStructArray and MyStructArrayArray,
            the C pointer will be "MyStruct *"
        TODO: add and document SHAPE field
        """
        if self.storage == "numpy":
            return datacopy(self._data)
        # not numpy-backed: convert a throwaway json copy in place
        new_obj = self.copy("json")
        return new_obj.make_numpy()
    def make_json(self):
        """Convert the internal storage to JSON in place; returns the
        backing data dict. 'mixed' storage recursively converts every
        child previously marked as non-JSON."""
        if self.storage == "json":
            return self._data
        elif self.storage == "numpy":
            json = _filter_json(self.json(), self)
            parent = self._parent()
            if parent is not None and parent.storage == "numpy":
                # a json child cannot stay inside a numpy parent record;
                # break the parent's buffer apart first
                parent.numpy_shatter()
            self._init(parent, "json", None)
            self.set(json, prop_setter=_prop_setter_json)
            if parent is not None:
                # re-register with the parent and share our new data dict
                parent._remove_nonjson_child(self)
                myname = parent._find_child(id(self))
                parent._data[myname] = self._data
            return self._data
        elif self.storage == "mixed":
            for child_id in list(self._storage_nonjson_children): # copy!
                for child in self._children.values():
                    if id(child) == child_id:
                        child.make_json()
                        break
                else:
                    raise Exception("Cannot find child that was marked as 'non-JSON'")
            # Above will automatically update storage status to "json"
            return self._data
    def _restore_array_coupling(self):
        """Re-link variable-length array storage after conversion; no-op
        for plain Silk (SilkArray overrides this).

        NOTE(review): make_numpy() invokes the override with
        (data, propname) arguments while this base version takes none —
        confirm the base is never reached for var_array children.
        """
        pass
def make_numpy(self,_toplevel=None):
"""Sets the internal storage to 'numpy'
Returns the numpy array that is used as internal storage buffer
NOTE: for optional members,
the entire storage buffer is returned,
including (zeroed) elements if the data is not present!
an extra field "HAS_xxx" indicates if the data is present.
TODO: update doc
NOTE: for numpy array members of variable shape,
an extra field "PTR_xxx" contains a C pointer to the data
For this, the dimensionality of the array does not matter,
e.g. both for IntegerArray and IntegerArrayArray,
the C pointer will be "int *"
and both for MyStructArray and MyStructArrayArray,
the C pointer will be "MyStruct *"
"""
from .silkarray import SilkArray
if self.storage == "numpy":
return self._data
dtype = np.dtype(self.dtype, align=True)
data = np.zeros(dtype=dtype, shape=(1,))
for propname,prop in self._props.items():
if prop["elementary"]:
value = getattr(self, propname)
_set_numpy_ele_prop(self, propname, value, data)
else:
child = self._get_child(propname)
if not child._is_none:
child.make_numpy(_toplevel=False)
if isinstance(child, SilkArray):
if prop.get("var_array", False):
child._restore_array_coupling(data[0], propname)
else:
data[0][propname] = np.zeros_like(dtype[propname])
slices = [slice(0,v) in child._data.shape]
data[0][propname][slices] = child._data
else:
data[0][propname] = child._data
child._data = None
self._init(self._parent(), "numpy", data[0])
parent = self._parent()
if parent is not None:
if parent.storage != "numpy":
parent._add_nonjson_child(self)
return data[0]
def _find_child(self, child_id):
for childname, ch in self._children.items():
if child_id == id(ch):
return childname
raise KeyError
    def _add_nonjson_child(self, child):
        """Record that *child* now has non-JSON (numpy) storage and
        propagate the resulting 'mixed' state up the parent chain."""
        childname = self._find_child(id(child))
        # var_array children of a numpy-stored parent stay coupled to the
        # parent buffer; nothing to track
        if self._props[childname].get("var_array", False) and \
          self.storage == "numpy":
            return
        assert self.storage != "numpy"
        njc = self._storage_nonjson_children
        child_id = id(child)
        if child_id not in njc:
            njc.add(child_id)
            if self.storage == "json":
                self.storage = "mixed"
            parent = self._parent()
            if parent is not None:
                parent._add_nonjson_child(self)
def _remove_nonjson_child(self, child):
assert self.storage != "numpy"
njc = self._storage_nonjson_children
child_id = id(child)
if | |
len(x)
# outputs a data frame assigning each column a title
# IN:
#    s F - fileName
#    l COL - colNames
# OUT:
#    dataFrame DF with content of x
def df(f,col):
    x = lol(f)
    #one named column per column of the loaded table
    data = {col[i]: x[:, i] for i in range(len(x[0, :]))}
    return pd.DataFrame(data)
# returns dir, stm, ext from input
# IN:
#   x: fullPathString
# OUT:
#   dir
#   stm
#   ext (without the leading dot)
def dfe(x):
    d, base = os.path.split(x)
    s, e = os.path.splitext(base)
    return d, s, e.replace('.', '')
# returns file name stem
# IN:
#   f fullPath/fileName
# OUT:
#   s stem
def stm(f):
    base = os.path.basename(f)
    return os.path.splitext(base)[0]
# replaces path (and extension) and keeps stem (and extension)
# IN:
#   f: 'my/dir/to/file.ext'
#   d: 'my/new/dir'
#   e: myNewExt <''>
# OUT:
#   n: 'my/new/dir/file.ext' / 'my/new/dir/file.myNewExt'
def repl_dir(f,d,e=''):
    _, stem, ext = dfe(f)
    if len(e) > 0:
        ext = e
    return os.path.join(d, "{}.{}".format(stem, ext))
# replaces extension
# IN:
#   f: 'my/dir/to/file.ext'
#   ext: newext, if empty only dir/stm returned
# OUT:
#   f: 'my/dir/to/file.newext'
def repl_ext(f,ext=""):
    d, stem, _ = dfe(f)
    if len(ext) == 0:
        return os.path.join(d, stem)
    return os.path.join(d, "{}.{}".format(stem, ext))
# normalizes vector x according to
#   opt.mtd|(rng|max|min)
#   mtd: 'minmax'|'zscore'|'std'
#   'minmax' - normalize to opt.rng
#   'zscore' - z-transform
#   'std' - divided by std (whitening)
# IN:
#   x - vector
#   opt - dict 'mtd'|'rng'|'max'|'min'
# OUT:
#   x normalized
def nrm_vec(x,opt):
    mtd = opt['mtd']
    if mtd == 'minmax':
        r = opt['rng']
        #explicit extremes in opt override the observed ones
        ma = opt['max'] if 'max' in opt else max(x)
        mi = opt['min'] if 'min' in opt else min(x)
        if ma > mi:
            x = (x - mi) / (ma - mi)
            x = r[0] + x * (r[1] - r[0])
    elif mtd == 'zscore':
        x = st.zscore(x)
    elif mtd == 'std':
        x = sc.whiten(x)
    return x
# normalizes scalar to range opt.min|max set to opt.rng
# supports minmax only; any other method returns x unchanged
def nrm(x,opt):
    if opt['mtd'] != 'minmax':
        return x
    mi, ma, r = opt['min'], opt['max'], opt['rng']
    if ma > mi:
        x = (x - mi) / (ma - mi)
        x = r[0] + x * (r[1] - r[0])
    return x
# maps integers from -32768 to 32767 to interval [-1 1]
# IN:
#   s - ndarray of int16-range sample values
# OUT:
#   ndarray of floats clipped to [-1, 1]; the input array is not modified
def wav_int2float(s):
    #np.clip replaces the original hand-rolled find()-based clamping
    return np.clip(s / 32768, -1, 1)
# normalisation of T to range specified in vector RANGE,
# anchoring the value opt['t0'] at zero
# opt
#      .t0  zero is placed to value t0
#      .rng [min max] val for t nrmd, must span interval
# RNG must span interval including 0
def nrm_zero_set(t,opt):
    if len(t)==0: return t
    # split t at t0: values <= t0 map to the negative half of rng,
    # values > t0 to the positive half
    t1 = t[find(t,'<=',opt['t0'])]
    t2 = t[find(t,'>',opt['t0'])]
    # degenerate: all values on one side -> plain minmax over full rng
    if len(t1)==0 or len(t2)==0:
        return nrm_vec(t,{'mtd':'minmax','rng':opt['rng']})
    # corresponding ranges
    # NOTE(review): r2 starts at rng[1]/len(t2) (not at 0) — presumably to
    # keep the first positive value strictly above zero; confirm intent
    r1=[opt['rng'][0], 0];
    r2=[opt['rng'][1]/len(t2), opt['rng'][1]];
    # separate normalisations for t-halves
    o = {}
    o['mtd'] = 'minmax';
    o['rng'] = r1
    t1n = nrm_vec(t1,o)
    o['rng'] = r2
    t2n = nrm_vec(t2,o);
    return np.concatenate((t1n,t2n))
# returns index array for a vector of length l with step sts
# (highest index is l-1)
def idx_a(l,sts=1):
    return np.arange(0, l, sts)
# returns index array between on and off (both included), step sts
def idx_seg(on,off,sts=1):
    return np.arange(on, off + 1, sts)
# returns an index iterable over list l (0 .. len(l)-1)
def idx(l):
    return range(len(l))
# copy key-value pairs from dict a into dict b; returns b (modified in place)
def cp_dict(a,b):
    b.update(a)
    return b
# returns dimension of numpy array
# IN:
#   x - array
# OUT:
#   int for number of dims
def ndim(x):
    return len(x.shape)
# transforms TextGrid tier to 2 arrays
# point -> 1 dim + lab
# interval -> 2 dim (one row per segment) + lab
# IN:
#   t: tg tier (by tg_tier())
#   opt dict <None -> {}>
#       .skip <""> regular expression for labels of items to be skipped
#             if empty, only empty items will be skipped
# OUT:
#   x: 1- or 2-dim array of time stamps
#   lab: corresponding labels
# REMARK:
#   empty intervals are skipped
def tg_tier2tab(t, opt=None):
    #None default instead of the original mutable {} default, which would
    #be shared across calls if opt_default() ever mutates its argument
    if opt is None:
        opt = {}
    opt = opt_default(opt,{"skip": ""})
    if len(opt["skip"])>0:
        do_skip=True
    else:
        do_skip=False
    x = ea()
    lab = []
    if 'intervals' in t:
        for i in numkeys(t['intervals']):
            z = t['intervals'][i]
            #empty intervals are always skipped
            if len(z['text'])==0:
                continue
            if do_skip and re.search(opt["skip"],z["text"]):
                continue
            x = push(x,[z['xmin'],z['xmax']])
            lab.append(z['text'])
    else:
        for i in numkeys(t['points']):
            z = t['points'][i]
            if do_skip and re.search(opt["skip"],z["mark"]):
                continue
            x = push(x,z['time'])
            lab.append(z['mark'])
    return x, lab
# transforms table to TextGrid tier
# IN:
# t - numpy 1- or 2-dim array with time info
# lab - list of labels <[]>
# specs['class'] <'IntervalTier' for 2-dim, 'TextTier' for 1-dim>
# ['name']
# ['xmin'] <0>
# ['xmax'] <max tab>
# ['size'] - will be determined automatically
# ['lab_pau'] - <''>
# OUT:
# dict tg tier (see i_tg() subdict below myItemIdx)
# for 'interval' tiers gaps between subsequent intervals will be bridged
# by lab_pau
def tg_tab2tier(t,lab,specs):
    """Transform a time table into a TextGrid tier dict.

    IN:
      t - numpy 1- or 2-dim array with time info
      lab - list of labels <[]>
      specs['class'] <'IntervalTier' for 2-dim, 'TextTier' for 1-dim>
           ['name']
           ['xmin'] <0>
           ['xmax'] <max tab>
           ['size'] - will be determined automatically
           ['lab_pau'] - <''>
    OUT:
      dict tg tier (see i_tg() subdict below myItemIdx)
    REMARK:
      for 'interval' tiers gaps between subsequent intervals will be
      bridged by lab_pau
    """
    tt = {'name':specs['name']}
    nd = ndim(t)
    # 2dim array with 1 col is still treated as a point tier
    if nd==2: nd = ncol(t)
    # tier class: 1-dim input -> point tier, otherwise interval tier
    if nd == 1:
        tt['class'] = 'TextTier'
        tt['points'] = {}
    else:
        tt['class'] = 'IntervalTier'
        tt['intervals'] = {}
    # pause label for gaps between intervals
    if 'lab_pau' in specs:
        lp = specs['lab_pau']
    else:
        lp = ''
    # xmin, xmax: taken from specs, else derived from the table
    if 'xmin' not in specs:
        tt['xmin']=0
    else:
        tt['xmin'] = specs['xmin']
    if 'xmax' not in specs:
        if nd==1:
            tt['xmax'] = t[-1]
        else:
            tt['xmax'] = t[-1,1]
    else:
        tt['xmax'] = specs['xmax']
    # point tier content (TextGrid indices are 1-based, hence i+1)
    if nd==1:
        for i in idx_a(len(t)):
            # point tier content might be read as [[x],[x],[x],...] or [x,x,x,...]
            if of_list_type(t[i]):
                z = t[i,0]
            else:
                z = t[i]
            tt['points'][i+1] = {'time':z, 'mark':lab[i]}
        tt['size']=len(t)
    # interval tier content; j tracks the next 1-based output index
    else:
        j=1
        # initial pause before the first segment
        if t[0,0] > tt['xmin']:
            tt['intervals'][j]={'xmin':tt['xmin'],'xmax':t[0,0],'text':lp}
            j+=1
        for i in idx_a(len(t)):
            # pause insertions to bridge gaps between segments
            if ((j-1 in tt['intervals']) and
                    t[i,0]>tt['intervals'][j-1]['xmax']):
                tt['intervals'][j]={'xmin':tt['intervals'][j-1]['xmax'],
                                    'xmax':t[i,0],'text':lp}
                j+=1
            tt['intervals'][j]={'xmin':t[i,0],'xmax':t[i,1],'text':lab[i]}
            j+=1
        # final pause up to xmax
        if tt['intervals'][j-1]['xmax'] < tt['xmax']:
            tt['intervals'][j]={'xmin':tt['intervals'][j-1]['xmax'],
                                'xmax':tt['xmax'],'text':lp}
            j+=1 # so that uniform 1 subtraction for size
        # size: number of intervals actually written
        tt['size']=j-1
    return tt
# add tier to TextGrid
# IN:
# tg dict from i_tg(); can be empty dict
# tier subdict to be added:
# same dict form as in i_tg() output, below 'myItemIdx'
# opt
# ['repl'] <True> - replace tier of same name
# OUT:
# tg updated
# REMARK:
# !if generated from scratch head xmin and xmax are taken over from the tier
# which might need to be corrected afterwards!
def tg_add(tg, tier, opt=None):
    """Add a tier to a TextGrid dict (creating the TextGrid from scratch if needed).

    IN:
      tg: dict from i_tg(); can be an empty dict
      tier: subdict to be added, same form as in i_tg() output below 'myItemIdx'
      opt:
        ['repl'] <True> - replace an existing tier of the same name
    OUT:
      tg updated
    REMARK:
      if generated from scratch, head xmin and xmax are taken over from the
      tier, which might need to be corrected afterwards!
    """
    # FIX: the original used a mutable default dict; also, a caller-supplied
    # opt without a 'repl' key raised KeyError — fall back to the default.
    if opt is None:
        opt = {'repl': True}
    # from scratch
    if 'item_name' not in tg:
        fromScratch = True
        tg = {'name':'', 'format':'long', 'item_name':{}, 'item':{},
              'head':{'size':0,'xmin':0,'xmax':0,'type':'ooTextFile'}}
    else:
        fromScratch = False
    # tier already contained? -> replace in place
    if (opt.get('repl', True) == True and (tier['name'] in tg['item_name'])):
        i = tg['item_name'][tier['name']]
        tg['item'][i] = tier
    else:
        # append under the next free item index
        ii = numkeys(tg['item'])
        if len(ii)==0: i=1
        else: i = ii[-1]+1
        tg['item_name'][tier['name']] = i
        tg['item'][i] = tier
        tg['head']['size'] += 1
    # for a fresh TextGrid inherit the time span from the tier
    if fromScratch and 'xmin' in tier:
        for x in ['xmin','xmax']:
            tg['head'][x] = tier[x]
    return tg
# transform interchange format to TextGrid
# transforms table to TextGrid tier
# IN:
# an: annot dict e.g. by i_par() or i_copa_xml()
# OUT:
# tg: TextGrid dict
# for 'interval' tiers gaps between subsequent intervals are bridged
# only tiers with time information are taken over!
def inter2tg(an):
    """Transform interchange-format annotation into a TextGrid dict.

    IN:
      an: annot dict e.g. by i_par() or i_copa_xml()
    OUT:
      tg: TextGrid dict
    REMARK:
      for 'interval' tiers, gaps between subsequent intervals are bridged
      with empty-text pause intervals; only tiers with time information
      ('segment'/'event' type) are taken over!
    """
    typeMap = {'segment':'IntervalTier', 'event':'TextTier'}
    itemMap = {'segment':'intervals', 'event':'points'}
    tg = {'type':'TextGrid','format':'long',
          'head':{'xmin':0,'xmax':-1,'size':0},
          'item_name':{},'item':{}}
    # item idx (1-based TextGrid item index)
    ii=1
    # over tiers, in sorted name order
    for x in sorted(an.keys()):
        # skip tier without time info
        if an[x]['type'] not in typeMap:
            continue
        tg['head']['size']+=1
        tg['item_name'][x]=ii
        tg['item'][ii]={'name':x, 'size':0, 'xmin':0, 'xmax':-1,
                        'class':typeMap[an[x]['type']]}
        z = itemMap[an[x]['type']]
        # becomes tg['item'][ii]['points'|'intervals']
        tt={}
        # point or interval tier content
        if z=='points':
            # j: tier items (j+1 in TextGrid output, which is 1-based)
            for j in numkeys(an[x]['items']):
                y = an[x]['items'][j]
                tt[j+1]={'time':y['t'],
                         'mark':y['label']}
                tg['item'][ii]['size'] += 1
                # xmax grows to the last point's time
                tg['item'][ii]['xmax'] = y['t']
        else:
            j=1
            # initial pause before the first segment
            y = an[x]['items'][0]
            if y['t_start'] > 0:
                tt[j]={'xmin':tg['item'][ii]['xmin'],
                       'xmax':y['t_start'], 'text':''}
                j+=1
            # i: input tier idx, j: output tier idx
            for i in numkeys(an[x]['items']):
                y = an[x]['items'][i]
                # pause insertions to bridge gaps between segments
                if ((j-1 in tt) and
                        y['t_start'] > tt[j-1]['xmax']):
                    tt[j]={'xmin':tt[j-1]['xmax'],
                           'xmax':y['t_start'], 'text':''}
                    j+=1
                tt[j]={'xmin':y['t_start'],'xmax':y['t_end'],'text':y['label']}
                tg['item'][ii]['xmax']=tt[j]['xmax']
                j+=1
            # size: number of intervals written
            tg['item'][ii]['size']=j-1
        # copy to interval/points subdict
        tg['item'][ii][z] = tt
        # global xmax is the max over all tiers
        tg['head']['xmax'] = max(tg['head']['xmax'],tg['item'][ii]['xmax'])
        ii+=1
    # uniform xmax: pad shorter interval tiers with a final silent interval
    for ii in tg['item']:
        # add silent interval
        if (tg['item'][ii]['class']=='IntervalTier' and
                tg['item'][ii]['xmax'] < tg['head']['xmax']):
            tg['item'][ii]['size'] += 1
            j = max(tg['item'][ii]['intervals'])+1
            xm = tg['item'][ii]['intervals'][j-1]['xmax']
            tg['item'][ii]['intervals'][j] = {'text':'','xmin':xm,
                                              'xmax':tg['head']['xmax']}
            tg['item'][ii]['xmax']=tg['head']['xmax']
    return tg
# as inter2tg() but omitting header item
# IN:
# par dict from i_par()
# OUT:
# tg dict as with i_tg()
def par2tg(par_in):
    """Convert a par dict (from i_par()) to a TextGrid dict, omitting the header item.

    Same as inter2tg(), applied to a deep copy with 'header' removed.
    """
    an = cp.deepcopy(par_in)
    an.pop('header')
    return inter2tg(an)
# returns item-related subkeys: 'intervals', 'text' or 'points', 'mark'
# IN:
# t tier
# OUT:
# x key1
# y key2
def tg_item_keys(t):
    """Return the item-related subkey pair of a tier.

    ('intervals', 'text') for interval tiers, ('points', 'mark') otherwise.
    """
    return ('intervals', 'text') if 'intervals' in t else ('points', 'mark')
# wrapper around tg2inter() + adding 'header' item / 'class' key
# WARNING: information loss! MAU tier does not contain any wordIdx reference!
# IN:
# tg: dict read by tg_in
# fs: sample rate
# OUT:
# par: par dict (copa-xml format)
# REMARK: output cannot contain wordIdx refs!
# | |
steps, labels = self._xAxisTicker(xEOM)
else:
steps, labels = self._xAxisTicker(xVals)
return steps, labels
def configure(self, data):
    """Configure the axis from the data: compute tick values, labels and scale.

    FIX: the original assigned self._tickValues = steps twice; the duplicate
    (dead) assignment has been removed.
    """
    self._convertXV(data)
    from reportlab.lib.set_ops import union
    # collect the distinct x values over all series
    xVals = reduce(union,map(lambda x: map(lambda dv: dv[0],x),data),[])
    xVals.sort()
    steps,labels = self._getStepsAndLabels(xVals)
    valueMin, valueMax = self.valueMin, self.valueMax
    if valueMin is None: valueMin = xVals[0]
    if valueMax is None: valueMax = xVals[-1]
    self._valueMin, self._valueMax = valueMin, valueMax
    self._tickValues = steps
    self._labelTextFormat = labels
    # NOTE(review): raises ZeroDivisionError when valueMax == valueMin — verify
    # callers guarantee a non-degenerate range
    self._scaleFactor = self._length / float(valueMax - valueMin)
    self._configured = 1
class YValueAxis(_YTicks,ValueAxis):
    "Y/value axis"
    # declarative attribute validation map used by the reportlab framework
    _attrMap = AttrMap(BASE=ValueAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # Indicate the dimension of the data we're interested in.
    _dataIndex = 1

    def __init__(self):
        # defaults: labels anchored east of the axis, ticks drawn to the left
        ValueAxis.__init__(self)
        self.labels.boxAnchor = 'e'
        self.labels.dx = -5
        self.labels.dy = 0
        self.tickRight = 0
        self.tickLeft = 5
        self.joinAxis = None
        self.joinAxisMode = None
        self.joinAxisPos = None

    def demo(self):
        # minimal self-contained drawing used by the reportlab doc generator
        data = [(10, 20, 30, 42)]
        self.setPosition(100, 10, 80)
        self.configure(data)
        drawing = Drawing(200, 100)
        drawing.add(self)
        return drawing

    def joinToAxis(self, xAxis, mode='left', pos=None):
        "Join with x-axis using some mode."
        _assertXAxis(xAxis)
        if mode == 'left':
            self._x = xAxis._x * 1.0
        elif mode == 'right':
            self._x = (xAxis._x + xAxis._length) * 1.0
        elif mode == 'value':
            # place at the x-axis position corresponding to data value pos
            self._x = xAxis.scale(pos) * 1.0
        elif mode == 'points':
            self._x = pos * 1.0

    def _joinToAxis(self):
        # apply the declarative joinAxis/joinAxisMode/joinAxisPos settings
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            if jam in ('left', 'right'):
                self.joinToAxis(ja, mode=jam)
            elif jam in ('value', 'points'):
                self.joinToAxis(ja, mode=jam, pos=self.joinAxisPos)

    def makeAxis(self):
        # build the axis line group; loLLen/hiLLen/visibleAxis come from the
        # ValueAxis base class (not visible here) — presumably line-extension
        # lengths and a visibility flag
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis: return g
        axis = Line(self._x, self._y-self.loLLen, self._x, self._y + self._length+self.hiLLen)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g
class AdjYValueAxis(YValueAxis):
    """A Y-axis applying additional rules.

    Depending on the data and some built-in rules, the axis
    may choose to adjust its range and origin.
    """
    _attrMap = AttrMap(BASE = YValueAxis,
        requiredRange = AttrMapValue(isNumberOrNone, desc='Minimum required value range.'),
        leftAxisPercent = AttrMapValue(isBoolean, desc='When true add percent sign to label values.'),
        leftAxisOrigShiftIPC = AttrMapValue(isNumber, desc='Lowest label shift interval ratio.'),
        leftAxisOrigShiftMin = AttrMapValue(isNumber, desc='Minimum amount to shift.'),
        leftAxisSkipLL0 = AttrMapValue(EitherOr((isBoolean,isListOfNumbers)), desc='Skip/Keep lowest tick label when true/false.\nOr skiplist'),
        labelVOffset = AttrMapValue(isNumber, desc='add this to the labels'),
        )

    def __init__(self,**kw):
        YValueAxis.__init__(self,**kw)
        self.requiredRange = 30
        self.leftAxisPercent = 1
        self.leftAxisOrigShiftIPC = 0.15
        self.leftAxisOrigShiftMin = 12
        self.leftAxisSkipLL0 = self.labelVOffset = 0
        self.valueSteps = None

    def _rangeAdjust(self):
        "Adjusts the value range of the axis."
        from reportlab.graphics.charts.utils import find_good_grid, ticks
        y_min, y_max = self._valueMin, self._valueMax
        m = self.maximumTicks
        # BUG FIX: under Python 3, filter() returns a lazy iterator, so the
        # original `if not n` fallback could never fire (iterators are always
        # truthy) and the iterator was exhausted after its first use below.
        # Build a concrete list instead.
        n = [x for x in (4, 5, 6, 7, 8, 9) if x <= m]
        if not n: n = [m]
        valueStep, requiredRange = self.valueStep, self.requiredRange
        # widen the range if it is narrower than requiredRange
        if requiredRange and y_max - y_min < requiredRange:
            y1, y2 = find_good_grid(y_min, y_max,n=n,grid=valueStep)[:2]
            if y2 - y1 < requiredRange:
                ym = (y1+y2)*0.5
                y1 = min(ym-requiredRange*0.5,y_min)
                y2 = max(ym+requiredRange*0.5,y_max)
                # keep the origin at a "nice" boundary (100 or 0)
                if y_min>=100 and y1<100:
                    y2 = y2 + 100 - y1
                    y1 = 100
                elif y_min>=0 and y1<0:
                    y2 = y2 - y1
                    y1 = 0
            self._valueMin, self._valueMax = y1, y2
        T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
        abf = self.avoidBoundFrac
        if abf:
            # push the range out so data points do not sit on the boundary
            i1 = (T[1]-T[0])
            if not isinstance(abf,_SequenceTypes):
                i0 = i1 = i1*abf
            else:
                i0 = i1*abf[0]
                i1 = i1*abf[1]
            _n = getattr(self,'_cValueMin',T[0])
            _x = getattr(self,'_cValueMax',T[-1])
            if _n - T[0] < i0: self._valueMin = self._valueMin - i0
            if T[-1]-_x < i1: self._valueMax = self._valueMax + i1
            T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
        self._valueMin = T[0]
        self._valueMax = T[-1]
        self._tickValues = T
        if self.labelTextFormat is None:
            self._labelTextFormat = L
        else:
            self._labelTextFormat = self.labelTextFormat
        # special-case an origin of exactly 100: shift it down a little
        if abs(self._valueMin-100)<1e-6:
            self._calcValueStep()
            vMax, vMin = self._valueMax, self._valueMin
            m = max(self.leftAxisOrigShiftIPC*self._valueStep,
                    (vMax-vMin)*self.leftAxisOrigShiftMin/self._length)
            self._valueMin = self._valueMin - m
        # optionally blank out the lowest tick label (or a list of them)
        if self.leftAxisSkipLL0:
            if isinstance(self.leftAxisSkipLL0,_SequenceTypes):
                for x in self.leftAxisSkipLL0:
                    try:
                        L[x] = ''
                    except IndexError:
                        pass
            L[0] = ''
# Sample functions.
def sample0a():
    "Sample drawing with one xcat axis and two buckets."
    data = [(10, 20)]
    ax = XCategoryAxis()
    ax.setPosition(75, 75, 300)
    ax.configure(data)
    ax.categoryNames = ['Ying', 'Yang']
    ax.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(ax)
    return d
def sample0b():
    "Sample drawing with one xcat axis and one bucket only."
    data = [(10,)]
    ax = XCategoryAxis()
    ax.setPosition(75, 75, 300)
    ax.configure(data)
    ax.categoryNames = ['Ying']
    ax.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(ax)
    return d
def sample1():
    "Sample drawing containing two unconnected axes."
    from reportlab.graphics.shapes import _baseGFontNameB
    data = [(10, 20, 30, 42)]
    xax = XCategoryAxis()
    xax.setPosition(75, 75, 300)
    xax.configure(data)
    xax.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xax.labels.boxAnchor = 'n'
    # give the last category label a custom look
    xax.labels[3].dy = -15
    xax.labels[3].angle = 30
    xax.labels[3].fontName = _baseGFontNameB
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample4a():
    "Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
    data = [(10, 20, 30, 42)]
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.configure(data)
    xax = XValueAxis()
    xax._length = 300
    xax.joinAxis = yax
    xax.joinAxisMode = 'points'
    xax.joinAxisPos = 100
    xax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample4b():
    "Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
    data = [(10, 20, 30, 42)]
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.configure(data)
    xax = XValueAxis()
    xax._length = 300
    xax.joinAxis = yax
    xax.joinAxisMode = 'value'
    xax.joinAxisPos = 35
    xax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample4c():
    "Sample drawing, xvalue/yvalue axes, y connected to bottom of x."
    data = [(10, 20, 30, 42)]
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.configure(data)
    xax = XValueAxis()
    xax._length = 300
    xax.joinAxis = yax
    xax.joinAxisMode = 'bottom'
    xax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample4c1():
    "xvalue/yvalue axes, without drawing axis lines/ticks."
    data = [(10, 20, 30, 42)]
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.configure(data)
    yax.visibleAxis = 0
    yax.visibleTicks = 0
    xax = XValueAxis()
    xax._length = 300
    xax.joinAxis = yax
    xax.joinAxisMode = 'bottom'
    xax.configure(data)
    xax.visibleAxis = 0
    xax.visibleTicks = 0
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample4d():
    "Sample drawing, xvalue/yvalue axes, y connected to top of x."
    data = [(10, 20, 30, 42)]
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.configure(data)
    xax = XValueAxis()
    xax._length = 300
    xax.joinAxis = yax
    xax.joinAxisMode = 'top'
    xax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample5a():
    "Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
    data = [(10, 20, 30, 42)]
    xax = XValueAxis()
    xax.setPosition(50, 50, 300)
    xax.configure(data)
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.joinAxis = xax
    yax.joinAxisMode = 'points'
    yax.joinAxisPos = 100
    yax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample5b():
    "Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
    data = [(10, 20, 30, 42)]
    xax = XValueAxis()
    xax.setPosition(50, 50, 300)
    xax.configure(data)
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.joinAxis = xax
    yax.joinAxisMode = 'value'
    yax.joinAxisPos = 35
    yax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample5c():
    "Sample drawing, xvalue/yvalue axes, y connected at right of x."
    data = [(10, 20, 30, 42)]
    xax = XValueAxis()
    xax.setPosition(50, 50, 300)
    xax.configure(data)
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.joinAxis = xax
    yax.joinAxisMode = 'right'
    yax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample5d():
    "Sample drawing, xvalue/yvalue axes, y connected at left of x."
    data = [(10, 20, 30, 42)]
    xax = XValueAxis()
    xax.setPosition(50, 50, 300)
    xax.configure(data)
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.joinAxis = xax
    yax.joinAxisMode = 'left'
    yax.configure(data)
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample6a():
    "Sample drawing, xcat/yvalue axes, x connected at top of y."
    data = [(10, 20, 30, 42)]
    yax = YValueAxis()
    yax.setPosition(50, 50, 125)
    yax.configure(data)
    xax = XCategoryAxis()
    xax._length = 300
    xax.configure(data)
    xax.joinAxis = yax
    xax.joinAxisMode = 'top'
    xax.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xax.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xax)
    d.add(yax)
    return d
def sample6b():
"Sample drawing, xcat/yvalue axes, x connected at bottom of y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'bottom'
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor | |
# Source: dicomgrid/sdk-python — ambra_sdk/service/entrypoints/generated/webhook.py
""" Webhook.
Do not edit this file by hand.
This is generated by parsing api.html service doc.
"""
from ambra_sdk.exceptions.service import AccountNotFound
from ambra_sdk.exceptions.service import CustomNotHash
from ambra_sdk.exceptions.service import FdcJwtInvalidPrivateKey
from ambra_sdk.exceptions.service import IncompleteFilter
from ambra_sdk.exceptions.service import InvalidCron
from ambra_sdk.exceptions.service import InvalidEvent
from ambra_sdk.exceptions.service import InvalidFilterField
from ambra_sdk.exceptions.service import InvalidJson
from ambra_sdk.exceptions.service import InvalidMethod
from ambra_sdk.exceptions.service import InvalidRegexp
from ambra_sdk.exceptions.service import InvalidTransformCondition
from ambra_sdk.exceptions.service import InvalidType
from ambra_sdk.exceptions.service import InvalidUrl
from ambra_sdk.exceptions.service import InvalidWebhookSetup
from ambra_sdk.exceptions.service import MissingFields
from ambra_sdk.exceptions.service import NodeNotFound
from ambra_sdk.exceptions.service import NotFound
from ambra_sdk.exceptions.service import NotHash
from ambra_sdk.exceptions.service import NotPermitted
from ambra_sdk.exceptions.service import NotWithCron
from ambra_sdk.exceptions.service import ParseFailed
from ambra_sdk.exceptions.service import SfdcJwtMissingFields
from ambra_sdk.exceptions.service import SfdcJwtNotHash
from ambra_sdk.exceptions.service import SfdcMissingFields
from ambra_sdk.exceptions.service import SfdcNotHash
from ambra_sdk.exceptions.service import SidUserNotFound
from ambra_sdk.exceptions.service import SidUserNotInAccount
from ambra_sdk.exceptions.service import SidUserPrivilegeEscalation
from ambra_sdk.exceptions.service import UserNotFound
from ambra_sdk.service.query import QueryO
from ambra_sdk.service.query import AsyncQueryO
class Webhook:
"""Webhook."""
def __init__(self, api):
    """Keep a reference to the API object used to build queries."""
    self._api = api
def list(
    self,
    account_id,
):
    """List.

    :param account_id: uuid of the account
    """
    request_data = {'account_id': account_id}
    errors_mapping = {
        ('MISSING_FIELDS', None): MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields'),
        ('NOT_FOUND', None): NotFound('The account can not be found'),
        ('NOT_PERMITTED', None): NotPermitted('You are not permitted to view this list'),
    }
    return QueryO(
        api=self._api,
        url='/webhook/list',
        request_data=request_data,
        errors_mapping=errors_mapping,
        required_sid=True,
    )
def add(
    self,
    account_id,
    event,
    method,
    name,
    url,
    auth=None,
    by_accession_number=None,
    by_uid=None,
    by_webhook_event=None,
    cron=None,
    delay=None,
    filter_field=None,
    filter_regexp=None,
    max_age=None,
    node_id=None,
    once=None,
    parameters=None,
    retry=None,
    sid_user_id=None,
    suspended=None,
):
    """Add.

    :param account_id: uuid of the account
    :param event: Event to call it on (See the notes for the available events)
    :param method: Method to call it with (POST|GET|POST_JSON|PUT|GET_JSON)
    :param name: Name of the webhook
    :param url: URL to call
    :param auth: A JSON hash with the authentication details (optional)
    :param by_accession_number: Flag to expand the once search to include studies with the same accession_number (optional)
    :param by_uid: Flag to expand the once search to include studies with the same study_uid (optional)
    :param by_webhook_event: Flag to fire WEBHOOK_FAILED once at final unsuccessful try of a failing webhook (optional)
    :param cron: Cron timing string for CRON events e.g 0 9 * * mon-fri(optional)
    :param delay: Number of seconds to delay running this webhook for after it is triggered (optional)
    :param filter_field: Name of the study field (by default) or another object's field (should have prefix like "webhook.") to filter on (optional)
    :param filter_regexp: Regular expression to match the value of the filter_field against (optional)
    :param max_age: Ignore studies that are more than this number of days old based on the study_date (optional)
    :param node_id: uuid of the node to proxy the webhook through (optional)
    :param once: Flag that this webhook should only be run once for a specific study (optional)
    :param parameters: A JSON object of the parameter names and values (optional)
    :param retry: Retry the webhook if it fails (optional)
    :param sid_user_id: UUID of the user to generate a sid as (optional)
    :param suspended: This webhook is suspended and not triggered (optional)
    """
    # Optional parameters are passed through even when None; presumably the
    # query layer strips None values before issuing the request — TODO confirm.
    request_data = {
       'account_id': account_id,
       'auth': auth,
       'by_accession_number': by_accession_number,
       'by_uid': by_uid,
       'by_webhook_event': by_webhook_event,
       'cron': cron,
       'delay': delay,
       'event': event,
       'filter_field': filter_field,
       'filter_regexp': filter_regexp,
       'max_age': max_age,
       'method': method,
       'name': name,
       'node_id': node_id,
       'once': once,
       'parameters': parameters,
       'retry': retry,
       'sid_user_id': sid_user_id,
       'suspended': suspended,
       'url': url,
    }
    # Map (error_type, error_subtype) pairs returned by the service to
    # the SDK exception raised for each.
    errors_mapping = {}
    errors_mapping[('ACCOUNT_NOT_FOUND', None)] = AccountNotFound('The account can not be found')
    errors_mapping[('CUSTOM_NOT_HASH', None)] = CustomNotHash('The custom auth value is not a JSON hash')
    errors_mapping[('FDC_JWT_INVALID_PRIVATE_KEY', None)] = FdcJwtInvalidPrivateKey('The private key is invalid')
    errors_mapping[('INCOMPLETE_FILTER', None)] = IncompleteFilter('Both a field and regexp are required')
    errors_mapping[('INVALID_CRON', None)] = InvalidCron('The cron value is invalid')
    errors_mapping[('INVALID_EVENT', None)] = InvalidEvent('An invalid event was passed')
    errors_mapping[('INVALID_FILTER_FIELD', None)] = InvalidFilterField('Invalid filter field name')
    errors_mapping[('INVALID_JSON', None)] = InvalidJson('The parameters field is not in valid JSON format.')
    errors_mapping[('INVALID_METHOD', None)] = InvalidMethod('An invalid method was passed')
    errors_mapping[('INVALID_REGEXP', None)] = InvalidRegexp('Invalid regular expression')
    errors_mapping[('INVALID_TRANSFORM_CONDITION', None)] = InvalidTransformCondition('The transform condition is invalid')
    errors_mapping[('INVALID_URL', None)] = InvalidUrl('The url value is invalid')
    errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
    errors_mapping[('NODE_NOT_FOUND', None)] = NodeNotFound('The node can not be found')
    errors_mapping[('NOT_HASH', None)] = NotHash('The parameter or auth field is not a hash.')
    errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to add a webhook to this account')
    errors_mapping[('NOT_WITH_CRON', None)] = NotWithCron('The delay or retry option can not be used for cron events')
    errors_mapping[('SFDC_JWT_MISSING_FIELDS', None)] = SfdcJwtMissingFields('Fields are missing for the SFDC auth hash')
    errors_mapping[('SFDC_JWT_NOT_HASH', None)] = SfdcJwtNotHash('The SFDC auth value is not a JSON hash')
    errors_mapping[('SFDC_MISSING_FIELDS', None)] = SfdcMissingFields('Fields are missing for the SFDC auth hash')
    errors_mapping[('SFDC_NOT_HASH', None)] = SfdcNotHash('The SFDC auth value is not a JSON hash')
    errors_mapping[('SID_USER_NOT_FOUND', None)] = SidUserNotFound('The sid user can not be found')
    errors_mapping[('SID_USER_NOT_IN_ACCOUNT', None)] = SidUserNotInAccount('The sid user is not a member of this account')
    errors_mapping[('SID_USER_PRIVILEGE_ESCALATION', None)] = SidUserPrivilegeEscalation('The user for sid_user_id has higher privileges that the calling user')
    errors_mapping[('USER_NOT_FOUND', None)] = UserNotFound('The basic authentication user can not be found')
    query_data = {
        'api': self._api,
        'url': '/webhook/add',
        'request_data': request_data,
        'errors_mapping': errors_mapping,
        'required_sid': True,
    }
    return QueryO(**query_data)
def set(
self,
uuid,
auth=None,
by_accession_number=None,
by_uid=None,
by_webhook_event=None,
cron=None,
delay=None,
event=None,
filter_field=None,
filter_regexp=None,
max_age=None,
method=None,
name=None,
node_id=None,
once=None,
parameters=None,
retry=None,
sid_user_id=None,
suspended=None,
url=None,
):
"""Set.
:param uuid: uuid of the webhook
:param auth: A JSON hash with the authentication details (optional)
:param by_accession_number: Flag to expand the once search to include studies with the same accession_number (optional)
:param by_uid: Flag to expand the once search to include studies with the same study_uid (optional)
:param by_webhook_event: Flag to fire WEBHOOK_FAILED once at final unsuccessful try of a failing webhook (optional)
:param cron: Cron timing string for CRON events (optional)
:param delay: Number of seconds to delay running this webhook for after it is triggered (optional)
:param event: Event to call it on (optional see add command for options)
:param filter_field: Name of the field to filter on (optional)
:param filter_regexp: Regular expression to match the value of the filter_field against (optional)
:param max_age: Ignore studies that are more than this number of days old based on the study_date (optional)
:param method: Method to call it with (optional see add command for options)
:param name: Name of the webhook (optional)
:param node_id: uuid of the node to proxy the webhook through (optional)
:param once: Flag that this webhook should only be run once for a specific study (optional)
:param parameters: A JSON object of the parameter names and values (optional see add command for options)
:param retry: Retry the webhook if it fails (optional)
:param sid_user_id: UUID of the user to generate a sid as (optional)
:param suspended: This webhook is suspended and not triggered (optional)
:param url: URL to call (optional)
"""
request_data = {
'auth': auth,
'by_accession_number': by_accession_number,
'by_uid': by_uid,
'by_webhook_event': by_webhook_event,
'cron': cron,
'delay': delay,
'event': event,
'filter_field': filter_field,
'filter_regexp': filter_regexp,
'max_age': max_age,
'method': method,
'name': name,
'node_id': node_id,
'once': once,
'parameters': parameters,
'retry': retry,
'sid_user_id': sid_user_id,
'suspended': suspended,
'url': url,
'uuid': uuid,
}
errors_mapping = {}
errors_mapping[('INCOMPLETE_FILTER', None)] = IncompleteFilter('Both a field and regexp are required')
errors_mapping[('INVALID_CRON', None)] = InvalidCron('The cron value is invalid')
errors_mapping[('INVALID_EVENT', None)] = InvalidEvent('An invalid event was passed')
errors_mapping[('INVALID_FILTER_FIELD', None)] = InvalidFilterField('Invalid filter field name')
errors_mapping[('INVALID_JSON', None)] = InvalidJson('The parameters field is not in valid JSON format.')
errors_mapping[('INVALID_METHOD', None)] = InvalidMethod('An invalid method was passed')
errors_mapping[('INVALID_REGEXP', None)] = InvalidRegexp('Invalid regular expression')
errors_mapping[('INVALID_TRANSFORM_CONDITION', None)] = InvalidTransformCondition('The transform condition is invalid')
errors_mapping[('INVALID_URL', None)] = InvalidUrl('The url value is invalid')
errors_mapping[('MISSING_FIELDS', None)] = MissingFields('A required field is missing or does not have data in it. The error_subtype holds a array of all the missing fields')
errors_mapping[('NODE_NOT_FOUND', None)] = NodeNotFound('The node can not be found')
errors_mapping[('NOT_FOUND', None)] = NotFound('The webhook can not be found')
errors_mapping[('NOT_HASH', None)] = NotHash('The parameter field is not a hash.')
errors_mapping[('NOT_PERMITTED', None)] = NotPermitted('You are not permitted to edit the webhook')
errors_mapping[('NOT_WITH_CRON', None)] = NotWithCron('The delay or retry option | |
"""
Link Layer Discovery Protocol
IEEE 802.1AB
DCB eXchange protocol
IEEE 802.1Qaz
"""
import struct
from pypacker import pypacker, triggerlist
from pypacker.pypacker import (mac_str_to_bytes, mac_bytes_to_str,
ip4_str_to_bytes, ip4_bytes_to_str,
ip6_str_to_bytes, ip6_bytes_to_str)
from pypacker.pypacker import FIELD_FLAG_AUTOUPDATE
# avoid unneeded references for performance reasons
# pre-bound big-endian struct (un)packers, resolved once at import time
unpack_H = struct.Struct(">H").unpack
unpack_I = struct.Struct(">I").unpack
unpack_B = struct.Struct(">B").unpack
pack_B = struct.Struct(">B").pack
# Mandatory TLV fields length in bytes
# of two field type(7-bit) and length(9-bit)
TLV_HEADER_LEN = 2
TYPE_FIELD_BITS = 7
LENGTH_FIELD_BITS = 9
TYPE_MASK = 0xFE00    # top 7 bits of the 16-bit type/length word
LENGTH_MASK = 0x01FF  # bottom 9 bits
# Organizationally specific TLV fields length in bytes
# of two field OUI(24-bit) and subtype(8-bit)
ORG_SPEC_TYPE = 127
ORG_SPEC_HEADER_LEN = 4
OUI_MASK = 0xFFFFFF00
# length of TLV Subtype field
SUBTYPE_LEN_BYTE = 1
SUBTYPE_LEN_BITS = 8
SUBTYPE_MASK = 0x000000FF
# Convenient access for value field in Chassis TLV
# (subtype 4: MAC address, subtype 5: IPv4 address)
GET_CHASSIS_TLV_SUBTYPES = {4: mac_bytes_to_str, 5: ip4_bytes_to_str}
SET_CHASSIS_TLV_SUBTYPES = {4: mac_str_to_bytes, 5: ip4_str_to_bytes}
# Convenient access for value field in Port TLV
# (subtype 3: MAC address, subtype 4: IPv4 address)
GET_PORT_TLV_SUBTYPES = {3: mac_bytes_to_str, 4: ip4_bytes_to_str}
SET_PORT_TLV_SUBTYPES = {3: mac_str_to_bytes, 4: ip4_str_to_bytes}
# Convenient access for Management Address
# (subtype 1: IPv4, 2: IPv6, 6: MAC)
GET_ADDRESS_SUBTYPE = {1: ip4_bytes_to_str, 2: ip6_bytes_to_str, 6: mac_bytes_to_str}
SET_ADDRESS_SUBTYPE = {1: ip4_str_to_bytes, 2: ip6_str_to_bytes, 6: mac_str_to_bytes}
def get_property_tlv_type():
    """Create a get/set-property for the 7-bit TLV type field packed in type_len."""
    def _get(obj):
        return (obj.type_len & TYPE_MASK) >> LENGTH_FIELD_BITS

    def _set(obj, val):
        obj.type_len = (obj.type_len & ~TYPE_MASK) | (val << LENGTH_FIELD_BITS)

    return property(_get, _set)
def get_property_tlv_len():
    """Create a get/set-property for the 9-bit TLV length field packed in type_len."""
    def _get(obj):
        return obj.type_len & LENGTH_MASK

    def _set(obj, val):
        obj.type_len = (obj.type_len & TYPE_MASK) | val

    return property(_get, _set)
def get_property_tlv_oui():
    """Create a get/set-property for the 24-bit OUI field packed in oui_subtype."""
    def _get(obj):
        return (obj.oui_subtype & OUI_MASK) >> 8

    def _set(obj, val):
        obj.oui_subtype = (obj.oui_subtype & ~OUI_MASK) | (val << 8)

    return property(_get, _set)
def get_property_tlv_subtype():
    """Create a get/set-property for the 8-bit subtype field packed in oui_subtype."""
    def _get(obj):
        return obj.oui_subtype & SUBTYPE_MASK

    def _set(obj, val):
        obj.oui_subtype = (obj.oui_subtype & OUI_MASK) | val

    return property(_get, _set)
def get_property_to_convert_8_bytes_to_list(var):
    """Create a get/set-property converting an 8-byte field to a list of ints (decimal representation)."""
    def _get(obj):
        return [unpack_B(b)[0] for b in obj.__getattribute__(var)]

    def _set(obj, val):
        obj.__setattr__(var, [pack_B(v) for v in val])

    return property(_get, _set)
def get_property_to_convert_4_bytes_to_list(var):
    """Create a get/set-property converting a 32-bit int field to a list of 8 nibbles (MSB first)."""
    def _get(obj):
        raw = obj.__getattribute__(var)
        return [(raw >> shift) & 0xF for shift in range(28, -1, -4)]

    def _set(obj, val):
        raw = sum(item << shift for item, shift in zip(val, range(28, -1, -4)))
        obj.__setattr__(var, raw)

    return property(_get, _set)
def get_property_to_convert_1_byte_to_list(var):
    """Create a get/set-property converting a 1-byte field to its 8 bits as a list (MSB first)."""
    def _get(obj):
        raw = obj.__getattribute__(var)
        return [(raw >> shift) & 1 for shift in range(7, -1, -1)]

    def _set(obj, val):
        obj.__setattr__(var, int("".join(str(bit) for bit in val), 2))

    return property(_get, _set)
class LLDP(pypacker.Packet):
    """Top-level LLDP packet: a trigger list of TLVs (IEEE 802.1AB)."""
    __hdr__ = (
        ("tlvlist", None, triggerlist.TriggerList),
    )

    def _dissect(self, buf):
        # lazily parse the raw buffer into TLV objects on first access
        self._init_triggerlist("tlvlist", buf, self.__parse_tlv)
        return len(buf)

    @staticmethod
    def __parse_tlv(buf):
        """Parse LLDP TLVs and return them as list."""
        # count_and_dissect_tlvs is presumably defined elsewhere in this
        # module (not visible here); it appears to return a pair whose second
        # element is a list of (TLV class, raw bytes) tuples — TODO confirm
        _, clz_bts_list = count_and_dissect_tlvs(buf)
        tlvlist = []
        for clz, bts in clz_bts_list:
            tlvlist.append(clz(bts))
        return tlvlist
class LLDPGeneric(pypacker.Packet):
    """Generic LLDP TLV: 16-bit type/length word followed by the raw value."""
    __hdr__ = (
        ("type_len", "H", 0, FIELD_FLAG_AUTOUPDATE),
        ("value", None, b""),
    )

    # bitfield views onto the packed type_len word (7-bit type / 9-bit length)
    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()

    def _dissect(self, buf):
        # everything after the 2-byte header is the TLV value
        self.value = buf[TLV_HEADER_LEN:]
        return len(buf)

    def bin(self, update_auto_fields=True):
        # keep tlv_len in sync with the actual payload length on serialization
        if update_auto_fields and self._changed() and self.type_len_au_active:
            self.tlv_len = len(self) - TLV_HEADER_LEN
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class LLDPDUEnd(pypacker.Packet):
    """End Of LLDPDU TLV: two zero bytes terminating the TLV list."""
    # NOTE(review): unlike the sibling TLV classes this one declares raw
    # tlv_type/tlv_len byte fields instead of a combined "H" type_len plus
    # properties -- presumably intentional since both values are always 0.
    __hdr__ = (
        ("tlv_type", "B", 0),
        ("tlv_len", "B", 0),
    )
class LLDPChassisId(pypacker.Packet):
    """Chassis ID TLV: subtype byte followed by a variable-length identifier."""
    __hdr__ = (
        ("type_len", "H", 512, FIELD_FLAG_AUTOUPDATE),  # type(1)
        ("subtype", "B", 0),
        ("value", None, b""),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()

    def __get_value(self):
        # Decode via the registered codec for this subtype, if any.
        if self.subtype in GET_CHASSIS_TLV_SUBTYPES:
            return GET_CHASSIS_TLV_SUBTYPES[self.subtype](self.value)
        return self.value

    def __set_value(self, value):
        if self.subtype in SET_CHASSIS_TLV_SUBTYPES:
            value = SET_CHASSIS_TLV_SUBTYPES[self.subtype](value)
        self.value = value

    # Convenience accessor exposing the decoded address form.
    value_s = property(__get_value, __set_value)

    def _dissect(self, buf):
        self.value = buf[TLV_HEADER_LEN + SUBTYPE_LEN_BYTE:]
        return len(buf)

    def bin(self, update_auto_fields=True):
        if update_auto_fields and self._changed():
            if self.type_len_au_active:
                self.tlv_len = len(self) - TLV_HEADER_LEN
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class LLDPPortId(pypacker.Packet):
    """Port ID TLV: subtype byte followed by a variable-length identifier."""
    __hdr__ = (
        ("type_len", "H", 1024, FIELD_FLAG_AUTOUPDATE),  # type(2)
        ("subtype", "B", 0),
        ("value", None, b""),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()

    def __get_value(self):
        # Decode via the registered codec for this subtype, if any.
        if self.subtype in GET_PORT_TLV_SUBTYPES:
            return GET_PORT_TLV_SUBTYPES[self.subtype](self.value)
        return self.value

    def __set_value(self, value):
        if self.subtype in SET_PORT_TLV_SUBTYPES:
            value = SET_PORT_TLV_SUBTYPES[self.subtype](value)
        self.value = value

    # Convenience accessor exposing the decoded address form.
    value_s = property(__get_value, __set_value)

    def _dissect(self, buf):
        self.value = buf[TLV_HEADER_LEN + SUBTYPE_LEN_BYTE:]
        return len(buf)

    def bin(self, update_auto_fields=True):
        if update_auto_fields and self._changed():
            if self.type_len_au_active:
                self.tlv_len = len(self) - TLV_HEADER_LEN
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class LLDPTTL(pypacker.Packet):
    """Time To Live TLV: fixed length, so no length auto-update is needed."""
    __hdr__ = (
        ("type_len", "H", 1538),  # type(3), length(2)
        ("seconds", "H", 0),  # TTL in seconds
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
class LLDPPortDescription(pypacker.Packet):
    """Port Description TLV: variable-length text payload."""
    __hdr__ = (
        ("type_len", "H", 2048, FIELD_FLAG_AUTOUPDATE),  # type(4)
        ("value", None, b""),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()

    def _dissect(self, buf):
        # Everything after the 2-byte header is the description.
        self.value = buf[TLV_HEADER_LEN:]
        return len(buf)

    def bin(self, update_auto_fields=True):
        if update_auto_fields and self._changed():
            if self.type_len_au_active:
                self.tlv_len = len(self) - TLV_HEADER_LEN
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class LLDPSystemName(pypacker.Packet):
    """System Name TLV: variable-length text payload."""
    __hdr__ = (
        ("type_len", "H", 2560, FIELD_FLAG_AUTOUPDATE),  # type(5)
        ("value", None, b""),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()

    def _dissect(self, buf):
        # Everything after the 2-byte header is the name.
        self.value = buf[TLV_HEADER_LEN:]
        return len(buf)

    def bin(self, update_auto_fields=True):
        if update_auto_fields and self._changed():
            if self.type_len_au_active:
                self.tlv_len = len(self) - TLV_HEADER_LEN
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class LLDPSystemDescription(pypacker.Packet):
    """System Description TLV: variable-length text payload."""
    __hdr__ = (
        ("type_len", "H", 3072, FIELD_FLAG_AUTOUPDATE),  # type(6)
        ("value", None, b""),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()

    def _dissect(self, buf):
        # Everything after the 2-byte header is the description.
        self.value = buf[TLV_HEADER_LEN:]
        return len(buf)

    def bin(self, update_auto_fields=True):
        if update_auto_fields and self._changed():
            if self.type_len_au_active:
                self.tlv_len = len(self) - TLV_HEADER_LEN
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class LLDPSystemCapabilities(pypacker.Packet):
    """System Capabilities TLV: fixed length, two capability bitmaps."""
    __hdr__ = (
        ("type_len", "H", 3588),  # type(7), length(4)
        ("capabilities", "H", 0),  # supported capabilities bitmap
        ("enabled", "H", 0),  # currently enabled subset
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
class LLDPManagementAddress(pypacker.Packet):
    """Management Address TLV (type 8) with variable-length address and OID."""
    __hdr__ = (
        ("type_len", "H", 4096, FIELD_FLAG_AUTOUPDATE),
        # contains the length of the addrsubtype(1 byte) + addrval(1-31 bytes) fields
        ("addrlen", "B", 2, FIELD_FLAG_AUTOUPDATE),
        ("addrsubtype", "B", 0),
        ("addrval", None, b"\x00"),
        # contains one of subtypes {1: "Unknown", 2: "ifIndex", 3: "System Port Number"}
        ("ifsubtype", "B", 1),
        ("ifnumber", "I", 0),
        ("oidlen", "B", 0, FIELD_FLAG_AUTOUPDATE),
        ("oid", None, b""),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()

    def __get_addrval(self):
        # Decode the raw address when a codec for this subtype is registered.
        if self.addrsubtype not in GET_ADDRESS_SUBTYPE.keys():
            return self.addrval
        return GET_ADDRESS_SUBTYPE.get(self.addrsubtype)(self.addrval)

    def __set_addrval(self, value):
        if self.addrsubtype not in SET_ADDRESS_SUBTYPE.keys():
            self.addrval = value
        else:
            self.addrval = SET_ADDRESS_SUBTYPE.get(self.addrsubtype)(value)

    # Convenience accessor returning/accepting the decoded address form.
    addrval_s = property(__get_addrval, __set_addrval)

    def _dissect(self, buf):
        # Layout: type_len(2) | addrlen(1) | addrsubtype(1) | addrval(addrlen-1)
        #         | ifsubtype(1) | ifnumber(4) | oidlen(1) | oid(oidlen)
        addrlen = unpack_B(buf[TLV_HEADER_LEN: TLV_HEADER_LEN + 1])[0]
        addrval_position = TLV_HEADER_LEN + 2
        # addrlen counts the subtype byte, so the address itself is addrlen-1 bytes.
        self.addrval = buf[addrval_position: addrval_position + addrlen - 1]
        # Skip ifsubtype(1) + ifnumber(4) to reach the OID length byte.
        oidlen_postion = addrval_position + addrlen + 4
        oidlen = unpack_B(buf[oidlen_postion: oidlen_postion + 1])[0]
        if oidlen:
            self.oid = buf[oidlen_postion + 1: oidlen_postion + 1 + oidlen]
        return len(buf)

    def bin(self, update_auto_fields=True):
        # Refresh every auto-updated length field before serializing.
        if update_auto_fields and self._changed():
            if self.type_len_au_active:
                self.tlv_len = len(self) - TLV_HEADER_LEN
            if self.addrlen_au_active:
                self.addrlen = len(self.addrval) + SUBTYPE_LEN_BYTE
            if self.oidlen_au_active:
                self.oidlen = len(self.oid)
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class LLDPOrgSpecGeneric(pypacker.Packet):
    """Generic organizationally specific TLV (type 127)."""
    __hdr__ = (
        ("type_len", "H", 65024, FIELD_FLAG_AUTOUPDATE),  # type(127)
        ("oui_subtype", "I", 0),
        ("value", None, b""),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
    oui = get_property_tlv_oui()
    subtype = get_property_tlv_subtype()

    def _dissect(self, buf):
        # Skip the TLV header plus the OUI/subtype word.
        self.value = buf[TLV_HEADER_LEN + ORG_SPEC_HEADER_LEN:]
        return len(buf)

    def bin(self, update_auto_fields=True):
        if update_auto_fields and self._changed():
            if self.type_len_au_active:
                self.tlv_len = len(self) - TLV_HEADER_LEN
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class LLDPDot1PortVlanId(pypacker.Packet):
    """IEEE 802.1 Port VLAN ID TLV (fixed length)."""
    __hdr__ = (
        ("type_len", "H", 65030),  # type(127), length(6)
        ("oui_subtype", "I", 8438273),  # OUI(00-80-C2), subtype(0x01)
        ("vlan", "H", 1),  # default VLAN 1
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
    oui = get_property_tlv_oui()
    subtype = get_property_tlv_subtype()
class DCBXCongestionNotification(pypacker.Packet):
    """DCBX Congestion Notification TLV (fixed length)."""
    __hdr__ = (
        ("type_len", "H", 65030),  # type(127), length(6)
        ("oui_subtype", "I", 8438280),  # OUI(00-80-C2), subtype(0x08)
        ("cnpv", "B", 0),  # bit flags; see cnpv_list for the per-bit view
        ("ready", "B", 0),  # bit flags; see ready_list for the per-bit view
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
    oui = get_property_tlv_oui()
    subtype = get_property_tlv_subtype()
    # Bit-list views (MSB first) over the single-byte fields.
    cnpv_list = get_property_to_convert_1_byte_to_list("cnpv")
    ready_list = get_property_to_convert_1_byte_to_list("ready")
class DCBXConfiguration(pypacker.Packet):
    """DCBX ETS Configuration TLV."""
    __hdr__ = (
        ("type_len", "H", 65049),  # type(127), length(25)
        ("oui_subtype", "I", 8438281),  # OUI(00-80-C2), subtype(0x09)
        ("w_cbs_maxtc", "B", 0),  # Field contains Willing-1bit, CBS-1bit, Reserved-3bit, Max TCs-3bit
        ("priority", "I", 0),  # Field represents list of 8 items where one item = 4 bits
        ("tcbandwith", None, triggerlist.TriggerList),
        ("tsaassigment", None, triggerlist.TriggerList),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
    oui = get_property_tlv_oui()
    subtype = get_property_tlv_subtype()
    # List views over the raw byte/nibble fields.
    tcbandwith_list = get_property_to_convert_8_bytes_to_list("tcbandwith")
    tsaassigment_list = get_property_to_convert_8_bytes_to_list("tsaassigment")
    priority_list = get_property_to_convert_4_bytes_to_list("priority")

    def __get_w(self):
        # Willing flag: bit 7 of w_cbs_maxtc.
        return (self.w_cbs_maxtc & 0x80) >> 7

    def __set_w(self, value):
        self.w_cbs_maxtc = (self.w_cbs_maxtc & ~0x80) | (value << 7)

    willing = property(__get_w, __set_w)

    def __get_cbs(self):
        # CBS flag: bit 6 of w_cbs_maxtc.
        return (self.w_cbs_maxtc & 0x40) >> 6

    def __set_cbs(self, value):
        self.w_cbs_maxtc = (self.w_cbs_maxtc & ~0x40) | (value << 6)

    cbs = property(__get_cbs, __set_cbs)

    def __get_maxtcs(self):
        # Max TCs: low 3 bits of w_cbs_maxtc.
        return self.w_cbs_maxtc & 0x07

    def __set_maxtcs(self, value):
        self.w_cbs_maxtc = self.w_cbs_maxtc & 0xF8 | value

    maxtcs = property(__get_maxtcs, __set_maxtcs)

    def _dissect(self, buf):
        # tcbandwith starts at offset 11: type_len(2) + oui_subtype(4)
        # + w_cbs_maxtc(1) + priority(4); tsaassigment follows at 19.
        for i in range(11, 19):
            self.tcbandwith.append(buf[i:i + 1])
        for i in range(19, 27):
            self.tsaassigment.append(buf[i:i + 1])
        # NOTE(review): sibling TLVs' _dissect return len(buf); confirm
        # len(self) is equivalent here for this fixed-size TLV.
        return len(self)
class DCBXRecommendation(pypacker.Packet):
    """DCBX ETS Recommendation TLV."""
    __hdr__ = (
        ("type_len", "H", 65049),  # type(127), length(25)
        # 8438282 == 0x80C20A, i.e. subtype 0x0a (the previous comment's
        # "0x10" was wrong; matches the LLDP_ORG_SPEC_TLV_CLS key).
        ("oui_subtype", "I", 8438282),  # OUI(00-80-C2), subtype(0x0a)
        ("reserved", "B", 0),
        ("priority", "I", 0),  # Field represents list of 8 items where one item = 4 bits
        ("tcbandwith", None, triggerlist.TriggerList),
        ("tsaassigment", None, triggerlist.TriggerList),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
    oui = get_property_tlv_oui()
    subtype = get_property_tlv_subtype()
    # List views over the raw byte/nibble fields.
    tcbandwith_list = get_property_to_convert_8_bytes_to_list("tcbandwith")
    tsaassigment_list = get_property_to_convert_8_bytes_to_list("tsaassigment")
    priority_list = get_property_to_convert_4_bytes_to_list("priority")

    def _dissect(self, buf):
        # start from TLV_HEADER_LEN + ORG_SPEC_HEADER_LEN +
        # 1 byte(reserved) + 4 bytes(priority)
        for i in range(11, 19):
            self.tcbandwith.append(buf[i:i + 1])
        for i in range(19, 27):
            self.tsaassigment.append(buf[i:i + 1])
        # NOTE(review): sibling TLVs' _dissect return len(buf); confirm
        # len(self) is equivalent here for this fixed-size TLV.
        return len(self)
class DCBXPriorityBasedFlowControlConfiguration(pypacker.Packet):
    """DCBX Priority-based Flow Control Configuration TLV."""
    __hdr__ = (
        ("type_len", "H", 65030),  # type(127), length(6)
        # 8438283 == 0x80C20B, i.e. subtype 0x0b (the previous comment's
        # "0x11" was wrong; matches the LLDP_ORG_SPEC_TLV_CLS key).
        ("oui_subtype", "I", 8438283),  # OUI(00-80-C2), subtype(0x0b)
        ("w_mbc_pfc", "B", 0),  # Field contains Willing-1bit, MBC-1bit, Reserved-2bit, PFC cap TCs-4bit
        ("pfcenable", "B", 0),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
    oui = get_property_tlv_oui()
    subtype = get_property_tlv_subtype()
    # Bit-list view (MSB first) over the pfcenable byte.
    pfcenable_list = get_property_to_convert_1_byte_to_list("pfcenable")

    def __get_w(self):
        # Willing flag: bit 7 of w_mbc_pfc.
        return (self.w_mbc_pfc & 0x80) >> 7

    def __set_w(self, value):
        self.w_mbc_pfc = (self.w_mbc_pfc & ~0x80) | (value << 7)

    willing = property(__get_w, __set_w)

    def __get_mbc(self):
        # MBC flag: bit 6 of w_mbc_pfc.
        return (self.w_mbc_pfc & 0x40) >> 6

    def __set_mbc(self, value):
        self.w_mbc_pfc = (self.w_mbc_pfc & ~0x40) | (value << 6)

    mbc = property(__get_mbc, __set_mbc)

    def __get_pfccap(self):
        # PFC capability: low 4 bits of w_mbc_pfc.
        return self.w_mbc_pfc & 0x0F

    def __set_pfccap(self, value):
        self.w_mbc_pfc = self.w_mbc_pfc & 0xF0 | value

    pfccap = property(__get_pfccap, __set_pfccap)
class DCBXApplicationPriority(pypacker.Packet):
    """DCBX Application Priority TLV with a variable-length entry table."""
    __hdr__ = (
        ("type_len", "H", 65024, FIELD_FLAG_AUTOUPDATE),  # type(127)
        # 8438284 == 0x80C20C, i.e. subtype 0x0c (the previous comment's
        # "0x12" was wrong; matches the LLDP_ORG_SPEC_TLV_CLS key).
        ("oui_subtype", "I", 8438284),  # OUI(00-80-C2), subtype(0x0c)
        ("reserved", "B", 0),
        ("apppriotable", None, triggerlist.TriggerList),
    )

    tlv_type = get_property_tlv_type()
    tlv_len = get_property_tlv_len()
    oui = get_property_tlv_oui()
    subtype = get_property_tlv_subtype()

    def _dissect(self, buf):
        # Entries start at offset 7: type_len(2) + oui_subtype(4) + reserved(1);
        # each table entry is 3 bytes.
        for i in range(7, len(buf), 3):
            self.apppriotable.append(DCBXApplicationPriorityTable(buf[i:i + 3]))
        return len(self)

    def bin(self, update_auto_fields=True):
        # Keep the TLV length in sync with the variable-size table.
        if update_auto_fields and self._changed() and self.type_len_au_active:
            self.tlv_len = len(self) - TLV_HEADER_LEN
        return pypacker.Packet.bin(self, update_auto_fields=update_auto_fields)
class DCBXApplicationPriorityTable(pypacker.Packet):
    """Single 3-byte entry of the DCBX Application Priority table."""
    __hdr__ = (
        ("priority_sel", "B", 0),  # Field contains Priority-3bits, Reserved-2bits, Sel-3bits)
        ("protocolid", "H", 0),
    )

    def __get_prio(self):
        # Priority: top 3 bits of priority_sel.
        return (self.priority_sel & 0xE0) >> 5

    def __set_prio(self, value):
        self.priority_sel = (self.priority_sel & ~0xE0) | (value << 5)

    priority = property(__get_prio, __set_prio)

    def __get_sel(self):
        # Sel: low 3 bits of priority_sel.
        return self.priority_sel & 0x07

    def __set_sel(self, value):
        self.priority_sel = (self.priority_sel & 0xF8) | value

    sel = property(__get_sel, __set_sel)
# Maps the TLV type value to its parser class.
LLDP_TLV_CLS = {
    0: LLDPDUEnd,
    1: LLDPChassisId,
    2: LLDPPortId,
    3: LLDPTTL,
    4: LLDPPortDescription,
    5: LLDPSystemName,
    6: LLDPSystemDescription,
    7: LLDPSystemCapabilities,
    8: LLDPManagementAddress
}
# Organizationally specific TLVs (type 127), keyed by (OUI, subtype);
# all entries use the IEEE 802.1 OUI 00-80-C2.
LLDP_ORG_SPEC_TLV_CLS = {
    (0x0080c2, 0x01): LLDPDot1PortVlanId,
    (0x0080c2, 0x08): DCBXCongestionNotification,
    (0x0080c2, 0x09): DCBXConfiguration,
    (0x0080c2, 0x0a): DCBXRecommendation,
    (0x0080c2, 0x0b): DCBXPriorityBasedFlowControlConfiguration,
    (0x0080c2, 0x0c): DCBXApplicationPriority,
}
def count_and_dissect_tlvs(buf):
"""
Count and dissect TLVs. Return length of LLDP layer
buf -- buffer to dissect
return -- parsed_bytes_total, [(clz, bts), ...]
"""
shift = 0
tlv_type, tlv_len = 1, 1
clz_bts_list = | |
"id": "5<PASSWORD>",
"username": "Token",
}
response = self.client.post(
"/api/liveregistrations/",
{"email": "<EMAIL>", "should_send_reminders": True},
content_type="application/json",
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(response.status_code, 201)
created_liveregistration = LiveRegistration.objects.last()
self.assertEqual(
json.loads(response.content),
{
"consumer_site": str(video.playlist.consumer_site.id),
"email": "<EMAIL>",
"id": str(created_liveregistration.id),
"is_registered": True,
"lti_user_id": "56255f3807599c377bf0e5bf072359fd",
"lti_id": str(other_playlist.lti_id),
"should_send_reminders": True,
"username": "Token",
"video": str(video.id),
},
)
def test_api_liveregistration_create_public_token_record_email_other_registration_lti(
    self,
):
    """Same email can be used for the same video with a public token.

    An existing LTI registration does not block a public-token registration
    using the same email on the same video; a second row is created with
    consumer_site=None.
    """
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    # created by LTI
    LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        lti_id=str(video.playlist.lti_id),
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        video=video,
    )
    self.assertEqual(LiveRegistration.objects.count(), 1)
    self.assertTrue(video.is_scheduled)
    # public token: resource id only, no context_id/consumer_site/user
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 201)
    # a second, public registration now exists alongside the LTI one
    self.assertEqual(LiveRegistration.objects.count(), 2)
    created_liveregistration = LiveRegistration.objects.get(
        email="<EMAIL>",
        consumer_site=None,
        video=video,
    )
    self.assertEqual(
        json.loads(response.content),
        {
            "consumer_site": None,
            "email": "<EMAIL>",
            "id": str(created_liveregistration.id),
            "is_registered": True,
            "lti_user_id": None,
            "lti_id": None,
            "should_send_reminders": True,
            "username": None,
            "video": str(video.id),
        },
    )
def test_api_liveregistration_create_lti_token_record_email_other_consumer_site(
    self,
):
    """New registration for a consumer_site different.

    The same email/lti_user_id may register again when the token carries
    another consumer_site; a second row is created for it.
    """
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    other_consumer_site = ConsumerSiteFactory()
    # created by LTI
    LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        lti_id="Maths",
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        video=video,
    )
    self.assertEqual(LiveRegistration.objects.count(), 1)
    self.assertTrue(video.is_scheduled)
    # token carries a DIFFERENT consumer_site than the existing registration
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["consumer_site"] = str(other_consumer_site.id)
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    jwt_token.payload["user"] = {
        "email": None,
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 201)
    self.assertEqual(LiveRegistration.objects.count(), 2)
    created_liveregistration = LiveRegistration.objects.get(
        email="<EMAIL>",
        consumer_site=other_consumer_site,
        video=video,
    )
    self.assertEqual(
        json.loads(response.content),
        {
            "consumer_site": str(other_consumer_site.id),
            "email": "<EMAIL>",
            "id": str(created_liveregistration.id),
            "is_registered": True,
            "lti_user_id": "56255f3807599c377bf0e5bf072359fd",
            "lti_id": "Maths",
            "should_send_reminders": True,
            "username": "Token",
            "video": str(video.id),
        },
    )
def test_api_liveregistration_create_lti_token_record_email_other_context_id(
    self,
):
    """New registration for a context_id different.

    Same consumer_site and lti_user_id but another course (context_id)
    creates a second registration row.
    """
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    # created by LTI
    LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        lti_id="Maths",
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        video=video,
    )
    self.assertEqual(LiveRegistration.objects.count(), 1)
    self.assertTrue(video.is_scheduled)
    # token carries a DIFFERENT context_id ("Maths2") than the registration
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = "Maths2"
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    jwt_token.payload["user"] = {
        "email": None,
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 201)
    self.assertEqual(LiveRegistration.objects.count(), 2)
    created_liveregistration = LiveRegistration.objects.get(
        email="<EMAIL>",
        lti_id="Maths2",
        video=video,
    )
    self.assertEqual(
        json.loads(response.content),
        {
            "consumer_site": str(video.playlist.consumer_site.id),
            "email": "<EMAIL>",
            "id": str(created_liveregistration.id),
            "is_registered": True,
            "lti_user_id": "56255f3807599c377bf0e5bf072359fd",
            "lti_id": "Maths2",
            "should_send_reminders": True,
            "username": "Token",
            "video": str(video.id),
        },
    )
def test_api_liveregistration_create_lti_token_record_email_lti_user_id(
    self,
):
    """New registration for a different lti_user_id.

    Same course and consumer_site, but the token identifies another LTI
    user ("NEW" vs "OLD"), so a second registration row is created.
    """
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    # created by LTI
    LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        lti_id="Maths",
        lti_user_id="OLD",
        video=video,
    )
    self.assertEqual(LiveRegistration.objects.count(), 1)
    self.assertTrue(video.is_scheduled)
    # token identifies a DIFFERENT LTI user on the same course
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    jwt_token.payload["user"] = {
        "email": None,
        "id": "NEW",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 201)
    self.assertEqual(LiveRegistration.objects.count(), 2)
    created_liveregistration = LiveRegistration.objects.get(
        email="<EMAIL>",
        lti_user_id="NEW",
        video=video,
    )
    self.assertEqual(
        json.loads(response.content),
        {
            "consumer_site": str(video.playlist.consumer_site.id),
            "email": "<EMAIL>",
            "id": str(created_liveregistration.id),
            "is_registered": True,
            "lti_user_id": "NEW",
            "lti_id": "Maths",
            "should_send_reminders": True,
            "username": "Token",
            "video": str(video.id),
        },
    )
def test_api_liveregistration_create_token_lti_email_restricted_token(self):
    """LTI token can only register for the email in the token.

    Posting a different email than the one carried by the token's user
    payload is rejected with a 400.
    """
    video = VideoFactory(
        live_state=IDLE,
        live_type=RAW,
        starting_at=timezone.now() + timedelta(days=100),
    )
    self.assertTrue(video.is_scheduled)
    # token carries a fixed user email, restricting registration to it
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["roles"] = [
        random.choice(["administrator", "instructor", "student", ""])
    ]
    jwt_token.payload["user"] = {
        "email": "<EMAIL>",
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        json.loads(response.content),
        {
            "email": [
                "You are not authorized to register with a specific email "
                "<EMAIL>. You can only use the email from your "
                "authentication."
            ]
        },
    )
def test_api_liveregistration_create_public_token_cant_register_when_not_scheduled(
    self,
):
    """Can't register if video is not scheduled."""
    # A plain video (no live_state/starting_at) is not scheduled.
    video = VideoFactory()
    self.assertFalse(video.is_scheduled)
    token = AccessToken()
    token.payload["resource_id"] = str(video.id)
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {token}",
    )
    self.assertEqual(response.status_code, 400)
    expected_error = {
        "video": [f"video with id {str(video.id)} doesn't accept registration."]
    }
    self.assertEqual(json.loads(response.content), expected_error)
def test_api_liveregistration_create_lti_token_cant_register_when_not_scheduled(
    self,
):
    """LTI token can't register if video is not scheduled."""
    # A plain video (no live_state/starting_at) is not scheduled.
    video = VideoFactory()
    self.assertFalse(video.is_scheduled)
    token = AccessToken()
    token.payload["resource_id"] = str(video.id)
    token.payload["context_id"] = str(video.playlist.lti_id)
    token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    token.payload["user"] = {
        "email": None,
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {token}",
    )
    self.assertEqual(response.status_code, 400)
    expected_error = {
        "video": [f"video with id {str(video.id)} doesn't accept registration."]
    }
    self.assertEqual(json.loads(response.content), expected_error)
def test_api_liveregistration_create_cant_register_same_email_same_consumer(
    self,
):
    """Key email/consumer_site/lti_id/lti_user_id/video must be unique.

    Re-posting the exact same identity tuple is rejected with a 400.
    """
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    # registration with consumer_site
    LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        lti_id=str(video.playlist.lti_id),
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        video=video,
    )
    self.assertTrue(video.is_scheduled)
    # token with same context_id and same email
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = str(video.playlist.lti_id)
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["user"] = {
        "email": "<EMAIL>",
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        json.loads(response.content),
        {
            "lti_user_id": [
                "This identified user is already registered "
                "for this video and consumer site and course."
            ]
        },
    )
def test_api_liveregistration_create_cant_register_same_email_same_consumer_with_deleted(
    self,
):
    """Key email/consumer_site/lti_id/lti_user_id/video must be unique and can be used after
    being deleted."""
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    self.assertTrue(video.is_scheduled)
    # registration with consumer_site
    liveregistration = LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        lti_id=str(video.playlist.lti_id),
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        video=video,
    )
    # soft-delete frees the uniqueness key for re-use
    liveregistration.delete()
    # token with same context_id and same email
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = str(video.playlist.lti_id)
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["user"] = {
        "email": "<EMAIL>",
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 201)
    self.assertEqual(LiveRegistration.objects.count(), 1)
    liveregistration = LiveRegistration.objects.get(
        email="<EMAIL>", deleted__isnull=True
    )
    self.assertEqual(
        json.loads(response.content),
        {
            "consumer_site": str(video.playlist.consumer_site.id),
            "email": "<EMAIL>",
            "id": str(liveregistration.id),
            "is_registered": True,
            "lti_user_id": "56255f3807599c377bf0e5bf072359fd",
            "lti_id": str(video.playlist.lti_id),
            "should_send_reminders": True,
            "username": "Token",
            "video": str(video.id),
        },
    )
def test_api_liveregistration_create_cant_register_same_email_same_consumer_deleted(
    self,
):
    """Key email/consumer_site/lti_id/lti_user_id/video must be unique but can be
    reused if deleted is set."""
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    self.assertTrue(video.is_scheduled)
    # registration with consumer_site
    liveregister = LiveRegistrationFactory(
        email="<EMAIL>",
        consumer_site=video.playlist.consumer_site,
        lti_id=str(video.playlist.lti_id),
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        video=video,
    )
    # delete it
    liveregister.delete()
    # default manager no longer sees the deleted row
    self.assertEqual(LiveRegistration.objects.count(), 0)
    # token with same context_id and same email
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = str(video.playlist.lti_id)
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    jwt_token.payload["user"] = {
        "email": "<EMAIL>",
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    self.assertEqual(response.status_code, 201)
    self.assertEqual(LiveRegistration.objects.count(), 1)
    liveregistration = LiveRegistration.objects.get(
        email="<EMAIL>", deleted__isnull=True
    )
    self.assertEqual(
        json.loads(response.content),
        {
            "consumer_site": str(video.playlist.consumer_site.id),
            "email": "<EMAIL>",
            "lti_user_id": "56255f3807599c377bf0e5bf072359fd",
            "lti_id": str(video.playlist.lti_id),
            "id": str(liveregistration.id),
            "is_registered": True,
            "should_send_reminders": True,
            "username": "Token",
            "video": str(video.id),
        },
    )
def test_api_liveregistration_create_cant_register_same_email_same_consumer_none(
    self,
):
    """Duo email/video must be unique when consumer_site is not defined."""
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    # registration with no consumer_site
    LiveRegistrationFactory(email="<EMAIL>", video=video)
    self.assertTrue(video.is_scheduled)
    # token with no context_id leading to an undefined consumer_site
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    # registration for this video with this email when consumer_site is not defined
    # already exists
    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        json.loads(response.content),
        {
            "email": [
                "<EMAIL> is already registered for "
                "this video, consumer site and course."
            ]
        },
    )
def test_api_liveregistration_create_same_lti_info_diff_email_consumer(
    self,
):
    """Unicity of video/consumer_site/lti_id/lti_user_id.
    Combination of video/consumer_site/lti_id/lti_user_id can't be used for different
    emails."""
    starting_at = timezone.now() + timedelta(days=5)
    video = VideoFactory(live_state=IDLE, live_type=RAW, starting_at=starting_at)
    LiveRegistrationFactory(
        email="<EMAIL>",
        video=video,
        lti_id="Maths",
        lti_user_id="56255f3807599c377bf0e5bf072359fd",
        consumer_site=video.playlist.consumer_site,
    )
    self.assertTrue(video.is_scheduled)
    jwt_token = AccessToken()
    jwt_token.payload["resource_id"] = str(video.id)
    jwt_token.payload["context_id"] = "Maths"
    jwt_token.payload["consumer_site"] = str(video.playlist.consumer_site.id)
    # token with no email so user can register to any email
    jwt_token.payload["user"] = {
        "email": None,
        "id": "56255f3807599c377bf0e5bf072359fd",
        "username": "Token",
    }
    response = self.client.post(
        "/api/liveregistrations/",
        {"email": "<EMAIL>", "should_send_reminders": True},
        content_type="application/json",
        HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
    )
    # can't register because key video/context_id/lti_user_id already exists
    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        json.loads(response.content),
        {
            "lti_user_id": [
                "This identified user is already registered "
                "for this video and consumer site and course."
            ]
        },
    )
def test_api_liveregistration_create_public_token_same_email_different_video(
self,
):
"""Same email can be used for two different videos with public token."""
video = VideoFactory(
live_state=IDLE,
live_type=RAW,
starting_at=timezone.now() + timedelta(days=5),
)
video2 = VideoFactory(
live_state=IDLE,
live_type=RAW,
starting_at=timezone.now() + timedelta(hours=1),
)
# registration with no consumer_site
LiveRegistrationFactory(email="<EMAIL>", video=video)
# token with no context_id leading to no consumer_site
| |
print("Walking the line: ",p1,p2)
hits = [ ['v',v1] ]
# do the search:
# Note that we really need a better equality test here
# hits[-1][1] != v2 doesn't work beac
def obj_eq(a,b):
return type(a)==type(b) and a==b
while not obj_eq(hits[-1][1], v2):
# if we just came from a vertex, choose a new face in the given direction
if hits[-1][0] == 'v':
if self.verbose > 1:
print("Last hit was the vertex at %s"%(hits[-1][1].point()))
# like face_in_direction, but also check for possibility that
# an edge is coincident with the query line.
next_item = self.next_from_vertex( hits[-1][1],(p1,p2) )
if self.verbose > 1:
print("Moved from vertex to ",next_item)
if next_item[0] == 'v':
# Find the edge connecting these two:
for e in self.DT.incident_edges( next_item[1] ):
f,v_opp = e
if f.vertex( (v_opp+1)%3 ) == hits[-1][1] or \
f.vertex( (v_opp+2)%3 ) == hits[-1][1]:
hits.append( ['e', (f,v_opp)] )
break
elif hits[-1][0] == 'f':
# either we cross over an edge into another face, or we hit
# one of the vertices.
next_item = self.next_from_face( hits[-1][1], (p1,p2) )
# in case the next item is also a face, go ahead and insert
# the intervening edge
if next_item[0]=='f':
middle_edge = None
for v_opp in range(3):
if self.verbose > 1:
print("Comparing %s to %s looking for the intervening edge"%(hits[-1][1].neighbor(v_opp),
next_item[1]))
if hits[-1][1].neighbor(v_opp) == next_item[1]:
middle_edge = ['e', (hits[-1][1],v_opp)]
break
if middle_edge is not None:
hits.append( middle_edge )
else:
raise Exception("Two faces in a row, but couldn't find the edge between them")
elif hits[-1][0] == 'e':
# This one is easy - just have to check which end of the edge is in the
# desired direction
next_item = self.next_from_edge( hits[-1][1], (p1,p2) )
hits.append( next_item )
if self.verbose > 1:
print("Got hits: ",hits)
# but ignore the first and last, since they are the starting/ending points
hits = hits[1:-1]
# and since some of those CGAL elements are going to disappear, translate everything
# into node references
for i in range(len(hits)):
if hits[i][0] == 'v':
hits[i][1] = [ self.vh_info[ hits[i][1] ] ]
elif hits[i][0] == 'e':
f,v_opp = hits[i][1]
hits[i][1] = [ self.vh_info[ f.vertex( (v_opp+1)%3 ) ], self.vh_info[ f.vertex( (v_opp+2)%3 ) ] ]
elif hits[i][0] == 'f':
f = hits[i][1]
hits[i][1] = [ self.vh_info[ f.vertex(0) ],
self.vh_info[ f.vertex(1) ],
f.vertex(2) ]
# have to go back through, and where successive items are faces, we must
# have crossed cleanly through an edge, and that should be inserted, too
return hits
def check_line_is_clear_new(self,n1=None,n2=None,v1=None,v2=None,p1=None,p2=None):
""" returns a list of vertex tuple for constrained segments that intersect
the given line.
in the case of vertices that are intersected, just a tuple of length 1
(and assumes that all vertices qualify as constrained)
"""
# if points were given, create some temporary vertices
if p1 is not None:
cp1 = Point_2( p1[0], p1[1] )
v1 = self.DT.insert(cp1) ; self.vh_info[v1] = 'tmp'
if p2 is not None:
cp2 = Point_2( p2[0], p2[1] )
v2 = self.DT.insert(cp2) ; self.vh_info[v2] = 'tmp'
crossings = self.line_walk_edges_new(n1=n1,n2=n2,v1=v1,v2=v2)
constrained = []
for crossing_type,crossing in crossings:
if crossing_type == 'f':
continue
if crossing_type == 'v':
constrained.append( (crossing_type,crossing) )
continue
if crossing_type == 'e':
n1,n2 = crossing
if self.verbose > 1:
print("Got potential conflict with edge",n1,n2)
try:
self.find_edge( (n1,n2) )
constrained.append( ('e',(n1,n2)) )
except trigrid.NoSuchEdgeError:
pass
if p1 is not None:
del self.vh_info[v1]
self.DT.remove( v1 )
if p2 is not None:
del self.vh_info[v2]
self.DT.remove( v2 )
return constrained
def check_line_is_clear_batch(self,p1,n2):
"""
When checking multiple nodes against the same point,
may be faster to insert the point just once.
p1: [x,y]
n2: [ node, node, ... ]
Return true if segments from p1 to each node in n2 are
all clear of constrained edges
"""
pnt = Point_2( p1[0], p1[1] )
probe = self.DT.insert(pnt)
self.vh_info[probe] = 'PROBE!'
try:
for nbr in n2:
crossings = self.check_line_is_clear_new( n1=nbr, v2=probe )
if len(crossings) > 0:
return False
finally:
del self.vh_info[probe]
self.DT.remove(probe)
return True
def check_line_is_clear(self,n1=None,n2=None,v1=None,v2=None,p1=None,p2=None):
""" returns a list of vertex tuple for constrained segments that intersect
the given line
"""
# if points were given, create some temporary vertices
if p1 is not None:
cp1 = Point_2( p1[0], p1[1] )
v1 = self.DT.insert(cp1) ; self.vh_info[v1] = 'tmp'
if p2 is not None:
cp2 = Point_2( p2[0], p2[1] )
v2 = self.DT.insert(cp2) ; self.vh_info[v2] = 'tmp'
edges = self.line_walk_edges(n1=n1,n2=n2,v1=v1,v2=v2)
constrained = []
for f,i in edges:
e = (f,i)
if self.DT.is_constrained(e):
vA = f.vertex( (i+1)%3 )
vB = f.vertex( (i+2)%3 )
print("Conflict info: ",self.vh_info[vA],self.vh_info[vB])
constrained.append( (vA,vB) )
if p1 is not None:
del self.vh_info[v1]
self.DT.remove( v1 )
if p2 is not None:
del self.vh_info[v2]
self.DT.remove( v2 )
return constrained
LiveDtGrid=LiveDtCGAL
except ImportError as exc:
log.warning("CGAL unavailable.")
# Seems like the Edge class is something provided by each
# implementation, and is essentially opaque to LiveDtGridBase.
# it just needs to supply a vertices() method which gives
# the handles for the relevant vertices.
class LiveDtPython(LiveDtGridBase):
vh_dtype=object
    class Edge(object):
        # Implementation-specific edge wrapper.  LiveDtGridBase treats Edge
        # as opaque; it only requires a vertices() accessor.
        def __init__(self,g,j):
            # g: the exact_delaunay.Triangulation owning the edge
            # j: index of the edge within g.edges
            self.g=g
            self.j=j
        def vertices(self):
            # Return the two vertex handles (node ids) of edge j.
            return self.g.edges['nodes'][self.j]
    def dt_allocate(self):
        # Create the pure-python triangulation that backs this grid.
        self.DT=exact_delaunay.Triangulation()
    def dt_insert_constraint(self, a, b):
        # a, b: grid node indices; constrain the DT edge between their
        # vertex handles.
        self.DT.add_constraint(self.vh[a], self.vh[b])
def dt_remove_constraints(self, vh):
"""
remove all constraints in which node n participates
"""
# this used to pass the node, but it should be the vertex handle:
for e in self.dt_incident_constraints(vh):
a,b = self.DT.edges['nodes'][e.j]
self.DT.remove_constraint(j=e.j)
def dt_insert(self, n):
""" Given a point that is correctly in self.points, and vh that
is large enough, do the work of inserting the node and updating
the vertex handle.
"""
# pnt = Point_2( self.points[n,0], self.points[n,1] )
xy=[self.points[n,0],self.points[n,1]]
self.vh[n] = self.DT.add_node(x=xy)
self.vh_info[self.vh[n]] = n
if self.verbose > 2:
print(" dt_insert node %d"%n)
self.check()
    def dt_remove(self,n):
        # Delete node n's vertex from the DT and invalidate its handle.
        self.DT.delete_node( self.vh[n] )
        del self.vh_info[self.vh[n]]
        self.vh[n] = None # had been 0, but that's a valid index
        if self.verbose > 2:
            print("  dt_remove node %d"%n)
            # consistency check is expensive; only run when very verbose
            self.check()
    def dt_remove_constrained_edge(self,edge):
        # edge: an Edge wrapper; drop its constraint by triangulation index.
        self.DT.remove_constraint(j=edge.j)
    def dt_incident_constraints(self,vh):
        # Wrap each constrained edge touching vertex vh in an Edge object.
        return [self.Edge(g=self.DT,j=e)
                for e in self.DT.node_to_constraints(vh)]
    def dt_cell_node_iter(self):
        """ generator for going over finite cells, returning
        nodes as triples
        """
        for c in self.DT.valid_cell_iter():
            # translate DT vertex handles back to grid node indices
            yield [self.vh_info[n] for n in self.DT.cells['nodes'][c,:3]]
def delaunay_face(self, pnt):
"""
Returns node indices making up the face of the DT in which pnt lies.
Always returns 3 items, but any number of them could be None.
In the case that pnt is on an edge or vertex adjacent to a cell,
then all three of the cell's nodes are returned, though the specific
choice of cell is arbitrary. Not sure if that's the right behavior
for the current usage of delaunay_face()
"""
face,loc_type,loc_index = self.DT.locate(pnt)
if face != self.DT.INF_CELL:
nodes = [self.vh_info[n] for n in self.DT.cells['nodes'][face]]
elif loc_type == self.DT.IN_VERTEX:
nodes = [self.vh_info[loc_index],None,None]
elif loc_type == self.DT.IN_EDGE:
e_nodes=self.DT.edges['nodes'][loc_index]
nodes = [self.vh_info[e_nodes[0]],
self.vh_info[e_nodes[1]],
None]
else:
return [None,None,None]
return n
    def delaunay_neighbors(self, n):
        """ returns an ndarray of node ids that the DT connects the given node
        to.  Includes existing edges.
        """
        # some callers assume this is an ndarray
        return np.array( [self.vh_info[vh]
                          for vh in self.DT.node_to_nodes(self.vh[n]) ] )
    def plot_dt(self,clip=None):
        # Draw the triangulation's edges (magenta), optionally clipped.
        self.DT.plot_edges(clip=clip,color='m')
def shoot_ray(self,n1,vec,max_dist=1e6):
""" Shoot a ray from self.points[n] in the given direction vec
returns (e_index,pnt), the first edge that it encounters and the location
of the intersection.
max_dist: stop checking beyond this distance -- currently doesn't make it faster
but will return None,None if the point that it finds is too far away
"""
# is it just constrained edges? yes -- just an "edge" in self, but a constrained
# edge in self.DT.
nA=self.vh[n1] # map to DT node
# construct target point
probe=self.DT.nodes['x'][nA] + max_dist*utils.to_unit(vec)
for elt_type,elt_idx in self.DT.gen_intersected_elements(nA=nA,pB=probe):
if elt_type=='node':
if elt_idx==nA:
continue
else:
# means that we went exactly through some node, and
# the caller probably would just want one of the edges of that
# node that is facing nA.
X=self.DT.nodes['x'][elt_idx]
# a | |
# We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
phinr = dadi.Integration.two_pops(phinr, xx, Ts, nu1, nu2, m12=m12, m21=m21)
## calculate the spectrum.
fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))
#### Spectrum of low-recombining regions
# phi for the equilibrium ancestral population
philr = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
philr = dadi.Integration.one_pop(philr, xx, Tp, nu=nuA)
# Now do the divergence event
philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
# We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
philr = dadi.Integration.two_pops(philr, xx, Ts, nu1*hrf, nu2*hrf, m12=m12, m21=m21)
## calculate the spectrum.
fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))
### Sum the spectra
fs = Q*fslr+(1-Q)*fsnr+P*fsN+(1-P)*fsI
return fs
def IMA2N2mG(params, (n1,n2), pts):
    # NOTE: Python 2 tuple-parameter syntax; this module targets Python 2.
    nuA, nu1, nu2, b1, b2, hrf, m12, m21, me12, me21, Tp, Ts, P, Q = params
    """
    Model of semi permeability with split, ongoing migration with 2 migration rates, heterogenous effective population size (2 classes, shared by the two populations = background selection)
    nuA: Size of the ancestral population after the growth event.
    nu1: Size of population 1 after split.
    nu2: Size of population 2 after split.
    b1: Population growth coefficient of population 1
    b2: Population growth coefficient of population 2
    hrf: Hill-Robertson factor, i.e. the degree to which Ne is locally reduced due to the effects of background selection and selective sweep effects
    m12: Migration from pop 2 to pop 1 (2*Na*m12).
    m21: Migration from pop 1 to pop 2.
    me12: Effective migration from pop 2 to pop 1 in genomic islands.
    me21: Effective migration from pop 1 to pop 2 in genomic islands.
    Tp: The scaled duration of the ancestral growth phase (in units of 2*Na generations).
    Ts: The scaled time between the split and the ancient migration (in units of 2*Na generations).
    P: The proportion of the genome evolving neutrally
    Q: The proportion of the genome with a reduced effective size due to selection at linked sites
    n1,n2: Size of fs to generate.
    pts: Number of points to use in grid for evaluation.
    """
    # Define the grid we'll use
    xx = dadi.Numerics.default_grid(pts)

    #### Calculate the neutral spectrum
    # phi for the equilibrium ancestral population
    phiN = dadi.PhiManip.phi_1D(xx)
    #Now do the population growth event.
    phiN = dadi.Integration.one_pop(phiN, xx, Tp, nu=nuA)
    # Now do the divergence event
    phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
    # We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and m21
    # Growth trajectories: size interpolates from nu_i at t=0 to nu_i*b_i at
    # t=Ts.  These closures are reused for the island and nr spectra below.
    bnu1_func = lambda t: nu1 * b1**(t/Ts)
    bnu2_func = lambda t: nu2 * b2**(t/Ts)
    phiN = dadi.Integration.two_pops(phiN, xx, Ts, bnu1_func, bnu2_func, m12=m12, m21=m21)
    ###
    ## calculate the spectrum.
    fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))

    #### Calculate the genomic island spectrum
    # phi for the equilibrium ancestral population
    phiI = dadi.PhiManip.phi_1D(xx)
    #Now do the population growth event.
    phiI = dadi.Integration.one_pop(phiI, xx, Tp, nu=nuA)
    # Now do the divergence event
    phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
    # We set the population sizes after the split to nu1 and nu2 and the migration rates to me12 and me21
    phiI = dadi.Integration.two_pops(phiI, xx, Ts, bnu1_func, bnu2_func, m12=me12, m21=me21)
    ###
    ## calculate the spectrum.
    fsI = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))

    #### Calculate the spectrum in normally-recombining regions
    # phi for the equilibrium ancestral population
    phinr = dadi.PhiManip.phi_1D(xx)
    #Now do the population growth event.
    phinr = dadi.Integration.one_pop(phinr, xx, Tp, nu=nuA)
    # Now do the divergence event
    phinr = dadi.PhiManip.phi_1D_to_2D(xx, phinr)
    # We set the population sizes after the split to nu1 and nu2 and the migration rates to m12 and m21
    phinr = dadi.Integration.two_pops(phinr, xx, Ts, bnu1_func, bnu2_func, m12=m12, m21=m21)
    ###
    ## calculate the spectrum.
    fsnr = dadi.Spectrum.from_phi(phinr, (n1,n2), (xx,xx))

    #### Spectrum of low-recombining regions
    # phi for the equilibrium ancestral population
    philr = dadi.PhiManip.phi_1D(xx)
    #Now do the population growth event.
    philr = dadi.Integration.one_pop(philr, xx, Tp, nu=nuA)
    # Now do the divergence event
    philr = dadi.PhiManip.phi_1D_to_2D(xx, philr)
    # We set the population sizes after the split to hrf*nu1 and hrf*nu2 and the migration rates to m12 and m21
    bnu1hrf_func = lambda t: (nu1 * b1**(t/Ts)) * hrf
    bnu2hrf_func = lambda t: (nu2 * b2**(t/Ts)) * hrf
    philr = dadi.Integration.two_pops(philr, xx, Ts, bnu1hrf_func, bnu2hrf_func, m12=m12, m21=m21)
    ## calculate the spectrum.
    fslr = dadi.Spectrum.from_phi(philr, (n1,n2), (xx,xx))

    ### Sum the spectra
    # Weighted mixture of the four classes: Q low-recombination, (1-Q)
    # normally-recombining, P neutral, (1-P) island.
    fs = Q*fslr+(1-Q)*fsnr+P*fsN+(1-P)*fsI
    return fs
def IMA2m(params, (n1,n2), pts):
    # NOTE: Python 2 tuple-parameter syntax; this module targets Python 2.
    nuA, nu1, nu2, m12, m21, me12, me21, Tp, Ts, P = params
    """
    Model with migration during the divergence with two type of migration.
    nuA: Size of the ancestral population after the growth event.
    nu1: Size of population 1 after split.
    nu2: Size of population 2 after split.
    m12: Migration from pop 2 to pop 1 (2*Na*m12).
    m21: Migration from pop 1 to pop 2.
    me12: Effective migration from pop 2 to pop 1 in genomic islands.
    me21: Effective migration from pop 1 to pop 2 in genomic islands.
    Tp: The scaled duration of the ancestral growth phase (in units of 2*Na generations).
    Ts: The scaled time between the split and present (in units of 2*Na generations).
    P: The proportion of the genome evolving neutrally
    n1,n2: Size of fs to generate.
    pts: Number of points to use in grid for evaluation.
    """
    # Define the grid we'll use
    xx = dadi.Numerics.default_grid(pts)

    ### Calculate the neutral spectrum
    # phi for the equilibrium ancestral population
    phiN = dadi.PhiManip.phi_1D(xx)
    #Now do the population growth event.
    phiN = dadi.Integration.one_pop(phiN, xx, Tp, nu=nuA)
    # Now do the divergence event
    phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
    # We keep the population sizes after the split to nu1 and nu2 and set the migration rates to m12 and m21
    phiN = dadi.Integration.two_pops(phiN, xx, Ts, nu1, nu2, m12=m12, m21=m21)
    # calculate the spectrum.
    fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))

    ### Calculate the genomic island spectrum
    # phi for the equilibrium ancestral population
    phiI = dadi.PhiManip.phi_1D(xx)
    #Now do the population growth event.
    phiI = dadi.Integration.one_pop(phiI, xx, Tp, nu=nuA)
    # Now do the divergence event
    phiI = dadi.PhiManip.phi_1D_to_2D(xx, phiI)
    # We set the population sizes after the split to nu1 and nu2 and set the migration rates to me12 and me21
    phiI = dadi.Integration.two_pops(phiI, xx, Ts, nu1, nu2, m12=me12, m21=me21)
    # calculate the spectrum.
    # oriented
    fsI = dadi.Spectrum.from_phi(phiI, (n1,n2), (xx,xx))

    ### Sum the two spectra in proportion P (and O)
    fs = (P*fsN+(1-P)*fsI)
    return fs
def AMA2m(params, (n1,n2), pts):
nuA, nu1, nu2, m12, m21, me12, me21, Tp, Ts, Tam, P = params
"""
Model of semi permeability with split, ancient migration with 2 migration rates
nu1: Size of population 1 after split.
nu2: Size of population 2 after split.
m12: Migration from pop 2 to pop 1 (2*Na*m12).
m21: Migration from pop 1 to pop 2.
me12: Effective migration from pop 2 to pop 1 in genomic islands.
me21: Effective migration from pop 1 to pop 2 in genomic islands.
Ts: The scaled time between the split and the ancient migration (in units of 2*Na generations).
Tam: The scale time between the ancient migration and present.
P: The proportion of the genome evolving neutrally
n1,n2: Size of fs to generate.
pts: Number of points to use in grid for evaluation.
"""
# Define the grid we'll use
xx = dadi.Numerics.default_grid(pts)
### Calculate the neutral spectrum
# phi for the equilibrium ancestral population
phiN = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiN = dadi.Integration.one_pop(phiN, xx, Tp, nu=nuA)
# Now do the divergence event
phiN = dadi.PhiManip.phi_1D_to_2D(xx, phiN)
# We set the population sizes after the split to nu1 and nu2 and the migration rate to m12 and m21
phiN = dadi.Integration.two_pops(phiN, xx, Ts, nu1, nu2, m12=m12, m21=m21)
# We keep the population sizes after the split to nu1 and nu2 and set the migration rates to zero
phiN = dadi.Integration.two_pops(phiN, xx, Tam, nu1, nu2, m12=0, m21=0)
# calculate the spectrum.
fsN = dadi.Spectrum.from_phi(phiN, (n1,n2), (xx,xx))
### Calculate the genomic island spectrum
# phi for the equilibrium ancestral population
phiI = dadi.PhiManip.phi_1D(xx)
#Now do the population growth event.
phiI = dadi.Integration.one_pop(phiI, xx, Tp, nu=nuA)
# Now do the | |
# Copyright The IETF Trust 2019, All Rights Reserved
# Copyright 2018 Cisco and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a class of a single module to parse all the more complicated
metadata that we can get out of the module. From this class parse
method is called which will call all the other methods that
will get the rest of the metadata. This is parsed separately to
make sure that metadata that are quickly parsed are already pushed
into the database and these metadata will get there later.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018 Cisco and its affiliates, Copyright The IETF Trust 2019, All Rights Reserved"
__license__ = "Apache License, Version 2.0"
__email__ = "<EMAIL>"
import io
import json
import os
from collections import defaultdict
from copy import deepcopy
from datetime import datetime
import requests
from pyang import plugin
from pyang.plugins.json_tree import emit_tree as emit_json_tree
from pyang.plugins.tree import emit_tree
from redisConnections.redisConnection import RedisConnection
from utility import log, messageFactory
from utility.confdService import ConfdService
from utility.staticVariables import json_headers
from utility.util import (context_check_update_from, fetch_module_by_schema,
find_first_file)
from utility.yangParser import create_context
class ModulesComplicatedAlgorithms:
    def __init__(self, log_directory: str, yangcatalog_api_prefix: str, credentials: list, save_file_dir: str,
                 direc: str, all_modules, yang_models_dir: str, temp_dir: str, ytree_dir: str):
        """Load the modules to process and snapshot the catalog's current state.

        :param log_directory: directory where parseAndPopulate.log is written.
        :param yangcatalog_api_prefix: base URL of the yangcatalog API.
        :param credentials: [username, password] used for authenticated API calls.
        :param save_file_dir: directory with saved copies of the YANG files.
        :param direc: working directory; prepare.json is read from here when
            all_modules is None.
        :param all_modules: pre-loaded module data, or None to read prepare.json.
        :param yang_models_dir: path to the YangModels/yang checkout.
        :param temp_dir: directory for temporary files.
        :param ytree_dir: directory where JSON trees are written.

        NOTE(review): performs a blocking HTTP GET of all existing modules;
        construction fails if the API is unreachable.
        """
        # module-level logger is (re)bound here so the nested helpers can use it
        global LOGGER
        LOGGER = log.get_logger('modulesComplicatedAlgorithms', '{}/parseAndPopulate.log'.format(log_directory))
        if all_modules is None:
            with open('{}/prepare.json'.format(direc), 'r') as f:
                self.__all_modules = json.load(f)
        else:
            self.__all_modules = all_modules
        self.__yangcatalog_api_prefix = yangcatalog_api_prefix
        # name -> revision -> updated metadata, filled by the parse_* methods
        self.new_modules = defaultdict(dict)
        self.__credentials = credentials
        self.__save_file_dir = save_file_dir
        self.__path = None
        self.__yang_models = yang_models_dir
        self.temp_dir = temp_dir
        self.ytree_dir = ytree_dir
        self.__direc = direc
        # cache of already-emitted pyang trees, keyed 'name@revision'
        self.__trees = defaultdict(dict)
        self.__unavailable_modules = []
        LOGGER.info('get all existing modules')
        response = requests.get('{}search/modules'.format(self.__yangcatalog_api_prefix),
                                headers=json_headers)
        existing_modules = response.json().get('module', [])
        self.__existing_modules_dict = defaultdict(dict)
        self.__latest_revisions = {}
        for module in existing_modules:
            # Store latest revision of each module - used in resolving tree-type
            latest_revision = self.__latest_revisions.get(module['name'])
            if latest_revision is None:
                self.__latest_revisions[module['name']] = module['revision']
            else:
                # revisions are ISO dates, so string max() picks the newest
                self.__latest_revisions[module['name']] = max(module['revision'], latest_revision)
            self.__existing_modules_dict[module['name']][module['revision']] = module
    def parse_non_requests(self):
        # Metadata resolvable without further HTTP requests: tree types only.
        LOGGER.info('parsing tree types')
        self.resolve_tree_type(self.__all_modules)
    def parse_requests(self):
        # Metadata that requires API lookups: semantic versions and dependents.
        LOGGER.info('parsing semantic version')
        self.parse_semver()
        LOGGER.info('parsing dependents')
        self.parse_dependents()
def populate(self):
new_modules = [revision for name in self.new_modules.values() for revision in name.values()]
LOGGER.info('populate with module complicated data. amount of new data is {}'
.format(len(new_modules)))
confdService = ConfdService()
confdService.patch_modules(new_modules)
redisConnection = RedisConnection()
redisConnection.populate_modules(new_modules)
if len(new_modules) > 0:
url = '{}load-cache'.format(self.__yangcatalog_api_prefix)
response = requests.post(url, None,
auth=(self.__credentials[0], self.__credentials[1]))
if response.status_code != 201:
LOGGER.warning('Could not send a load-cache request. Status code: {} Message: {}'
.format(response.status_code, response.text))
else:
LOGGER.info('load-cache responded with status code {}'.format(response.status_code))
def resolve_tree_type(self, all_modules):
        def is_openconfig(rows, output):
            """Heuristic: does this pyang tree follow the openconfig pattern?

            rows: lines of the pyang tree output; output: the full tree text.
            Checks that every data subtree is mirrored by matching
            config/state containers.  Obsolete/deprecated rows ('x--'/'o--')
            are skipped throughout.
            """
            # config and state containers must pair up one-to-one
            count_config = output.count('+-- config')
            count_state = output.count('+-- state')
            if count_config != count_state:
                return False
            row_number = 0
            skip = []
            for row in rows:
                if 'x--' in row or 'o--' in row:
                    continue
                if '' == row.strip(' '):
                    break
                # a rw node that is not a leaf (not exactly "name value") must
                # live under a '+--rw config' container (or a config augment)
                if '+--rw' in row and row_number != 0 \
                        and row_number not in skip and '[' not in row and \
                        (len(row.replace('|', '').strip(' ').split(' ')) != 2 or '(' in row):
                    if '->' in row and 'config' in row.split('->')[1] and '+--rw config' not in rows[row_number - 1]:
                        row_number += 1
                        continue
                    if '+--rw config' not in rows[row_number - 1]:
                        if 'augment' in rows[row_number - 1]:
                            if not rows[row_number - 1].endswith(':config:'):
                                return False
                        else:
                            return False
                    # walk the subtree (tracked by indentation width) and make
                    # sure every rw node has a ro twin somewhere in the output
                    length_before = set([len(row.split('+--')[0])])
                    skip = []
                    for x in range(row_number, len(rows)):
                        if 'x--' in rows[x] or 'o--' in rows[x]:
                            continue
                        if len(rows[x].split('+--')[0]) not in length_before:
                            if (len(rows[x].replace('|', '').strip(' ').split(' ')) != 2 and '[' not in rows[x]) \
                                    or '+--:' in rows[x] or '(' in rows[x]:
                                length_before.add(len(rows[x].split('+--')[0]))
                            else:
                                break
                        if '+--ro' in rows[x]:
                            return False
                        duplicate = rows[x].replace('+--rw', '+--ro').split('+--')[1]
                        if duplicate.replace(' ', '') not in output.replace(' ', ''):
                            return False
                        skip.append(x)
                # symmetric check for ro nodes under '+--ro state'
                if '+--ro' in row and row_number != 0 and row_number not in skip and '[' not in row and \
                        (len(row.replace('|', '').strip(' ').split(' ')) != 2 or '(' in row):
                    if '->' in row and 'state' in row.split('->')[1] and '+--ro state' not in rows[row_number - 1]:
                        row_number += 1
                        continue
                    if '+--ro state' not in rows[row_number - 1]:
                        if 'augment' in rows[row_number - 1]:
                            if not rows[row_number - 1].endswith(':state:'):
                                return False
                        else:
                            return False
                    length_before = len(row.split('+--')[0])
                    skip = []
                    for x in range(row_number, len(rows)):
                        if 'x--' in rows[x] or 'o--' in rows[x]:
                            continue
                        if len(rows[x].split('+--')[0]) < length_before:
                            break
                        if '+--rw' in rows[x]:
                            return False
                        skip.append(x)
                row_number += 1
            return True
        def is_combined(rows, output):
            """Heuristic: is this a combined (NMDA-style) tree?

            Rejects trees that have separate -state modules/containers or
            config/state split containers; obsolete/deprecated rows
            ('x--'/'o--') are exempt from the checks.
            """
            for row in rows:
                if row.endswith('-state') and not ('x--' in row or 'o--' in row):
                    return False
            # when an augment targets a -state/:config:/:state: path, the very
            # next row is only acceptable if it is obsolete/deprecated
            next_obsolete_or_deprecated = False
            for row in rows:
                if next_obsolete_or_deprecated:
                    if 'x--' in row or 'o--' in row:
                        next_obsolete_or_deprecated = False
                    else:
                        return False
                if 'x--' in row or 'o--' in row:
                    continue
                if '+--rw config' == row.replace('|', '').strip(' ') or '+--ro state' == row.replace('|', '').strip(' '):
                    return False
                if len(row.split('+--')[0]) == 4:
                    # top-level data node
                    if '-state' in row and '+--ro' in row:
                        return False
                if 'augment' in row and len(row.split('augment')[0]) == 2:
                    part = row.strip(' ').split('/')[1]
                    if '-state' in part:
                        next_obsolete_or_deprecated = True
                    part = row.strip(' ').split('/')[-1]
                    if ':state:' in part or '/state:' in part \
                            or ':config:' in part or '/config:' in part:
                        next_obsolete_or_deprecated = True
            return True
        def is_transitional(rows, output):
            """Heuristic: is this a transitional-extra -state module?

            Applies only when the tree header names a '-state' module.  Finds
            the corresponding non-state (NMDA) module, emits (or reuses a
            cached) pyang tree for it, and requires every ro leaf of this
            module to appear in that tree.
            """
            if output.split('\n')[1].endswith('-state') and output.split('\n')[0].endswith('-state'):
                if '+--rw' in output:
                    return False
                # extract the module name from the tree header line
                if output.startswith('\n'):
                    name_of_module = output.split('\n')[1].split(': ')[1]
                else:
                    name_of_module = output.split('\n')[0].split(': ')[1]
                name_of_module = name_of_module.split('-state')[0]
                coresponding_nmda_file = self.__find_file(name_of_module)
                if coresponding_nmda_file:
                    name = coresponding_nmda_file.split('/')[-1].split('.')[0]
                    revision = name.split('@')[-1]
                    name = name.split('@')[0]
                    if '{}@{}'.format(name, revision) in self.__trees:
                        # reuse the cached pyang tree
                        stdout = self.__trees['{}@{}'.format(name, revision)]
                        pyang_list_of_rows = stdout.split('\n')[2:]
                    else:
                        # run pyang's tree plugin in-process on the NMDA module
                        plugin.plugins = []
                        plugin.init([])
                        ctx = create_context('{}:{}'.format(os.path.abspath(self.__yang_models), self.__save_file_dir))
                        ctx.opts.lint_namespace_prefixes = []
                        ctx.opts.lint_modulename_prefixes = []
                        for p in plugin.plugins:
                            p.setup_ctx(ctx)
                        with open(coresponding_nmda_file, 'r') as f:
                            a = ctx.add_module(coresponding_nmda_file, f.read())
                        if ctx.opts.tree_path is not None:
                            path = ctx.opts.tree_path.split('/')
                            if path[0] == '':
                                path = path[1:]
                        else:
                            path = None
                        ctx.validate()
                        try:
                            f = io.StringIO()
                            emit_tree(ctx, [a], f, ctx.opts.tree_depth,
                                      ctx.opts.tree_line_length, path)
                            stdout = f.getvalue()
                        except:
                            # any pyang failure is treated as "no tree"
                            stdout = ''
                        pyang_list_of_rows = stdout.split('\n')[2:]
                        if len(ctx.errors) != 0 and len(stdout) == 0:
                            return False
                    if stdout == '':
                        return False
                    for x in range(0, len(rows)):
                        if 'x--' in rows[x] or 'o--' in rows[x]:
                            continue
                        if rows[x].strip(' ') == '':
                            break
                        if len(rows[x].split('+--')[0]) == 4:
                            if '-state' in rows[x]:
                                return False
                        if len(rows[x].split('augment')[0]) == 2:
                            part = rows[x].strip(' ').split('/')[1]
                            if '-state' in part:
                                return False
                        if '+--ro ' in rows[x]:
                            # every ro leaf must exist in the NMDA tree
                            leaf = rows[x].split('+--ro ')[1].split(' ')[0].split('?')[0]
                            for y in range(0, len(pyang_list_of_rows)):
                                if leaf in pyang_list_of_rows[y]:
                                    break
                            else:
                                return False
                    return True
                else:
                    # no corresponding NMDA module found
                    return False
            else:
                return False
        def is_split(rows, output):
            """Heuristic: does this tree split config and state subtrees?

            Rejects -state header modules, explicit config/state containers
            and config/state augment targets, then fails if any -state
            subtree contains a rw node.  'x--'/'o--' rows are skipped.
            """
            failed = False
            row_num = 0
            if output.split('\n')[1].endswith('-state'):
                return False
            for row in rows:
                if 'x--' in row or 'o--' in row:
                    continue
                if '+--rw config' == row.replace('|', '').strip(' ') or '+--ro state' == row.replace('|', '').strip(' '):
                    return False
                if 'augment' in row:
                    part = row.strip(' ').split('/')[-1]
                    if ':state:' in part or '/state:' in part or ':config:' in part or '/config:' in part:
                        return False
            for row in rows:
                if 'x--' in row or 'o--' in row:
                    continue
                if row == '':
                    break
                # top-level node or top-level augment
                if (len(row.split('+--')[0]) == 4 and 'augment' not in rows[row_num - 1]) or len(row.split('augment')[0]) == 2:
                    if '-state' in row:
                        if 'augment' in row:
                            part = row.strip(' ').split('/')[1]
                            if '-state' not in part:
                                row_num += 1
                                continue
                        # scan the -state subtree: a rw node inside it means
                        # the module is not cleanly split
                        for x in range(row_num + 1, len(rows)):
                            if 'x--' in rows[x] or 'o--' in rows[x]:
                                continue
                            if rows[x].strip(' ') == '' \
                                    or (len(rows[x].split('+--')[0]) == 4 and 'augment' not in rows[row_num - 1]) \
                                    or len(row.split('augment')[0]) == 2:
                                break
                            if '+--rw' in rows[x]:
                                failed = True
                                break
                row_num += 1
            if failed:
                return False
            else:
                return True
x = 0
for module | |
width=100, disabled=True
)
self.port_status = bokeh.models.Div(
text="<p><b>port status:</b> disconnected</p>", width=200
)
self.time_column = bokeh.models.Select(
title="time column",
value=timecolumn,
options=[str(tc) for tc in allowed_time_columns],
width=100,
)
self.time_units = bokeh.models.Select(
title="time units",
value=timeunits,
options=list(allowed_timeunits),
width=100,
)
self.input_window = bokeh.models.TextAreaInput(
title="input", value="", width=150
)
self.input_send = bokeh.models.Button(
label="send", button_type="primary", width=50, disabled=True
)
self.ascii_bytes = bokeh.models.RadioGroup(
labels=["ascii", "bytes"], active=(0 if inputtype == "ascii" else 1)
)
self.shutdown = bokeh.models.Button(
label="shut down dashboard", button_type="danger", width=310
)
self.confirm_shutdown = bokeh.models.Button(
label="confirm shutdown",
button_type="danger",
width=150,
visible=False,
disabled=True,
)
self.cancel_shutdown = bokeh.models.Button(
label="cancel shutdown",
button_type="primary",
width=150,
visible=False,
disabled=True,
)
class SerialPlotter(object):
    """Holds the Bokeh figure and data state for the dashboard's plot pane."""

    def __init__(
        self,
        max_cols=max_max_cols,
        delimiter="comma",
        columnlabels="",
        timecolumn="none",
        timeunits="ms",
        rollover=400,
        glyph="lines",
    ):
        """Create a serial plotter."""
        # number of samples already rendered; data beyond this is new
        self.prev_data_length = 0
        self.data = []
        # either the string "none" or an integer column index
        self.time_column = "none" if timecolumn == "none" else int(timecolumn)
        self.time_units = timeunits
        self.max_cols = max_cols
        self.streaming = False
        # one ColumnDataSource per plotted column
        self.sources = []
        self.delimiter = parsers._delimiter_convert(delimiter)
        self.col_labels = parsers._column_labels_str_to_list(
            columnlabels, self.delimiter, self.max_cols
        )
        # glyph renderers, created lazily when data arrives
        self.lines = None
        self.dots = None
        self.lines_visible = glyph in ("lines", "both")
        self.dots_visible = glyph in ("dots", "both")
        # max number of points kept per source before old ones are dropped
        self.rollover = rollover
        self.plot, self.legend, self.phantom_source = self.base_plot()

    def base_plot(self):
        """Build a plot of voltage vs time data"""
        # Set up plot area
        p = bokeh.plotting.figure(
            frame_width=600,
            frame_height=175,
            x_axis_label=parsers._xaxis_label(self.time_column, self.time_units),
            y_axis_label=" ",
            toolbar_location="above",
            title="serial plotter",
        )

        # No range padding on x: signal spans whole plot
        p.x_range.range_padding = 0

        # We'll use whitesmoke backgrounds
        p.border_fill_color = "whitesmoke"

        # Put a phantom circle so axis labels show before data arrive
        phantom_source = bokeh.models.ColumnDataSource(
            data=dict(phantom_t=[0], phantom_y=[0])
        )
        p.circle(source=phantom_source, x="phantom_t", y="phantom_y", visible=False)

        # Make an empty legend
        legend = bokeh.models.Legend(
            items=[],
            location="center",
            label_text_font_size="8pt",
            spacing=1,
            label_height=15,
            glyph_height=15,
            click_policy="hide",
        )
        p.add_layout(legend, "right")

        return p, legend, phantom_source
class SerialMonitor(object):
    """Holds the Div widget and data state for the dashboard's text monitor."""

    def __init__(self, scroll_snap=True):
        """Create a serial monitor.

        Parameters
        ----------
        scroll_snap : bool, default True
            If True, use CSS scroll-snap in serial monitor. This should
            only be set to False for an old browser that does not have
            CSS scroll-snap.
        """
        # Use CSS scroll-snap to enable scrolling with default at bottom
        self.base_text = """<style>
            .monitorHeader {
                background-color: whitesmoke;
                height: 20px;
                width: 630px;
            }

            .monitorData {
                border-style: solid;
                border-width: 10px;
                border-color: whitesmoke;
                background-color: white;
                width: 630px;
                height: 200px;
                overflow-y: scroll;
                overscroll-behavior-y: contain;
                scroll-snap-type: y proximity;
            }

            .monitorData > div:last-child {
                scroll-snap-align: end;
            }

            .monitorTitle {
                margin-left: 50px;
                margin-bottom: 0px;
            }
            </style>

            <div class="monitorHeader">
            <p class="monitorTitle">
                <b>serial monitor</b>
            </p>
            </div>

            <div class="monitorData"><div><pre></pre></div></div>"""

        # As an alternative, can use text below. This is a hacky way to do
        # it with some rotations. The scroll bar will be on the left, and
        # mouse scroll wheel directions will be reversed from their usual.
        # This method may be useful for older browsers that do not have
        # CSS scroll-snap.
        self.alternative_base_text = """<style>
            .monitorHeader {
                background-color: whitesmoke;
                height: 20px;
                width: 630px;
            }

            .monitorData {
                border-style: solid;
                border-width: 10px;
                border-color: whitesmoke;
                background-color: white;
                width: 630px;
                height: 200px;
                overflow: auto;
                transform: rotate(180deg);
            }

            .monitorInner {
                overflow: hidden;
                transform: rotate(180deg);
            }

            .monitorTitle {
                margin-left: 50px;
                margin-bottom: 0px;
            }
            </style>

            <div class="monitorHeader">
            <p class="monitorTitle">
                <b>serial monitor</b>
            </p>
            </div>

            <div class="monitorData"><div class="monitorInner"><pre></pre></div></div>"""

        # The Div's text is rewritten in place as new serial data arrives.
        self.monitor = bokeh.models.Div(
            text=self.base_text if scroll_snap else self.alternative_base_text,
            background="whitesmoke",
            height=250,
            width=650,
            sizing_mode="fixed",
        )

        # number of entries already rendered into the Div
        self.prev_data_length = 0
        self.data = []
        self.streaming = False
def _layout(plotter, monitor, controls):
    """Build layout of serial dashboard.

    Parameters
    ----------
    plotter : serial_dashboard.SerialPlotter instance
        Instance of plot and related data structures.
    monitor : serial_dashboard.SerialMonitor instance
        Instance of monitor and related data structures.
    controls : serial_dashboard.Controls instance
        Instance of widget controls.

    Returns
    -------
    output : bokeh.models.layouts.Row instance
        Layout of the dashboard.
    """
    # buttons stacked to the left of the plot: stream/clear/save controls
    plotter_buttons = bokeh.layouts.column(
        bokeh.models.Spacer(height=20),
        controls.plot_stream,
        bokeh.models.Spacer(height=20),
        controls.plot_clear,
        bokeh.models.Spacer(height=20),
        controls.plot_save,
        bokeh.layouts.row(
            controls.plot_file_input,
            bokeh.layouts.column(bokeh.models.Spacer(height=20), controls.plot_write),
        ),
        controls.plot_save_notice,
    )

    plotter_layout = bokeh.layouts.row(
        plotter_buttons,
        plotter.plot,
        bokeh.layouts.column(bokeh.models.Spacer(height=85), controls.glyph),
        margin=(30, 0, 0, 0),
        background="whitesmoke",
    )

    # text-input row for sending data to the board (ascii or raw bytes)
    input_layout = bokeh.layouts.row(
        bokeh.models.Spacer(width=10),
        controls.input_window,
        bokeh.models.Spacer(width=20),
        bokeh.layouts.column(bokeh.models.Spacer(height=20), controls.input_send),
        bokeh.models.Spacer(width=20),
        bokeh.layouts.column(bokeh.models.Spacer(height=17), controls.ascii_bytes),
        background="whitesmoke",
        width=350,
    )

    # shutdown button plus its confirm/cancel companions
    shutdown_layout = bokeh.layouts.row(
        bokeh.layouts.column(bokeh.models.Spacer(height=10), controls.shutdown),
        bokeh.layouts.column(bokeh.models.Spacer(height=10), controls.cancel_shutdown),
        bokeh.layouts.column(bokeh.models.Spacer(height=10), controls.confirm_shutdown),
    )

    # left-hand column: serial port selection and connection status
    port_controls = bokeh.layouts.column(
        controls.port,
        controls.baudrate,
        bokeh.models.Spacer(height=10),
        controls.port_connect,
        controls.port_disconnect,
        controls.port_status,
        background="whitesmoke",
    )

    # left-hand column: parsing options for the incoming data
    specs = bokeh.layouts.column(
        controls.max_cols,
        bokeh.models.Spacer(height=10),
        controls.delimiter,
        bokeh.models.Spacer(height=10),
        controls.col_labels,
        bokeh.models.Spacer(height=10),
        controls.time_column,
        bokeh.models.Spacer(height=10),
        controls.time_units,
        bokeh.models.Spacer(height=10),
        controls.rollover,
        background="whitesmoke",
    )

    # buttons stacked to the left of the monitor: stream/clear/save controls
    monitor_buttons = bokeh.layouts.column(
        bokeh.models.Spacer(height=20),
        controls.monitor_stream,
        bokeh.models.Spacer(height=20),
        controls.monitor_clear,
        bokeh.models.Spacer(height=20),
        controls.monitor_save,
        bokeh.layouts.row(
            controls.monitor_file_input,
            bokeh.layouts.column(
                bokeh.models.Spacer(height=20), controls.monitor_write
            ),
        ),
        controls.monitor_save_notice,
    )

    monitor_layout = bokeh.layouts.row(
        monitor_buttons,
        bokeh.models.Spacer(width=15),
        monitor.monitor,
        bokeh.models.Spacer(width=10),
        margin=(30, 0, 30, 0),
        background="whitesmoke",
    )

    # overall arrangement: controls column on the left; input/shutdown row,
    # plot and monitor stacked on the right
    return bokeh.layouts.row(
        bokeh.layouts.column(port_controls, bokeh.models.Spacer(height=30), specs),
        bokeh.models.Spacer(width=20),
        bokeh.layouts.column(
            bokeh.layouts.row(
                input_layout, bokeh.models.Spacer(width=100), shutdown_layout,
            ),
            plotter_layout,
            monitor_layout,
        ),
    )
def app(
baudrate=115200,
maxcols=10,
delimiter="comma",
columnlabels="",
timecolumn=None,
timeunits="ms",
rollover=400,
glyph="lines",
inputtype="ascii",
fileprefix="_tmp",
daqdelay=20,
streamdelay=90,
portsearchdelay=1000,
):
"""Returns a function that can be used as a Bokeh app.
The app can be launched using `bokeh serve --show launchscript.py`,
from the command line where the contents of `launchscript.py` are:
.. code-block:: python
import bokeh.plotting
import serial_dashboard
app = serial_dashboard.app()
app(bokeh.plotting.curdoc())
To launch the app programmatically with Python, do the following:
This function should only be used if you need to programmatically
access the app builder, for example for using the dashboard within
a Jupyter notebook. To launch a dashboard in its own browser window,
use `launch()` instead.
Alternatively, if you want to launch in its own browser window
programmatically, you can do the following.
.. code-block:: python
from bokeh.server.server import Server
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
import serial_dashboard
app = serial_dashboard.app()
app_dict = {'/serial-dashboard': Application(FunctionHandler(app))}
server = Server(app_dict, port=5006)
server.show('/serial-dashboard')
server.run_until_shutdown()
Parameters
----------
port : int, default 5006
Port at localhost for serving dashboard.
browser : str, default None
Browser to use for dashboard. If None, uses OS default.
baudrate : int, default 115200
Baud rate of serial connection. Allowed values are 300, 1200,
2400, 4800, 9600, 19200, 38400, 57600, 74880, 115200, 230400,
250000, 500000, 1000000, 2000000.
maxcols : int, default 10
Maximum number of columns of data coming off of the board.
delimiter : str, default "comma"
Delimiter of data coming off of the board. Allowed values are
"comma", "space", "tab", "whitespace", "vertical line",
"semicolon", "asterisk", "slash"
columnlabels : str, default ""
        Labels for columns using the delimiter specified with
`delimiter` keyword argument.
timecolumn : int, default None
Column (zero-indexed) of incoming data that specifies time
timeunits : str, default "ms"
Units of incoming time data. Allowed values are "none", "µs",
"ms", "s", "min", "hr".
rollover : int, default 400
Number of data points to be shown on a plot for each column.
Allowed values are 100, 200, 400, 800, 1600, 3200.
glyph : str, default "lines"
Which glyphs to display in the plotter. Allowed values are
"lines", "dots", "both".
inputtype : str, default "ascii"
Whether input sent to the board is ASCII or bytes. Allowed
values are "ascii", "bytes".
fileprefix : str, default "_tmp"
Prefix for output files
daqdelay : float, default 20.0
Roughly the delay in data acquisition from the board in
milliseconds. The true delay is a bit above 80% of this value.
streamdelay : int, default 90
Delay between updates of the plotter and monitor in
milliseconds.
portsearchdelay : int, default 1000
Delay between checks of connected serial devices in
milliseconds.
"""
# Time column is expected to be a string or an integer
if timecolumn is None:
timecolumn = "none"
# We can be a bit flexible on delimiters
delimiter_conversion = {
",": "comma",
" ": "space",
"\t": "tab",
"\s": "whitespace",
"|": "vertical line",
";": "semicolon",
"*": "asterisk",
"/": "slash",
}
if delimiter in delimiter_conversion:
delimiter = delimiter_conversion[delimiter]
# Check inputs
_check_baudrate(baudrate),
_check_maxcols(maxcols),
_check_delimiter(delimiter),
_check_timecolumn(timecolumn, maxcols),
_check_timeunits(timeunits),
_check_rollover(rollover),
_check_glyph(glyph),
_check_inputtype(inputtype),
def _app(doc):
# "Global" variables
serial_connection = SerialConnection(
baudrate=baudrate, daq_delay=daqdelay, port_search_delay=portsearchdelay
)
controls = Controls(
baudrate=baudrate,
max_cols=maxcols,
delimiter=delimiter,
columnlabels=columnlabels,
timecolumn=timecolumn,
timeunits=timeunits,
rollover=rollover,
glyph=glyph,
inputtype=inputtype,
fileprefix=fileprefix,
)
plotter = SerialPlotter(
max_cols=maxcols,
delimiter=delimiter,
columnlabels=columnlabels,
timecolumn=timecolumn,
timeunits=timeunits,
rollover=rollover,
glyph=glyph,
)
monitor = SerialMonitor()
app_layout = _layout(plotter, monitor, controls)
# Start port sniffer
serial_connection.port_search_task = asyncio.create_task(
comms.port_search(serial_connection)
)
# Define and link on_click callbacks
def _port_connect_callback(event=None):
callbacks.port_connect_callback(
plotter, monitor, controls, serial_connection
)
controls.port_connect.on_click(_port_connect_callback)
def _port_disconnect_callback(event=None):
callbacks.port_disconnect_callback(
plotter, monitor, controls, serial_connection
)
controls.port_disconnect.on_click(_port_disconnect_callback)
def _input_send_callback(event=None):
callbacks.input_send_callback(plotter, monitor, controls, serial_connection)
controls.input_send.on_click(_input_send_callback)
def _monitor_stream_callback(event=None):
callbacks.monitor_stream_callback(
plotter, monitor, controls, serial_connection
)
controls.monitor_stream.on_click(_monitor_stream_callback)
def _monitor_clear_callback(event=None):
callbacks.monitor_clear_callback(
plotter, monitor, controls, serial_connection
)
controls.monitor_clear.on_click(_monitor_clear_callback)
def _monitor_save_callback(event=None):
callbacks.monitor_save_callback(
plotter, monitor, controls, serial_connection
)
controls.monitor_save.on_click(_monitor_save_callback)
def _monitor_write_callback(event=None):
callbacks.monitor_write_callback(
plotter, monitor, controls, serial_connection
)
controls.monitor_write.on_click(_monitor_write_callback)
def _plot_stream_callback(event=None):
| |
# pylint: skip-file
"""
UBC Eye Movement Data Analysis Toolkit
Generic Participant Class
Created on 2011-09-25
@author: skardan
"""
import string
from skgaze.EMDAT.data_structures import *
import skgaze.EMDAT.params as params
from skgaze.EMDAT.Scene import Scene
import skgaze.EMDAT.Recording as Recording
class Participant():
    """
    A class that holds the information for one Participant in the experiment.

    This base class only records the participant id and the validity flag;
    the file-name arguments are consumed by experiment-specific subclasses.
    """

    def __init__(self, pid, eventfile, datafile, fixfile, segfile, log_time_offset = None, aoifile = None, prune_length= None,
                 require_valid_segs = True, auto_partition_low_quality_segments = False):
        """Inits BasicParticipant class

        Args:
            pid: Participant id
            eventfile: a string containing the name of the "Event-Data.tsv" file for this participant
            datafile: a string containing the name of the "all-Data.tsv" file for this participant
            fixfile: a string containing the name of the "Fixation-Data.tsv" file for this participant
            segfile: a string containing the name of the '.seg' file for this participant
            log_time_offset: If not None, an integer indicating the time offset between the
                external log file and eye tracking logs
            aoifile: If not None, a string containing the name of the '.aoi' file
                with definitions of the "AOI"s.
            prune_length: If not None, an integer that specifies the time
                interval (in ms) from the beginning of each Segment in which
                samples are considered in calculations. This can be used if,
                for example, you only wish to consider data in the first
                1000 ms of each Segment. In this case (prune_length = 1000),
                all data beyond the first 1000ms of the start of the "Segment"s
                will be disregarded.
            require_valid_segs: a boolean determining whether invalid "Segment"s
                will be ignored when calculating the features or not. default = True
            auto_partition_low_quality_segments: a boolean indicating whether EMDAT should
                split the "Segment"s which have low sample quality, into two new
                sub "Segment"s discarding the largest gap of invalid samples.

        Yields:
            a Participant object

        Note:
            The file-name arguments are not stored here; subclasses are
            expected to read them.
        """
        self.pid = pid
        # export_features() and print_() read ``self.id``; historically only
        # subclasses happened to set it, so using this base class directly
        # raised AttributeError. Mirror the pid here so the class is usable
        # on its own (subclasses may still overwrite it).
        self.id = pid
        self.require_valid_segments = require_valid_segs

    def export_features(self, featurelist=None, aoifeaturelist=None, aoifeaturelabels = None,
                        id_prefix = False, require_valid = True):
        """Returns feature names and their values for this Participant

        Args:
            featurelist: if not None, a list of strings containing the name of the features to be
                returned
            aoifeaturelist: if not None, a list of features to be returned for each of the "AOI"s.
            aoifeaturelabels: if not None, a list of AOI related features to be returned.
                *Note: while aoifeaturelist is a subset of features that will be returned for all
                "AOI"s, aoifeaturelabels contains the exact AOI feature name, i.e., a feature
                of the form: [AOI name]_[feature name]
                For example for an AOI called 'graph', aoifeaturelabels may contain 'graph_fixationrate'
            id_prefix: a boolean determining if the method should also export the participant id
            require_valid: a boolean determining if only valid segments should be used when
                calculating the features. default = True

        Returns:
            featnames: a list of feature names sorted alphabetically
            featvals: a corresponding list of per-scene feature value lists
            e.g.
            featnames = ['fixationrate', 'length', 'meanabspathangles']
            featvals = [0.00268522882294', '1529851', '1.60354714212']
        """
        data = []
        featnames = []
        if id_prefix:
            featnames.append('Part_id')
            featnames.append('Sc_id')
        first = True
        for sc in self.scenes:
            if not sc.is_valid and require_valid:
                print("User %s:Scene %s dropped because of 'require_valid'" %(self.id,sc.scid))
                continue
            sc_feats = []
            if id_prefix:
                sc_feats.append(self.id)
                sc_feats.append(sc.scid)
            fnames, fvals = sc.get_features(featurelist = featurelist,
                                            aoifeaturelist = aoifeaturelist,
                                            aoifeaturelabels = aoifeaturelabels)
            # Feature names are assumed identical across scenes, so collect
            # them from the first valid scene only.
            if first: featnames += fnames
            sc_feats += fvals
            first = False
            data.append(sc_feats)
        return featnames, data

    def export_features_tsv(self, featurelist=None, aoifeaturelist=None, id_prefix = False,
                            require_valid = True):
        """Returns feature names and their values for this Participant in a tab separated format

        Args:
            featurelist: if not None, a list of strings containing the name of the features to be
                returned
            aoifeaturelist: if not None, a list of features to be returned for each of the "AOI"s.
            id_prefix: a boolean determining if the method should also export the participant id.
            require_valid: a boolean determining if only valid segments should be used when
                calculating the features. default = True

        Returns:
            A multi-line string: the first line holds the feature names separated
            by tab characters, subsequent lines hold the corresponding values.
            Each row keeps a trailing tab before its newline (historical format).
            For example:
            fixationrate	length	meanabspathangles
            0.00268522882294	1529851	1.60354714212
        """
        featnames, data = self.export_features(featurelist, aoifeaturelist = aoifeaturelist,
                                               id_prefix = id_prefix, require_valid = require_valid)
        # str.join avoids the quadratic += string build; output is byte-for-byte
        # identical to the original loop (trailing tab per row included).
        ret = ''.join(t + '\t' for t in featnames) + '\n'
        for row in data:
            ret += ''.join(str(item) + '\t' for item in row) + '\n'
        return ret

    def print_(self):
        """Outputs all feature names and their values for this Participant to the console
        """
        def format_list(items, leng=None):
            """Right-justify the repr() of each element of *items* into one string.

            Returns a (string, column_width) tuple. When *leng* is None the
            column width is one more than the longest repr in *items*.
            """
            out = ''
            if leng is None:
                maxlen = 0
                for j in items:
                    st = repr(j)
                    if len(st) > maxlen:
                        maxlen = len(st)
                for j in items:
                    out += repr(j).rjust(maxlen + 1)
                return out, maxlen + 1
            else:
                for j in items:
                    st = repr(j)
                    out += st.rjust(leng)
                return out, leng

        print("PID:",self.id)

        for seg in self.segments:
            featnames = []
            if not seg.is_valid:
                continue
            seg_feats = []
            featnames.append('seg_id')
            seg_feats.append(seg.segid)
            fnames, fvals = seg.get_features()
            featnames += fnames
            seg_feats += fvals
            o,l= format_list(featnames)
            print(o)
            # NOTE(review): format_list returns a (string, width) tuple, so
            # this prints the tuple repr rather than just the formatted row;
            # behaviour kept as-is in case downstream tooling relies on it.
            print(format_list(seg_feats,l))

        for sc in self.scenes:
            featnames = []
            if not sc.is_valid:
                continue
            sc_feats = []
            featnames.append('sc_id')
            sc_feats.append(sc.scid)
            fnames, fvals = sc.get_features()
            featnames += fnames
            sc_feats += fvals
            o,l= format_list(featnames)
            print(o)
            # NOTE(review): prints the (string, width) tuple, same as above.
            print(format_list(sc_feats,l))
def read_participants(segsdir, datadir, prune_length = None, aoifile = None):
    """Placeholder for a method that generates Participant objects for each participant
    in the experiment.

    Args:
        segsdir: directory containing the '.seg' files
        datadir: directory containing the exported eye-tracking data files
        prune_length: see Participant; passed through by overriding implementations
        aoifile: see Participant; passed through by overriding implementations

    Raises:
        NotImplementedError: always; experiment-specific modules must override this.
    """
    # NotImplementedError is the idiomatic signal for a must-override hook; it
    # subclasses Exception, so existing ``except Exception`` handlers still
    # catch it. The old dead ``participants = []`` / unreachable return were
    # removed.
    raise NotImplementedError("You should override the default Participant.read_participants method!")
def export_features_all(participants, featurelist = None, aoifeaturelist = None, aoifeaturelabels=None,
                        id_prefix = False, require_valid = True):
    """Returns feature names and their values for a list of "Participant"s

    Args:
        participants: a list of "Participant"s
        featurelist: if not None, a list of strings containing the name of the features to be
            returned
        aoifeaturelist: if not None, a list of features to be returned for each of the "AOI"s.
        aoifeaturelabels: if not None, a list of AOI related features to be returned.
            *Note: while aoifeaturelist is a subset of features that will be returned for all
            "AOI"s, aoifeaturelabels contains the exact AOI feature name, i.e., a feature
            of the form: [AOI name]_[feature name]
            For example for an AOI called 'graph', aoifeaturelabels may contain 'graph_fixationrate'
        id_prefix: a boolean determining if the method should also export the participant id
        require_valid: a boolean determining if only valid segments should be used when
            calculating the features. default = True

    Returns:
        featnames: a list of feature names sorted alphabetically
        featvals: a corresponding list of feature values
        e.g.
        featnames = ['fixationrate', 'length', 'meanabspathangles']
        featvals = [0.00268522882294', '1529851', '1.60354714212']

    Raises:
        NameError: if the participants list is empty or None.
    """
    # Guard clause: bail out early instead of wrapping the loop in an if/else.
    if not participants:
        raise NameError('No participants were passed to the function')

    featnames = []
    data = []
    for participant in participants:
        if not participant.is_valid():
            print("user",participant.id,"was not valid")
            continue
        fnames, fvals = participant.export_features(featurelist=featurelist,
                                                    aoifeaturelist=aoifeaturelist,
                                                    aoifeaturelabels = aoifeaturelabels,
                                                    id_prefix=id_prefix,
                                                    require_valid = require_valid)
        # Names are taken from the last valid participant (they are expected
        # to be identical across participants); rows accumulate.
        featnames = fnames
        data.extend(fvals)
    return featnames, data
def write_features_tsv(participants, outfile, featurelist = None, aoifeaturelist = None,
aoifeaturelabels=None, id_prefix = False):
"""Returns feature names and their values for a list of "Participant"s in a tsv-format file
This method writes to a multi-line tab separated values (tsv) file with the first
line having the feature names sorted alphabetically and separated by a tab '/t',
and the rest of the lines containing the corresponding values for each participant
separated by a tab '/t'
For example:
fixationrate length meanabspathangles
0.0026852294 1529851 1.60354714212
0.00456324344 453455 1.74324423
Args:
participants: a list of "Participant"s
outfile: a string containing the name of the output file
featurelist: if not None, a list of strings containing the name of the features to be
returned
aoifeaturelist: if not None, a list of features to be returned for each of the "AOI"s.
id_prefix: a boolean determining if the method should | |
"""
Global settings - these are loaded then overwritten in hostname specific files
See __init__.py in this dir for details
"""
import os
import socket
from collections import defaultdict
from library.django_utils.django_secret_key import get_or_create_django_secret_key
from library.git import Git
# if certain user settings are not relevant for the environment, list the columns in this
from variantgrid.settings.components.secret_settings import get_secret, get_secrets
CSRF_FAILURE_VIEW = 'variantgrid.views.csrf_error'
# used by
# python3 manage.py collectstatic_js_reverse
# after you need to refer to a JavaScript file, please checkin the resulting reverse.js
JS_REVERSE_OUTPUT_PATH = './variantgrid/static_files/default_static/django_js_reverse'
# Up 2 more dirs than normal (as we're in variantgrid.settings.components dir)
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
SETTINGS_DIR = os.path.dirname(_THIS_DIR)
BASE_DIR = os.path.dirname(os.path.dirname(SETTINGS_DIR))
PRIVATE_DATA_ROOT = os.path.join(BASE_DIR, "data")
UPLOAD_RELATIVE_PATH = "data/uploads" # Needed for FileSystemStorage
UPLOAD_DIR = os.path.join(BASE_DIR, UPLOAD_RELATIVE_PATH)
UPLOAD_ENABLED = True # This disables uploading files or creating variants (eg if out of disk)
# Absolute filesystem path to the directory that will hold GLOBALLY VISIBLE user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media_root')
MEDIA_URL = '/media/'
PYTHON_COMMAND = "python3"
MANAGE_COMMAND = [PYTHON_COMMAND, os.path.join(BASE_DIR, "manage.py")]
# Need 5x as many as largest cohort for CohortNode zygosity query
DATA_UPLOAD_MAX_NUMBER_FIELDS = 5000
# Nightly task to fix missing GRCh37/38 representations
ALLELE_VALIDATION = False
# If None, discordance emails won't be sent
DISCORDANCE_EMAIL = None
ACCOUNTS_EMAIL = None
# If you change this value you should run 'recalc' for all ClinicalContexts in admin
DISCORDANCE_ENABLED = False
# How long you have to update flags after a discordance is closed for them to still
# be considered in the report
DISCORDANCE_REPORT_LEEWAY = 14
# If doing large amount of variant re-matching, don't want to fire off discordance reports
DISCORDANCE_PAUSE_TEMP_VARIANT_MATCHING = False
DEBUG = True
# If SEND_EMAILS is False, all emails that would go through EmailLog will still be recorded but won't be sent
# Good for test environments where you don't want to spam test accounts
SEND_EMAILS = False
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost"]
ADMINS = (
# ('<NAME>', '<EMAIL>'),
)
AVATAR_PROVIDERS = (
'avatar.providers.PrimaryAvatarProvider',
'library.django_utils.avatar.SpaceThemedAvatarProvider',
)
MANAGERS = ADMINS
BACKEND_ENGINE = "postgres"
CONN_MAX_AGE = 60 # Reuse DB connections
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': get_secret('DB.name'),
'USER': get_secret('DB.user'),
'PASSWORD': get_secret('DB.password'),
'HOST': get_secret('DB.host'),
# Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': get_secret('DB.port'), # Set to empty string for default.
}
}
CACHE_HOURS = 48
TIMEOUT = 60 * 60 * CACHE_HOURS
REDIS_PORT = 6379
CACHE_VERSION = 31 # increment to flush caches (eg if invalid due to upgrade)
CACHES = {
'default': {
"BACKEND": "redis_cache.RedisCache",
"LOCATION": "redis://127.0.0.1:%d/1" % REDIS_PORT,
'TIMEOUT': TIMEOUT,
'VERSION': CACHE_VERSION,
},
'debug-panel': {
"BACKEND": "redis_cache.RedisCache",
"LOCATION": "redis://127.0.0.1:%d/1" % REDIS_PORT,
'TIMEOUT': TIMEOUT,
'OPTIONS': {
'MAX_ENTRIES': 200
}
},
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'Australia/Adelaide'
DATE_FORMAT = "%-d/%-m/%y"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# If you set this to True (and setup everything else required) the app
# will do auth through OIDC
USE_OIDC = False
MAINTENANCE_MODE = False # If true, only non-bot admin users will be able to login, currently only works for ODIC
OIDC_REQUIRED_GROUP = None
OIDC_USER_SERVICES = None
VARIANTGRID_APP_DIR = os.path.join(BASE_DIR, "variantgrid")
### annotation
VG_REFERENCE_DIR = os.path.join(VARIANTGRID_APP_DIR, "data", "reference")
ANNOTATION_BASE_DIR = "/data/annotation"
ANNOTATION_VEP_FAKE_VERSION = False # Overridden in unit tests to not call VEP to get version
ANNOTATION_VEP_PERLBREW_RUNNER_SCRIPT = None # os.path.join(BASE_DIR, "scripts", "perlbrew_runner.sh")
# I've had VEP hang on me when running --fork so by default we run in small batches
# This causes a small amount of overhead obtaining an AnnotationRangeLock
# If you get ERROR: Forked process(es) died: read-through of cross-process communication detected
# You may want to set buffer_size in ANNOTATION_VEP_ARGS below
# @see https://github.com/Ensembl/ensembl-vep/issues/150
ANNOTATION_VEP_FORK = 1
# get_unannotated_count_min_max does quick queries to try and get VEP batch sizes within a range
# If it gets below min, it does a slower query to get range lock.
# The variant table is usually ~55% alt variants but may be different due to data or if you've deleted records
ANNOTATION_VEP_BATCH_MIN = 5000 # Dont' set too low due to overhead of running pipeline etc
ANNOTATION_VEP_BATCH_MAX = 100000 # Set to None to do all in 1 job (probably want to set FORK higher)
ANNOTATION_VEP_ARGS = [] # ["--buffer_size", "1000"] # default = 5000
ANNOTATION_VEP_BASE_DIR = os.path.join(ANNOTATION_BASE_DIR, "VEP")
ANNOTATION_VEP_CODE_DIR = os.path.join(ANNOTATION_VEP_BASE_DIR, "ensembl-vep")
ANNOTATION_VEP_CACHE_DIR = os.path.join(ANNOTATION_VEP_BASE_DIR, "vep_cache")
# @see https://asia.ensembl.org/info/docs/tools/vep/script/vep_options.html#opt_pick_order
ANNOTATION_VEP_PICK_ORDER = None
ANNOTATION_VEP_DISTANCE = None # VEP --distance arg (default=5000) - how far up/down to assign to a transcript
_ANNOTATION_FASTA_BASE_DIR = os.path.join(ANNOTATION_BASE_DIR, "fasta")
BUILD_GRCH37 = "GRCh37"
BUILD_GRCH38 = "GRCh38"
ANNOTATION = {
BUILD_GRCH37: {
"enabled": True,
"annotation_consortium": "Ensembl",
"reference_fasta": os.path.join(_ANNOTATION_FASTA_BASE_DIR, "GCF_000001405.25_GRCh37.p13_genomic.fna.gz"),
"reference_fasta_has_chr": False,
"cytoband": os.path.join(VG_REFERENCE_DIR, "hg19", "cytoband.hg19.txt.gz"),
# VEP paths are relative to ANNOTATION_VEP_BASE_DIR - worked out at runtime
# so you can change just that variable and have everything else work
# The names correspond to VEPPlugin or VEPCustom entries (but lower case)
"vep_config": {
"cosmic": "annotation_data/GRCh37/CosmicCodingMuts.normal.grch37.vcf.gz",
"dbnsfp": "annotation_data/GRCh37/dbNSFP4.0a.grch37.stripped.gz",
"dbscsnv": "annotation_data/GRCh37/dbscSNV1.1_GRCh37.txt.gz",
"gnomad2": "annotation_data/GRCh37/gnomad2.1.1_GRCh37_combined_af.vcf.bgz",
"mastermind": "annotation_data/GRCh37/mastermind_cited_variants_reference-2021.04.02-grch37.vcf.gz",
"maxentscan": "annotation_data/all_builds/maxentscan",
'phastcons100way': "annotation_data/GRCh37/hg19.100way.phastCons.bw",
'phastcons30way': None, # n/a for GRCh37
'phastcons46way': "annotation_data/GRCh37/hg19.phastCons46way.placental.bw",
'phylop100way': "annotation_data/GRCh37/hg19.100way.phyloP100way.bw",
'phylop30way': None, # n/a for GRCh37
'phylop46way': "annotation_data/GRCh37/hg19.phyloP46way.placental.bw",
"repeatmasker": "annotation_data/GRCh37/repeatmasker_hg19.bed.gz",
"spliceai_snv": "annotation_data/GRCh37/spliceai_scores.raw.snv.hg19.vcf.gz",
"spliceai_indel": "annotation_data/GRCh37/spliceai_scores.raw.indel.hg19.vcf.gz",
"topmed": "annotation_data/GRCh37/TOPMED_GRCh37.vcf.gz",
"uk10k": "annotation_data/GRCh37/UK10K_COHORT.20160215.sites.vcf.gz",
}
},
# GRCh38 is NOT enabled by default - overwrite "enabled" in your server settings to use
BUILD_GRCH38: {
"enabled": False,
"annotation_consortium": "Ensembl",
"reference_fasta": os.path.join(_ANNOTATION_FASTA_BASE_DIR, "GCF_000001405.39_GRCh38.p13_genomic.fna.gz"),
"reference_fasta_has_chr": False,
"cytoband": os.path.join(VG_REFERENCE_DIR, "hg38", "cytoband.hg38.txt.gz"),
# VEP paths are relative to ANNOTATION_VEP_BASE_DIR - worked out at runtime
# so you can change just that variable and have everything else work
# The names correspond to VEPPlugin or VEPCustom entries (but lower case)
"vep_config": {
"cosmic": "annotation_data/GRCh38/CosmicCodingMuts.normal.grch38.vcf.gz",
"dbnsfp": "annotation_data/GRCh38/dbNSFP4.0a.grch38.stripped.gz",
"dbscsnv": "annotation_data/GRCh38/dbscSNV1.1_GRCh38.txt.gz",
"gnomad2": "annotation_data/GRCh38/gnomad2.1.1_GRCh38_combined_af.vcf.bgz",
"gnomad3": "annotation_data/GRCh38/gnomad3.1_GRCh38_merged.vcf.bgz",
"mastermind": "annotation_data/GRCh38/mastermind_cited_variants_reference-2021.04.02-grch38.vcf.gz",
"maxentscan": "annotation_data/all_builds/maxentscan",
'phastcons100way': "annotation_data/GRCh38/hg38.phastCons100way.bw",
'phastcons30way': "annotation_data/GRCh38/hg38.phastCons30way.bw",
'phylop100way': "annotation_data/GRCh38/hg38.phyloP100way.bw",
'phylop30way': "annotation_data/GRCh38/hg38.phyloP30way.bw",
"repeatmasker": "annotation_data/GRCh38/repeatmasker_hg38.bed.gz",
"spliceai_snv": "annotation_data/GRCh38/spliceai_scores.raw.snv.hg38.vcf.gz",
"spliceai_indel": "annotation_data/GRCh38/spliceai_scores.raw.indel.hg38.vcf.gz",
"topmed": "annotation_data/GRCh38/TOPMED_GRCh38_20180418.vcf.gz",
"uk10k": "annotation_data/GRCh38/UK10K_COHORT.20160215.sites.GRCh38.vcf.gz",
}
},
}
# Build independent config
ANNOTATION_VCF_DUMP_DIR = os.path.join(PRIVATE_DATA_ROOT, 'annotation_dump')
# Admin email used in PubMed queries to contact before throttling/banning
ANNOTATION_ENTREZ_EMAIL = get_secret("ENTREZ.email") # Automatically set in in annotation.apps.AnnotationConfig
ANNOTATION_ENTREZ_API_KEY = get_secret("ENTREZ.api_key")
ANNOTATION_PUBMED_GENE_SYMBOL_COUNT_CACHE_DAYS = 30
ANNOTATION_PUBMED_SEARCH_TERMS_ENABLED = False
MUTATIONAL_SIGNATURE_CALCULATOR = "Mutational Signature Calculator"
MUTATIONAL_SIGNATURE_ITERATIONS = 100
MUTATIONAL_SIGNATURE_SAMPLING_FRACTION = 0.8
MUTATIONAL_SIGNATURE_DATA_DIR = os.path.join(VG_REFERENCE_DIR, "mutational_signatures")
MUTATIONAL_SIGNATURE_DATA_FILE = os.path.join(MUTATIONAL_SIGNATURE_DATA_DIR, "signatures_probabilities.sorted.txt")
MUTATIONAL_SIGNATURE_INFO_FILE = os.path.join(MUTATIONAL_SIGNATURE_DATA_DIR, "signature_analysis_data.formatted.txt")
CACHED_WEB_RESOURCE_CLINGEN_DISEASE_VALIDITY = "ClinGenDiseaseValidity"
CACHED_WEB_RESOURCE_CLINVAR_CITATIONS = "ClinVarCitations"
CACHED_WEB_RESOURCE_GNOMAD_GENE_CONSTRAINT = "GnomADGeneConstraint"
CACHED_WEB_RESOURCE_HGNC = "HGNC"
CACHED_WEB_RESOURCE_LRG_REF_SEQ_GENE = "LRGRefSeqGene"
CACHED_WEB_RESOURCE_PANEL_APP_AUSTRALIA_PANELS = "PanelApp Australia Panels"
CACHED_WEB_RESOURCE_PANEL_APP_ENGLAND_PANELS = "Genomics England PanelApp Panels"
CACHED_WEB_RESOURCE_PFAM = "Pfam"
CACHED_WEB_RESOURCE_REFSEQ_GENE_SUMMARY = "RefSeq Gene Summary"
CACHED_WEB_RESOURCE_UNIPROT = "UniProt"
ANNOTATION_CACHED_WEB_RESOURCES = [
CACHED_WEB_RESOURCE_GNOMAD_GENE_CONSTRAINT,
CACHED_WEB_RESOURCE_HGNC,
CACHED_WEB_RESOURCE_LRG_REF_SEQ_GENE,
CACHED_WEB_RESOURCE_PANEL_APP_AUSTRALIA_PANELS,
CACHED_WEB_RESOURCE_PANEL_APP_ENGLAND_PANELS,
CACHED_WEB_RESOURCE_PFAM,
CACHED_WEB_RESOURCE_REFSEQ_GENE_SUMMARY,
CACHED_WEB_RESOURCE_UNIPROT,
CACHED_WEB_RESOURCE_CLINGEN_DISEASE_VALIDITY,
CACHED_WEB_RESOURCE_CLINVAR_CITATIONS,
]
VARIANT_ANNOTATION_TRANSCRIPT_PREFERENCES = ['lrg_identifier', 'refseq_transcript_accession', 'ensembl_transcript_accession']
VARIANT_ANNOTATION_DELETE_TEMP_FILES_ON_SUCCESS = not DEBUG
# If true, then if we don't have a specific transcript version, we'll match it to the closest one we can
VARIANT_TRANSCRIPT_VERSION_BEST_ATTEMPT = True
VARIANT_ZYGOSITY_GLOBAL_COLLECTION = "global"
PREFER_ALLELE_LINKS = False
CLINGEN_ALLELE_REGISTRY_DOMAIN = "http://reg.genome.network"
CLINGEN_ALLELE_REGISTRY_MAX_RECORDS = 2000
CLINGEN_ALLELE_REGISTRY_LOGIN = get_secret("CLINGEN_ALLELE_REGISTRY.login")
CLINGEN_ALLELE_REGISTRY_PASSWORD = get_secret("CLINGEN_ALLELE_REGISTRY.password")
CLINGEN_ALLELE_REGISTRY_MAX_MANUAL_REQUESTS = 10_000 # On nodes and VCFs
NO_DNA_CONTROL_REGEX = "(^|[^a-zA-Z])NDC([^a-zA-Z]|$)" # No DNA Control - eg _NDC_ or -NDC_
VCF_DOWNLOAD_ADMIN_ONLY = False
VCF_IMPORT_DELETE_TEMP_FILES_ON_SUCCESS = not DEBUG
VCF_IMPORT_CREATE_COHORT_FROM_MULTISAMPLE_VCFS = True
VCF_IMPORT_NO_DNA_CONTROL_SAMPLE_REGEX = None
VCF_IMPORT_FILE_SPLIT_ROWS = 50000
VCF_IMPORT_STORE_GVCF_NON_VAR_BLOCKS = False
VCF_IMPORT_VT_COMMAND = "vt" # Needs to be installed and in path
VCF_IMPORT_SKIP_RECORD_REGEX = {
"CNV": "<CNV>",
"Fusion": "VARTYPE=fusion",
}
COMPANY = None # Used for gene list categories
GENERATED_DIR = os.path.join(MEDIA_ROOT, 'generated')
PATIENTS_READ_ONLY_SHOW_AGE_NOT_DOB = False
IMPORT_PROCESSING_DIR = os.path.join(PRIVATE_DATA_ROOT, 'import_processing')
# @see https://github.com/SACGF/variantgrid/wiki/Liftover
LIFTOVER_CLASSIFICATIONS = True
LIFTOVER_TO_CHROMOSOMES_ONLY = True # False = Liftover to alt/patches
LIFTOVER_DBSNP_ENABLED = False # Default=False - doesn't work so well due to dbSNP IDs being for loci
LIFTOVER_NCBI_REMAP_ENABLED = False
LIFTOVER_NCBI_REMAP_PERLBREW_RUNNER_SCRIPT = None # os.path.join(BASE_DIR, "scripts", "perlbrew_runner.sh")
PANEL_APP_CACHE_DAYS = 7 # Automatically re-check after this time
PANEL_APP_CHECK_ENABLED = False
# Non-authenticated (no login) sample gene matrix
PUBLIC_SAMPLE_GENE_MATRIX_GENOME_BUILD = None # Must be set if system has multiple genome builds
PUBLIC_SAMPLE_GENE_MATRIX_GENE_LIST_ID = None # Sample gene matrix of these genes
PUBLIC_SAMPLE_GENE_MATRIX_SHOW_PRIVATE_SAMPLES = False # True = show all samples
PUBLIC_SAMPLE_GENE_MATRIX_TYPE = 'Damage'
PUBLIC_SAMPLE_GENE_MATRIX_HIGHLIGHT_GENE_SYMBOLS = []
PUBLIC_LAB_LOCATION_PAGE_VISIBLE = False
PROCESSED_BED_FILES_DIR = os.path.join(PRIVATE_DATA_ROOT, 'processed_bed_files')
INTERSECT_BED_SCRIPT = os.path.join(BASE_DIR, 'scripts', 'intersect_bed_and_upload_variant_collection.sh')
PUBLIC_GROUP_NAME = "public"
LOGGED_IN_USERS_GROUP_NAME = "logged_in_users"
# key/value = Organization.group_name : lab group name pattern
# Org must already exist, lab pattern is filled with User values (if you want to create a group for each user)
USER_CREATE_ORG_LABS = {
# "test_organization": "test_lab",
# "test_organization": "user_group_%(username)s",
}
# To use SeqAuto, your settings need to have:
# "from variantgrid.settings.defaults.seqauto_default_settings import *"
# after including this file
SEQAUTO_ENABLED = False
# Occasionally turn off if | |
## evolution.py
## <NAME>
from thesis_utils import *
from thesis_defaults import *
from thesis_poincare_utils import *
from thesis_plot_utils import *
class Evolution:
    """
    Evolution functions defining a dynamical systems on C^2. Need to specify a __call__ and __str__ method.

    ``__call__`` must map a 4-component state ``[x_1, y_1, x_2, y_2]`` to its
    four time derivatives; paths are integrated with a forward Euler scheme.
    """

    def __init__(self, dt = 0.01):
        """Store the default integration step size ``dt``."""
        # Bug fix: the dt argument used to be ignored (self.dt was hard-coded
        # to 0.01), so Evolution(dt=...) had no effect.
        self.dt = dt

    def f(self, x_0):
        """Return the vector field (the four time derivatives) at state ``x_0``."""
        return self.__call__(x_0)

    def F(self, x_0, T = 1, dt = 0.01):
        """
        Euler-integrate the system from ``x_0`` and return the end point of the
        path at time ``T`` (stepping backwards when ``T`` is negative), using
        step size ``dt`` (falls back to ``self.dt`` when ``dt`` is None).
        """
        if dt is None:
            dt = self.dt
        forwards = T >= 0
        # Bug fix: the step count used to be computed from self.dt even when a
        # different dt was supplied, producing the wrong number of steps.
        stepCnt = abs(math.ceil(T / dt))

        # Need one more slot than steps for the initial values
        ws = np.empty((stepCnt + 1, ))
        xs = np.empty((stepCnt + 1, ))
        ys = np.empty((stepCnt + 1, ))
        zs = np.empty((stepCnt + 1, ))

        # Setting initial values
        ws[0], xs[0], ys[0], zs[0] = x_0[0], x_0[1], x_0[2], x_0[3]

        # Stepping through "time": forward Euler, with the sign of the step
        # matching the direction of integration.
        step = dt if forwards else -dt
        for i in range(stepCnt):
            derivs = self.__call__([ ws[i], xs[i], ys[i], zs[i] ])
            ws[i + 1] = ws[i] + (derivs[0] * step)
            xs[i + 1] = xs[i] + (derivs[1] * step)
            ys[i + 1] = ys[i] + (derivs[2] * step)
            zs[i + 1] = zs[i] + (derivs[3] * step)
        return [ ws[-1], xs[-1], ys[-1], zs[-1] ]

    def plot_sim(self, x_0, T = 1000, plot_type = 0, dt = None):
        """
        Plots simulated path of system starting at x_0 for T, at resolution dt.
        Plot types: see thesis_plot_utils.py
        """
        if dt is None:
            dt = self.dt
        plt = self.gen_plot(x_0 = x_0, T = T, plot_type = plot_type, dt = dt, )
        plt.show()

    def gen_plot(self, x_0, T = 1000.0, plot_type = 0, dt = None, stepCnt = None, DEBUG = False):
        """
        Generates a plot of simulated path of system starting at x_0 for T, at resolution dt.
        Plot types: see thesis_plot_utils.py

        If ``stepCnt`` is given it takes precedence and ``dt`` is derived from it.
        """
        if dt is None:
            dt = self.dt
        if stepCnt is not None:
            dt = float(T) / stepCnt
        else:
            stepCnt = math.ceil(T / dt)
        if DEBUG:
            print("DEBUG: Step Count: " + str(stepCnt))

        # Need one more for the initial values
        ws = np.empty((stepCnt + 1,))
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))

        # Setting initial values
        ws[0], xs[0], ys[0], zs[0] = x_0[0], x_0[1], x_0[2], x_0[3]

        # Stepping through "time".
        for i in range(stepCnt):
            # Derivatives of the W, X, Y, Z state
            derivs = self.__call__([ ws[i], xs[i], ys[i], zs[i] ] )
            ws[i + 1] = ws[i] + (derivs[0] * dt)
            xs[i + 1] = xs[i] + (derivs[1] * dt)
            ys[i + 1] = ys[i] + (derivs[2] * dt)
            zs[i + 1] = zs[i] + (derivs[3] * dt)
            if DEBUG:
                print( "DEBUG: Pt: "+ str([ws[i+1], xs[i+1], ys[i+1], zs[i+1]] ))

        # ``gen_plot`` / ``param_string`` below are the module-level helpers
        # from the thesis_*_utils star imports, not this method.
        txt = self.__str__() + '\n' + param_string(x_0, T, dt)
        return gen_plot(ws, xs, ys, zs, plot_type, txt = txt)
class Evolution_ColluciNunez(Evolution):
    """
    Colucci-Nunez four-compartment model over state [Z, C, G, P].
    """
    def __init__(self, e = 1.0,
                 u_c = 1.0,
                 u_g = 1.0,
                 a_c = 3.0,
                 a_g = 1.0,
                 m = 1.0,
                 dt = 0.01):
        # BUG FIX: `dt` was previously ignored (self.dt hard-coded to 0.01);
        # honor the caller-supplied time step.
        self.dt = dt
        self.e, self.u_c, self.u_g, self.a_c, self.a_g, self.m = e, u_c, u_g, a_c, a_g, m
        return
    def __call__(self, x_0):
        """Return the derivatives [Z', C', G', P'] at state x_0 = [Z, C, G, P]."""
        e, u_c, u_g, a_c, a_g, m = self.e, self.u_c, self.u_g, self.a_c, self.a_g, self.m
        Z = x_0[0]
        C = x_0[1]
        G = x_0[2]
        P = x_0[3]
        Z_dot = Z * (-e + u_c * C + u_g * G)
        C_dot = C * (a_c * P - u_c * Z) - m * C * Z
        G_dot = G * (a_g * P - u_g * Z) + m * C * Z
        P_dot = P * (-a_g * G - a_c * C) + e * Z
        return [Z_dot, C_dot, G_dot, P_dot]
    def __str__(self):
        """Human-readable description of the equations and parameters."""
        return "Z' = Z * (-e + u_c * C + u_g * G) \n" + \
               "C' = C * (a_c * P - u_c * Z) - m * C * Z \n" + \
               "G' = G * (a_g * P - u_g * Z) + m * C * Z \n" + \
               "P' = P * (-a_g * G - a_c * C) + e * Z \n" + \
               "e = " + str(self.e) + "; u_c = " + str(self.u_c) + \
               "; u_g = " + str(self.u_g) + "; a_c = " + \
               str(self.a_c) + "; a_g = " + str(self.a_g) + \
               "; m = " + str(self.m)
class Evolution_Toy_A(Evolution):
    """
    Toy four-dimensional polynomial system (no tunable parameters).
    """
    def __call__(self, x_0):
        """Return the derivatives [x1', x2', x3', x4'] at state x_0."""
        x1, x2, x3, x4 = x_0[0], x_0[1], x_0[2], x_0[3]
        dx1 = -x1 + x1 * x2 + x1 * x3
        dx2 = 3 * x2 * x4 - 2 * x1 * x2
        dx3 = x3 * x4 - x3 * x1 + x2 * x1
        dx4 = -x3 * x4 - 3 * x2 * x4 + x1
        return [dx1, dx2, dx3, dx4]
    def __str__(self):
        """Human-readable description of the equations."""
        return "x1' = -x1 + x1*x2 + x1*x3 \n" + \
               "x2' = 3*x2*x4 - 2*x1*x2 \n" + \
               "x3' = x3*x4 - x3*x1 + x2*x1 \n" + \
               "x4' = -x3*x4 - 3*x2*x4 + x1 \n"
class EvolutionValdez(Evolution):
    """
    Two complex Riccati-type equations z1', z2' expanded into four real
    coordinates [x1, y1, x2, y2]; lmbda holds [lambda_1, lambda_2, lambda_3].
    """
    def __init__(self, lmbda, dt = 0.01):
        self.lmbda = lmbda
        self.dt = dt
    def __call__(self, x_0):
        """Return the derivatives [x1', y1', x2', y2'] at state x_0."""
        l1, l2, l3 = self.lmbda[0], self.lmbda[1], self.lmbda[2]
        x1, y1, x2, y2 = x_0[0], x_0[1], x_0[2], x_0[3]
        # Real and imaginary parts of the cross term z1 * z2.
        cross_re = x1 * x2 - y1 * y2
        cross_im = x1 * y2 + y1 * x2
        dx1 = l2 * (x1**2 - y1**2) - (l2 + l3) * cross_re
        dy1 = 2 * l2 * x1 * y1 - (l2 + l3) * cross_im
        dx2 = l1 * (x2**2 - y2**2) - (l1 + l3) * cross_re
        dy2 = 2 * l1 * x2 * y2 - (l1 + l3) * cross_im
        return [dx1, dy1, dx2, dy2]
    def __str__(self):
        """Human-readable description of the equations and parameters."""
        return "dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2 \n" + \
               "dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2 \n" + \
               "lambda_1: " + str(self.lmbda[0]) + \
               "; lambda_2: " + str(self.lmbda[1]) + "; lambda_3: " + str(self.lmbda[2])
class Evolution_1a(Evolution):
    """
    Same dynamics as EvolutionValdez: two complex equations in four real
    coordinates [x1, y1, x2, y2]; lmbda holds [lambda_1, lambda_2, lambda_3].
    """
    def __init__(self, lmbda, dt = 0.01):
        self.lmbda = lmbda
        self.dt = dt
    def __call__(self, x_0):
        """Return the derivatives [x1', y1', x2', y2'] at state x_0."""
        lam = self.lmbda
        x_1, y_1, x_2, y_2 = x_0[0], x_0[1], x_0[2], x_0[3]
        # Re and Im of the shared z1 * z2 coupling term.
        re_12 = x_1 * x_2 - y_1 * y_2
        im_12 = x_1 * y_2 + y_1 * x_2
        return [
            lam[1] * (x_1**2 - y_1**2) - (lam[1] + lam[2]) * re_12,
            2 * lam[1] * x_1 * y_1 - (lam[1] + lam[2]) * im_12,
            lam[0] * (x_2**2 - y_2**2) - (lam[0] + lam[2]) * re_12,
            2 * lam[0] * x_2 * y_2 - (lam[0] + lam[2]) * im_12,
        ]
    def __str__(self):
        """Human-readable description of the equations and parameters."""
        return "dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2 \n" + \
               "dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2 \n" + \
               "lambda_1: " + str(self.lmbda[0]) + \
               "; lambda_2: " + str(self.lmbda[1]) + "; lambda_3: " + str(self.lmbda[2])
class Evolution_Toy_B(Evolution):
def __call__(self, x_0, mu = 3):
"""
mu < 4
"""
x_1 = x_0[0]
x_2 = | |
from copy import deepcopy
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy.stats import norm
from sklearn.model_selection import cross_val_predict, KFold, train_test_split
from xgboost import XGBRegressor
from causalml.inference.meta.utils import (check_treatment_vector, check_p_conditions,
get_xgboost_objective_metric, convert_pd_to_np)
from causalml.inference.meta.explainer import Explainer
from causalml.propensity import compute_propensity_score
logger = logging.getLogger('causalml')
class BaseRLearner(object):
"""A parent class for R-learner classes.
An R-learner estimates treatment effects with two machine learning models and the propensity score.
Details of R-learner are available at <NAME> (2019) (https://arxiv.org/abs/1712.04912).
"""
def __init__(self,
             learner=None,
             outcome_learner=None,
             effect_learner=None,
             ate_alpha=.05,
             control_name=0,
             n_fold=5,
             random_state=None):
    """Initialize an R-learner.

    Either `learner` or both `outcome_learner` and `effect_learner` must be
    provided; the generic `learner` is deep-copied to fill any role that is
    not given explicitly.

    Args:
        learner (optional): a model to estimate outcomes and treatment effects
        outcome_learner (optional): a model to estimate outcomes
        effect_learner (optional): a model to estimate treatment effects. It needs to take `sample_weight` as an
            input argument for `fit()`
        ate_alpha (float, optional): the confidence level alpha of the ATE estimate
        control_name (str or int, optional): name of control group
        n_fold (int, optional): the number of cross validation folds for outcome_learner
        random_state (int or RandomState, optional): a seed (int) or random number generator (RandomState)
    """
    assert (learner is not None) or ((outcome_learner is not None) and (effect_learner is not None))
    # Fall back to deep copies of the generic learner for missing roles.
    self.model_mu = outcome_learner if outcome_learner is not None else deepcopy(learner)
    self.model_tau = effect_learner if effect_learner is not None else deepcopy(learner)
    self.ate_alpha = ate_alpha
    self.control_name = control_name
    self.random_state = random_state
    # Splitter used for out-of-fold outcome predictions in fit().
    self.cv = KFold(n_splits=n_fold, shuffle=True, random_state=random_state)
    self.propensity = None
    self.propensity_model = None
def __repr__(self):
return ('{}(model_mu={},\n'
'\tmodel_tau={})'.format(self.__class__.__name__,
self.model_mu.__repr__(),
self.model_tau.__repr__()))
def fit(self, X, treatment, y, p=None, verbose=True):
    """Fit the treatment effect and outcome models of the R learner.

    Trains one effect model per treatment group on the R-loss: the residual
    outcome (y - yhat) scaled by the residual treatment (w - p), weighted by
    (w - p)**2, where yhat comes from out-of-fold CV predictions.

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
        verbose (bool, optional): whether to output progress logs
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)
    check_treatment_vector(treatment, self.control_name)
    # All treatment group labels excluding the control group, sorted.
    self.t_groups = np.unique(treatment[treatment != self.control_name])
    self.t_groups.sort()
    if p is None:
        logger.info('Generating propensity score')
        p = dict()
        p_model = dict()
        for group in self.t_groups:
            # Fit the propensity model only on this group vs. control,
            # but predict propensities for the whole population.
            mask = (treatment == group) | (treatment == self.control_name)
            treatment_filt = treatment[mask]
            X_filt = X[mask]
            w_filt = (treatment_filt == group).astype(int)
            w = (treatment == group).astype(int)
            p[group], p_model[group] = compute_propensity_score(X=X_filt, treatment=w_filt,
                                                                X_pred=X, treatment_pred=w)
        self.propensity_model = p_model
        self.propensity = p
    else:
        check_p_conditions(p, self.t_groups)
        if isinstance(p, (np.ndarray, pd.Series)):
            # Single-treatment case: wrap the array into a one-entry dict.
            treatment_name = self.t_groups[0]
            p = {treatment_name: convert_pd_to_np(p)}
        elif isinstance(p, dict):
            p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}
    self._classes = {group: i for i, group in enumerate(self.t_groups)}
    # One independent effect model per treatment group.
    self.models_tau = {group: deepcopy(self.model_tau) for group in self.t_groups}
    self.vars_c = {}
    self.vars_t = {}
    if verbose:
        logger.info('generating out-of-fold CV outcome estimates')
    yhat = cross_val_predict(self.model_mu, X, y, cv=self.cv, n_jobs=-1)
    for group in self.t_groups:
        # Restrict to samples in this treatment group or in the control group.
        mask = (treatment == group) | (treatment == self.control_name)
        treatment_filt = treatment[mask]
        X_filt = X[mask]
        y_filt = y[mask]
        yhat_filt = yhat[mask]
        p_filt = p[group][mask]
        w = (treatment_filt == group).astype(int)
        if verbose:
            logger.info('training the treatment effect model for {} with R-loss'.format(group))
        # R-loss: regress the outcome residual scaled by the treatment
        # residual, with (w - p)^2 sample weights.
        self.models_tau[group].fit(X_filt, (y_filt - yhat_filt) / (w - p_filt),
                                   sample_weight=(w - p_filt) ** 2)
        # Residual variances used later for the ATE standard error.
        self.vars_c[group] = (y_filt[w == 0] - yhat_filt[w == 0]).var()
        self.vars_t[group] = (y_filt[w == 1] - yhat_filt[w == 1]).var()
def predict(self, X):
    """Predict treatment effects.

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix

    Returns:
        (numpy.ndarray): treatment-effect predictions with shape
        [n_samples, n_treatment_groups]; columns follow self.t_groups order.
    """
    X = convert_pd_to_np(X)
    te = np.zeros((X.shape[0], self.t_groups.shape[0]))
    for col, group in enumerate(self.t_groups):
        # Each group's effect model fills one output column.
        te[:, col] = self.models_tau[group].predict(X)
    return te
def fit_predict(self, X, treatment, y, p=None, return_ci=False,
                n_bootstraps=1000, bootstrap_size=10000, verbose=True):
    """Fit the treatment effect and outcome models of the R learner and predict treatment effects.

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
        return_ci (bool): whether to return confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap
        verbose (bool): whether to output progress logs

    Returns:
        (numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment].
        If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment],
        UB [n_samples, n_treatment]
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)
    self.fit(X, treatment, y, p, verbose=verbose)
    te = self.predict(X)
    if p is None:
        # fit() generated the propensity scores; reuse them for bootstrapping.
        p = self.propensity
    else:
        check_p_conditions(p, self.t_groups)
        if isinstance(p, (np.ndarray, pd.Series)):
            treatment_name = self.t_groups[0]
            p = {treatment_name: convert_pd_to_np(p)}
        elif isinstance(p, dict):
            p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}
    if not return_ci:
        return te
    else:
        # Save the fitted state: bootstrap() refits self in place, so the
        # members are restored after the loop.
        t_groups_global = self.t_groups
        _classes_global = self._classes
        model_mu_global = deepcopy(self.model_mu)
        models_tau_global = deepcopy(self.models_tau)
        te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps))
        logger.info('Bootstrap Confidence Intervals')
        for i in tqdm(range(n_bootstraps)):
            te_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
            te_bootstraps[:, :, i] = te_b
        # Percentile bootstrap CI across the bootstrap axis.
        te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2)
        te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2)
        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.model_mu = deepcopy(model_mu_global)
        self.models_tau = deepcopy(models_tau_global)
        return (te, te_lower, te_upper)
def estimate_ate(self, X, treatment, y, p=None, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000):
    """Estimate the Average Treatment Effect (ATE).

    Args:
        X (np.matrix or np.array or pd.Dataframe): a feature matrix
        treatment (np.array or pd.Series): a treatment vector
        y (np.array or pd.Series): an outcome vector
        p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the
            single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of
            float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
        bootstrap_ci (bool): whether run bootstrap for confidence intervals
        n_bootstraps (int): number of bootstrap iterations
        bootstrap_size (int): number of samples per bootstrap

    Returns:
        The mean and confidence interval (LB, UB) of the ATE estimate.
    """
    X, treatment, y = convert_pd_to_np(X, treatment, y)
    # Fit the learner and get per-sample CATE estimates first.
    te = self.fit_predict(X, treatment, y, p)
    if p is None:
        p = self.propensity
    else:
        check_p_conditions(p, self.t_groups)
        if isinstance(p, (np.ndarray, pd.Series)):
            treatment_name = self.t_groups[0]
            p = {treatment_name: convert_pd_to_np(p)}
        elif isinstance(p, dict):
            p = {treatment_name: convert_pd_to_np(_p) for treatment_name, _p in p.items()}
    ate = np.zeros(self.t_groups.shape[0])
    ate_lb = np.zeros(self.t_groups.shape[0])
    ate_ub = np.zeros(self.t_groups.shape[0])
    for i, group in enumerate(self.t_groups):
        w = (treatment == group).astype(int)
        prob_treatment = float(sum(w)) / X.shape[0]
        _ate = te[:, i].mean()
        # Standard error combines the residual variances stored by fit()
        # with the variance of the CATE estimates.
        # NOTE(review): the sqrt is divided by n rather than sqrt(n);
        # this mirrors the surrounding implementation — confirm upstream.
        se = (np.sqrt((self.vars_t[group] / prob_treatment)
                      + (self.vars_c[group] / (1 - prob_treatment))
                      + te[:, i].var())
              / X.shape[0])
        _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2)
        _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2)
        ate[i] = _ate
        ate_lb[i] = _ate_lb
        ate_ub[i] = _ate_ub
    if not bootstrap_ci:
        return ate, ate_lb, ate_ub
    else:
        # Save fitted state; bootstrap() refits self in place.
        t_groups_global = self.t_groups
        _classes_global = self._classes
        model_mu_global = deepcopy(self.model_mu)
        models_tau_global = deepcopy(self.models_tau)
        logger.info('Bootstrap Confidence Intervals for ATE')
        ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps))
        for n in tqdm(range(n_bootstraps)):
            cate_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size)
            # NOTE(review): cate_b.mean() averages over samples AND groups;
            # for multi-treatment this looks like it should be
            # cate_b.mean(axis=0) — verify against upstream causalml.
            ate_bootstraps[:, n] = cate_b.mean()
        ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1)
        ate_upper = np.percentile(ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1)
        # set member variables back to global (currently last bootstrapped outcome)
        self.t_groups = t_groups_global
        self._classes = _classes_global
        self.model_mu = deepcopy(model_mu_global)
        self.models_tau = deepcopy(models_tau_global)
        return ate, ate_lower, ate_upper
def bootstrap(self, X, treatment, y, p, size=10000):
    """Run one bootstrap replication: refit on a resampled subset (with
    replacement), then predict treatment effects for the whole population."""
    sample_idx = np.random.choice(np.arange(0, X.shape[0]), size=size)
    p_sample = {group: _p[sample_idx] for group, _p in p.items()}
    self.fit(X=X[sample_idx], treatment=treatment[sample_idx],
             y=y[sample_idx], p=p_sample, verbose=False)
    return self.predict(X=X)
def get_importance(self, X=None, tau=None, model_tau_feature=None, features=None, method='auto', normalize=True,
test_size=0.3, random_state=None):
"""
Builds a model (using X to predict estimated/actual tau), and then calculates feature importances
based on a specified method.
Currently supported methods include:
- auto (calculates importance based on estimator's default implementation of feature importance;
| |
<filename>utils/plot_helper.py
# Plotly Graphs
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from scipy.spatial.distance import pdist, squareform
# ML funcs
from .ml_helper import calculate_cm
# Others
import numpy as np
import pandas as pd
from itertools import chain
from sklearn.metrics import auc
from sklearn.decomposition import PCA
# Define common colors
blue_color = '#035672'
red_color = '#f84f57'
gray_color ='#ccc'
# Prepare feature importance chart
def plot_feature_importance(feature_importance):
    """
    Create a Plotly horizontal bar plot of aggregated feature importances.

    Args:
        feature_importance: iterable of {feature_name: importance} mappings,
            one per classifier run / CV split.

    Returns:
        (p, feature_df, feature_df_wo_links): the Plotly figure, a table whose
        names are NCBI search links, and the same table without links.
    """
    fi = [pd.DataFrame.from_dict(_, orient='index') for _ in feature_importance]
    feature_df_ = pd.concat(fi)
    # Aggregate importance (sum) and spread (std) per feature across runs.
    feature_df = feature_df_.groupby(feature_df_.index).sum()
    feature_df_std = feature_df_.groupby(feature_df_.index).std()
    # NOTE(review): dividing by both the std sum and the importance sum looks
    # unusual for a normalization — confirm the intended scaling.
    feature_df_std = feature_df_std/feature_df_std.sum()/feature_df.sum()
    feature_df.columns = ['Feature_importance']
    feature_df = feature_df/feature_df.sum()
    feature_df['Std'] = feature_df_std.values
    feature_df = feature_df.sort_values(by='Feature_importance', ascending=False)
    feature_df = feature_df[feature_df['Feature_importance'] > 0]
    feature_df['Name'] = feature_df.index
    display_limit = 20
    if len(feature_df) > display_limit:
        # Collapse everything beyond the display limit into one bar.
        remainder = pd.DataFrame({'Feature_importance':[feature_df.iloc[display_limit:].sum().values[0]],
                                  'Name':'Remainder'}, index=['Remainder'])
        feature_df = feature_df.iloc[:display_limit] # Show at most `display_limit` entries
        # BUG FIX: DataFrame.append() was removed in pandas 2.0; use pd.concat.
        feature_df = pd.concat([feature_df, remainder])
    feature_df["Feature_importance"] = feature_df["Feature_importance"].map('{:.3f}'.format).astype(np.float32)
    feature_df["Std"] = feature_df["Std"].map('{:.5f}'.format)
    feature_df_wo_links = feature_df.copy()
    # Wrap gene/protein names into NCBI search links (skip internals and the
    # synthetic 'Remainder' row).
    feature_df["Name"] = feature_df["Name"].apply(lambda x: '<a href="https://www.ncbi.nlm.nih.gov/search/all/?term={}" title="Search on NCBI" target="_blank">{}</a>'.format(x, x)
                                                  if not x.startswith('_') and x!="Remainder" else x)
    feature_df["Plot_Name"] = feature_df_wo_links["Name"].apply(lambda x: '<a href="https://www.ncbi.nlm.nih.gov/search/all/?term={}" title="Search on NCBI" target="_blank">{}</a>'.format(x, x if len(x) < 20 else x[:20]+'..')
                                                                if not x.startswith('_') and x!="Remainder" else x)
    marker_color = red_color
    title = 'Top features from the classifier'
    labels={"Feature_importance": "Feature importances from the classifier", "Plot_Name": "Names", "Std": "Standard Deviation"}
    # Hide pvalue if it does not exist
    hover_data = {"Plot_Name":False, "Name":True, "Feature_importance":True, "Std":True}
    p = px.bar(feature_df.iloc[::-1], x="Feature_importance", y="Plot_Name", orientation='h', hover_data=hover_data, labels=labels, height=800, title=title)
    p.update_layout(xaxis_showgrid=False, yaxis_showgrid=False, plot_bgcolor= 'rgba(0, 0, 0, 0)', showlegend=False)
    p.update_traces(marker_color=marker_color)
    p.update_xaxes(showline=True, linewidth=1, linecolor='black')
    p.update_yaxes(showline=True, linewidth=1, linecolor='black', type='category')
    # Update `feature_df` for NaN values and column naming, ordering
    feature_df.dropna(axis='columns', how="all", inplace=True)
    feature_df.drop("Plot_Name", inplace=True, axis=1)
    feature_df_wo_links.dropna(axis='columns', how="all", inplace=True)
    feature_df.rename(columns={'Name': 'Name and NCBI Link', 'Feature_importance': 'Feature Importance', 'Std': 'Standard Deviation'}, inplace=True)
    return p, feature_df[['Name and NCBI Link', 'Feature Importance', 'Standard Deviation']], feature_df_wo_links
# Prepare confusion matrix plot
def plot_confusion_matrices(class_0, class_1, results, names):
    """
    Build a Plotly heatmap of confusion matrices with a slider over CV splits.

    :param class_0: list of names making up the first class
    :param class_1: list of names making up the second class
    :param results: list of (y_test, y_pred) pairs, one per CV split
    :param names: slider labels, one per matrix (summary first)
    :return: Plotly figure with one heatmap trace per confusion matrix
    """
    cm_results = [calculate_cm(*_) for _ in results]
    # Also include a summary confusion matrix pooled over all CV splits,
    # inserted at position 0 so it is shown first.
    y_test_ = np.array(list(chain.from_iterable([_[0] for _ in results])))
    y_pred_ = np.array(list(chain.from_iterable([_[1] for _ in results])))
    cm_results.insert(0, calculate_cm(y_test_, y_pred_))
    # Cell annotations: count plus percentage per quadrant.
    texts = []
    for j in cm_results:
        texts.append(['{}\n{:.0f} %'.format(_[0], _[1]*100) for _ in zip(*j)])
    cats = ['_'.join(class_0), '_'.join(class_1)]
    # Quadrant coordinates of the 2x2 matrix in plot order.
    x_ = [cats[0], cats[0], cats[1], cats[1]]
    y_ = [cats[0], cats[1], cats[1], cats[0]]
    # Heatmap: one trace per matrix; only one visible at a time (slider).
    custom_colorscale = [[0, '#e8f1f7'], [1, "#3886bc"]]
    data = [
        go.Heatmap(x=x_, y=y_, z=cm_results[step][1], visible=False,
                   hoverinfo='none', colorscale = custom_colorscale)
        for step in range(len(cm_results))
    ]
    data[0]['visible'] = True
    # Build slider steps: each step toggles trace visibility and swaps the
    # four cell annotations for that split.
    steps = []
    for i in range(len(data)):
        step = dict(
            method = 'update',
            args = [
                # Make the i'th trace visible
                {'visible': [t == i for t in range(len(data))]},
                {'annotations' : [
                    dict(
                        x = x_[k],
                        y = y_[k],
                        xref= "x1",
                        yref= "y1",
                        showarrow = False,
                        text = texts[i][k].replace("\n", "<br>"),
                        font= dict(size=16, color="black")
                    )
                    for k in range(len(x_))
                ]
                }
            ],
            label = names[i]
        )
        steps.append(step)
    layout_plotly = {
        "xaxis": {"title": "Predicted value"},
        "yaxis": {"title": "True value"},
        # Start with the summary matrix's annotations.
        "annotations": steps[0]['args'][1]['annotations'],
        "plot_bgcolor":'rgba(0,0,0,0)'
    }
    p = go.Figure(data=data, layout=layout_plotly)
    # Add slider
    sliders = [dict(currentvalue={"prefix": "CV Split: "}, pad = {"t": 72}, active = 0, steps = steps)]
    p.layout.update(sliders=sliders)
    p.update_layout(autosize=False, width=700, height=700)
    return p
# Prepare ROC Curve
def plot_roc_curve_cv(roc_curve_results, cohort_combos = None):
    """
    Plotly chart of ROC curves across cross-validation splits.

    :param roc_curve_results: iterable of (fpr, tpr, thresholds) tuples, one
        per CV split.
    :param cohort_combos: optional list of (train_cohort, test_cohort) name
        pairs; when given, one labelled trace is drawn per pair and no
        mean/std band is drawn.
    :return: Plotly figure
    """
    tprs = []
    # Common FPR grid onto which every split's TPR curve is interpolated.
    base_fpr = np.linspace(0, 1, 101)
    roc_aucs = []
    p = go.Figure()
    for idx, (fpr, tpr, threshold) in enumerate(roc_curve_results):
        roc_auc = auc(fpr, tpr)
        roc_aucs.append(roc_auc)
        if cohort_combos is not None:
            text= "Train: {} <br>Test: {}".format(cohort_combos[idx][0], cohort_combos[idx][1])
            hovertemplate = "False positive rate: %{x:.2f} <br>True positive rate: %{y:.2f}" + "<br>" + text
            p.add_trace(go.Scatter(x=fpr, y=tpr, hovertemplate=hovertemplate, hoverinfo='all', mode='lines',
                                   name='Train on {}, Test on {}, AUC {:.2f}'.format(cohort_combos[idx][0], cohort_combos[idx][1], roc_auc)))
        else:
            pass
            #p.add_trace(go.Scatter(x=fpr, y=tpr, hoverinfo='skip', mode='lines', line=dict(color=blue_color), showlegend=False, opacity=0.1))
        # NOTE(review): interpolated curves are accumulated even in the cohort
        # case, where the mean/std band below is never drawn.
        tpr = np.interp(base_fpr, fpr, tpr)
        tpr[0]=0.0
        tprs.append(tpr)
    tprs = np.array(tprs)
    mean_tprs = tprs.mean(axis=0)
    std = tprs.std(axis=0)
    # Clip the ±1 std band to the valid TPR range [0, 1].
    tprs_upper = np.minimum(mean_tprs + std, 1)
    tprs_lower = np.maximum(mean_tprs - std, 0)
    mean_rocauc = np.mean(roc_aucs).round(2)
    sd_rocauc = np.std(roc_aucs, ddof=1).round(2)
    if cohort_combos is None:
        # Lower bound first so 'tonexty' on the next trace fills the band.
        p.add_trace(go.Scatter(x=base_fpr, y=tprs_lower, fill = None, line_color='gray', opacity=0.1, showlegend=False))
        p.add_trace(go.Scatter(x=base_fpr, y=tprs_upper, fill='tonexty', line_color='gray', opacity=0.1, name='±1 std. dev'))
        hovertemplate = "Base FPR %{x:.2f} <br>%{text}"
        text = ["Upper TPR {:.2f} <br>Mean TPR {:.2f} <br>Lower TPR {:.2f}".format(u, m, l) for u, m, l in zip(tprs_upper, mean_tprs, tprs_lower)]
        p.add_trace(go.Scatter(x=base_fpr, y=mean_tprs, text=text, hovertemplate=hovertemplate, hoverinfo = 'y+text',
                               line=dict(color='black', width=2), name='Mean ROC\n(AUC = {:.2f}±{:.2f})'.format(mean_rocauc, sd_rocauc)))
        p.add_trace(go.Scatter(x=[0, 1], y=[0, 1], line=dict(color=red_color, dash='dash'), name="Chance"))
    else:
        p.add_trace(go.Scatter(x=[0, 1], y=[0, 1], line=dict(color='black', dash='dash'), name="Chance"))
    # Setting the figure layouts
    p.update_xaxes(showline=True, linewidth=1, linecolor='black')
    p.update_yaxes(showline=True, linewidth=1, linecolor='black')
    p.update_layout(autosize=True,
                    width=700,
                    height=700,
                    xaxis_title='False Positive Rate',
                    yaxis_title='True Positive Rate',
                    xaxis_showgrid=False,
                    yaxis_showgrid=False,
                    plot_bgcolor= 'rgba(0, 0, 0, 0)',
                    yaxis = dict(
                        scaleanchor = "x",
                        scaleratio = 1,
                        zeroline=True,
                    ),
                    legend=dict(
                        orientation="h",
                        yanchor="bottom",
                        y=1.02,
                        xanchor="right",
                        x=1,
                    ),
                    )
    return p
# Prepare PR Curve
def plot_pr_curve_cv(pr_curve_results, class_ratio_test, cohort_combos = None):
    """
    Plotly chart of Precision-Recall (PR) curves across CV splits.

    :param pr_curve_results: iterable of (precision, recall, thresholds)
        tuples, one per CV split.
    :param class_ratio_test: positive-class ratio(s) on the test set(s);
        their mean is drawn as the "Chance" baseline.
    :param cohort_combos: optional list of (train_cohort, test_cohort) name
        pairs; when given, one labelled trace is drawn per pair and no
        mean/std band is drawn.
    :return: Plotly figure
    """
    precisions = []
    # Common recall grid onto which each split's precision is interpolated.
    base_recall = np.linspace(0, 1, 101)
    pr_aucs = []
    p = go.Figure()
    for idx, (precision, recall, _) in enumerate(pr_curve_results):
        pr_auc = auc(recall, precision)
        pr_aucs.append(pr_auc)
        if cohort_combos is not None:
            # (removed an unused per-split DataFrame that was never read)
            text= "Train: {} <br>Test: {}".format(cohort_combos[idx][0], cohort_combos[idx][1])
            hovertemplate = "Recall: %{x:.2f} <br>Precision: %{y:.2f}" + "<br>" + text
            p.add_trace(go.Scatter(x=recall, y=precision, hovertemplate=hovertemplate, hoverinfo='all', mode='lines',
                                   name='Train on {}, Test on {}, AUC {:.2f}'.format(cohort_combos[idx][0], cohort_combos[idx][1], pr_auc)))
        else:
            pass
            #p.add_trace(go.Scatter(x=recall, y=precision, hoverinfo='skip', mode='lines', line=dict(color=blue_color'), showlegend=False, opacity=0.1))
        # FIX: recall from precision_recall_curve is monotonically decreasing,
        # but np.interp requires increasing x. The previous `period=100` hack
        # worked around this via periodic interpolation; reversing both arrays
        # is the documented, non-periodic way to satisfy the requirement.
        precision = np.interp(base_recall, recall[::-1], precision[::-1])
        precision[0]=1.0
        precisions.append(precision)
    precisions = np.array(precisions)
    mean_precisions = precisions.mean(axis=0)
    std = precisions.std(axis=0)
    # Clip the ±1 std band to the valid precision range [0, 1].
    precisions_upper = np.minimum(mean_precisions + std, 1)
    precisions_lower = np.maximum(mean_precisions - std, 0)
    mean_prauc = np.mean(pr_aucs).round(2)
    sd_prauc = np.std(pr_aucs, ddof=1).round(2)
    if cohort_combos is None:
        # Lower bound first so 'tonexty' on the next trace fills the band.
        p.add_trace(go.Scatter(x=base_recall, y=precisions_lower, fill = None, line_color='gray', opacity=0.1, showlegend=False))
        p.add_trace(go.Scatter(x=base_recall, y=precisions_upper, fill='tonexty', line_color='gray', opacity=0.2, name='±1 std. dev'))
        hovertemplate = "Base Recall %{x:.2f} <br>%{text}"
        text = ["Upper Precision {:.2f} <br>Mean Precision {:.2f} <br>Lower Precision {:.2f}".format(u, m, l)
                for u, m, l in zip(precisions_upper, mean_precisions, precisions_lower)]
        p.add_trace(go.Scatter(x=base_recall, y=mean_precisions, text=text, hovertemplate=hovertemplate, hoverinfo = 'y+text',
                               line=dict(color='black', width=2), name='Mean PR\n(AUC = {:.2f}±{:.2f})'.format(mean_prauc, sd_prauc)))
        no_skill = np.mean(class_ratio_test)
        p.add_trace(go.Scatter(x=[0, 1], y=[no_skill, no_skill], line=dict(color=red_color, dash='dash'), name="Chance"))
    else:
        no_skill = np.mean(class_ratio_test)
        p.add_trace(go.Scatter(x=[0, 1], y=[no_skill, no_skill], line=dict(color='black', dash='dash'), name="Chance"))
    # Setting the figure layouts
    p.update_xaxes(showline=True, linewidth=1, linecolor='black')
    p.update_yaxes(showline=True, linewidth=1, range=[0, 1], linecolor='black')
    p.update_layout(autosize=True,
                    width=700,
                    height=700,
                    xaxis_title='Recall',
                    yaxis_title='Precision',
                    xaxis_showgrid=False,
                    yaxis_showgrid=False,
                    plot_bgcolor= 'rgba(0, 0, 0, 0)',
                    yaxis = dict(
                        scaleanchor = "x",
                        scaleratio = 1,
                        zeroline=True,
                    ),
                    legend=dict(
                        orientation="h",
                        yanchor="bottom",
                        y=1.02,
                        xanchor="right",
                        x=1,
                    ),
                    )
    return p
# Generate dendograms for hier. clustering
def generate_dendrogram( matrix, labels, show_distances: bool = False, colorbar_title: str = "", ):
    """Generate a clustered heatmap with top and side dendrograms.

    :param matrix: square pandas DataFrame to cluster (uses `.values`, so a
        DataFrame is expected — e.g. a correlation matrix).
    :param labels: tick labels for the leaves.
    :param show_distances: if True, show pairwise Euclidean distances in the
        heatmap instead of the matrix values themselves.
    :param colorbar_title: title shown on the heatmap colorbar.
    :return: Plotly figure combining both dendrograms and the heatmap.
    """
    # Initialize figure by creating upper dendrogram
    fig = ff.create_dendrogram(
        matrix,
        orientation="bottom",
        labels=labels,
        colorscale=[gray_color] * 8,
    )
    # Move the top dendrogram onto its own y-axis so the heatmap can share x.
    for i in range(len(fig["data"])):
        fig["data"][i]["yaxis"] = "y2"
    # Create side dendrogram
    dendro_side = ff.create_dendrogram(
        matrix, orientation="right", colorscale=[gray_color] * 8
    )
    for i in range(len(dendro_side["data"])):
        dendro_side["data"][i]["xaxis"] = "x2"
    # Add Side Dendrogram Data to Figure
    for data in dendro_side["data"]:
        fig.add_trace(data)
    # Leaf order produced by the clustering (as integer row indices).
    dendro_leaves = dendro_side["layout"]["yaxis"]["ticktext"]
    dendro_leaves = list(map(int, dendro_leaves))
    # get heatmap data (z)
    if show_distances:
        # calculate pairwise Euclidean distances between rows
        heat_data = squareform(pdist(matrix))
    else:
        heat_data = matrix.values
    # arrange the heatmap data according to the dendrogram clustering
    heat_data = heat_data[dendro_leaves, :]
    heat_data = heat_data[:, dendro_leaves]
    heatmap = [
        go.Heatmap(
            x=dendro_leaves,
            y=dendro_leaves,
            z=heat_data,
            # Diverging blue-white-red scale centered at the midpoint.
            colorscale=[
                [0.0, blue_color],
                [0.5, "#ffffff"],
                [1.0, red_color],
            ],
            colorbar={"title": colorbar_title},
            hovertemplate=(
                "<b>Protein x:</b> %{x}<br><b>Protein y:</b> %{y}"
                "<extra>r = %{z:.2f}</extra>"
            ),
        )
    ]
    # Align the heatmap cells with the dendrogram tick positions.
    heatmap[0]["x"] = fig["layout"]["xaxis"]["tickvals"]
    heatmap[0]["y"] = dendro_side["layout"]["yaxis"]["tickvals"]
    for data in heatmap:
        fig.add_trace(data)
    # modify layout
    fig.update_layout(
        {
            "width": 800,
            "height": 800,
            "showlegend": False,
            "hovermode": "closest",
        }
    )
    # add labels to yaxis (needed for the hover)
    fig["layout"]["yaxis"]["ticktext"] = fig["layout"]["xaxis"]["ticktext"]
    fig["layout"]["yaxis"]["tickvals"] = fig["layout"]["xaxis"]["tickvals"]
    # modify axes: hide ticks/grids on all four axes, split the canvas
    # between the dendrograms and the heatmap via axis domains.
    params: dict = {
        "mirror": False,
        "showgrid": False,
        "showline": False,
        "zeroline": False,
        "showticklabels": False,
        "ticks": "",
    }
    fig.update_layout(
        xaxis={"domain": [0.15, 1], **params},
        xaxis2={"domain": [0, 0.15], **params},
        yaxis={"domain": [0, 0.85], **params},
        yaxis2={"domain": [0.825, 0.975], **params},
    )
    return fig
# Perform EDA and Prepare their plots
def perform_EDA(state):
"""
Perform EDA on the dataset by given method and return the chart
"""
data = state.df_sub[state.proteins].astype('float').fillna(0.0)
if state.eda_method == "Hierarchical clustering":
data_to_be_correlated = data.iloc[:, state.data_range[0]:state.data_range[1]]
corr = data_to_be_correlated.corr(method="pearson")
labels = corr.columns
p = generate_dendrogram(
matrix=corr,
labels=labels,
colorbar_title="Pearson correlation | |
<filename>lambdo/Column.py
__author__="<NAME>"
import json
from lambdo.utils import *
from lambdo.resolve import *
from lambdo.transform import *
from lambdo.Workflow import *
from lambdo.Table import *
from lambdo.Column import *
import logging
log = logging.getLogger('lambdo.column')
class Column:
"""
The class represents one column definition.
A column definition should be distinguished from a data column because one definition can generate many data columns.
It includes column input-output tables, column definition, its evaluation logic.
A column definition may include extensions, which is a way to define families of columns with small differences in their definitiosn.
There are several definition (operation) types each having its own parameters: calculate, accumulate, aggregate etc.
"""
columnNo = 0
def __init__(self, table, column_json: dict):
self.table = table
self.column_json = column_json
# TODO: Data represents the whole function and is a pandas series with index as row ids of the table data
self.data = [] # It is a list because one column definition may generate many column data objects
self.groupby = None # Link columns store here a groupby objects which is then (re)used by multiple aggregate columns
# Assign id
self.id = self.column_json.get('id', None)
if self.id is None:
self.id = "___column___" + str(self.columnNo)
self.column_json['id'] = self.id
self.columnNo += 1
def __repr__(self):
return '[' + self.table.id + '::' + self.id+']'
def get_inputs(self):
"""Get a list of input column names specified in this definition."""
definition = self.column_json
inputs = definition.get('inputs', [])
# If a single name is provided (not a list), then we wrap it into a list
if isinstance(inputs, str):
inputs = [inputs]
return inputs
def get_outputs(self):
"""Get a list of output column names generated by this definition."""
definition = self.column_json
outputs = definition.get('outputs', [])
# If a single name is provided (not a list), then we wrap it into a list
if isinstance(outputs, str):
outputs = [outputs]
if not outputs: # If outputs are not specified then use id
outputs.append(definition.get('id'))
return outputs
def get_operation(self):
"""
If operation type is specified explicitly then return it.
Otherwise, determine the operation type from parameters.
Each operation type uses its own set of parameters.
Note that sometimes it is not possible to determine the operation type unambigously.
"""
definition = self.column_json
operation = definition.get('operation')
if operation is not None:
return operation
# If operation is not specified explicitly then determine from parameters
window = definition.get('window')
if window:
if window == 'one' or window == '1':
operation = 'calculate'
elif window == 'all':
operation = 'all'
else:
operation = 'roll'
else:
function = definition.get('function')
if function:
operation = 'calculate'
else:
operation = 'noop' # No window, no function
return operation
def is_op_noop(self):
operation = self.column_json.get('operation')
if operation == 'noop':
return True
return False
def is_op_one(self):
operation = self.get_operation()
if operation == 'calculate' or operation == 'calc':
return True
return False
def is_op_roll(self):
operation = self.get_operation()
if operation == 'roll':
return True
return False
def is_op_all(self):
operation = self.get_operation()
if operation == 'all':
return True
return False
def is_op_calc(self):
operation = self.get_operation()
if self.is_op_one() or self.is_op_roll() or self.is_op_all():
return True
return False
def is_op_link(self):
operation = self.get_operation()
if operation == 'link':
return True
return False
def is_op_compose(self):
operation = self.get_operation()
if operation == 'compose':
return True
return False
def is_op_aggregate(self):
operation = self.get_operation()
if operation == 'aggregate' or operation == 'agg':
return True
return False
def get_input_columns(self):
    """
    Get column names which are consumed by this operation (and hence have to
    be evaluated before this operation can be executed).

    The names in the list are fully qualified column names always starting
    from some table name, like 'Table::Link::Column'.
    Note that some operations (e.g., link or aggregate) can return columns
    of another table.
    """
    definition = self.column_json
    table_name = self.table.id
    dependencies = []
    if self.is_op_calc():
        inputs = self.get_inputs()
        dependencies.extend([table_name + '::' + x for x in inputs])  # This table name as a prefix
    elif self.is_op_link():
        main_keys = definition.get('keys', [])
        dependencies.extend([table_name + '::' + x for x in main_keys])  # This table name as a prefix
        # Linked (target) columns have to be evaluated (although they are typically attributes)
        linked_table_name = definition.get('linked_table', '')
        linked_keys = definition.get('linked_keys', [])
        dependencies.extend([linked_table_name + '::' + x for x in linked_keys])  # Linked table name as a prefix
    elif self.is_op_compose():
        inputs = self.get_inputs()
        # Link column (first segment) has to be evaluated.
        # Fix: next(iter(...), None) avoids an IndexError on an empty input
        # list (consistent with get_dependencies); the original's
        # `inputs[0] or None` crashed before the defect could be reported.
        link_column_name = next(iter(inputs), None)
        dependencies.append(table_name + '::' + link_column_name)
        # Linked column path (tail) in the linked table has to exist (recursion)
        link_column_definitions = self.table.get_definitions_for_columns(link_column_name)
        link_column_definition = next(iter(link_column_definitions), None)
        linked_table_name = link_column_definition.column_json['linked_table']
        linked_column_name = inputs[1] if len(inputs) > 1 else None
        dependencies.append(linked_table_name + '::' + linked_column_name)  # Linked (target) table name as a prefix
    elif self.is_op_aggregate():
        fact_table_name = definition.get('fact_table')
        # Group column of the fact table
        group_column_name = definition.get('group_column')
        dependencies.append(fact_table_name + '::' + group_column_name)  # Fact table name as a prefix
        # Measure columns of the fact table
        inputs = self.get_inputs()
        dependencies.extend([fact_table_name + '::' + x for x in inputs])  # Fact table name as a prefix
    else:
        return []
    return dependencies
def get_input_tables(self):
    """
    Get table names which are consumed by this operation.

    By tables here we mean sets (of tuples): these tables have to be
    populated, but not necessarily all their columns (functions) have to be
    evaluated.  Which functions (columns) of the returned tables are really
    consumed has to be determined by some other method.  This table itself
    is always included (except for unknown operations, which return [])
    because any derived column can be evaluated only after populating it.
    """
    definition = self.column_json
    table_name = self.table.id
    dependencies = [table_name]  # Always depend on this table
    if self.is_op_calc():
        pass  # Row-wise calculations read only this table
    elif self.is_op_link():
        dependencies.append(definition.get('linked_table', ''))
    elif self.is_op_compose():
        # Link column (first segment) is used to retrieve the linked table.
        # Fix: next(iter(...), None) avoids an IndexError on an empty input
        # list (consistent with get_dependencies).
        inputs = self.get_inputs()
        link_column_name = next(iter(inputs), None)
        link_column_definitions = self.table.get_definitions_for_columns(link_column_name)
        link_column_definition = next(iter(link_column_definitions), None)
        dependencies.append(link_column_definition.column_json['linked_table'])
    elif self.is_op_aggregate():
        dependencies.append(definition.get('fact_table'))
    else:
        return []
    return dependencies
def get_dependencies(self):
"""
Get tables and columns this column depends upon.
The returned elements must be executed before this element can be executed because this element consumes their data.
"""
definition = self.column_json
dependencies = []
if self.is_op_calc():
# This table has to be populated
dependencies.append(self.table)
# Input column objects for which we need to find definitions
inputs = self.get_inputs()
dependencies.extend(self.table.get_definitions_for_columns(inputs))
# Remove self-dependency
dependencies = [x for x in dependencies if x != self]
# TODO: input columns can be column paths
elif self.is_op_link():
# This (fact) table has to be populated
dependencies.append(self.table)
# Input (fact table) columns or column paths have to be evaluated
main_keys = definition.get('keys', [])
dependencies.extend(self.table.get_definitions_for_columns(main_keys))
# Target (linked) table has to be populated
linked_table_name = definition.get('linked_table', '')
linked_table = self.table.workflow.get_table(linked_table_name)
dependencies.append(linked_table)
# Target columns have to be evaluated in order to contain values. However, they are supposed to be attributes and hence they will be set during population.
# If we can link to derived columns, then they have to be populated. It might be reasonable, if some attributes are transformed into another form, say date/time conversion to an interval id.
linked_keys = definition.get('linked_keys', [])
dependencies.extend(linked_table.get_definitions_for_columns(linked_keys))
elif self.is_op_compose():
# This (main) table has to be populated
dependencies.append(self.table)
inputs = definition['inputs']
# TODO: Validty check. Two elements must be provided in a compose column. (If not two, then they had to be converted to only two by merging them.)
# Link column (first segment) has to be evaluated
link_column_name = next(iter(inputs), None)
link_column_definitions = self.table.get_definitions_for_columns(link_column_name)
link_column_definition = next(iter(link_column_definitions), None)
dependencies.append(link_column_definition)
# Linked column path (tail) in the linked table has to exist (recursion)
linked_table_name = link_column_definition.column_json['linked_table']
linked_table = self.table.workflow.get_table(linked_table_name)
linked_column_name = inputs[1] if len(inputs) > 1 else None
linked_column_definitions = linked_table.get_definitions_for_columns(linked_column_name)
linked_column_definition = next(iter(linked_column_definitions), None)
if linked_column_definition: # A linked column might not have a definition, e.g., an attribute
dependencies.append(linked_column_definition)
# Here we assume that the tail dependencies will be retrieved separately.
# Alternatively, we could retrieve them here using recursion
# Lined table has to be populated. (Yet, | |
<filename>codes/models/modules/loss.py<gh_stars>0
import torch
import torch.nn as nn
import math
import numbers
from torch.nn import functional as F
import numpy as np
def LoG(imgHF): #Laplacian of Gaussian
    """
    Apply a 5x5 Laplacian-of-Gaussian convolution to ``imgHF``.

    The LoG operator calculates the second spatial derivative of an image:
    the response is zero where intensity is constant, positive on the darker
    side of an edge and negative on the lighter side, crossing zero on the
    edge itself.  It sharpens edges but also amplifies noise, which is why
    callers typically Gaussian-smooth the image first (larger sigma reduces
    noise at the cost of the sharpening effect).
    """
    # The 2-D LoG approximated by a 5x5 convolution kernel
    weight = [
        [0, 0, 1, 0, 0],
        [0, 1, 2, 1, 0],
        [1, 2, -16, 2, 1],
        [0, 1, 2, 1, 0],
        [0, 0, 1, 0, 0]
    ]
    weight = np.array(weight)
    weight_np = np.zeros((1, 1, 5, 5))
    weight_np[0, 0, :, :] = weight
    # Replicate the kernel over the channel and batch dimensions.
    # NOTE(review): conv2d expects weight shaped (out_ch, in_ch/groups, kH, kW);
    # repeating over axis 0 by the *batch* size looks suspicious — confirm the
    # intended output channel count.
    weight_np = np.repeat(weight_np, imgHF.shape[1], axis=1)
    weight_np = np.repeat(weight_np, imgHF.shape[0], axis=0)
    # Fix: build the kernel on the same device as the input instead of the
    # previously hard-coded 'cuda:0', so the loss also works on CPU tensors.
    weight = torch.from_numpy(weight_np).type(torch.FloatTensor).to(imgHF.device)
    # NOTE(review): padding=1 with a 5x5 kernel shrinks each spatial dim by 2;
    # padding=2 would preserve the size — kept as-is to preserve behavior.
    return nn.functional.conv2d(imgHF, weight, padding=1)
class GaussianSmoothing(nn.Module):
    """
    Depthwise Gaussian blur for 1d/2d/3d inputs.

    A separable Gaussian kernel is built once at construction time and
    applied with a grouped convolution (one group per channel), so channels
    are smoothed independently.  No padding is applied here; callers pad the
    input themselves if they want to keep the spatial size.
    """

    def __init__(self, channels, kernel_size=15, sigma=3, dim=2):
        super(GaussianSmoothing, self).__init__()
        # Scalar arguments apply identically to every spatial dimension
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # Build the kernel as an outer product of 1-D Gaussians evaluated
        # on an integer coordinate grid centred on the kernel.
        grids = torch.meshgrid(
            [torch.arange(n, dtype=torch.float32) for n in kernel_size]
        )
        kernel = 1
        for n, std, coords in zip(kernel_size, sigma, grids):
            center = (n - 1) / 2
            kernel = kernel * (1 / (std * math.sqrt(2 * math.pi))) * \
                torch.exp(-((coords - center) / std) ** 2 / 2)
        # Normalize so a constant image keeps its mean intensity
        kernel = kernel / torch.sum(kernel)
        # Reshape to (channels, 1, *kernel_size) for a depthwise conv
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
        self.register_buffer('weight', kernel)
        self.groups = channels
        conv_by_dim = {1: F.conv1d, 2: F.conv2d, 3: F.conv3d}
        try:
            self.conv = conv_by_dim[dim]
        except KeyError:
            raise RuntimeError(
                'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
            )

    def forward(self, input):
        """Apply the kernel; spatial size shrinks by kernel_size - 1 per dim."""
        return self.conv(input, weight=self.weight, groups=self.groups)
class CharbonnierLoss(nn.Module):
    """Charbonnier loss: a smooth, differentiable variant of L1."""

    def __init__(self, eps=1e-6):
        super(CharbonnierLoss, self).__init__()
        self.eps = eps  # smoothing term added under the square root

    def forward(self, x, y):
        # Mean over batch, channels and pixels of sqrt((x - y)^2 + eps)
        batch, channels, height, width = y.size()
        residual = x - y
        total = torch.sum(torch.sqrt(residual * residual + self.eps))
        return total / (channels * batch * height * width)
# Define GAN loss: [vanilla | lsgan | wgan-gp]
# https://tuatini.me/creating-and-shipping-deep-learning-models-into-production/
class GANLoss(nn.Module):
    """
    GAN adversarial loss supporting several formulations:
    'vanilla' (BCE with logits), 'lsgan' (MSE), 'srpgan' (BCE) and
    'wgan-gp' (signed mean of the critic output).
    """

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type.lower()
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        kind = self.gan_type
        if kind == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif kind == 'lsgan':
            self.loss = nn.MSELoss()
        elif kind == 'srpgan':
            self.loss = nn.BCELoss()
        elif kind == 'wgan-gp':
            def wgan_loss(input, target):
                # 'target' is a boolean: drive the critic output up for
                # real samples (negative loss) and down for fake ones
                return -1 * input.mean() if target else input.mean()
            self.loss = wgan_loss
        else:
            raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))

    def get_target_label(self, input, target_is_real):
        """Return the label tensor (or the raw boolean for wgan-gp) matching ``input``."""
        if self.gan_type == 'wgan-gp':
            return target_is_real
        label_val = self.real_label_val if target_is_real else self.fake_label_val
        return torch.empty_like(input).fill_(label_val)

    def forward(self, input, target_is_real):
        """Compute the configured loss of ``input`` against real/fake labels."""
        return self.loss(input, self.get_target_label(input, target_is_real))
class GradientPenaltyLoss(nn.Module):
    """
    WGAN-GP gradient penalty: pushes the L2 norm of the critic's gradient
    at interpolated samples towards 1.
    """

    def __init__(self, device=torch.device('cpu')):
        super(GradientPenaltyLoss, self).__init__()
        # Reusable all-ones tensor passed as grad_outputs to autograd.grad
        self.register_buffer('grad_outputs', torch.Tensor())
        self.grad_outputs = self.grad_outputs.to(device)

    def get_grad_outputs(self, input):
        """Return a ones tensor shaped like ``input``, resized lazily."""
        if self.grad_outputs.size() != input.size():
            self.grad_outputs.resize_(input.size()).fill_(1.0)
        return self.grad_outputs

    def forward(self, interp, interp_crit):
        ones = self.get_grad_outputs(interp_crit)
        grads = torch.autograd.grad(outputs=interp_crit, inputs=interp,
                                    grad_outputs=ones, create_graph=True,
                                    retain_graph=True, only_inputs=True)[0]
        flat = grads.view(grads.size(0), -1)
        # Penalize deviation of the per-sample gradient norm from 1
        return ((flat.norm(2, dim=1) - 1) ** 2).mean()
class HFENLoss(nn.Module): # Edge loss with pre_smooth
# In order to further penalize the diferences in fine details, such as edges,
# a gradient-domain L1 loss can be used, where each gradient ∇(·) is computed
# using a High Frequency Error Norm (HFEN). The metric uses a Laplacian of
# Gaussian kernel for edge-detection. The Laplacian works to detect
# edges, but is sensitive to noise, so the image can be pre-smoothed with a
# Gaussian filter first to make edge-detection work better. The recommended
# parameter of σ = 1.5 for Gaussian kernel size can be used.
def __init__(self, loss_f='L1', device='cuda:0', pre_smooth=True, relative=False):
super(HFENLoss, self).__init__()
self.device = device
self.loss_f = loss_f #loss function
self.pre_smooth = pre_smooth
self.relative = relative
self.laplacian = False
if loss_f=='l2':
self.criterion = nn.MSELoss(reduction='sum').to(device)
elif loss_f=='elastic':
self.criterion = ElasticLoss(reduction='sum').to(device)
elif loss_f=='cb':
self.criterion = CharbonnierLoss().to(device)
else: #if loss_f=='l1':
self.criterion = nn.L1Loss(reduction='sum').to(device)
def forward(self, input, target, eps=0.01):
c = input.shape[1]
# Note that, since the range of color values can be significantly
# large, we apply a logarithmic function to the ground truth image to
# compress its range before computing the loss, i.e., c = log(1 + c˜),
# where ˜c is the ground truth image in the linear domain.
# Note: This may not hold true if image range is already [0,1] or [-1,1]
# input = torch.log(1 + input) #(eps=1e-7)
if self.pre_smooth:
# As Laplace operator may detect edges as well as noise (isolated, out-of-range),
# it may be desirable to smooth the image first by a convolution with a Gaussian
# kernel of width sigma. This will add an additional Gaussian smoothing before LoG
# to reduce noise and only focus on Edge loss.
# Configure Gaussian kernel
smoothing = GaussianSmoothing(c, 11, 1.5) #default: (c, 15, 1.5) | paper: (3, 11, 1.5) | simpler: (c, 5, 1)
smoothing = smoothing.to(self.device) #.to('cuda:0')
# Pad input and target
input_smooth = nn.functional.pad(input, (2, 2, 2, 2), mode='reflect')
target_smooth = nn.functional.pad(target, (2, 2, 2, 2), mode='reflect')
# Apply Gaussian kernel
input_smooth = smoothing(input_smooth)
target_smooth = smoothing(target_smooth)
else:
if self.relative:
if self.laplacian:
input_smooth = input
target_smooth = target
else:
input_smooth = nn.functional.pad(input, (1, 1, 1, 1), mode='reflect')
target_smooth = nn.functional.pad(target, (1, 1, 1, 1), mode='reflect')
else:
input_smooth = input
target_smooth = target
# If using Gaussian+laplacian instead of LoG
# Needs more testing, look at SSIM that also uses gaussian convolution
if self.laplacian:
#Gaussian, needs to be applied for "Laplacian of Gauss" (LoG)
if self.pre_smooth:
pad_size = 11 #5,7,9,11
LoG_kernel = 17 #5,9,13,17
else:
pad_size = 7 #>= 2
LoG_kernel = (2*pad_size)+1 #LoG-> pad: 5 -> 2, 15 -> 7, etc
gaussian = GaussianSmoothing(c, LoG_kernel, 1.5).to(self.device) #default: (c, 15, 1.5) | paper: (3, 11, 1.5) | simpler: (c, 5, 1)
input_smooth = nn.functional.pad(input_smooth, (pad_size,pad_size,pad_size,pad_size), mode='reflect')
target_smooth = nn.functional.pad(target_smooth, (pad_size,pad_size,pad_size,pad_size), mode='reflect')
# Apply Gaussian kernel
input_smooth = gaussian(input_smooth)
target_smooth = gaussian(target_smooth)
"""
if self.loss_f == 'L2':
x = torch.sum(torch.pow((LoG(input_smooth-target_smooth)), 2))
elif self.loss_f == 'elastic':
x = torch.sum(torch.pow((LoG(input_smooth-target_smooth)), 2))
| |
# Copyright (c) 2018-2021 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
import os, optparse, sys
addToPath('../')
from common import Options
from ruby import Ruby
#
# Add the ruby specific and protocol specific options
#
# NOTE(review): optparse is deprecated in favour of argparse, but the gem5
# Options/Ruby helpers used below take an optparse parser — kept as-is.
parser = optparse.OptionParser()
Options.addNoISAOptions(parser)
Ruby.define_options(parser)

# GPU Ruby tester options
parser.add_option("--cache-size", type="choice", default="small",
                  choices=["small", "large"],
                  help="Cache sizes to use. Small encourages races between \
                        requests and writebacks. Large stresses write-through \
                        and/or write-back GPU caches.")
parser.add_option("--system-size", type="choice", default="small",
                  choices=["small", "medium", "large"],
                  help="This option defines how many CUs, CPUs and cache \
                        components in the test system.")
parser.add_option("--address-range", type="choice", default="small",
                  choices=["small", "large"],
                  help="This option defines the number of atomic \
                        locations that affects the working set's size. \
                        A small number of atomic locations encourage more \
                        races among threads. The large option stresses cache \
                        resources.")
parser.add_option("--episode-length", type="choice", default="short",
                  choices=["short", "medium", "long"],
                  help="This option defines the number of LDs and \
                        STs in an episode. The small option encourages races \
                        between the start and end of an episode. The long \
                        option encourages races between LDs and STs in the \
                        same episode.")
parser.add_option("--test-length", type="int", default=1,
                  help="The number of episodes to be executed by each \
                        wavefront. This determines the maximum number, i.e., \
                        val X #WFs, of episodes to be executed in the test.")
parser.add_option("--debug-tester", action='store_true',
                  help="This option will turn on DRF checker")
parser.add_option("--random-seed", type="int", default=0,
                  help="Random seed number. Default value (i.e., 0) means \
                        using runtime-specific value")
parser.add_option("--log-file", type="string", default="gpu-ruby-test.log")
parser.add_option("--num-dmas", type="int", default=0,
                  help="The number of DMA engines to use in tester config.")

(options, args) = parser.parse_args()

# This script takes options only; any positional argument is a usage error
if args:
    print("Error: script doesn't take any positional arguments")
    sys.exit(1)
#
# Set up cache size - 2 options
#   0: small cache
#   1: large cache
#
if (options.cache_size == "small"):
    options.tcp_size="256B"
    options.tcp_assoc=2
    options.tcc_size="1kB"
    options.tcc_assoc=2
elif (options.cache_size == "large"):
    options.tcp_size="256kB"
    options.tcp_assoc=16
    options.tcc_size="1024kB"
    options.tcc_assoc=16

#
# Set up system size - 3 options
#
if (options.system_size == "small"):
    # 1 CU, 1 CPU, 1 SQC, 1 Scalar
    options.wf_size = 1
    options.wavefronts_per_cu = 1
    options.num_cpus = 1
    options.num_dmas = 1
    options.cu_per_sqc = 1
    options.cu_per_scalar_cache = 1
    options.num_compute_units = 1
elif (options.system_size == "medium"):
    # 4 CUs, 4 CPUs, 1 SQCs, 1 Scalars
    options.wf_size = 16
    options.wavefronts_per_cu = 4
    options.num_cpus = 4
    options.num_dmas = 2
    options.cu_per_sqc = 4
    options.cu_per_scalar_cache = 4
    options.num_compute_units = 4
elif (options.system_size == "large"):
    # 8 CUs, 4 CPUs, 1 SQCs, 1 Scalars
    options.wf_size = 32
    options.wavefronts_per_cu = 4
    options.num_cpus = 4
    options.num_dmas = 4
    options.cu_per_sqc = 4
    options.cu_per_scalar_cache = 4
    options.num_compute_units = 8
# NOTE(review): the system-size presets above overwrite a user-supplied
# --num-dmas value — confirm this precedence is intended.

#
# Set address range - 2 options
#   level 0: small
#   level 1: large
# Each location corresponds to a 4-byte piece of data
#
options.mem_size = '1024MB'
if (options.address_range == "small"):
    num_atomic_locs = 10
    num_regular_locs_per_atomic_loc = 10000
elif (options.address_range == "large"):
    num_atomic_locs = 100
    num_regular_locs_per_atomic_loc = 100000

#
# Set episode length (# of actions per episode) - 3 options
#   0: 10 actions
#   1: 100 actions
#   2: 500 actions
#
if (options.episode_length == "short"):
    eps_length = 10
elif (options.episode_length == "medium"):
    eps_length = 100
elif (options.episode_length == "long"):
    eps_length = 500

#
# Set Ruby and tester deadlock thresholds. Ruby's deadlock detection is the
# primary check for deadlocks. The tester's deadlock threshold detection is
# a secondary check for deadlock. If there is a bug in RubyPort that causes
# a packet not to return to the tester properly, the tester will issue a
# deadlock panic. We set cache_deadlock_threshold < tester_deadlock_threshold
# to detect deadlock caused by Ruby protocol first before one caused by the
# coalescer. Both units are in Ticks
#
options.cache_deadlock_threshold = 1e8
tester_deadlock_threshold = 1e9

# For now we're testing only GPU protocol, so we force num_cpus to be 0
# (this deliberately overrides the per-system-size CPU counts set above)
options.num_cpus = 0
# Number of DMA engines
n_DMAs = options.num_dmas
# Number of CUs
n_CUs = options.num_compute_units
# Set test length, i.e., number of episodes per wavefront * #WFs.
# Test length can be 1x#WFs, 10x#WFs, 100x#WFs, ...
n_WFs = n_CUs * options.wavefronts_per_cu
max_episodes = options.test_length * n_WFs
# Number of SQC and Scalar caches: each is shared by a group of CUs, so the
# CU count must divide evenly by the group size
assert(n_CUs % options.cu_per_sqc == 0)
n_SQCs = n_CUs // options.cu_per_sqc
options.num_sqc = n_SQCs
assert(options.cu_per_scalar_cache != 0)
n_Scalars = n_CUs // options.cu_per_scalar_cache
options.num_scalar_cache = n_Scalars

#
# Create GPU Ruby random tester
#
tester = ProtocolTester(cus_per_sqc = options.cu_per_sqc,
                        cus_per_scalar = options.cu_per_scalar_cache,
                        wavefronts_per_cu = options.wavefronts_per_cu,
                        workitems_per_wavefront = options.wf_size,
                        num_atomic_locations = num_atomic_locs,
                        num_normal_locs_per_atomic = \
                            num_regular_locs_per_atomic_loc,
                        max_num_episodes = max_episodes,
                        episode_length = eps_length,
                        debug_tester = options.debug_tester,
                        random_seed = options.random_seed,
                        log_file = options.log_file)
#
# Create a gem5 system. Note that the memory object isn't actually used by the
# tester, but is included to ensure the gem5 memory size == Ruby memory size
# checks. The system doesn't have real CPUs or CUs. It just has a tester that
# has physical ports to be connected to Ruby
#
system = System(cpu = tester,
                mem_ranges = [AddrRange(options.mem_size)],
                cache_line_size = options.cacheline_size,
                mem_mode = 'timing')

system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
                                   voltage_domain = system.voltage_domain)

#
# Command processor is not needed for the tester since we don't run real
# kernels. Setting it to zero disables the VIPER protocol from creating
# a command processor and its caches.
#
options.num_cp = 0

#
# Make generic DMA sequencer for Ruby to use
#
# Bug fix: '[TesterDma()] * n_DMAs' replicated ONE TesterDma instance n times
# (every list slot aliased the same SimObject, which cannot have multiple
# parents); build a distinct device per DMA engine instead.
dma_devices = [TesterDma() for _ in range(n_DMAs)]
system.piobus = IOXBar()
# (the original enumerate() discarded its index — plain iteration suffices)
for dma_device in dma_devices:
    dma_device.pio = system.piobus.mem_side_ports
system.dma_devices = dma_devices
#
# Create the Ruby system
#
Ruby.create_system(options = options, full_system = False,
                   system = system, dma_ports = system.dma_devices)

#
# The tester is most effective when randomization is turned on and
# artificial delay is randomly inserted on messages
#
system.ruby.randomization = True

# Assert that we got the right number of Ruby ports
assert(len(system.ruby._cpu_ports) == n_CUs + n_SQCs + n_Scalars)

#
# Attach Ruby ports to the tester in the order:
#   cpu_sequencers,
#   vector_coalescers,
#   sqc_sequencers,
#   scalar_sequencers
#
# Note that this requires the protocol to create sequencers in this order
#
print("Attaching ruby ports to the tester")
for i, ruby_port in enumerate(system.ruby._cpu_ports):
    ruby_port.no_retry_on_stall = True
    ruby_port.using_ruby_tester = True
    ruby_port.mem_request_port = system.piobus.cpu_side_ports
    # The first n_CUs ports are vector ports, then SQC, then scalar ports
    if i < n_CUs:
        tester.cu_vector_ports = ruby_port.in_ports
        tester.cu_token_ports = ruby_port.gmTokenPort
        tester.max_cu_tokens = 4*n_WFs
    elif i < (n_CUs + n_SQCs):
        tester.cu_sqc_ports = ruby_port.in_ports
    else:
        tester.cu_scalar_ports = ruby_port.in_ports
    # Bug fix: removed a dead 'i += 1' that ended the loop body — enumerate()
    # rebinds i on every iteration, so the increment had no effect and only
    # misled readers into thinking i was manually maintained.
#
# Attach DMA ports. Since Ruby.py doesn't return these they need to be found.
# Connect tester's request port to each DMA sequencer's in_ports. This assumes
# the protocol names these system.dma_cntrl<#>.
#
dma_ports = []
for i in range(n_DMAs):
    dma_cntrl = getattr(system, 'dma_cntrl' + str(i))
    dma_ports.append(dma_cntrl.dma_sequencer.in_ports)
tester.dma_ports = dma_ports

#
# Common variables for all types of threads
#
thread_clock = SrcClockDomain(clock = '1GHz',
                              voltage_domain = system.voltage_domain)
g_thread_idx = 0  # global thread id, shared across CPU/DMA/GPU threads

#
# No CPU threads are used for GPU tester
#
tester.cpu_threads = []

#
# Create DMA threads
#
dma_threads = []
print("Creating %i DMAs" % n_DMAs)
for dma_idx in range(n_DMAs):
    dma_threads.append(DmaThread(thread_id = g_thread_idx,
                                 num_lanes = 1, clk_domain = thread_clock,
                                 deadlock_threshold = \
                                     tester_deadlock_threshold))
    g_thread_idx += 1
tester.dma_threads = dma_threads
#
# Create GPU wavefronts
#
wavefronts = []
print("Creating %i WFs attached to %i CUs" % \
(n_CUs * tester.wavefronts_per_cu, n_CUs))
for cu_idx in range(n_CUs):
for wf_idx in range(tester.wavefronts_per_cu):
wavefronts.append(GpuWavefront(thread_id = g_thread_idx,
cu_id = cu_idx,
num_lanes = options.wf_size,
clk_domain = thread_clock,
deadlock_threshold = \
tester_deadlock_threshold))
g_thread_idx += | |
<filename>ptmcmc.py
import numpy as np
from scipy import interpolate
import matplotlib
from datetime import datetime
from scipy.stats.mstats import mquantiles
import os
from astropy.io import fits
import glob
from parameters import *
import sys
import argparse
from astropy.table import Table
##sys.path.insert(0, 'python')
# matplotlib.rc('text', usetex=True)
# matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}",
# r"\usepackage{color}"]
# matplotlib.use('Agg')
from ptemcee import Sampler as PTSampler
import matplotlib.pyplot as plt
##import corner
startTime = datetime.now()  # wall-clock start time (for runtime reporting)

# Command-line interface: input spectrum file, output directory, target list
parser = argparse.ArgumentParser(description="Calculate radial velocities and stellar parameters of WEAVE target spectra.")
parser.add_argument("--infile", type=str, required=True, help="input file", nargs=1)
parser.add_argument("--outdir", type=str, required=True, help="output directory", nargs=1)
parser.add_argument("--targlist", type=str, required=True, help="path to list of FIBREIDs or TARGIDs to be analysed. To analyse all BA stars, enter 'all'.", nargs=1)
parser.add_argument("--params", type=str, required=False, default='parameters.py', help="path to parameter file", nargs=1)
#parser.add_argument("--setups", type=str, default=None, required=True, help="input setups", nargs='*')
args = parser.parse_args()

# nargs=1 stores each value as a one-element list, hence the [0] indexing
write_directory = args.outdir[0]
target_list = args.targlist[0]
data_file = args.infile[0]

# processing (cropping, smoothing, rebinning, rotational broadening) templates if required
# (process_templates and the grid axes Teff/logg/vsini come from the
# 'from parameters import *' at the top of the file)
if process_templates==True:
    print('Beginning processing of templates')
    from PyAstronomy import pyasl
    import scipy.stats
    templatelist = np.genfromtxt(template_list_file, dtype=None, encoding=None)
    # Ensuring the number of templates to be processed matches the number of points on the grid.
    notemps = len(templatelist)
    d_size = len(Teff) * len(logg) * len(vsini)
    if notemps < d_size:
        print('ERROR: The amount of templates to be processed is less than the amount of points in the grid. Add more templates to the list or reduce the grid as necessary (see PARAMETER BOUNDARIES in parameters file).')
        sys.exit()
    if notemps > d_size:
        print('ERROR: The amount of templates to be processed is greater than the amount of points in the grid. Remove templates from the list or expand the grid as necessary (see PARAMETER BOUNDARIES in parameters file).')
        sys.exit()
def restrict_range(w, f): # to crop the templates
    """Crop wavelength/flux arrays to the configured template window."""
    keep = (w > template_crop_min_wav) & (w < template_crop_max_wav)
    return w[keep], f[keep]
def smooth(w, f, sig): # to smooth/broaden templates to match resolution of observed spectrum
    """Gaussian-broaden flux ``f`` on wavelength grid ``w`` with width ``sig``."""
    return pyasl.broadGaussFast(w, f, sig)
def rotbroad(w, f, vsini): # to rotationally broaden the templates
    """Apply rotational broadening (limb-darkening coefficient 0.6) for the given vsini."""
    return pyasl.rotBroad(w, f, 0.6, float(vsini))
def rebin(w, f, samp): # to rebin the templates to match the sampling of the observed spectrum
    """
    Bin-average flux ``f`` onto a coarser wavelength grid with spacing ``samp``.

    Returns the bin-centre wavelengths and the mean flux per bin.
    """
    # Fix: the bin count must be an integer — modern numpy/scipy reject a
    # float bin count, which '(w[-1] - w[0]) / samp' always produces in
    # Python 3 (the old code relied on deprecated float handling).
    nbins = int(round((w[-1] - w[0]) / samp))
    f, bin_edges, binnumber = scipy.stats.binned_statistic(w, f, statistic = 'mean', bins = nbins)
    bin_width = bin_edges[1] - bin_edges[0]
    w = bin_edges[1:] - bin_width/2
    return w, f
def process_template(w, f, sig=sigma, samp=sampling):
    """
    Smooth and/or rebin a template and write the result to the template
    directory, returning the processed (wavelength, flux) arrays.

    NOTE: relies on the loop variable ``i`` (current template path) from the
    enclosing scope for the output file name.
    """
    # Bug fix: the original tested the module-level globals ``sigma`` and
    # ``sampling`` instead of the ``sig``/``samp`` arguments, so per-call
    # overrides were silently ignored (defaults keep behavior identical).
    if sig is not None:
        f = smooth(w, f, sig)
    if samp is not None:
        w, f = rebin(w, f, samp)
    # Context manager guarantees the file is closed even on error (the
    # original also shadowed the 'file' builtin)
    with open(template_write_directory + '/'+ os.path.splitext(os.path.basename(i))[0] + '_processed', "w") as out:
        for index in range(len(w)):
            out.write(str(w[index]) + " " + str(f[index]) + "\n")
    return w, f
def process_template_vsini(w, f, vsini, sig=sigma, samp=sampling):
    """
    Rotationally broaden a template to ``vsini``, then smooth/rebin it and
    write the result out, returning the processed (wavelength, flux) arrays.

    NOTE: relies on the loop variable ``i`` (current template path) from the
    enclosing scope for the output file name.
    """
    f = rotbroad(w, f, vsini)
    # Bug fix: test the arguments, not the module-level globals (see
    # process_template for details)
    if sig is not None:
        f = smooth(w, f, sig)
    if samp is not None:
        w, f = rebin(w, f, samp)
    with open(template_write_directory + '/' + os.path.splitext(os.path.basename(i))[0] + '_processed_vsini' + str(int(vsini)), "w") as out:
        for index in range(len(w)):
            out.write(str(w[index]) + " " + str(f[index]) + "\n")
    return w, f
def write_to_grid(templatename, f, t_ind=temp_ind, l_ind=logg_ind, v_ind=vsini_ind, vsini0=None):
    """
    Write processed flux ``f`` into the template grid ``d`` at the point
    given by the Teff/logg/vsini values encoded in the template file name.

    ``t_ind``/``l_ind``/``v_ind`` are (start, stop) character ranges locating
    each value inside the name.  ``vsini0`` overrides the parsed vsini (used
    when the broadening is applied in code rather than baked into the file).
    """
    # Bug fix: the index-range parameters were accepted but ignored — the
    # body read the module-level globals directly, so passing custom ranges
    # had no effect (defaults keep behavior identical for existing callers).
    t1 = float(templatename[t_ind[0]:t_ind[1]])
    l1 = float(templatename[l_ind[0]:l_ind[1]])
    if vsini0 is None:
        v1 = float(templatename[v_ind[0]:v_ind[1]])
    else:
        v1 = float(vsini0)
    print('Teff: ' + str(t1) + ', logg: ' + str(l1) + ', vsini: ' + str(v1))
    d[np.where(Teff == t1)[0][0], np.where(logg == l1)[0][0], np.where(vsini == v1)[0][0]] = f
# Reference template: process the first file to fix the length of the
# wavelength/flux axis of the grid
t_wavelength_0, t_flux_0 = np.genfromtxt(templatelist[0], unpack=True, usecols=(wav_ind, flux_ind), dtype=None, encoding=None)
t_wavelength_0, t_flux_0 = restrict_range(t_wavelength_0, t_flux_0)
if sampling != None:
    t_wavelength_0, t_flux_0 = rebin(t_wavelength_0, t_flux_0, sampling)

# Template grid: axes are (Teff, logg, vsini, wavelength)
d = np.zeros((len(Teff), len(logg), len(vsini), len(t_wavelength_0)))
d_filled = 0  # progress counter: number of grid points filled so far
for i in templatelist:
    print('Processing: ' + i)
    t_wavelength, t_flux = np.genfromtxt(i, unpack=True, usecols=(wav_ind, flux_ind), dtype=None, encoding=None)
    t_wavelength, t_flux = restrict_range(t_wavelength, t_flux)
    if rotbroads == None:
        # vsini is already baked into each template file
        _, t_flux = process_template(t_wavelength, t_flux, sigma, sampling)
        print('template processed, writing to grid at:')
        write_to_grid(i, t_flux)
        d_filled += 1
        print('grid is ' + str("{0:.1f}".format(float(d_filled)/float(d_size) * 100.)) + ' percent full')
    else:
        # Broaden each template to every requested vsini in code
        print('processing for vsini: 0')
        _, t_flux_v0 = process_template(t_wavelength, t_flux, sig=sigma, samp=sampling)
        print('template processed, writing to grid at:')
        write_to_grid(i, t_flux_v0, vsini0=0.)
        d_filled += 1
        print('grid is ' + str("{0:.1f}".format(float(d_filled)/float(d_size) * 100.)) + ' percent full')
        for j in rotbroads:
            if j == 0:
                continue  # vsini = 0 already handled above
            print('processing for vsini: ' + str(j))
            _, t_flux_vj = process_template_vsini(t_wavelength, t_flux, j, sig=sigma, samp=sampling)
            print('template processed, writing to grid at:')
            write_to_grid(i, t_flux_vj, vsini0=j)
            d_filled += 1
            print('grid is ' + str("{0:.1f}".format(float(d_filled)/float(d_size) * 100.)) + ' percent full')
np.save(template_write_directory + 'template_grid.npy', d)
# creating /plots and /results folders in write_directory if they dont already exist
if not os.path.exists(write_directory + '/plots'):
    os.mkdir(write_directory + '/plots')
    print(write_directory + 'plots folder created')
if not os.path.exists(write_directory + '/results'):
    os.mkdir(write_directory + '/results')
    print(write_directory + 'results folder created')

if process_templates==False:
    # location of folder containing this script
    templatedirectory = os.path.dirname(os.path.abspath(__file__)) + '/templates/'
    # loading the template flux grid
    flux_data_all = np.load(templatedirectory + 'template_grid.npy')
    #loading the wavelength data
    templatewavelength = np.loadtxt(templatedirectory + 'wavelength_data.dat')
else:
    # the first template for the template wavelength data
    flux_data_all = np.load(template_write_directory + 'template_grid.npy')
    templatewavelength = t_wavelength_0

# Crop the template grid to the observed band, extended by the maximum
# Doppler shift allowed by the RV grid (drv, km/s) plus a 10 A safety margin
template_min_wav = min_wav*(1.0 + (min(drv)/299792.458)) - 10.
template_max_wav = max_wav*(1.0 + (max(drv)/299792.458)) + 10.
templatemask = (templatewavelength > template_min_wav) & (templatewavelength < template_max_wav)
flux_data = np.zeros((len(Teff), len(logg), len(vsini), len(templatewavelength[templatemask])))
templatewavelength = templatewavelength[templatemask]
for ii in range(len(Teff)):
    for jj in range(len(logg)):
        for kk in range(len(vsini)):
            flux_data[ii,jj,kk] = flux_data_all[ii,jj,kk][templatemask]

# Trilinear interpolator over the (Teff, logg, vsini) grid of template fluxes
ipo = interpolate.RegularGridInterpolator((Teff, logg, vsini), flux_data, method='linear')

# grid of parameters for walker initial positions
ini_grid = [Teff, logg, vsini, drv, slopes, intercepts]
ndim=6

# define edges of parameter space
teffmin, teffmax, loggmin, loggmax, vsinimin, vsinimax, rvmin, rvmax, slopemin, slopemax, interceptmin, interceptmax = min(Teff), max(Teff), min(logg), max(logg), min(vsini), max(vsini), min(drv), max(drv), min(slopes), max(slopes), min(intercepts), max(intercepts)
def model(X, wavelength):
    """Evaluate the template model for a trial parameter vector.

    Interpolates the pre-built template grid at the trial (Teff, logg,
    vsini), Doppler-shifts the template wavelength axis by the trial RV,
    and resamples the shifted template onto the observed wavelengths.
    The slope/intercept entries of X are unpacked but unused here.
    """
    teff, logg_trial, vsini_trial, rv, _slope, _intercept = X
    # Trilinear interpolation of the flux grid at the trial atmospheric parameters
    grid_flux = ipo([teff, logg_trial, vsini_trial])[0]
    # Non-relativistic Doppler shift: lambda' = lambda * (1 + v/c), c in km/s
    shifted_wavelength = templatewavelength * (1.0 + rv / 299792.458)
    resample = interpolate.interp1d(shifted_wavelength, grid_flux)
    return resample(wavelength)
def lnprior(X):
    """Flat (uniform) log-prior over the rectangular parameter box.

    Returns 0.0 when every trial parameter lies inside the template-grid
    bounds, -inf otherwise.
    """
    teff, logg_trial, vsini_trial, rv, slope, intercept = X
    inside = (
        teffmin <= teff <= teffmax
        and loggmin <= logg_trial <= loggmax
        and vsinimin <= vsini_trial <= vsinimax
        and rvmin <= rv <= rvmax
        and slopemin <= slope <= slopemax
        and interceptmin <= intercept <= interceptmax
    )
    return 0.0 if inside else -np.inf
def lnlike(X, wavelength, flux, noisespec, mask):
    """Gaussian log-likelihood of the observed spectrum for trial parameters.

    The interpolated template is multiplied by a linear continuum
    ``slope * lambda + intercept`` built from the last two parameters.
    When ``exclude_region`` is False the full spectrum contributes;
    otherwise only the samples selected by ``mask`` do.  ``PPRE``
    rescales the noise variance.
    """
    _teff, _logg, _vsini, _rv, slope, intercept = X
    continuum = np.poly1d((slope, intercept))
    fitted = model(X, wavelength) * continuum(wavelength)
    # Explicit comparison kept to match the original branch semantics exactly.
    if exclude_region == False:
        residual = flux - fitted
        noise = noisespec
    else:
        residual = flux[mask] - fitted[mask]
        noise = noisespec[mask]
    return -np.sum(residual ** 2 / (2 * PPRE * noise ** 2))
def mcmc_one(t):
print("Processing: " + t)
targ_start = datetime.now()
# checking if spectrum file exists, and if result already written
if os.path.exists(write_directory + '/results/' + os.path.splitext(os.path.basename(t))[0] + '_results'):
print('WARNING: result for '+t+' already exists, skipping')
return
# specify unnormalised calibrated flux
with fits.open(data_file) as ALLDATA:
final_spectra = ALLDATA[1].data[info['TARGID'] == t][0]
spectra_before_sky_subtraction = ALLDATA[3].data[info['TARGID'] == t][0]
Calibration_function = ALLDATA[5].data[info['TARGID'] == t][0]
try:
idx=list(ALLDATA[6].data['FIBREID']).index(t)
except:
idx=list(ALLDATA[6].data['TARGID']).index(t)
NSPEC = ALLDATA[6].data['Nspec'][idx]
FIBREID = ALLDATA[6].data['FIBREID'][idx]
CNAME = ALLDATA[6].data['CNAME'][idx]
flux = final_spectra * Calibration_function * 1.0e18
# restrict to desired wavelength range
flux = flux[targetmask]
# create corresponding noise spectrum
noisespec = np.sqrt((2.*spectra_before_sky_subtraction - final_spectra)*Calibration_function*1.0e18)
noisespec = noisespec[targetmask]
# mask for excluding a wavelength region
if exclude_region == True:
mask = np.zeros(len(wavelength), dtype=bool)
for line in lines:
mask |= (wavelength >= line[0]) & (wavelength <= line[1])
else:
mask = np.ones(len(wavelength), dtype=bool)
# choose initial walker positions
pos = [[[np.random.choice(i) for i in ini_grid] for i in range(nwalkers)] for i in range(ntemps)]
# initialise MCMC sampler
sampler = PTSampler(ntemps=ntemps, nwalkers=nwalkers, dim=ndim, logl=lnlike, logp=lnprior, Tmax=np.inf, loglargs=[wavelength, flux, noisespec, mask])
# run MCMC sampler for burn period
if progress_bar==True:
print("running burn")
pos, prob, state = sampler.run_mcmc(pos, burn, adapt=True, progress_bar=progress_bar)
# reset sampler, run MCMC sampler for run period with walkers starting at their positions at the end of burn
sampler.reset()
if progress_bar==True:
print("running runs")
sampler.run_mcmc(pos, runs, adapt=True, progress_bar=progress_bar)
samples=sampler.chain[0, :, :, :].reshape((-1, ndim))
# plot walker paths
ylabels = ['Teff', 'logg', 'vsini', 'RV', 'slope', 'intercept']
for m in range(ndim):
plt.subplot(ndim,1,m+1)
plt.plot(sampler.chain[0,:,:,m].transpose(), alpha=0.2)
plt.ylabel(ylabels[m])
plt.xlabel('Step')
plt.savefig(write_directory + '/plots/' + t + '_walkers.png', bbox_inches='tight')
plt.close()
# calculate 16th, 50th, 84th quantiles of the parameter samples
quantiles = mquantiles(samples, prob=[0.16, 0.50, 0.84], axis=0)
targetname = os.path.splitext(os.path.basename(t))[0]
acceptance_r = np.mean(sampler.acceptance_fraction)
# print acceptance fraction, should be between 0.2-0.5 for efficient sampling
print("Mean acceptance fraction: {0:.3f}"
.format(acceptance_r))
# The parameter results
Teff_r = quantiles[1][0]
Teffminus_r = quantiles[1][0] - quantiles[0][0]
Teffplus_r = quantiles[2][0] - quantiles[1][0]
logg_r = quantiles[1][1]
loggminus_r = quantiles[1][1] - quantiles[0][1]
loggplus_r = quantiles[2][1] - quantiles[1][1]
vsini_r = quantiles[1][2]
vsiniminus_r = quantiles[1][2] - quantiles[0][2]
vsiniplus_r = quantiles[2][2] - quantiles[1][2]
RV_r = quantiles[1][3]
RVminus_r = quantiles[1][3] - quantiles[0][3]
RVplus_r = quantiles[2][3] - quantiles[1][3]
slope_r = quantiles[1][4]
slopeminus_r = quantiles[1][4] - quantiles[0][4]
slopeplus_r = quantiles[2][4] - quantiles[1][4]
intercept_r = quantiles[1][5]
interceptminus_r = quantiles[1][5] - quantiles[0][5]
interceptplus_r = quantiles[2][5] - quantiles[1][5]
#fig = corner.corner(samples, quantiles=[0.16, 0.50, 0.84], labels=['Teff', 'log(g)', 'vsini', 'RV', 'slope', 'intercept'], show_titles=True, title_kwargs={"fontsize": 10}, plot_datapoints=True, plot_contours=True, auto_bars=True, data_kwargs={"alpha": 0.005})
# fig.savefig(write_directory + t + '_cornerplot.png', bbox_inches='tight')
# plt.close()
# parameters of best fit (for plotting)
Xp = Teff_r, logg_r, vsini_r, RV_r, slope_r, intercept_r
fp = np.poly1d(Xp[-2:])
fitp = fp(wavelength)
# plot spectrum and best-fit
plt.plot(wavelength, flux, wavelength, model(Xp, wavelength) * fitp)
if exclude_region == True:
for n,line in enumerate(lines):
if n == 0:
plt.vlines([line[1]], flux.min()*0.8, flux.max()*1.2, colors='r', linestyles='dashed')
elif n == (len(lines)-1):
plt.vlines([line[0]], flux.min()*0.8, flux.max()*1.2, colors='r', linestyles='dashed')
else:
plt.vlines([line[0], line[1]], flux.min()*0.8, flux.max()*1.2, colors='r', linestyles='dashed')
plt.xlim(min_wav, max_wav)
plt.ylim(flux.min()*0.8, flux.max()*1.2)
plt.xlabel(r'Wavelength ($\AA$)')
plt.ylabel('Calibrated counts')
plt.savefig(write_directory + '/plots/' + t + '_spectrum.png', bbox_inches='tight')
plt.close()
# plot | |
# -*- coding: utf-8 -*-
# %% [markdown]
# # 🚀 Snorkel Intro Tutorial: Data Labeling
# %% [markdown]
# In this tutorial, we will walk through the process of using Snorkel to build a training set for classifying YouTube comments as spam or not spam.
# The goal of this tutorial is to illustrate the basic components and concepts of Snorkel in a simple way, but also to dive into the actual process of iteratively developing real applications in Snorkel.
#
# * For an overview of Snorkel, visit [snorkel.org](https://snorkel.org)
# * You can also check out the [Snorkel API documentation](https://snorkel.readthedocs.io/)
#
# Our goal is to train a classifier over the comment data that can predict whether a comment is spam or not spam.
# We have access to a large amount of *unlabeled data* in the form of YouTube comments with some metadata.
# In order to train a classifier, we need to label our data, but doing so by hand for real world applications can be prohibitively slow and expensive, often taking person-weeks or months.
#
# We therefore turn to weak supervision using **_labeling functions (LFs)_**: noisy, programmatic rules and heuristics that assign labels to unlabeled training data.
# We'll dive into the Snorkel API and how we write labeling functions later in this tutorial, but as an example,
# we can write an LF that labels data points with `"http"` in the comment text as spam since many spam
# comments contain links:
#
# ```python
# from snorkel.labeling import labeling_function
#
# @labeling_function()
# def lf_contains_link(x):
# # Return a label of SPAM if "http" in comment text, otherwise ABSTAIN
# return SPAM if "http" in x.text.lower() else ABSTAIN
# ```
#
# The tutorial is divided into four parts:
# 1. **Loading Data**: We load a [YouTube comments dataset](http://www.dt.fee.unicamp.br/~tiago//youtubespamcollection/), originally introduced in ["TubeSpam: Comment Spam Filtering on YouTube"](https://ieeexplore.ieee.org/document/7424299/), ICMLA'15 (<NAME>, <NAME>, <NAME>).
#
# 2. **Writing Labeling Functions**: We write Python programs that take as input a data point and assign labels (or abstain) using heuristics, pattern matching, and third-party models.
#
# 3. **Combining Labeling Function Outputs with the Label Model**: We use the outputs of the labeling functions over the training set as input to the label model, which assigns probabilistic labels to the training set.
#
# 4. **Training a Classifier**: We train a classifier that can predict labels for *any* YouTube comment (not just the ones labeled by the labeling functions) using the probabilistic training labels from step 3.
# %% [markdown]
# ### Task: Spam Detection
# %% [markdown]
# We use a [YouTube comments dataset](http://www.dt.fee.unicamp.br/~tiago//youtubespamcollection/) that consists of YouTube comments from 5 videos. The task is to classify each comment as being
#
# * **`HAM`**: comments relevant to the video (even very simple ones), or
# * **`SPAM`**: irrelevant (often trying to advertise something) or inappropriate messages
#
# For example, the following comments are `SPAM`:
#
# "Subscribe to me for free Android games, apps.."
#
# "Please check out my vidios"
#
# "Subscribe to me and I'll subscribe back!!!"
#
# and these are `HAM`:
#
# "3:46 so cute!"
#
# "This looks so fun and it's a good song"
#
# "This is a weird video."
# %% [markdown]
# ### Data Splits in Snorkel
#
# We split our data into 4 sets:
# * **Training Set**: The largest split of the dataset, and the one without ground truth ("gold") labels.
# We will generate labels for these data points with weak supervision.
# * \[Optional\] **Development Set**: A small labeled subset of the training data (e.g. 100 points) to guide LF development. See note below.
# * **Validation Set**: A small labeled set used to tune hyperparameters while training the classifier.
# * **Test Set**: A labeled set for final evaluation of our classifier. This set should only be used for final evaluation, _not_ error analysis.
#
#
# While it is possible to develop labeling functions on the unlabeled training set only, users often find it more time-efficient to label a small dev set to provide a quick approximate signal on the accuracies and failure modes of their LFs (rather than scrolling through training data points and mentally assessing approximate accuracy).
# Alternatively, users sometimes will have the validation set also serve as the development set.
# Do the latter only with caution: because the labeling functions will be based on data points from the validation set, the validation set will no longer be an unbiased proxy for the test set.
# %% [markdown]
# ## 1. Loading Data
# %% [markdown]
# We load the YouTube comments dataset and create Pandas DataFrame objects for the train, validation, and test sets.
# DataFrames are extremely popular in Python data analysis workloads, and Snorkel provides native support
# for several DataFrame-like data structures, including Pandas, Dask, and PySpark.
# For more information on working with Pandas DataFrames, see the [Pandas DataFrame guide](https://pandas.pydata.org/pandas-docs/stable/getting_started/dsintro.html).
#
# Each DataFrame consists of the following fields:
# * **`author`**: Username of the comment author
# * **`data`**: Date and time the comment was posted
# * **`text`**: Raw text content of the comment
# * **`label`**: Whether the comment is `SPAM` (1), `HAM` (0), or `UNKNOWN/ABSTAIN` (-1)
# * **`video`**: Video the comment is associated with
#
# We start by loading our data.
# The `load_spam_dataset()` method downloads the raw CSV files from the internet, divides them into splits, converts them into DataFrames, and shuffles them.
# As mentioned above, the dataset contains comments from 5 of the most popular YouTube videos during a period between 2014 and 2015.
# * The first four videos' comments are combined to form the `train` set. This set has no gold labels.
# * The `dev` set is a random sample of 200 data points from the `train` set with gold labels added.
# * The fifth video is split 50/50 between a validation set (`valid`) and `test` set.
# %% [markdown] {"tags": ["md-exclude"]}
# This next cell takes care of some notebook-specific housekeeping.
# You can ignore it.
# %% {"tags": ["md-exclude"]}
# %matplotlib inline
import os

# If the notebook was launched from the repository root, move into the
# tutorial's own directory so relative paths resolve correctly.
if os.path.basename(os.getcwd()) == "snorkel-tutorials":
    os.chdir("spam")

# Silence TensorFlow's C++ logging and pin the hash seed for reproducibility.
os.environ.update({"TF_CPP_MIN_LOG_LEVEL": "3", "PYTHONHASHSEED": "0"})
# %% [markdown] {"tags": ["md-exclude"]}
# If you want to display all comment text untruncated, change `DISPLAY_ALL_TEXT` to `True` below.
# %% {"tags": ["md-exclude"]}
import pandas as pd

# Toggle to show full, untruncated comment text in DataFrame displays.
DISPLAY_ALL_TEXT = False

# A max column width of 0 means "no truncation"; otherwise clip at 50 chars.
_colwidth = 0 if DISPLAY_ALL_TEXT else 50
pd.set_option("display.max_colwidth", _colwidth)
# %% [markdown] {"tags": ["md-exclude"]}
# This next cell makes sure a spaCy English model is downloaded.
# If this is your first time downloading this model, restart the kernel after executing the next cell.
# %% {"tags": ["md-exclude"]}
# Download the spaCy english model
# ! python -m spacy download en_core_web_sm
# %%
from utils import load_spam_dataset

# Load the four splits: unlabeled train, plus labeled dev/valid/test frames.
df_train, df_dev, df_valid, df_test = load_spam_dataset()

# We pull out the label vectors for ease of use later
Y_dev = df_dev.label.values
Y_valid = df_valid.label.values
Y_test = df_test.label.values
# %% [markdown]
# Let's view 5 data points from the `dev` set.
# %%
df_dev.sample(5, random_state=3)
# %% [markdown]
# The class distribution varies slightly between `SPAM` and `HAM`, but they're approximately class-balanced.
# You can verify this by looking at the `dev` set labels.
# %%
# For clarity, we define constants to represent the class labels for spam, ham, and abstaining.
ABSTAIN = -1
HAM = 0
SPAM = 1
print(f"Dev SPAM frequency: {100 * (df_dev.label.values == SPAM).mean():.1f}%")
# %% [markdown]
# ## 2. Writing Labeling Functions (LFs)
# %% [markdown]
# ### A gentle introduction to LFs
# %% [markdown]
# **Labeling functions (LFs) help users encode domain knowledge and other supervision sources programmatically.**
#
# LFs are heuristics that take as input a data point and either assign a label to it (in this case, `HAM` or `SPAM`) or abstain (don't assign any label). Labeling functions can be *noisy*: they don't have perfect accuracy and don't have to label every data point.
# Moreover, different labeling functions can overlap (label the same data point) and even conflict (assign different labels to the same data point). This is expected, and we demonstrate how we deal with this later.
#
# Because their only requirement is that they map a data point to a label (or abstain), they can wrap a wide variety of forms of supervision. Examples include, but are not limited to:
# * *Keyword searches*: looking for specific words in | |
noisefilters = []
preconditioners = []
for offset_slice, sigmasqs in offset_slices:
nstep = offset_slice.stop - offset_slice.start
filterlen = nstep * 2 + 1
filterfreq = np.fft.rfftfreq(filterlen, self.step_length)
noisefilter = truncate(np.fft.irfft(interpolate(filterfreq, logfilter)))
noisefilters.append(noisefilter)
# Build the band-diagonal preconditioner
if self.precond_width <= 1:
# Compute C_a prior
preconditioner = truncate(np.fft.irfft(interpolate(filterfreq, logpsd)))
else:
# Compute Cholesky decomposition prior
wband = min(self.precond_width, noisefilter.size // 2)
precond_width = max(wband, min(self.precond_width, nstep))
icenter = noisefilter.size // 2
preconditioner = np.zeros([precond_width, nstep], dtype=np.float64)
preconditioner[0] = sigmasqs
preconditioner[:wband, :] += np.repeat(
noisefilter[icenter : icenter + wband, np.newaxis], nstep, 1
)
lower = True
scipy.linalg.cholesky_banded(
preconditioner, overwrite_ab=True, lower=lower, check_finite=True
)
preconditioners.append((preconditioner, lower))
return noisefilters, preconditioners
@function_timer
def get_steps(self):
    """Divide each observing interval into baseline offset steps.

    Populates ``self.offset_templates`` with one
    ``[amplitude index, observation index, detector, TOD slice, sigma^2]``
    entry per step per detector, and ``self.offset_slices`` with, for each
    observation, a dict mapping detector name to a list of
    ``(amplitude slice, [sigma^2, ...])`` tuples.  Each amplitude slice
    covers one detector within one interval; that is the domain the noise
    filter is later applied in.  ``self.namplitude`` is advanced by one
    for every registered offset.
    """
    self.offset_templates = []
    self.offset_slices = []  # slices in all observations
    for iobs, obs in enumerate(self.data.obs):
        tod = obs["tod"]
        common_flags = tod.local_common_flags(self.common_flags)
        common_flags = (common_flags & self.common_flag_mask) != 0
        if (self.intervals is not None) and (self.intervals in obs):
            intervals = obs[self.intervals]
        else:
            intervals = None
        local_intervals = tod.local_intervals(intervals)
        times = tod.local_times()
        offset_slices = {}  # slices in this observation
        for ival in local_intervals:
            length = times[ival.last] - times[ival.first]
            nbase = int(np.ceil(length / self.step_length))
            # Divide the interval into steps, allowing for irregular sampling:
            # steps are even in time, so searchsorted maps them to sample
            # indices that may be unevenly spaced.
            start_times = np.arange(nbase) * self.step_length + ival.start
            start_indices = np.searchsorted(times, start_times)
            stop_indices = np.hstack([start_indices[1:], [ival.last]])
            # (The original initialized this list twice; once is enough.)
            todslices = [
                slice(istart, istop)
                for istart, istop in zip(start_indices, stop_indices)
            ]
            for det in tod.local_dets:
                istart = self.namplitude
                sigmasqs = []
                for todslice in todslices:
                    sigmasq = self._get_sigmasq(
                        tod, det, todslice, common_flags, self.detweights[iobs][det]
                    )
                    # Register the baseline offset
                    self.offset_templates.append(
                        [self.namplitude, iobs, det, todslice, sigmasq]
                    )
                    sigmasqs.append(sigmasq)
                    self.namplitude += 1
                # Keep a record of ranges of offsets that correspond
                # to one detector and one interval.
                # This is the domain we apply the noise filter in.
                offset_slices.setdefault(det, []).append(
                    (slice(istart, self.namplitude), sigmasqs)
                )
        self.offset_slices.append(offset_slices)
    return
@function_timer
def _get_sigmasq(self, tod, det, todslice, common_flags, detweight):
""" calculate a rough estimate of the baseline variance
for diagonal preconditioner
"""
flags = tod.local_flags(det, self.flags)[todslice]
good = (flags & self.flag_mask) == 0
good[common_flags[todslice]] = False
ngood = np.sum(good)
sigmasq = 1
if detweight != 0:
sigmasq /= detweight
if ngood != 0:
sigmasq /= ngood
return sigmasq
@function_timer
def add_to_signal(self, signal, amplitudes):
    """Accumulate baseline offsets into the signal: y += F.a.

    Consecutive templates that share the same (observation, detector)
    pair are batched into a single ``add_offsets_to_signal`` call so the
    detector TOD reference is fetched only once per batch.
    """
    amps = amplitudes[self.name]
    batch_key = (None, None)  # (iobs, det) of the batch being assembled
    ref = None  # detector TOD reference of the current batch
    batch_slices = []
    batch_templates = []
    for itemplate, iobs, det, todslice, _sigmasq in self.offset_templates:
        if (iobs, det) != batch_key:
            if batch_slices:
                add_offsets_to_signal(
                    ref, batch_slices, amps, np.array(batch_templates)
                )
            batch_slices = []
            batch_templates = []
            batch_key = (iobs, det)
            ref = signal[iobs, det, :]
        batch_slices.append(todslice)
        batch_templates.append(itemplate)
    # Flush the final batch.
    if batch_slices:
        add_offsets_to_signal(ref, batch_slices, amps, np.array(batch_templates))
    return
@function_timer
def project_signal(self, signal, amplitudes):
    """Project a signal onto the offset templates: a += F^T.y.

    Mirrors :meth:`add_to_signal`: consecutive templates sharing the
    same (observation, detector) pair are batched into one call to
    ``project_signal_offsets``.
    """
    amps = amplitudes[self.name]
    batch_key = (None, None)  # (iobs, det) of the batch being assembled
    ref = None  # detector TOD reference of the current batch
    batch_slices = []
    batch_templates = []
    for itemplate, iobs, det, todslice, _sigmasq in self.offset_templates:
        if (iobs, det) != batch_key:
            if batch_slices:
                project_signal_offsets(
                    ref, batch_slices, amps, np.array(batch_templates)
                )
            batch_slices = []
            batch_templates = []
            batch_key = (iobs, det)
            ref = signal[iobs, det, :]
        batch_slices.append(todslice)
        batch_templates.append(itemplate)
    # Flush the final batch.
    if batch_slices:
        project_signal_offsets(ref, batch_slices, amps, np.array(batch_templates))
    return
@function_timer
def add_prior(self, amplitudes_in, amplitudes_out):
    """Accumulate the noise prior: a' += C_a^{-1}.a.

    A no-op unless a noise prior is in use.  Each (detector, interval)
    amplitude range is convolved with its precomputed noise filter.
    """
    if not self.use_noise_prior:
        return
    all_in = amplitudes_in[self.name]
    all_out = amplitudes_out[self.name]
    for iobs, obs in enumerate(self.data.obs):
        for det in obs["tod"].local_dets:
            pairs = zip(self.offset_slices[iobs][det], self.filters[iobs][det])
            for (offsetslice, _sigmasqs), noisefilter in pairs:
                # scipy.signal.convolve will use either `convolve` or
                # `fftconvolve` depending on the size of the inputs
                convolved = scipy.signal.convolve(
                    all_in[offsetslice], noisefilter, mode="same"
                )
                all_out[offsetslice] += convolved
    return
@function_timer
def apply_precond(self, amplitudes_in, amplitudes_out):
    """Apply the preconditioner M^{-1} to the offset amplitudes.

    With a noise prior, each (detector, interval) amplitude range is
    either convolved with the C_a kernel (``precond_width <= 1``) or
    solved against its precomputed banded Cholesky factorization.
    Without a noise prior, a diagonal preconditioner simply scales each
    amplitude by its rough variance estimate.
    """
    offset_amplitudes_in = amplitudes_in[self.name]
    offset_amplitudes_out = amplitudes_out[self.name]
    if self.use_noise_prior:
        # C_a preconditioner
        for iobs, obs in enumerate(self.data.obs):
            tod = obs["tod"]
            for det in tod.local_dets:
                slices = self.offset_slices[iobs][det]
                preconditioners = self.preconditioners[iobs][det]
                for (offsetslice, sigmasqs), preconditioner in zip(
                    slices, preconditioners
                ):
                    amps_in = offset_amplitudes_in[offsetslice]
                    if self.precond_width <= 1:
                        # Use C_a prior
                        # scipy.signal.convolve will use either `convolve` or `fftconvolve`
                        # depending on the size of the inputs
                        amps_out = scipy.signal.convolve(
                            amps_in, preconditioner, mode="same"
                        )
                    else:
                        # Use pre-computed Cholesky decomposition
                        amps_out = scipy.linalg.cho_solve_banded(
                            preconditioner,
                            amps_in,
                            overwrite_b=False,
                            check_finite=True,
                        )
                    offset_amplitudes_out[offsetslice] = amps_out
    else:
        # Diagonal preconditioner
        offset_amplitudes_out[:] = offset_amplitudes_in
        # Scale every amplitude by its baseline variance estimate.
        for itemplate, iobs, det, todslice, sigmasq in self.offset_templates:
            offset_amplitudes_out[itemplate] *= sigmasq
    return
class TemplateMatrix(TOASTMatrix):
    """Solver representation of the template matrix F.

    Maps template amplitudes to time-ordered signal (:meth:`apply`) and
    signal back to amplitudes (:meth:`apply_transpose`), and dispatches
    prior/preconditioner application to the registered templates.
    """

    def __init__(self, data, comm, templates=None):
        """Initialize the template matrix.

        Args:
            data: distributed data object whose observations supply the TOD.
            comm: communicator shared by the template amplitudes.
            templates (iterable or None): optional initial templates to
                register.  BUGFIX: the original iterated over the default
                ``None`` and raised ``TypeError``.
        """
        self.data = data
        self.comm = comm
        self.templates = []
        if templates is not None:
            for template in templates:
                self.register_template(template)
        return

    @function_timer
    def register_template(self, template):
        """Add template to the list of templates to fit."""
        self.templates.append(template)

    @function_timer
    def apply(self, amplitudes):
        """Compute and return y = F.a"""
        new_signal = self.zero_signal()
        for template in self.templates:
            template.add_to_signal(new_signal, amplitudes)
        return new_signal

    @function_timer
    def apply_transpose(self, signal):
        """Compute and return a = F^T.y"""
        new_amplitudes = self.zero_amplitudes()
        for template in self.templates:
            template.project_signal(signal, new_amplitudes)
        return new_amplitudes

    @function_timer
    def add_prior(self, amplitudes, new_amplitudes):
        """Compute a' += C_a^{-1}.a"""
        for template in self.templates:
            template.add_prior(amplitudes, new_amplitudes)
        return

    @function_timer
    def apply_precond(self, amplitudes):
        """Compute a' = M^{-1}.a"""
        new_amplitudes = self.zero_amplitudes()
        for template in self.templates:
            template.apply_precond(amplitudes, new_amplitudes)
        return new_amplitudes

    @function_timer
    def zero_amplitudes(self):
        """Return a null amplitudes object."""
        return TemplateAmplitudes(self.templates, self.comm)

    @function_timer
    def zero_signal(self):
        """Return a distributed vector of signal set to zero.

        The zero signal object will use the same TOD objects but a
        different cache prefix.
        """
        return Signal(self.data, temporary=True, init_val=0)

    @function_timer
    def clean_signal(self, signal, amplitudes, in_place=True):
        """Clean the given distributed signal vector by subtracting
        the templates multiplied by the given amplitudes.

        Args:
            signal: distributed signal vector to clean.
            amplitudes: fitted template amplitudes.
            in_place (bool): when False, operate on a copy and leave the
                input untouched.

        Returns:
            The cleaned signal (the input object itself when ``in_place``).
        """
        outsignal = signal if in_place else signal.copy()
        outsignal -= self.apply(amplitudes)
        return outsignal
class TemplateAmplitudes(TOASTVector):
    """TemplateAmplitudes objects hold local and shared template amplitudes.

    Amplitudes are stored per template name in an ordered mapping, with a
    matching per-template communicator used when reducing dot products.
    """

    def __init__(self, templates, comm):
        self.comm = comm
        self.amplitudes = OrderedDict()
        self.comms = OrderedDict()
        for template in templates:
            self.amplitudes[template.name] = np.zeros(template.namplitude)
            self.comms[template.name] = template.comm
        return

    @function_timer
    def __str__(self):
        result = "template amplitudes:"
        for name, values in self.amplitudes.items():
            result += '\n"{}" : \n{}'.format(name, values)
        return result

    @function_timer
    def dot(self, other):
        """Compute the dot product between the two amplitude vectors."""
        total = 0
        for name, values in self.amplitudes.items():
            dp = np.dot(values, other.amplitudes[name])
            comm = self.comms[name]
            if comm is not None:
                # Shared amplitudes are reduced to the template's root rank;
                # other ranks contribute zero so the final allreduce does not
                # double count them.
                dp = comm.reduce(dp, op=MPI.SUM)
                if comm.rank != 0:
                    dp = 0
            total += dp
        if self.comm is not None:
            total = self.comm.allreduce(total, op=MPI.SUM)
        return total

    @function_timer
    def __getitem__(self, key):
        return self.amplitudes[key]

    @function_timer
    def __setitem__(self, key, value):
        # BUGFIX: this previously indexed ``self.amplitudes`` with the
        # undefined name ``name``, raising NameError on any assignment.
        self.amplitudes[key][:] = value
        return

    @function_timer
    def copy(self):
        """Return an independent copy of these amplitudes."""
        new_amplitudes = TemplateAmplitudes([], self.comm)
        for name, values in self.amplitudes.items():
            new_amplitudes.amplitudes[name] = values.copy()
            new_amplitudes.comms[name] = self.comms[name]
        return new_amplitudes

    @function_timer
    def __iadd__(self, other):
        """Add the provided amplitudes (or scalar) to this one."""
        if isinstance(other, TemplateAmplitudes):
            for name, values in self.amplitudes.items():
                values += other.amplitudes[name]
        else:
            for name, values in self.amplitudes.items():
                values += other
        return self

    @function_timer
    def __isub__(self, other):
        """Subtract the provided amplitudes (or scalar) from this one."""
        if isinstance(other, TemplateAmplitudes):
            for name, values in self.amplitudes.items():
                values -= other.amplitudes[name]
        else:
            for name, values in self.amplitudes.items():
                values -= other
        return self

    @function_timer
    def __imul__(self, other):
        """Scale the amplitudes."""
        for name, values in self.amplitudes.items():
            values *= other
        return self

    @function_timer
    def __itruediv__(self, other):
        """Divide the amplitudes."""
        for name, values in self.amplitudes.items():
            values /= other
        return self
class TemplateCovariance(TOASTMatrix):
    """Placeholder for the template covariance matrix; not yet implemented."""
    def __init__(self):
        pass
class ProjectionMatrix(TOASTMatrix):
""" Projection matrix:
Z = I - P (P^T N^{-1} P)^{-1} | |
(self.shift_x, "X axis shift of input optic")
if hasattr(self, 'shift_y'):
phdu.header['SHIFTY'] = (self.shift_y, "Y axis shift of input optic")
if hasattr(self, 'rotation'):
phdu.header['ROTATION'] = (self.rotation, "Rotation of input optic, in deg")
hdul = fits.HDUList(hdus=[phdu])
if outname is not None:
phdu.writeto(outname, overwrite=True)
_log.info("Output written to " + outname)
return hdul
def get_coordinates(self, wave):
    """Return (y, x) coordinate arrays for this optic, applying any shifts.

    Starts from the supplied wave object's ``coordinates()`` output and
    then applies, in order, whichever of the attributes
    ``shift_x``, ``shift_y``, ``rotation``, ``inclination_x``,
    ``inclination_y`` are present: shift, rotate, incline.

    Shifts are given in meters for pupil optics and arcseconds for image
    optics.  Rotations and inclinations are given in degrees.
    """
    y, x = wave.coordinates()
    # Translate by subtracting the shifts, when defined.
    if hasattr(self, "shift_x"):
        x -= float(self.shift_x)
    if hasattr(self, "shift_y"):
        y -= float(self.shift_y)
    # Rotate the coordinate frame by the given angle.
    if hasattr(self, "rotation"):
        theta = np.deg2rad(self.rotation)
        cos_t, sin_t = np.cos(theta), np.sin(theta)
        x, y = cos_t * x + sin_t * y, -sin_t * x + cos_t * y
    # Inclination around the X axis rescales Y, and vice versa.
    if hasattr(self, "inclination_x"):
        y /= np.cos(np.deg2rad(self.inclination_x))
    if hasattr(self, "inclination_y"):
        x /= np.cos(np.deg2rad(self.inclination_y))
    return y, x
class ScalarTransmission(AnalyticOpticalElement):
    """A spatially uniform transmission between 0 and 1.0 in intensity.

    Acts as a null optic (empty plane) when transmission is 1.0, or as a
    perfect neutral-density filter otherwise.  Most commonly used simply
    as a null-optic placeholder.
    """

    def __init__(self, name=None, transmission=1.0, **kwargs):
        if name is None:
            if transmission == 1.0:
                name = "-empty-"
            else:
                name = "Scalar Transmission of {0}".format(transmission)
        AnalyticOpticalElement.__init__(self, name=name, **kwargs)
        self.transmission = float(transmission)
        self.wavefront_display_hint = 'intensity'

    def get_transmission(self, wave):
        """Return a constant transmission array matching the wavefront shape."""
        return np.full(wave.shape, self.transmission, dtype=_float())
class InverseTransmission(AnalyticOpticalElement):
    """ Given any arbitrary OpticalElement with transmission T(x,y)
    return the inverse transmission 1 - T(x,y)

    This is a useful ingredient in the SemiAnalyticCoronagraph algorithm.
    """

    def __init__(self, optic=None):
        """Wrap *optic* and expose its complementary transmission.

        Raises:
            ValueError: if *optic* is None or has no ``get_transmission``.
        """
        super(InverseTransmission, self).__init__()
        if optic is None or not hasattr(optic, 'get_transmission'):
            # Fixed grammar in the error message ("an valid" -> "a valid").
            raise ValueError("Need to supply a valid optic to invert!")
        self.uninverted_optic = optic
        self.name = "1 - " + optic.name
        # Mirror the wrapped optic's geometry so propagation treats this
        # element identically.
        self.planetype = optic.planetype
        self.pixelscale = optic.pixelscale
        self.oversample = optic.oversample
        # Propagate the preferred display size when the wrapped optic has one.
        if hasattr(self.uninverted_optic, '_default_display_size'):
            self._default_display_size = self.uninverted_optic._default_display_size

    @property
    def shape(self):  # override parent class shape function
        """Array shape of the wrapped (uninverted) optic."""
        return self.uninverted_optic.shape

    def get_transmission(self, wave):
        """Amplitude transmission: one minus the wrapped optic's."""
        return 1 - self.uninverted_optic.get_transmission(wave)

    def get_opd(self, wave):
        """Optical path difference, inherited unchanged from the wrapped optic."""
        return self.uninverted_optic.get_opd(wave)

    def display(self, **kwargs):
        """Dispatch display to the base class matching the wrapped optic's type."""
        if isinstance(self.uninverted_optic, AnalyticOpticalElement):
            AnalyticOpticalElement.display(self, **kwargs)
        else:
            OpticalElement.display(self, **kwargs)
# ------ Analytic Image Plane elements (coordinates in arcsec) -----
class AnalyticImagePlaneElement(AnalyticOpticalElement):
    """ Parent virtual class for AnalyticOptics which are
    dimensioned in angular units such as arcseconds, rather
    than physical length units such as meters.
    """

    def __init__(self, name='Generic image plane optic', *args, **kwargs):
        """Create an image-plane analytic optic; plane type is forced to image."""
        AnalyticOpticalElement.__init__(self, name=name, planetype=PlaneType.image, *args, **kwargs)
        self.wavefront_display_hint = 'intensity'  # preferred display for wavefronts at this plane
class BandLimitedCoronagraph(AnalyticImagePlaneElement):
""" Defines an ideal band limited coronagraph occulting mask.
Parameters
----------
name : string
Descriptive name
kind : string
Either 'circular' or 'linear'. The linear ones are custom shaped to NIRCAM's design
with flat bits on either side of the linear tapered bit.
Also includes options 'nircamcircular' and 'nircamwedge' specialized for the
JWST NIRCam occulters, including the off-axis ND acq spots and the changing
width of the wedge occulter.
sigma : float
The numerical size parameter, as specified in Krist et al. 2009 SPIE
wavelength : float
Wavelength this BLC is optimized for, only for the linear ones.
"""
allowable_kinds = ['circular', 'linear']
""" Allowable types of BLC supported by this class"""
@utils.quantity_input(wavelength=u.meter)
def __init__(self, name="unnamed BLC", kind='circular', sigma=1, wavelength=None, **kwargs):
    # See the class docstring for parameter semantics (kind, sigma, wavelength).
    AnalyticImagePlaneElement.__init__(self, name=name, **kwargs)
    self.kind = kind.lower()  # either circular or linear
    # The NIRCam-specific kinds still work but are deprecated in favor of webbpsf.
    if self.kind in ['nircamwedge', 'nircamcircular']:
        import warnings
        warnings.warn('JWST NIRCam specific functionality in poppy.BandLimitedCoron is moving to ' +
                      'webbpsf.NIRCam_BandLimitedCoron. The "nircamwedge" and "nircamcircular" options ' +
                      'in poppy will be removed in a future version of poppy.', DeprecationWarning)
    elif self.kind not in self.allowable_kinds:
        raise ValueError("Invalid value for kind of BLC: " + self.kind)
    self.sigma = float(sigma)  # size parameter. See section 2.1 of Krist et al. SPIE 2007, 2009
    if wavelength is not None:
        # NOTE(review): wavelength arrives as an astropy Quantity via
        # quantity_input — confirm float() conversion is intended here.
        self.wavelength = float(wavelength)  # wavelength, for selecting the
        # linear wedge option only
    self._default_display_size = 20. * u.arcsec  # default size for onscreen display, sized for NIRCam
def get_transmission(self, wave):
""" Compute the amplitude transmission appropriate for a BLC for some given pixel spacing
corresponding to the supplied Wavefront.
Based on the Krist et al. SPIE paper on NIRCam coronagraph design
Note that the equations in Krist et al specify the intensity transmission of the occulter,
but what we want to return here is the amplitude transmittance. That is the square root
of the intensity, of course, so the equations as implemented here all differ from those
written in Krist's SPIE paper by lacking an exponential factor of 2. Thanks to <NAME>
for pointing this out.
"""
if not isinstance(wave, BaseWavefront): # pragma: no cover
raise ValueError("BLC get_transmission must be called with a Wavefront to define the spacing")
assert (wave.planetype == PlaneType.image)
y, x = self.get_coordinates(wave)
if self.kind == 'circular':
# larger sigma implies narrower peak? TBD verify if this is correct
#
r = _r(x, y)
sigmar = self.sigma * r
sigmar.clip(np.finfo(sigmar.dtype).tiny, out=sigmar) # avoid divide by zero -> NaNs
self.transmission = (1 - (2 * scipy.special.jn(1, sigmar) / sigmar) ** 2)
self.transmission[r == 0] = 0 # special case center point (value based on L'Hopital's rule)
elif self.kind == 'nircamcircular':
# larger sigma implies narrower peak? TBD verify if this is correct
#
r = _r(x, y)
sigmar = self.sigma * r
sigmar.clip(np.finfo(sigmar.dtype).tiny, out=sigmar) # avoid divide by zero -> NaNs
self.transmission = (1 - (2 * scipy.special.jn(1, sigmar) / sigmar) ** 2)
# add in the ND squares. Note the positions are not exactly the same in the two wedges.
# See the figures in Krist et al. of how the 6 ND squares are spaced among the 5
# corongraph regions
# Also add in the opaque border of the coronagraph mask holder.
if self.sigma > 4:
# MASK210R has one in the corner and one half in the other corner
wnd = np.where(
(y > 5) &
(
((x < -5) & (x > -10)) |
((x > 7.5) & (x < 12.5))
)
)
wborder = np.where((np.abs(y) > 10) | (x < -10)) # left end of mask holder
else:
# the others have two halves on in each corner.
wnd = np.where(
(y > 5) &
(np.abs(x) > 7.5) &
(np.abs(x) < 12.5)
)
wborder = np.where(np.abs(y) > 10)
self.transmission[wnd] = np.sqrt(1e-3)
self.transmission[wborder] = 0
self.transmission[r == 0] = 0 # special case center point (value based on L'Hopital's rule)
elif self.kind == 'linear':
sigmar = self.sigma * np.abs(y)
sigmar.clip(np.finfo(sigmar.dtype).tiny, out=sigmar) # avoid divide by zero -> NaNs
self.transmission = (1 - (np.sin(sigmar) / sigmar) ** 2)
elif self.kind == 'nircamwedge':
# This is hard-coded to the wedge-plus-flat-regions shape for NIRCAM
# we want a scale factor that goes from 2 to 6 with 1/5th of it as a fixed part on
# either end
# scalefact = np.linspace(1,7, x.shape[1]).clip(2,6)
# the scale fact should depent on X coord in arcsec, scaling across a 20 arcsec FOV.
# map flat regions to 2.5 arcsec each?
# map -7.5 to 2, +7.5 to 6. slope is 4/15, offset is +9.5
scalefact = (2 + (-x + 7.5) * 4 / 15).clip(2, 6)
# scalefact *= self.sigma / 2 #;2.2513
# scalefact *= 2.2513
# scalefact.shape = (1, x.shape[1])
# This does not work - shape appears to be curved not linear.
# This is NOT a linear relationship. See calc_blc_wedge in test_poppy.
if np.abs(self.wavelength - 2.1e-6) < 0.1e-6:
polyfitcoeffs = np.array([2.01210737e-04, -7.18758337e-03, 1.12381516e-01,
-1.00877701e+00, 5.72538509e+00, -2.12943497e+01,
5.18745152e+01, -7.97815606e+01, 7.02728734e+01])
elif np.abs(self.wavelength - 4.6e-6) < 0.1e-6:
polyfitcoeffs = np.array([9.16195583e-05, -3.27354831e-03, 5.11960734e-02,
-4.59674047e-01, 2.60963397e+00, -9.70881273e+00,
2.36585911e+01, -3.63978587e+01, 3.20703511e+01])
else:
raise NotImplemented("No defined NIRCam wedge BLC mask for that wavelength? ")
sigmas = scipy.poly1d(polyfitcoeffs)(scalefact)
| |
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.db.models import Q, Count
from django.http import HttpResponse, StreamingHttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, redirect, render
from django.template import RequestContext
from django.utils.text import slugify
from forms import DataExportForm
from models import LibraryVisit, LoginForm
import csv
import json
import time
def login_user(request):
    """Authenticate via LoginForm and redirect back to the requested fragment.

    The ``hash`` POST field carries the client-side route (e.g. ``#/reports``)
    to return to after login; a missing field falls back to the site root.
    """
    form = LoginForm(request.POST or None)
    if request.POST and form.is_valid():
        user = form.login(request)
        # BUG FIX: request.POST['hash'] raised KeyError when the field was
        # absent; .get() with a default keeps the redirect working. The local
        # is also renamed so it no longer shadows the builtin hash().
        target_hash = request.POST.get('hash', '')
        if user:
            login(request, user)
            return HttpResponseRedirect('/' + target_hash)
    return render(request, 'admin/login.html', {'login_form': form})
def is_super_check(user):
    '''Check to see if user is super.'''
    # Removed a stray debug print of the flag; the check itself is unchanged.
    return user.is_superuser
@login_required(login_url='/login/')
@user_passes_test(is_super_check)
def index(request):
    # Superuser-only dashboard landing page.
    context = {}
    return render_to_response('libraryuse/dashboard.html', context)
@login_required(login_url='/login/')
def reports_index(request):
    """Redirect authenticated users to the client-side reports route."""
    # Removed an unused `context` dict left over from a render-based version.
    return redirect('/#/reports')
def chart_data(numbers, distinct, total, start, end, library):
    """Serialize aggregated visit rows into the JSON fragments streamed to
    the charting frontend.

    Each row in `numbers` is a dict with a `total` count plus one key that
    identifies the series: `visit_time` (time series), `acpl_n` (academic
    plan), `acca_i` (academic career), `dprt_n` (department) or `dvsn_n`
    (faculty division). Returns a list of string fragments which, joined,
    form one JSON document.
    """
    data = []
    visits = []
    title = ""
    data.append('{"data":[')
    for number in numbers:
        # BUG FIX: dict.has_key() was removed in Python 3; `in` works on both.
        if 'visit_time' in number:
            dt = datetime.strptime(str(number['visit_time']), '%Y-%m-%d %H:%M:%S')
            epoch = int(time.mktime(dt.timetuple()))
            # We have to add the three zeros to work with HighCharts
            # (it expects epoch milliseconds).
            visits.append('[%s000,%s]' % (epoch, number['total']))
        elif 'acpl_n' in number:
            title = "Academic Plan"
            visits.append('{"label":"%s","value":%s}' % (number['acpl_n'], number['total']))
        elif 'acca_i' in number:
            title = "Academic Career"
            visits.append('{"label":"%s","value":%s}' % (number['acca_i'], number['total']))
        elif 'dprt_n' in number:
            title = "Department"
            visits.append('{"label":"%s","value":%s}' % (number['dprt_n'], number['total']))
        elif 'dvsn_n' in number:
            title = "Faculty Division"
            visits.append('{"label":"%s","value":%s}' % (number['dvsn_n'], number['total']))
    # BUG FIX: the join belongs after the loop; appending it per iteration
    # duplicated the (growing) series and produced malformed JSON.
    data.append(', '.join(visits))
    data.append('],')
    data.append('"meta":{')
    data.append('"strt_date":["%s"],' % start)
    data.append('"end_date":["%s"],' % end)
    data.append('"library":["%s"],' % library)
    data.append('"title":["%s"]' % title)
    data.append('},')
    data.append('"distinct":["%s"],' % distinct)
    data.append('"total":["%s"],' % total)
    data.append('"queried_at":["%s"]}' % datetime.now())
    return (data)
def export(request, start, end):
    """Export every LibraryVisit between `start` and `end` as a CSV attachment."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = ('attachment; filename="libraryuse(%s).csv"') % (slugify('-'.join([start, end])))
    writer = csv.writer(response)
    visits = LibraryVisit.objects \
        .filter(visit_time__range=[start, end])
    # BUG FIX: corrected the typo "retreived" in the exported banner row.
    writer.writerow([('Data retrieved from %s to %s') % (start, end)])
    writer.writerow([''])
    # BUG FIX: column 8 was labelled 'edprt_n' but the data rows below write
    # v.dprt_n in that position; header now matches the data. Also removed an
    # unused RequestContext local.
    writer.writerow(['visit_time', 'term_number', 'location', 'prsn_c_type',
                     'prsn_e_type', 'emjo_c_clsf', 'dprt_c',
                     'dprt_n', 'dvsn_i', 'dvsn_n',
                     'empe_c_fclt_rank', 'prsn_c_type_hc',
                     'prsn_e_type_hc', 'emjo8hc_c_clsf', 'dprt8hc_c',
                     'dprt8hc_n', 'dvsn8hc_i', 'dvsn8hc_n',
                     'acca_i', 'acpr_n', 'acpl_n',
                     'stdn_e_clas', 'stdn_f_ungr', 'stdn_f_cmps_on'])
    for v in visits:
        writer.writerow([v.visit_time, v.term_number, v.location,
                         v.prsn_c_type, v.prsn_e_type, v.emjo_c_clsf,
                         v.dprt_c, v.dprt_n, v.dvsn_i, v.dvsn_n,
                         v.empe_c_fclt_rank, v.prsn_c_type_hc,
                         v.prsn_e_type_hc, v.emjo8hc_c_clsf, v.dprt8hc_c,
                         v.dprt8hc_n, v.dvsn8hc_i, v.dvsn8hc_n, v.acca_i,
                         v.acpr_n, v.acpl_n, v.stdn_e_clas, v.stdn_f_ungr,
                         v.stdn_f_cmps_on])
    return response
def get_classifications(filter_by):
    """Return the distinct values of one LibraryVisit column, for report filters.

    `filter_by` selects the column: student class, academic plan, department,
    academic career (students only) or faculty division. Any other value is
    treated as a division name and yields the departments within it.
    """
    if filter_by == 'stdn_e_clas':
        return LibraryVisit.objects \
            .values_list('stdn_e_clas', flat=True) \
            .distinct() \
            .exclude(stdn_e_clas__isnull=True) \
            .order_by('stdn_e_clas')
    elif filter_by == 'acpl_n':
        return LibraryVisit.objects \
            .values_list('acpl_n', flat=True) \
            .distinct() \
            .exclude(acpl_n__isnull=True) \
            .order_by('acpl_n')
    elif filter_by == 'dprt_n':
        return LibraryVisit.objects \
            .values_list('dprt_n', flat=True) \
            .distinct() \
            .exclude(dprt_n__isnull=True) \
            .order_by('dprt_n')
    elif filter_by == 'acca_i':
        # Careers only make sense for students (person types B and S).
        return LibraryVisit.objects \
            .values_list('acca_i', flat=True) \
            .distinct().exclude(acca_i__isnull=True) \
            .filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S')) \
            .order_by('acca_i')
    elif filter_by == 'dvsn_n':
        # Divisions only make sense for faculty (person type F).
        return LibraryVisit.objects \
            .values_list('dvsn_n', flat=True) \
            .distinct() \
            .exclude(dvsn_n__isnull=True) \
            .filter(Q(prsn_c_type = 'F')) \
            .order_by('dvsn_n')
    else:
        # Fallback: interpret filter_by as a division name and list its departments.
        return LibraryVisit.objects \
            .values_list('dprt_n', flat=True) \
            .distinct() \
            .exclude(dprt_n__isnull=True) \
            .order_by('dprt_n') \
            .filter(dvsn_n = filter_by)
#@login_required
def total_usage(request, library, person_type, start, end):
    """Stream JSON visit counts per timestamp for one library, optionally
    narrowed to students (B/S), faculty (F) or staff (E)."""
    total_count = LibraryVisit.objects.values('visit_time') \
        .annotate(total=Count('visit_time')) \
        .order_by('visit_time') \
        .filter(visit_time__range=[start, end]) \
        .filter(location=library)
    # BUG FIX: an unrecognised person_type previously left `numbers` unbound
    # and raised UnboundLocalError; unknown values now behave like 'all'.
    if person_type == 'student':
        numbers = total_count.filter(Q(prsn_c_type='B') | Q(prsn_c_type='S'))
    elif person_type == 'faculty':
        numbers = total_count.filter(prsn_c_type='F')
    elif person_type == 'staff':
        numbers = total_count.filter(prsn_c_type='E')
    else:
        numbers = total_count
    distinct = numbers.values("prsn_i_ecn").distinct().count()
    total = numbers.values("prsn_i_ecn").count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def on_off_campus(request, library, resident, start, end):
    """Stream JSON visit counts for students living on/off campus
    (`resident` is matched against stdn_f_cmps_on)."""
    numbers = LibraryVisit.objects.values('visit_time') \
        .annotate(total=Count('visit_time')) \
        .order_by('visit_time') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library) \
        .filter(stdn_f_cmps_on = resident) \
        .filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S'))
    # Headcount metrics: distinct people vs total swipes in the window.
    distinct = numbers.values("prsn_i_ecn").distinct().count()
    total = numbers.values("prsn_i_ecn").count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def student_class(request, library, classification, start, end):
    """Stream JSON visit counts for students, optionally narrowed to one
    student classification ('all' disables the narrowing)."""
    numbers = LibraryVisit.objects.values('visit_time') \
        .annotate(total=Count('visit_time')) \
        .order_by('visit_time') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library) \
        .filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S'))
    if classification != 'all':
        numbers = numbers.filter(stdn_e_clas = classification)
    distinct = numbers.values("prsn_i_ecn").distinct().count()
    total = numbers.values("prsn_i_ecn").count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def faculty_staff_class(request, library, classification, start, end):
    """Stream JSON visit counts for faculty, optionally narrowed to one
    division name ('all' disables the narrowing)."""
    numbers = LibraryVisit.objects.values('visit_time') \
        .annotate(total=Count('visit_time')) \
        .order_by('visit_time') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library) \
        .filter(Q(prsn_c_type = 'F'))
    if classification != "all":
        numbers = numbers.filter(dvsn_n = classification)
    distinct = numbers.values("prsn_i_ecn").distinct().count()
    total = numbers.values("prsn_i_ecn").count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def degree_class(request, library, classification, start, end):
    """Stream JSON visit counts across all person types, optionally narrowed
    to one academic plan ('all' disables the narrowing)."""
    numbers = LibraryVisit.objects.values('visit_time') \
        .annotate(total=Count('visit_time')) \
        .order_by('visit_time') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library) \
        .filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S') | Q(prsn_c_type = 'F') | Q(prsn_c_type = 'E'))
    if classification != "all":
        numbers = numbers.filter(acpl_n = classification)
    distinct = numbers.values("prsn_i_ecn").distinct().count()
    total = numbers.values("prsn_i_ecn").count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def career_class(request, library, classification, start, end):
    """Stream JSON visit counts across all person types, optionally narrowed
    to one academic career ('all' disables the narrowing)."""
    numbers = LibraryVisit.objects.values('visit_time') \
        .annotate(total=Count('visit_time')) \
        .order_by('visit_time') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library) \
        .filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S') | Q(prsn_c_type = 'F') | Q(prsn_c_type = 'E'))
    if classification != "all":
        numbers = numbers.filter(acca_i = classification)
    distinct = numbers.values("prsn_i_ecn").distinct().count()
    total = numbers.values("prsn_i_ecn").count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def top_academic_plan(request, library, start, end):
    """Stream JSON ranking of academic plans by student visit count."""
    numbers = LibraryVisit.objects.values('acpl_n') \
        .annotate(total=Count('acpl_n')) \
        .order_by('-total') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library) \
        .filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S'))
    distinct = numbers.values('acpl_n').distinct().count()
    total = numbers.values('acpl_n').count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def top_academic_career(request, library, start, end):
    """Stream JSON ranking of academic careers by visit count (all person types)."""
    numbers = LibraryVisit.objects.values('acca_i') \
        .annotate(total=Count('acca_i')) \
        .order_by('-total') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library) \
        .filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S') | Q(prsn_c_type = 'F') | Q(prsn_c_type = 'E'))
    distinct = numbers.values('acca_i').distinct().count()
    total = numbers.values('acca_i').count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def top_dprtn(request, library, start, end):
    """Stream JSON ranking of departments by visit count."""
    numbers = LibraryVisit.objects.values('dprt_n') \
        .annotate(total=Count('dprt_n')) \
        .order_by('-total') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library)
    distinct = numbers.values('dprt_n').distinct().count()
    total = numbers.values('dprt_n').count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def top_dprtn_type(request, library, person_type, start, end):
    """Stream JSON ranking of departments by visit count for one person type.

    Unrecognised person_type values (including 'all') leave the query unfiltered.
    """
    numbers = LibraryVisit.objects.values('dprt_n') \
        .annotate(total=Count('dprt_n')) \
        .order_by('-total') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library)
    if person_type == 'student':
        numbers = numbers.filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S'))
    elif person_type == 'faculty':
        numbers = numbers.filter(prsn_c_type = 'F')
    elif person_type == 'staff':
        numbers = numbers.filter(prsn_c_type = 'E')
    distinct = numbers.values('dprt_n').distinct().count()
    total = numbers.values('dprt_n').count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def top_division(request, library, start, end):
    """Stream JSON ranking of faculty divisions by visit count."""
    numbers = LibraryVisit.objects.values('dvsn_n') \
        .annotate(total=Count('dvsn_n')) \
        .order_by('-total') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library)
    distinct = numbers.values('dvsn_n').distinct().count()
    total = numbers.values('dvsn_n').count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def top_division_type(request, library, person_type, start, end):
    """Stream JSON ranking of divisions by visit count for one person type.

    Unrecognised person_type values (including 'all') leave the query unfiltered.
    """
    numbers = LibraryVisit.objects.values('dvsn_n') \
        .annotate(total=Count('dvsn_n')) \
        .order_by('-total') \
        .filter(visit_time__range=[start, end]) \
        .filter(location = library)
    if person_type == 'student':
        numbers = numbers.filter(Q(prsn_c_type = 'B') | Q(prsn_c_type = 'S'))
    elif person_type == 'faculty':
        numbers = numbers.filter(prsn_c_type = 'F')
    elif person_type == 'staff':
        numbers = numbers.filter(prsn_c_type = 'E')
    distinct = numbers.values('dvsn_n').distinct().count()
    total = numbers.values('dvsn_n').count()
    data = chart_data(numbers, distinct, total, start, end, library)
    return StreamingHttpResponse(data, content_type='application/json')
def calculate_dates(start, end):
    """Parse ISO `start`/`end` date strings and return a dict with the parsed
    dates plus the whole number of weeks between them."""
    start_date = datetime.strptime(start, '%Y-%m-%d').date()
    end_date = datetime.strptime(end, '%Y-%m-%d').date()
    date_delta = end_date - start_date
    # BUG FIX: `/` floors only on Python 2; `//` keeps the historical integer
    # week count under Python 3 as well (callers compare counters against it).
    weeks = date_delta.days // 7
    data = {}
    data['start_date'] = start_date
    data['end_date'] = end_date
    data['weeks'] = weeks
    return data
def int_day(dow):
    """Translate a Django/MySQL week_day number (1=Sunday .. 7=Saturday)
    into the dateutil weekday convention (0=Monday .. 6=Sunday).

    Raises KeyError for values outside 1..7, as before.
    """
    # Shifting by two and wrapping reproduces the old lookup table:
    # 1 -> 6, 2 -> 0, 3 -> 1, ... 7 -> 5.
    mapping = {mysql_dow: (mysql_dow - 2) % 7 for mysql_dow in range(1, 8)}
    return int(mapping[dow])
def alph_day(dow):
    """Translate a MySQL week_day number given as a string ('1' .. '7')
    into the English day name ('1' is Sunday).

    Raises KeyError for keys outside '1'..'7', as before.
    """
    names = ('Sunday', 'Monday', 'Tuesday', 'Wednesday',
             'Thursday', 'Friday', 'Saturday')
    return dict(zip('1234567', names))[dow]
def total_averages(request, library, start, end, start_hour, end_hour, dow):
dates = calculate_dates(start, end)
count = 0
totals = 0
while (count <= dates['weeks']):
start_time = dates['start_date']+relativedelta(weeks=+count, hour=int(start_hour), weekday=int_day(int(dow)))
end_time = dates['start_date']+relativedelta(weeks=+count, hour=int(end_hour), weekday=int_day(int(dow)))
numbers = LibraryVisit.objects \
.values('visit_time') \
.annotate(total=Count('visit_time')) \
.filter(visit_time__range=[start_time, end_time])\
.filter(visit_time__week_day = dow) \
.filter(location = library)
for number in numbers:
if number['visit_time'].hour != end_hour:
totals += number['total']
count += 1
average = totals / count
jsonp = '{'
jsonp += '"start_date":"%s",' % start
jsonp += '"end_date":"%s",' % end
jsonp += '"start_hour":"%s",' | |
given an input.
Arguments:
inp:
the input.
clip:
clips the weights to positive values if set to '+' and to
negatives if set to '-'
save_output:
Whether to save the output in the node.
Returns:
the output of the node.
"""
assert inp is not None or self.from_node[0].output is not None
inp = self.from_node[0].output if inp is None else inp
if clip is None:
weights = self.weights
elif clip == '+':
weights = tf.clip_by_value(self.weights, 0, math.inf)
elif clip == '-':
weights = tf.clip_by_value(self.weights, -math.inf, 0)
else:
raise ValueError(f'Kernel clip value {clip} not recognised')
output = weights @ inp
if save_output:
self.output = output
return output
def forward_numpy(self, inp: np.array=None, save_output=False) -> np.array:
    """
    Compute the node's output for a numpy input.

    Arguments:
        inp:
            the input.
        save_output:
            Whether to cache the result on the node.
    Returns:
        the output of the node.
    """
    # The weight matrix lives as a torch tensor; convert once and multiply.
    result = self.weights.numpy() @ inp
    if save_output is True:
        self.output = result
    return result
def transpose(self, inp: torch.tensor) -> torch.tensor:
    """
    Map an output back through the node.

    Arguments:
        inp:
            the output.
    Returns:
        the input of the node (right-multiplication by the weights).
    """
    return torch.matmul(inp, self.weights)
class Conv(Node):
def __init__(
    self,
    from_node: list,
    to_node: list,
    input_shape: tuple,
    output_shape: tuple,
    kernels: torch.tensor,
    bias: torch.tensor,
    padding: tuple,
    strides: tuple,
    config: Config,
    depth=None,
    bounds=Bounds(),
    id=None
):
    """
    Arguments:
        from_node:
            list of input nodes.
        to_node:
            list of output nodes.
        input_shape:
            shape of the input tensor to the node.
        output_shape:
            shape of the output tensor to the node.
        kernels:
            the kernels.
        bias:
            bias vector.
        padding:
            the padding.
        strides:
            the strides.
        config:
            configuration.
        depth:
            the depth of the node.
        bounds:
            the concrete bounds of the node.
    """
    # NOTE(review): Bounds() as a default argument is created once and shared
    # across calls — confirm Bounds instances are not mutated in place.
    super().__init__(
        from_node,
        to_node,
        input_shape,
        output_shape,
        config,
        depth=depth,
        bounds=bounds,
        id=id
    )
    # Shapes are either (C, H, W) or (N, C, H, W).
    assert len(input_shape) in [3, 4]
    self.kernels = kernels
    self.bias = bias
    self.padding = padding
    self.strides = strides
    if len(input_shape) == 3:
        _, self.in_height, self.in_width = input_shape
        _, self.out_height, self.out_width = output_shape
    else:
        _, _, self.in_height, self.in_width = input_shape
        _, _, self.out_height, self.out_width = output_shape
    # Kernel layout: (out_channels, in_channels, kernel_height, kernel_width).
    self.out_ch, self.in_ch, self.krn_height, self.krn_width = kernels.shape
    # Number of outputs per output channel (the spatial size of one channel).
    self.out_ch_sz = int(self.output_size / self.out_ch)
def copy(self):
    """
    Returns a duplicate of the node; tensors and topology are shared,
    the bounds are copied.
    """
    duplicate = Conv(
        self.from_node, self.to_node,
        self.input_shape, self.output_shape,
        self.kernels, self.bias,
        self.padding, self.strides,
        self.config,
        depth=self.depth,
        bounds=self.bounds.copy(),
        id=self.id
    )
    return duplicate
def numpy(self):
    """
    Returns a copy of the node whose kernels and bias are numpy arrays.
    """
    converted = Conv(
        self.from_node, self.to_node,
        self.input_shape, self.output_shape,
        self.kernels.numpy(), self.bias.numpy(),
        self.padding, self.strides,
        self.config,
        depth=self.depth,
        bounds=self.bounds,
        id=self.id
    )
    return converted
def get_milp_var_size(self):
    """
    Number of MILP variables required to encode the node — one per output.
    """
    return self.output_size
def get_bias(self, index: tuple):
    """
    Returns the bias term for the output at the given index; the bias is
    keyed by the last component of the index.
    """
    channel = index[-1]
    return self.bias[channel]
def edge_weight(self, index1: tuple, index2: tuple):
    """
    Returns the weight of the edge between output with index1 of the
    current node and output with index2 of the previous node.

    Arguments:
        index1:
            index of the output of the current node.
        index2:
            index of the output of the previous node.
    Returns:
        the weight.
    """
    # Map the previous node's spatial position into kernel coordinates,
    # accounting for stride and padding.
    height_start = index1[0] * self.strides[0] - self.padding[0]
    height = index2[0] - height_start
    width_start = index1[1] * self.strides[1] - self.padding[1]
    width = index2[1] - width_start
    # BUG FIX: removed a duplicated, unreachable `return` of the same value.
    # NOTE(review): kernels are indexed [out_ch][in_ch][h][w] elsewhere, but
    # here both leading indices come from spatial components — confirm.
    return self.kernels[index1[0]][index2[0]][height][width]
def forward(
    self,
    inp: np.array=None,
    clip=None,
    add_bias=True,
    save_output=False
) -> torch.tensor:
    """
    Computes the output of the node given an input.

    Arguments:
        inp:
            the input.
        clip:
            clips the kernels to positive values if set to '+' and to
            negatives if set to '-'
        add_bias:
            whether to add bias
        save_output:
            Whether to save the output in the node.
    Returns:
        the output of the node.
    """
    assert inp is not None or self.from_node[0].output is not None
    inp = self.from_node[0].output if inp is None else inp
    if clip is None:
        kernels = self.kernels
    elif clip == '+':
        kernels = torch.clamp(self.kernels, 0, math.inf)
    elif clip == '-':
        kernels = torch.clamp(self.kernels, -math.inf, 0)
    else:
        # BUG FIX: the message referenced an undefined name `kernel_clip`,
        # so this path raised NameError instead of the intended ValueError.
        raise ValueError(f'Kernel clip value {clip} not recognised')
    output = torch.nn.functional.conv2d(
        inp,
        kernels,
        stride=self.strides,
        padding=self.padding,
    ).flatten()
    if add_bias is True:
        # Repeat each channel's bias across its spatial outputs.
        output = output.flatten() + torch.tile(self.bias, (self.out_ch_sz, 1)).T.flatten()
    output = output.reshape(self.output_shape)
    if save_output:
        self.output = output
    return output
def forward_numpy(self, inp: np.array=None, save_output=False) -> np.array:
    """
    Computes the output of the node for a numpy input, via im2col.

    Arguments:
        inp:
            the input.
        save_output:
            Whether to save the output in the node.
    Returns:
        the output of the node.
    """
    assert inp is not None or self.from_node[0].output is not None
    if inp is None:
        inp = self.from_node[0].output
    # Pad, unroll the input into columns and express the convolution as a
    # single matrix product.
    columns = Conv.im2col(
        Conv.pad(inp, self.padding),
        (self.krn_height, self.krn_width),
        self.strides
    )
    flat_kernels = self.kernels.reshape(self.out_ch, -1).numpy()
    result = flat_kernels @ columns
    result = result.flatten() + np.tile(self.bias.numpy(), (self.out_ch_sz, 1)).T.flatten()
    result = result.reshape(self.output_shape)
    if save_output is True:
        self.output = result
    return result
def transpose(self, inp: torch.tensor) -> torch.tensor:
    """
    Computes the input to the node given an output, using the transposed
    convolution of the node's kernels.

    Arguments:
        inp:
            the output, flattened per batch row.
    Returns:
        the input of the node, flattened per batch row.
    """
    # output_padding recovers the exact input size when the forward stride
    # did not tile the input evenly.
    out_pad_height = self.in_height - (self.out_height - 1) * self.strides[0] + 2 * self.padding[0] - self.krn_height
    # BUG FIX: the width term used strides[0]/padding[0]; for asymmetric
    # strides or padding this produced the wrong (possibly invalid)
    # output_padding. It now uses the width components.
    out_pad_width = self.in_width - (self.out_width - 1) * self.strides[1] + 2 * self.padding[1] - self.krn_width
    return torch.nn.functional.conv_transpose2d(
        inp.reshape((inp.shape[0], self.out_ch, self.out_height, self.out_width)),
        self.kernels,
        stride=self.strides,
        padding=self.padding,
        output_padding=(out_pad_height, out_pad_width)
    ).reshape(inp.shape[0], - 1)
def get_non_pad_idxs(self) -> torch.tensor:
    """
    Returns:
        Indices of the original input within the padded one.
    """
    # Height and width are the two trailing dimensions in both the
    # (C, H, W) and (N, C, H, W) layouts.
    height, width = self.input_shape[-2], self.input_shape[-1]
    pad_height, pad_width = self.padding
    padded = torch.arange(
        self.get_input_padded_size(), dtype=torch.long
    ).reshape(
        self.in_ch,
        height + 2 * pad_height,
        width + 2 * pad_width
    )
    # Slice away the padding ring and flatten back to linear indices.
    return padded[:, pad_height:height + pad_height, pad_width:width + pad_width].flatten()
def get_input_padded_size(self) -> int:
    """
    Returns:
        Total number of elements in the padded input.
    """
    padded_height = self.in_height + 2 * self.padding[0]
    padded_width = self.in_width + 2 * self.padding[1]
    return padded_height * padded_width * self.in_ch
def get_input_padded_shape(self) -> tuple:
    """
    Returns the (channels, height, width) shape of the input after padding.
    """
    pad_height, pad_width = self.padding
    return (
        self.in_ch,
        self.in_height + 2 * pad_height,
        self.in_width + 2 * pad_width,
    )
@staticmethod
def compute_output_shape(in_shape: tuple, weights_shape: tuple, padding: tuple, strides: tuple) -> tuple:
    """
    Computes the output shape of a convolutional layer.

    Arguments:
        in_shape:
            shape of the input tensor to the layer, (C, H, W) or (N, C, H, W).
        weights_shape:
            shape of the kernels of the layer.
        padding:
            pair of int for the height and width of the padding.
        strides:
            pair of int for the height and width of the strides.
    Returns:
        tuple of the output shape.
    """
    assert len(in_shape) in [3, 4]
    out_ch, _, k_height, k_width = weights_shape
    # Height and width are the trailing two dimensions in either layout.
    in_height, in_width = in_shape[-2], in_shape[-1]
    out_height = int(math.floor(
        (in_height - k_height + 2 * padding[0]) / strides[0] + 1
    ))
    out_width = int(math.floor(
        (in_width - k_width + 2 * padding[1]) / strides[1] + 1
    ))
    if len(in_shape) == 3:
        return out_ch, out_height, out_width
    return in_shape[0], out_ch, out_height, out_width
@staticmethod
def pad(inp: torch.tensor, padding: tuple, values: tuple=(0,0)) -> torch.tensor:
    """
    Constant-pads the two trailing (spatial) dimensions of a matrix.

    Arguments:
        inp:
            matrix, (C, H, W) or (N, C, H, W).
        padding:
            the (height, width) padding.
        values:
            the constants to pad with.
    Returns
        padded inp (the original object when padding is (0, 0)).
    """
    assert (len(inp.shape)) in [3, 4]
    if padding == (0, 0):
        return inp
    # Leading (channel/batch) dimensions receive no padding.
    leading = ((0, 0),) * (len(inp.shape) - 2)
    return np.pad(inp, leading + (padding, padding), 'constant', constant_values=values)
@staticmethod
def im2col(matrix: torch.tensor, kernel_shape: tuple, strides: tuple, indices: bool=False) -> torch.tensor:
"""
im2col function.
Arguments:
matrix:
The matrix.
kernel_shape:
The kernel shape.
strides:
The strides of the convolution.
indices:
Whether to select indices (true) or values (false) of the matrix.
Returns:
im2col matrix
"""
assert len(matrix.shape) in [3, 4], f"{len(matrix.shape)}-D is not supported."
assert type(matrix) in [torch.Tensor, np.ndarray], f"{type(matrix)} matrices are not supported."
opr = torch if isinstance(matrix, torch.Tensor) else np
filters, | |
# Source repository: TeamICSTECHNOS/DOCoMETRe
from py4j.clientserver import ClientServer, JavaParameters, PythonParameters;
import sys;
import os;
import numpy;
import io;
import re;
import time;
class DOCoMETRe(object):
#global experiments;
global jvmMode;
def __init__(self, gateway):
    """Store the py4j gateway and create the experiments registry."""
    self.gateway = gateway
    self.experiments = dict()
    # jvmMode is a module-level flag that routes log output through the JVM.
    if jvmMode:
        gateway.jvm.System.out.println("In __init__ gateway")
def shutDownServer(self, object):
    """py4j shutdown hook; only logs when running in JVM mode.

    The parameter keeps its original name `object` (shadowing the builtin)
    to preserve the signature py4j invokes.
    """
    if jvmMode:
        self.gateway.jvm.System.out.println("In shutdown server")
def toString(self):
    """Identify this Python entry point to the Java side."""
    if jvmMode:
        self.gateway.jvm.System.out.println("In toString")
    return "This is DOCoMETRe Python Entry Point"
def loadData(self, dataType, loadName, dataFilesList, sessionsProperties):
    """Dispatch a load request by acquisition system type; only DOCOMETRE
    data is supported, anything else is silently ignored."""
    if dataType != "DOCOMETRE":
        return
    self.loadDataDocometre(loadName, dataFilesList, sessionsProperties)
def loadDataDocometre(self, loadName, dataFilesList, sessionsProperties):
    """Route a DOCoMETRe load to the reader matching the data files' extension.

    Only raw ``.samples`` files are currently readable from Python; ``.sau``
    and ``.adw`` formats are acknowledged but skipped.
    """
    if jvmMode:
        self.gateway.jvm.System.out.println("In loadDataDocometre")
    # The extension is detected by substring over the whole semicolon-joined
    # file list, matching the original behaviour.
    if ".sau" in dataFilesList:
        if jvmMode:
            self.gateway.jvm.System.out.println("For now, sau files are not handled with Python")
    elif ".samples" in dataFilesList:
        self.loadDataDocometreSAMPLES(loadName, dataFilesList, sessionsProperties)
    elif ".adw" in dataFilesList:
        if jvmMode:
            self.gateway.jvm.System.out.println("For now, ADW files are not handled with Python")
    else:
        if jvmMode:
            # BUG FIX: corrected typo "hanled" -> "handled" in the log message.
            self.gateway.jvm.System.out.println("Data files format not handled with Python")
def loadDataDocometreSAMPLES(self, loadName, dataFilesList, sessionsProperties):
if(jvmMode): self.gateway.jvm.System.out.println("In loadDataDocometreSAMPLES");
# if(jvmMode): self.gateway.jvm.System.out.println(sessionsProperties)
prefix_QN = "_DATA_FILES_NAMES_PREFIX";
baseTrialsNumber_QN = "_BASE_TRIALS_NUMBER";
createdCategories = dict();
createdChannels = list();
dataFiles = dataFilesList.split(";");
nbDataFiles = len(dataFiles);
maximumSamples = sessionsProperties["MAXIMUM_SAMPLES"];
totalTrialsNumber = sessionsProperties["TOTAL_TRIALS_NUMBER"];
for n in range(0, nbDataFiles):
segments = dataFiles[n].split(os.path.sep);
# Get session name for criteria, trial number and prefix
sessionName = segments[len(segments) - 3];
trialName = segments[len(segments) - 2];
key = os.path.dirname(os.path.abspath(dataFiles[n]))
process = sessionsProperties[key + "_PROCESS"];
# if(jvmMode): self.gateway.jvm.System.out.println("nbDataFiles : " + str(nbDataFiles) + " fichier : " + str(n+1) + " sessionName : " + sessionName + " process : " + process + " - " + dataFiles[n]);
criteria = sessionName + "." + process;
if sessionName + prefix_QN in sessionsProperties:
if sessionsProperties[sessionName + prefix_QN] != "":
criteria = sessionsProperties[sessionName + prefix_QN];
criteria = criteria + "." + sessionName + "." + process;
# if(jvmMode): self.gateway.jvm.System.out.println("nbDataFiles : " + str(nbDataFiles) + " fichier : " + str(n+1) + " critere : " + criteria);
# if(jvmMode): self.gateway.jvm.System.out.println(str(n) + " - " + dataFiles[n]);
trialNumber = trialName.split("\u00b0")[1];
system = sessionsProperties[key + "_SYSTEM"];
baseTrialsNumber = sessionsProperties[sessionName + baseTrialsNumber_QN];
trialNumber = str(int(baseTrialsNumber) + int(trialNumber));
# Get channel's name
fileName = segments[len(segments) - 1];
fileNameSegments = fileName.split('.');
channelName = fileNameSegments[0];
if sessionName + prefix_QN in sessionsProperties:
if sessionsProperties[sessionName + prefix_QN] != "":
channelName = fileNameSegments[1];
if criteria in createdCategories:
append = False;
trialsList = createdCategories[criteria];
if isinstance(trialsList, numpy.ndarray):
if int(trialNumber) not in trialsList:
append = True;
#else:
# if int(trialNumber) != trialsList:
# append = True;
if append:
trialsList = numpy.append(trialsList, int(trialNumber));
#if(jvmMode): self.gateway.jvm.System.out.println(ListConverter().convert(trialsList.tolist(), gateway._gateway_client));
else:
trialsList = numpy.array(int(trialNumber));
#if(jvmMode): self.gateway.jvm.System.out.println(trialsList);
createdCategories[criteria] = trialsList;
if channelName not in createdChannels:
createdChannels.append(channelName);
sampleFrequency = sessionsProperties[channelName + "_SF"];
self.experiments[loadName + "." + channelName + "." + "SampleFrequency"] = sampleFrequency;
self.experiments[loadName + "." + channelName + "." + "isSignal"] = "1";
self.experiments[loadName + "." + channelName + "." + "isCategory"] = "0";
self.experiments[loadName + "." + channelName + "." + "isEvent"] = "0";
self.experiments[loadName + "." + channelName + "." + "NbFeatures"] = "0";
self.experiments[loadName + "." + channelName + "." + "NbMarkersGroups"] = "0";
self.experiments[loadName + "." + channelName + "." + "Values"] = numpy.zeros((int(totalTrialsNumber), int(maximumSamples)));
self.experiments[loadName + "." + channelName + "." + "NbSamples"] = numpy.zeros(int(totalTrialsNumber));
self.experiments[loadName + "." + channelName + "." + "FrontCut"] = numpy.zeros(int(totalTrialsNumber));
self.experiments[loadName + "." + channelName + "." + "EndCut"] = numpy.zeros(int(totalTrialsNumber));
# Read data
data = numpy.fromfile(dataFiles[n], dtype="float32");
if system == "Arduino UNO":
data = data[1::2];
sizeData = len(data);
#if(jvmMode): self.gateway.jvm.System.out.println("sizeData : " + str(sizeData));
key = loadName + "." + channelName + ".NbSamples";
self.experiments[key][int(trialNumber) - 1] = sizeData;
key = loadName + "." + channelName + ".FrontCut";
self.experiments[key][int(trialNumber) - 1] = 0;
key = loadName + "." + channelName + ".EndCut";
self.experiments[key][int(trialNumber) - 1] = sizeData;
values = self.experiments[loadName + "." + channelName + ".Values"];
#if(jvmMode): self.gateway.jvm.System.out.println("size values : " + str(len(values[int(trialNumber) - 1])));
values[int(trialNumber) - 1][0:sizeData] = data;
n = 1;
for criteria in createdCategories:
values = createdCategories[criteria];
self.experiments[loadName + ".Category" + str(n) + ".Criteria"] = criteria;
self.experiments[loadName + ".Category" + str(n) + ".TrialsList"] = values;
self.experiments[loadName + ".Category" + str(n) + ".isSignal"] = "0";
self.experiments[loadName + ".Category" + str(n) + ".isCategory"] = "1";
self.experiments[loadName + ".Category" + str(n) + ".isEvent"] = "0";
n = n + 1;
def evaluate(self, expression):
    """
    Evaluate an arbitrary Python expression sent over the gateway and return
    its result rendered as a string.

    :param expression: Python source text for a single expression.
    :return: ``str()`` of the evaluated result.
    """
    # SECURITY NOTE: eval() executes arbitrary code. That is the deliberate
    # contract of this entry point, so only the trusted DOCoMETRe Java front
    # end must be able to reach it — never untrusted input.
    # if(jvmMode): self.gateway.jvm.System.out.println("Evaluate : " + expression);
    return str(eval(expression));
def unload(self, fullName):
    """
    Remove from the in-memory experiments store every entry whose key starts
    with *fullName* (interpreted as a regular-expression fragment, anchored
    at the beginning of the key).

    :param fullName: regex fragment such as ``"Experiment\\.Subject"``.
    """
    # The previous implementation built a source string and exec()'d it,
    # which re-imported `re` on every call, allowed code injection through
    # fullName, and reached through the module-level `docometre` global.
    # Filtering self.experiments directly is equivalent for the singleton
    # instance the gateway exposes.
    self.experiments = {k: v for k, v in self.experiments.items()
                        if re.search('^' + fullName, k) is None}
def getChannels(self, subjectFullName):
    """
    Return the channel names declared for a subject.

    :param subjectFullName: ``"Experiment.Subject"`` prefix. As in the
        original implementation the dots are not escaped, so they match any
        character in the keys.
    :return: comma-separated channel names, in store insertion order.
    """
    # Snapshot the store first: it may be mutated by the py4j gateway thread
    # while we scan (the original code copied for the same reason).
    snapshot = self.experiments.copy()
    # Raw strings: the old literals ("\.", "\w") were invalid string escapes
    # that only worked because Python keeps unknown escapes, and they emit
    # DeprecationWarning/SyntaxWarning on modern interpreters.
    pattern = re.compile(r"^" + subjectFullName + r"\.\w+\.isSignal$")
    channels = []
    for key in snapshot:
        if pattern.search(key):
            trimmed = re.sub(r"\.\w+$", "", key)            # drop ".isSignal"
            channels.append(re.sub(r"^\w+\.\w+\.", "", trimmed))  # drop "Exp.Subj."
    return ",".join(channels)
def getVector(self, expression, dataType, trialNumber, frontCut, endCut):
    """
    Evaluate *expression*, optionally slice out one trial, and return the
    values as raw bytes of the requested dtype.

    :param expression: Python expression yielding the values.
    :param dataType: numpy dtype code (e.g. ``"i"``, ``"f"``).
    :param trialNumber: trial index, or ``-1`` to take the data as-is.
    :param frontCut: start index of the per-trial slice.
    :param endCut: end index (exclusive) of the per-trial slice.
    :return: the data as a bytes object.
    """
    # SECURITY NOTE: eval() runs arbitrary code — gateway-trusted input only.
    data = eval(expression)
    if trialNumber > -1:
        data = data[trialNumber][frontCut:endCut]
    return numpy.array(data).astype(dataType).tobytes()
def runScript(self, code):
    """
    Execute an arbitrary block of Python statements sent by the Java side.

    :param code: Python source to execute in this method's scope.
    """
    # NOTE(review): jvmMode and gateway are module-level globals assigned in
    # the __main__ bootstrap; calling this before that bootstrap raises
    # NameError — confirm that is the intended lifecycle.
    # SECURITY NOTE: exec() runs arbitrary code by design (gateway contract).
    if(jvmMode): gateway.jvm.System.out.println("In runScript : " + code);
    exec(code);
def saveSubject(self, fullSubjectNameRegExp, dataFilesFullPath):
    """
    Serialize every experiments entry whose key matches
    *fullSubjectNameRegExp* into ``save.data`` (plus one ``data_N.numpy``
    file per ndarray) under *dataFilesFullPath*.

    The text format is the one read back by loadSubject: the short key on
    one line, then a typed value line (``str(...)``, ``int(...)``,
    ``numpy.ndarray:<file>`` or ``list.str(...)``).

    :param fullSubjectNameRegExp: regex selecting the subject's keys.
    :param dataFilesFullPath: destination directory, ending with a path
        separator (values are concatenated to it directly).
    """
    subject = {k: v for k, v in self.experiments.items()
               if re.search(fullSubjectNameRegExp, k) is not None}
    ndArrayFileNumber = 1
    # 'with' guarantees the descriptor is closed (the original leaked it on
    # any exception) and avoids shadowing the `file` builtin.
    with open(dataFilesFullPath + 'save.data', 'w') as out:
        for key, value in subject.items():
            # Strip the "Experiment.Subject." prefix. The original line was
            # corrupted ("^\<KEY>...") and raised TypeError at runtime; this
            # regex is reconstructed from loadSubject, which re-adds a
            # "<experiment>.<subject>." prefix — confirm against history.
            newKey = re.sub(r"^\w+\.\w+\.", "", key)
            if isinstance(value, str):
                out.write(newKey + '\n')
                out.write('str("' + value + '")\n')
            elif isinstance(value, int):
                out.write(newKey + '\n')
                out.write('int(' + str(value) + ')\n')
            elif isinstance(value, numpy.ndarray):
                fileName = 'data_' + str(ndArrayFileNumber) + '.numpy'
                ndArrayFileNumber += 1
                out.write(newKey + '\n')
                out.write('numpy.ndarray:' + fileName + '\n')
                # allow_pickle=False, fix_imports=False (positional, as before).
                with open(dataFilesFullPath + fileName, 'wb') as ndArrayFile:
                    numpy.save(ndArrayFile, value, False, False)
            elif isinstance(value, list) and all(isinstance(item, str) for item in value):
                out.write(newKey + '\n')
                out.write('list.str(' + ':'.join(value) + ')\n')
            else:
                # Fixed: the old non-JVM branch concatenated str + type and
                # raised TypeError; use the type name in both branches.
                message = "Type not handled : " + type(value).__name__ + " for key : " + key
                if jvmMode:
                    self.gateway.jvm.System.out.println(message)
                else:
                    print(message)
def loadSubject(self, saveFilesFullPath):
    """
    Load a subject previously written by saveSubject into self.experiments.

    :param saveFilesFullPath: path of the form
        ``.../<Experiment>/<Subject>/``; the last two directory names supply
        the ``Experiment.Subject.`` key prefix.
    """
    previousWD = os.getcwd()
    try:
        subject = dict()
        segments = saveFilesFullPath.split(os.path.sep)
        experimentName = segments[len(segments) - 3]
        subjectName = segments[len(segments) - 2]
        currentWD = previousWD + os.path.sep + experimentName + os.path.sep + subjectName
        # NOTE: chdir is process-wide state; concurrent calls would race.
        # Preserved from the original implementation.
        os.chdir(currentWD)
        prefixKey = experimentName + "." + subjectName + "."
        # 'with' closes the descriptor — the original opened save.data and
        # never closed it.
        with open('save.data', 'r') as saveFile:
            key = saveFile.readline().strip()
            while key:
                # Re-add the Experiment.Subject prefix stripped on save.
                key = prefixKey + key
                value = saveFile.readline().strip()
                if value.startswith('str(') or value.startswith('int('):
                    # SECURITY NOTE: eval of file content, as in the
                    # original; the save file must be trusted.
                    subject[key] = eval(value)
                elif value.startswith('numpy.ndarray:'):
                    fileName = value.replace('numpy.ndarray:', '')
                    with open(fileName, 'rb') as ndArrayFile:
                        subject[key] = numpy.load(ndArrayFile, None)
                elif value.startswith('list.str'):
                    value = re.sub(r"^list\.str\(", "", value)
                    value = re.sub(r"\)$", "", value)
                    subject[key] = value.split(":")
                key = saveFile.readline().strip()
        self.experiments.update(subject)
    finally:
        os.chdir(previousWD)
        # NOTE(review): placed in the cleanup path as in the flattened
        # original; jvmMode/gateway are module globals from the bootstrap.
        if jvmMode:
            self.gateway.jvm.System.out.println("WD : " + os.getcwd())
def rename(self, keyRegExp, keyReplace):
    """
    Rename every experiments key matching *keyRegExp*, substituting
    *keyReplace* for the matched portion.

    :param keyRegExp: regex selecting (and delimiting) the part to replace.
    :param keyReplace: literal replacement prefix, e.g. ``"Exp.Subj"``.
    :return: True when no key still matches the old pattern AND at least one
        key now starts with the new prefix.
    """
    # Consistency fix: operate on self.experiments like the other methods do,
    # instead of reaching through the module-level `docometre` global (they
    # are the same object for the gateway singleton).
    matching = {k: v for k, v in self.experiments.items()
                if re.search(keyRegExp, k) is not None}
    for key in matching:
        newKey = re.sub(keyRegExp, keyReplace, key)
        self.experiments[newKey] = self.experiments.pop(key)
    # Escape the dots of the literal replacement to probe for the new prefix.
    # (The original relied on re.sub leaving the unknown escape "\." intact
    # in the replacement string — str.replace is explicit and equivalent.)
    newPrefix = "^" + keyReplace.replace(".", "\\.")
    remaining = [k for k in self.experiments if re.search(keyRegExp, k) is not None]
    renamed = [k for k in self.experiments if re.search(newPrefix, k) is not None]
    return (not remaining) and bool(renamed)
def getLoadedSubjects(self):
    """
    Return the ``Experiment.Subject`` names currently loaded, joined with
    ``:``, or ``""`` when none are loaded.

    A subject counts as loaded when at least one of its channels has an
    ``isSignal`` marker key (``Exp.Subj.Chan.isSignal``).
    """
    # Consistency fix: use self.experiments like the sibling methods instead
    # of the module-level `docometre` global. Raw strings avoid the invalid
    # "\w"/"\." escape warnings of the original literals.
    subjects = set()
    for key in self.experiments:
        if re.search(r'^\w+\.\w+\.\w+\.isSignal', key) is not None:
            subjects.add(re.sub(r"\.\w+\.isSignal$", "", key))
    # sorted(): the original joined a set, so the order changed from run to
    # run; deterministic output is safer for the Java-side consumer.
    return ":".join(sorted(subjects)) if subjects else ""
class Java:
    # py4j marker class: declares the Java interface this Python object
    # implements so the ClientServer gateway can proxy Java calls onto it.
    implements = ["fr.univamu.ism.docometre.python.PythonEntryPoint"]
if __name__ == "__main__":
print("Current working folder : " + os.getcwd());
jvmMode = True;
#experiments = dict();
if(len(sys.argv) > 1):
if(sys.argv[1] == "-jvm"):
gateway = ClientServer(java_parameters = JavaParameters(), python_parameters = PythonParameters());
docometre = DOCoMETRe(gateway);
gateway.entry_point.setPythonEntryPoint(docometre);
else:
jvmMode = False;
else:
jvmMode = False;
if(not jvmMode):
print("We are not in JVM mode");
jvmMode = False;
docometre = DOCoMETRe(None);
loadName = "ReachabilityCoriolis.PreTestFull";
fileHandle = io.open("./tests/dataFilesList.txt", "r", encoding="utf-8");
dataFilesList = fileHandle.read();
fileHandle.close();
fileHandle = io.open("./tests/sessionsProperties.txt", "r", encoding="utf-8");
sessionPropertiesString = fileHandle.read();
sessionProperties = eval(sessionPropertiesString);
fileHandle.close();
docometre.loadData("DOCOMETRE", loadName, dataFilesList, sessionProperties);
# Some infos
print(docometre.experiments["ReachabilityCoriolis.PreTestFull.Category1.Criteria"]);
print(docometre.experiments["ReachabilityCoriolis.PreTestFull.Category1.isSignal"]);
print(docometre.experiments["ReachabilityCoriolis.PreTestFull.Category1.isCategory"]);
print(docometre.experiments["ReachabilityCoriolis.PreTestFull.Category1.isEvent"]);
print(docometre.experiments["ReachabilityCoriolis.PreTestFull.Category1.TrialsList"]);
print(21 in docometre.experiments["ReachabilityCoriolis.PreTestFull.Category1.TrialsList"]);
# Test if subject is loaded
filteredDictionnary = {k:v for k,v in docometre.experiments.items() if re.search("^ReachabilityCoriolis\.PreTestFull\.", k)};
testLoaded = len(filteredDictionnary) > 0;
print(testLoaded);
# Test evaluate 1
# expression = "len({k:v for k,v in docometre.experiments.items() if re.search(\"^" + "ReachabilityCoriolis\.PreTestFull" + "\.\", k)})";
#response = docometre.evaluate(expression);
# print(response);
# Test evaluate 1
# expression = "len({k:v for k,v in docometre.experiments.items() if re.search(\"^" + "ReachabilityCoriolis\.PreTestFull" + "\.\", k)}) > 0";
# response = docometre.evaluate(expression);
# print(response);
# Unload subject
#print(docometre.experiments);
#docometre.unload("ReachabilityCoriolis\.PreTestFull");
#print(docometre.experiments);
# Get channels, signals, categories or events names
keys = docometre.experiments.keys();
signals = list({k:v for k,v in docometre.experiments.items() if re.search("isSignal$", k) and v == "1"});
signals = [re.sub("\.\w+$", "", signal) for signal in signals];
signals = [re.sub("^\w+\.\w+\.", "", signal) for signal in signals];
categories = list({k:v for k,v in docometre.experiments.items() if re.search("isCategory$", k) and v == "1"});
categories = [re.sub("\.\w+$", "", category) for category in categories];
categories = [re.sub("^\w+\.\w+\.", "", category) for category in categories];
events = list({k:v for k,v in docometre.experiments.items() if re.search("isEvent$", k) and v == "1"});
events = [re.sub("\.\w+$", "", event) for event in events];
events = [re.sub("^\w+\.\w+\.", "", event) for event in events];
channels = list({k:v for k,v in docometre.experiments.items() if re.search("isSignal$", k)});
channels = [re.sub("\.\w+$", "", channel) for channel in channels];
channels = [re.sub("^\w+\.\w+\.", "", channel) for channel in channels];
# channels = signals + categories + events;
print(signals);
print(categories);
print(events);
print(channels);
channels = docometre.getChannels("ReachabilityCoriolis.PreTestFull");
print(channels);
values = docometre.getVector("docometre.experiments[\"ReachabilityCoriolis.PreTestFull.Category1.TrialsList\"]", "i", -1, -1, -1);
print(values);
nbTrials = len(docometre.experiments["ReachabilityCoriolis.PreTestFull.CAN_FrameID.Values"]);
print(nbTrials);
values = docometre.getVector("docometre.experiments[\"ReachabilityCoriolis.PreTestFull.CAN_FrameID.Values\"]", "f", 0, 0, 100);
print(values);
# Unload channel
docometre.unload("ReachabilityCoriolis\.PreTestFull\.CAN_FrameID")
channels = docometre.getChannels("ReachabilityCoriolis.PreTestFull");
print(channels);
# Save Subject
startTime = time.time();
docometre.saveSubject("^ReachabilityCoriolis\.PreTestFull", "./tests/data/")
print("Time to save subject :" + str(time.time() - startTime));
# Unload subject
docometre.unload("ReachabilityCoriolis\.PreTestFull");
print(docometre.experiments);
# Load Subject
startTime = time.time();
#os.chdir(os.getcwd() + '/scripts')
docometre.loadSubject("./tests/data/");
# print(docometre.experiments);
print("Time to load subject : " + str(time.time() - startTime));
loadedSubjects = docometre.getLoadedSubjects();
print(loadedSubjects);
# Rename
startTime = time.time();
docometre.rename("^tests\.data", "ExperimentName.SubjectName");
print("Time to rename : " + str(time.time() - startTime));
#print(docometre.experiments)
subject = {k:v for k,v in docometre.experiments.items() if re.search("^tests\.data", k) != None}
#print(subject)
# Compute mean using mask
#docometre.experiments["ExperimentName.SubjectName.CAN_Marker1_X.NbFeatures"] = NbFeatures + 1;
values = docometre.experiments["ExperimentName.SubjectName.CAN_Marker1_X.Values"];
fromValues = docometre.experiments["ExperimentName.SubjectName.CAN_Marker1_X.FrontCut"];
toValues = docometre.experiments["ExperimentName.SubjectName.CAN_Marker1_X.EndCut"];
columns = numpy.arange(values.shape[1]).reshape(-1,1);
fromValues = numpy.zeros(values.shape[0]);
#if "From_Beginning" != "fromInputSignal" : fromValues = docometre.experiments["ExperimentName.SubjectName.CAN_Marker1_X.FrontCut"];
toValues = values.shape[1]*numpy.ones(values.shape[0]);
#if "To_End" != "toInputSignal" : toValues = docometre.experiments["ExperimentName.SubjectName.CAN_Marker1_X.EndCut"];
mask = (fromValues | |
Whether this site is enabled for authentication with Bot Framework.
:param Sequence[str] trusted_origins: List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "is_v1_enabled", is_v1_enabled)
pulumi.set(__self__, "is_v3_enabled", is_v3_enabled)
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "key2", key2)
pulumi.set(__self__, "site_id", site_id)
pulumi.set(__self__, "site_name", site_name)
if is_secure_site_enabled is not None:
pulumi.set(__self__, "is_secure_site_enabled", is_secure_site_enabled)
if trusted_origins is not None:
pulumi.set(__self__, "trusted_origins", trusted_origins)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> bool:
    """
    Whether this site is enabled for DirectLine channel.
    """
    # Reads the value stored under the camelCase wire name by __init__.
    return pulumi.get(self, "is_enabled")
@property
@pulumi.getter(name="isV1Enabled")
def is_v1_enabled(self) -> bool:
    """
    Whether this site is enabled for Bot Framework V1 protocol.
    """
    return pulumi.get(self, "is_v1_enabled")
@property
@pulumi.getter(name="isV3Enabled")
def is_v3_enabled(self) -> bool:
    """
    Whether this site is enabled for Bot Framework V3 protocol.
    """
    # Docstring fixed: it previously said "V1", copy-pasted from
    # is_v1_enabled; this getter reports the V3 flag.
    return pulumi.get(self, "is_v3_enabled")
@property
@pulumi.getter
def key(self) -> str:
    """
    Primary key. Value only returned through POST to the action Channel List API, otherwise empty.
    """
    return pulumi.get(self, "key")
@property
@pulumi.getter
def key2(self) -> str:
    """
    Secondary key. Value only returned through POST to the action Channel List API, otherwise empty.
    """
    return pulumi.get(self, "key2")
@property
@pulumi.getter(name="siteId")
def site_id(self) -> str:
    """
    Site Id
    """
    return pulumi.get(self, "site_id")
@property
@pulumi.getter(name="siteName")
def site_name(self) -> str:
    """
    Site name
    """
    return pulumi.get(self, "site_name")
@property
@pulumi.getter(name="isSecureSiteEnabled")
def is_secure_site_enabled(self) -> Optional[bool]:
    """
    Whether this site is enabled for authentication with Bot Framework.
    """
    # Optional: only set by __init__ when a value was provided.
    return pulumi.get(self, "is_secure_site_enabled")
@property
@pulumi.getter(name="trustedOrigins")
def trusted_origins(self) -> Optional[Sequence[str]]:
    """
    List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
    """
    # Optional: only set by __init__ when a value was provided.
    return pulumi.get(self, "trusted_origins")
def _translate_property(self, prop):
    # Translate a camelCase wire-format name to its snake_case attribute
    # name; unknown names fall through unchanged.
    return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EmailChannelPropertiesResponse(dict):
    """
    The parameters to provide for the Email channel.
    """
    # Generated-style Pulumi output type: field storage goes through
    # pulumi.set/pulumi.get keyed on the snake_case names below.
    def __init__(__self__, *,
                 email_address: str,
                 is_enabled: bool,
                 password: str):
        """
        The parameters to provide for the Email channel.
        :param str email_address: The email address
        :param bool is_enabled: Whether this channel is enabled for the bot
        :param str password: The password for the email address. Value only returned through POST to the action Channel List API, otherwise empty.
        """
        pulumi.set(__self__, "email_address", email_address)
        pulumi.set(__self__, "is_enabled", is_enabled)
        pulumi.set(__self__, "password", password)

    @property
    @pulumi.getter(name="emailAddress")
    def email_address(self) -> str:
        """
        The email address
        """
        return pulumi.get(self, "email_address")

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> bool:
        """
        Whether this channel is enabled for the bot
        """
        return pulumi.get(self, "is_enabled")

    @property
    @pulumi.getter
    def password(self) -> str:
        """
        The password for the email address. Value only returned through POST to the action Channel List API, otherwise empty.
        """
        return pulumi.get(self, "password")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EmailChannelResponse(dict):
    """
    Email channel definition
    """
    def __init__(__self__, *,
                 channel_name: str,
                 properties: Optional['outputs.EmailChannelPropertiesResponse'] = None):
        """
        Email channel definition
        :param str channel_name: The channel name
               Expected value is 'EmailChannel'.
        :param 'EmailChannelPropertiesResponseArgs' properties: The set of properties specific to email channel resource
        """
        # The discriminator is hard-coded: channel_name is accepted for
        # signature compatibility but 'EmailChannel' is always stored.
        pulumi.set(__self__, "channel_name", 'EmailChannel')
        if properties is not None:
            pulumi.set(__self__, "properties", properties)

    @property
    @pulumi.getter(name="channelName")
    def channel_name(self) -> str:
        """
        The channel name
        Expected value is 'EmailChannel'.
        """
        return pulumi.get(self, "channel_name")

    @property
    @pulumi.getter
    def properties(self) -> Optional['outputs.EmailChannelPropertiesResponse']:
        """
        The set of properties specific to email channel resource
        """
        return pulumi.get(self, "properties")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EnterpriseChannelNodeResponse(dict):
    """
    The properties specific to an Enterprise Channel Node.
    """
    def __init__(__self__, *,
                 azure_location: str,
                 azure_sku: str,
                 id: str,
                 name: str,
                 state: Optional[str] = None):
        """
        The properties specific to an Enterprise Channel Node.
        :param str azure_location: The location of the Enterprise Channel Node.
        :param str azure_sku: The sku of the Enterprise Channel Node.
        :param str id: Id of Enterprise Channel Node. This is generated by the Bot Framework.
        :param str name: The name of the Enterprise Channel Node.
        :param str state: The current state of the Enterprise Channel Node.
        """
        pulumi.set(__self__, "azure_location", azure_location)
        pulumi.set(__self__, "azure_sku", azure_sku)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "name", name)
        # Optional field: only stored when provided.
        if state is not None:
            pulumi.set(__self__, "state", state)

    @property
    @pulumi.getter(name="azureLocation")
    def azure_location(self) -> str:
        """
        The location of the Enterprise Channel Node.
        """
        return pulumi.get(self, "azure_location")

    @property
    @pulumi.getter(name="azureSku")
    def azure_sku(self) -> str:
        """
        The sku of the Enterprise Channel Node.
        """
        return pulumi.get(self, "azure_sku")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Id of Enterprise Channel Node. This is generated by the Bot Framework.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the Enterprise Channel Node.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """
        The current state of the Enterprise Channel Node.
        """
        return pulumi.get(self, "state")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class EnterpriseChannelPropertiesResponse(dict):
    """
    The parameters to provide for the Enterprise Channel.
    """
    def __init__(__self__, *,
                 nodes: Sequence['outputs.EnterpriseChannelNodeResponse'],
                 state: Optional[str] = None):
        """
        The parameters to provide for the Enterprise Channel.
        :param Sequence['EnterpriseChannelNodeResponseArgs'] nodes: The nodes associated with the Enterprise Channel.
        :param str state: The current state of the Enterprise Channel.
        """
        pulumi.set(__self__, "nodes", nodes)
        # Optional field: only stored when provided.
        if state is not None:
            pulumi.set(__self__, "state", state)

    @property
    @pulumi.getter
    def nodes(self) -> Sequence['outputs.EnterpriseChannelNodeResponse']:
        """
        The nodes associated with the Enterprise Channel.
        """
        return pulumi.get(self, "nodes")

    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """
        The current state of the Enterprise Channel.
        """
        return pulumi.get(self, "state")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class FacebookChannelPropertiesResponse(dict):
    """
    The parameters to provide for the Facebook channel.
    """
    # Secrets (app_secret, verify_token, page access tokens) are only
    # populated by the channel List POST action; otherwise they come back
    # empty — see the per-field docstrings.
    def __init__(__self__, *,
                 app_id: str,
                 app_secret: str,
                 callback_url: str,
                 is_enabled: bool,
                 verify_token: str,
                 pages: Optional[Sequence['outputs.FacebookPageResponse']] = None):
        """
        The parameters to provide for the Facebook channel.
        :param str app_id: Facebook application id
        :param str app_secret: Facebook application secret. Value only returned through POST to the action Channel List API, otherwise empty.
        :param str callback_url: Callback Url
        :param bool is_enabled: Whether this channel is enabled for the bot
        :param str verify_token: Verify token. Value only returned through POST to the action Channel List API, otherwise empty.
        :param Sequence['FacebookPageResponseArgs'] pages: The list of Facebook pages
        """
        pulumi.set(__self__, "app_id", app_id)
        pulumi.set(__self__, "app_secret", app_secret)
        pulumi.set(__self__, "callback_url", callback_url)
        pulumi.set(__self__, "is_enabled", is_enabled)
        pulumi.set(__self__, "verify_token", verify_token)
        if pages is not None:
            pulumi.set(__self__, "pages", pages)

    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> str:
        """
        Facebook application id
        """
        return pulumi.get(self, "app_id")

    @property
    @pulumi.getter(name="appSecret")
    def app_secret(self) -> str:
        """
        Facebook application secret. Value only returned through POST to the action Channel List API, otherwise empty.
        """
        return pulumi.get(self, "app_secret")

    @property
    @pulumi.getter(name="callbackUrl")
    def callback_url(self) -> str:
        """
        Callback Url
        """
        return pulumi.get(self, "callback_url")

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> bool:
        """
        Whether this channel is enabled for the bot
        """
        return pulumi.get(self, "is_enabled")

    @property
    @pulumi.getter(name="verifyToken")
    def verify_token(self) -> str:
        """
        Verify token. Value only returned through POST to the action Channel List API, otherwise empty.
        """
        return pulumi.get(self, "verify_token")

    @property
    @pulumi.getter
    def pages(self) -> Optional[Sequence['outputs.FacebookPageResponse']]:
        """
        The list of Facebook pages
        """
        return pulumi.get(self, "pages")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class FacebookChannelResponse(dict):
    """
    Facebook channel definition
    """
    def __init__(__self__, *,
                 channel_name: str,
                 properties: Optional['outputs.FacebookChannelPropertiesResponse'] = None):
        """
        Facebook channel definition
        :param str channel_name: The channel name
               Expected value is 'FacebookChannel'.
        :param 'FacebookChannelPropertiesResponseArgs' properties: The set of properties specific to bot facebook channel
        """
        # The discriminator is hard-coded: channel_name is accepted for
        # signature compatibility but 'FacebookChannel' is always stored.
        pulumi.set(__self__, "channel_name", 'FacebookChannel')
        if properties is not None:
            pulumi.set(__self__, "properties", properties)

    @property
    @pulumi.getter(name="channelName")
    def channel_name(self) -> str:
        """
        The channel name
        Expected value is 'FacebookChannel'.
        """
        return pulumi.get(self, "channel_name")

    @property
    @pulumi.getter
    def properties(self) -> Optional['outputs.FacebookChannelPropertiesResponse']:
        """
        The set of properties specific to bot facebook channel
        """
        return pulumi.get(self, "properties")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name (identity fallback).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class FacebookPageResponse(dict):
"""
A Facebook page for Facebook channel registration
"""
def __init__(__self__, *,
             access_token: str,
             id: str):
    """
    A Facebook page for Facebook channel registration
    :param str access_token: Facebook application access token. Value only returned through POST to the action Channel List API, otherwise empty.
    :param str id: Page id
    """
    pulumi.set(__self__, "access_token", access_token)
    pulumi.set(__self__, "id", id)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> str:
    """
    Facebook application access token. Value only returned through POST to the action Channel List API, otherwise empty.
    """
    return pulumi.get(self, "access_token")
@property
@pulumi.getter
def id(self) -> str:
    """
    Page id
    """
    return pulumi.get(self, "id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or | |
status = request_status (mac)
nic1 = blade_nic_info (mac)
nics.append (response_category ("NicInfo", status = status, results = [nic1]))
if (check_success (mac)):
status = request_status (set_failure_dict ("Not Present", "Success"))
nic2 = blade_nic_info ()
nics.append (response_category ("NicInfo", status = status, results = [nic2]))
return nics
def __init__ (self, nic = None):
    """
    Initialize the blade NIC information.

    :param nic: The result of the system query for blade NIC information, or
        None/empty to build the fixed "not present" second-NIC entry.
    """
    if not nic:
        # Placeholder entry: fixed device id 2, no MAC address.
        self.id = "2"
        self.mac = None
        return
    # Device 1 when the query succeeded, device 0 otherwise.
    self.id = "1" if check_success (nic) else "0"
    self.mac = nic.get ("MAC1")
def format (self, parent):
    """
    Format the blade NIC information in the XML document.

    :param parent: The parent element that will contain the NIC information.
    :return The parent element and a status flag for the response.
    """
    for tag, value in (("deviceId", self.id), ("macAddress", self.mac)):
        add_element (parent, tag, value)
    return (parent, NO_STATUS)
class blade_default_power_state:
    """
    Response object for the blade default power state.
    """
    def __init__ (self, state):
        """
        Initialize the blade default power state response.

        :param state: The result of the system query for the blade default power state.
        """
        # "NA" when the query result carries no state entry.
        self.state = state.get ("Default Power State", "NA")
    def format (self, parent):
        """
        Format the blade default power state in the XML document.

        :param parent: The parent element that will contain the default power state.
        :return The parent element and a status flag for the response.
        """
        add_element (parent, "bladeState", self.state)
        return (parent, NO_STATUS)
class chassis_controller_info:
    """
    Response object for the chassis controller information.
    """

    def __init__ (self, info):
        """
        Initialize the controller information.

        :param info: The result of the system query for controller information.
        """
        for attr, field in (
                ("serial", "Board Serial"),
                ("asset_tag", "Product Assettag"),
                ("hw", "Board Version"),
                ("sw", "Package"),
                ("uptime", "Up Time")):
            setattr (self, attr, info.get (field, None))
        # Firmware version: "NA" when the query succeeded, None otherwise
        # (the query result itself carries no firmware entry).
        self.fw = "NA" if check_success (info) else None

    def format (self, parent):
        """
        Format the chassis controller information in the XML document.

        :param parent: The parent element that will contain the controller information.
        :return The parent element and a status flag for the response.
        """
        for tag, value in (
                ("serialNumber", self.serial),
                ("assetTag", self.asset_tag),
                ("firmwareVersion", self.fw),
                ("hardwareVersion", self.hw),
                ("softwareVersion", self.sw),
                ("systemUptime", self.uptime)):
            add_element (parent, tag, value)
        return (parent, NO_STATUS)
class chassis_network_info:
    """
    Response object for a chassis network interface.
    """

    @staticmethod
    def build_network_property (nic):
        """
        Create a response object for a single chassis network interface.

        :param nic: The result of the system query for the NIC.
        :return The NIC response object.
        """
        return response_category ("ChassisNetworkProperty",
            status = request_status (nic), results = [chassis_network_info (nic)])

    @staticmethod
    def build_network_property_collection (*nics):
        """
        Create a response object for a collection of network interfaces.

        :param nics: The results of the system queries for the NICs that should
            be in the collection.
        :return The network property collection response object.
        """
        members = [chassis_network_info.build_network_property (nic) for nic in nics]
        return response_category ("chassisNetworkPropertyCollection", results = members)

    @staticmethod
    def get_network_properties (*nics):
        """
        Create a response object for the chassis network properties.

        :param nics: The list of results from the system queries for the NICs
            that will be reported.
        :return The network properties response object.
        """
        network = chassis_network_info.build_network_property_collection (*nics)
        return response_category ("networkProperties",
            status = request_status.get_placeholder_status (), results = [network])

    def __init__ (self, info):
        """
        Initialize the network interface information.

        :param info: The result of the system query for network information.
        """
        ip4 = info.get ("IPv4Addresses", {})
        ip6 = info.get ("IPv6Addresses", {})
        self.mac = info.get ("MACAddress", None)
        self.ip4 = ip4.get ("Address", None)
        self.subnet = ip4.get ("SubnetMask", None)
        self.gateway = ip4.get ("Gateway", None)
        self.ip6 = ip6.get ("Address", None)
        self.prefix = ip6.get ("PrefixLength", None)
        self.hostname = info.get ("Hostname", None)

        # Report DHCP as the strings "true"/"false"; a missing or empty
        # origin is passed through unchanged.
        origin = ip4.get ("AddressOrigin", None)
        if origin:
            origin = "true" if (origin == "DHCP") else "false"
        self.dhcp = origin

        def combine (first, second):
            # Both present -> "first, second"; one present -> that one;
            # neither -> None.
            if first and second:
                return "{0}, {1}".format (first, second)
            return first or second or None

        self.ip = combine (self.ip4, self.ip6)
        self.mask = combine (self.subnet, self.prefix)

    def format (self, parent):
        """
        Format the chassis network interface information in the XML document.

        :param parent: The parent element that will contain the network interface information.
        :return The parent element and a status flag for the response.
        """
        add_element (parent, "macAddress", self.mac)
        add_element (parent, "ipAddress", self.ip)
        add_element (parent, "subnetMask", self.mask)
        add_element (parent, "gatewayAddress", self.gateway)
        ElementTree.SubElement (parent, "dnsAddress")
        ElementTree.SubElement (parent, "dhcpServer", {"i:nil" : "true"})
        ElementTree.SubElement (parent, "dnsDomain", {"i:nil" : "true"})
        add_element (parent, "dnsHostName", self.hostname, nil = True)
        add_element (parent, "dhcpEnabled", self.dhcp)
        return (parent, NO_STATUS)
class chassis_psu_info:
    """
    Response object for a chassis power supply.
    """

    # Maps a PSU ID to the power-feed phase reading it reports; any other ID
    # falls back to the aggregate "PowerDrawnInWatts" reading.
    _POWER_KEYS = {
        1 : "Feed1Phase1PowerInWatts",
        2 : "Feed1Phase2PowerInWatts",
        3 : "Feed1Phase3PowerInWatts",
        4 : "Feed2Phase1PowerInWatts",
        5 : "Feed2Phase2PowerInWatts",
        6 : "Feed2Phase3PowerInWatts"
    }

    @staticmethod
    def get_psu_list (psu):
        """
        Get the list of PSU information objects for the response.

        :param psu: The result of the system query for power supply information.
        :return The list of PSU information objects.
        """
        return [
            response_category ("PsuInfo", status = request_status (psu),
                results = [chassis_psu_info (psu_id, psu)])
            for psu_id in range (1, 7)]

    def __init__ (self, psu_id, psu):
        """
        Initialize the PSU information.

        :param psu_id: The ID of the PSU.
        :param psu: The result of the system query for power supply information.
        """
        self.id = str (psu_id)
        self.serial = psu.get ("Board Serial", None)
        self.state = "ON" if check_success (psu) else "NA"
        self.type = None
        reading = chassis_psu_info._POWER_KEYS.get (psu_id, "PowerDrawnInWatts")
        # -1 marks a missing reading; reported truncated to an integer.
        self.power = str (int (psu.get (reading, -1)))

    def format (self, parent):
        """
        Format the PSU information in the XML document.

        :param parent: The parent element that will contain the PSU information.
        :return The parent element and a status flag for the response.
        """
        for tag, value in (
                ("id", self.id),
                ("serialNumber", self.serial),
                ("state", self.state),
                ("powerOut", self.power),
                ("deviceType", self.type)):
            add_element (parent, tag, value)
        return (parent, NO_STATUS)
class chassis_blade_info:
"""
Response object for information about a blade in the chassis.
"""
@staticmethod
def get_blade_info (blade_id, info):
"""
Get the complete response object for chassis blade information.
:param blade_id: The slot ID for the blade.
:param info: The result of the system query for the blade information.
:return The chassis blade information response object.
"""
status = request_status (info)
blade = chassis_blade_info (blade_id, info)
mac = response_category ("bladeMacAddress", results = blade_nic_info.get_nic_list (info))
return response_category ("BladeInfo", status = status, results = [blade, mac])
@staticmethod
def get_blade_list (blades):
"""
Get the list of chassis blade information.
:param blades: The list of system query results for all blades.
:return A list of blade information response objects.
"""
blade_list = []
for blade, info in enumerate (blades, 1):
blade_list.append (chassis_blade_info.get_blade_info (blade, info))
return blade_list
def __init__ (self, blade_id, info):
"""
Initialize the chassis blade information.
:param blade_id: The slot ID for the blade.
:param info: The result of the system query for the blade information.
"""
self.id = info.get ("Slot Id", str (blade_id))
self.guid = info.get ("GUID", "00000000-0000-0000-0000-000000000000")
self.name = "BLADE{0}".format (self.id)
self.state = info.get ("Port State", "NA").upper ()
def format (self, parent):
"""
Format the chassis blade information in the XML document.
:param parent: The parent element | |
mstart >= wpos + window_radius:
anno_scaled = process_anno(
[[mstart, mend, "black"], [mend, wpos + window_radius, "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
else:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "black"], [mend, wpos + window_radius, "gray"],],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_alt = genomepredict(
sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
)
if file is not None:
genomeplot(outputs_alt, show_coordinates=True, file=file + ".alt.pdf")
else:
outputs_alt = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_alt_round,
mend,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
use_cuda=use_cuda,
)
if file is not None:
genomeplot_256Mb(
outputs_alt, show_coordinates=True, file=file + ".alt.256m.pdf",
)
return outputs_ref_l, outputs_ref_r, outputs_alt
def process_del(
    mchr,
    mstart,
    mend,
    genome,
    cmap=None,
    file=None,
    custom_models=None,
    target=True,
    show_genes=True,
    show_tracks=False,
    window_radius=16000000,
    padding_chr="chr1",
    use_cuda=True,
):
    """
    Generate multiscale genome interaction predictions for
    a deletion variant.

    Parameters
    ----------
    mchr : str
        The chromosome name of the deletion.
    mstart : int
        The start coordinate of the deletion.
    mend : int
        The end coordinate of the deletion.
    genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
        The reference genome object to extract sequence from
    cmap : optional
        Default is None. Colormap passed through to `genomeplot` for the
        32Mb visualizations (not used by the 256Mb plots).
    custom_models : list(torch.nn.Module or str) or None, optional
        Models to use instead of the default H1-ESC and HFF Orca models.
        Default is None.
    target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
        If specified as list, use this list of targets to retrieve experimental
        data (for plotting only). Default is True and will use micro-C data
        for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
        to the default models.
    file : str or None, optional
        Default is None. The output file prefix.
    show_genes : bool, optional
        Default is True. If True, generate gene annotation visualization
        file in pdf format that matches the windows of multiscale predictions.
    show_tracks : bool, optional
        Default is False. If True, generate chromatin tracks visualization
        file in pdf format that matches the windows of multiscale predictions.
    window_radius : int, optional
        Default is 16000000. The acceptable values are 16000000 which selects
        the 1-32Mb models or 128000000 which selects the 32-256Mb models.
    padding_chr : str, optional
        Default is "chr1". If window_radius is 128000000, padding is generally
        needed to fill the sequence to 256Mb. The padding sequence will be
        extracted from the padding_chr.
    use_cuda : bool, optional
        Default is True. Use CPU if False.

    Returns
    -------
    outputs_ref_l, outputs_ref_r, outputs_alt : dict, dict, dict
        Reference allele predictions zooming into the left boundary of the
        deletion,
        Reference allele predictions zooming into the right boundary of the
        deletion,
        Alternative allele predictions zooming into the deletion breakpoint.
        The returned results are in the format of dictionaries
        containing the prediction outputs and other
        retrieved information. These dictionaries can be directly used as
        input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
        details of the dictionary content.
    """
    # Length of the chromosome that contains the deletion.
    chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
    if custom_models is None:
        if window_radius == 16000000:
            models = ["h1esc", "hff"]
        elif window_radius == 128000000:
            models = ["h1esc_256m", "hff_256m"]
        else:
            raise ValueError(
                "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
            )
    else:
        models = custom_models
    if target:
        try:
            # Resolve string target names to Genomic2DFeatures objects; fall
            # back to no experimental data if a name is unknown.
            if target == True:
                if window_radius == 16000000:
                    target = ["h1esc", "hff"]
                elif window_radius == 128000000:
                    target = ["h1esc_256m", "hff_256m"]
            target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
        except KeyError:
            target = False
    # ref.l: reference allele, prediction window centered (clipped to the
    # chromosome) on the left deletion breakpoint.
    if window_radius == 16000000:
        wpos = coord_clip(mstart, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    elif window_radius == 128000000:
        # 256Mb models use the whole chromosome (rounded to 32kb) padded with
        # sequence from padding_chr up to 256Mb.
        chrlen_round = chrlen - chrlen % 32000
        wpos = 128000000
        if target:
            sequence, normmats, targets = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
        else:
            sequence, normmats = _retrieve_multi(
                [[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
                genome,
                target=target,
            )
    else:
        raise ValueError(
            "Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
        )
    # Annotate the deleted interval (truncated to the window if needed).
    if wpos + window_radius > mend:
        anno_scaled = process_anno(
            [[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
        )
    else:
        anno_scaled = process_anno(
            [[mstart, wpos + window_radius, "black"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 128000000:
        outputs_ref_l = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
    else:
        outputs_ref_l = genomepredict(
            sequence,
            mchr,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            targets=targets,
            use_cuda=use_cuda,
        )
    if file is not None:
        if window_radius == 128000000:
            genomeplot_256Mb(
                outputs_ref_l, show_coordinates=True, file=file + ".ref.l.256m.pdf",
            )
        else:
            genomeplot(
                outputs_ref_l,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                cmap=cmap,
                file=file + ".ref.l.pdf",
            )
    # ref.r: reference allele, window centered on the right deletion
    # breakpoint.  For the 256Mb models the full-chromosome sequence and
    # normmats computed in the ref.l section are reused; only the zoom
    # coordinate changes.
    if window_radius == 16000000:
        wpos = coord_clip(mend, chrlen)
        sequence = genome.get_encoding_from_coords(
            mchr, wpos - window_radius, wpos + window_radius
        )[None, :]
        if target:
            targets = [
                torch.FloatTensor(
                    t.get_feature_data(
                        mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
                    )[None, :]
                ) for t in target
            ]
        else:
            targets = None
    if wpos - window_radius < mstart:
        anno_scaled = process_anno(
            [[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
        )
    else:
        anno_scaled = process_anno(
            [[wpos - window_radius, mend, "black"]],
            base=wpos - window_radius,
            window_radius=window_radius,
        )
    if window_radius == 16000000:
        outputs_ref_r = genomepredict(
            sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot(
                outputs_ref_r,
                show_genes=show_genes,
                show_tracks=show_tracks,
                show_coordinates=True,
                cmap=cmap,
                file=file + ".ref.r.pdf",
            )
    else:
        outputs_ref_r = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_round,
            mend,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            targets=targets,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_ref_r, show_coordinates=True, file=file + ".ref.r.256m.pdf",
            )
    # alt: apply the deletion to the coordinate system and predict on the
    # alternative allele, zooming into the fused breakpoint.
    s = StructuralChange2(mchr, chrlen)
    s.delete(mstart, mend)
    chrlen_alt = chrlen - (mend - mstart)
    if window_radius == 16000000:
        wpos = coord_clip(mstart, chrlen_alt)
        sequence = []
        # Stitch together the window sequence from the (possibly multiple)
        # reference segments that map into the post-deletion coordinates.
        for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
            seq = genome.get_encoding_from_coords(chrm, start, end)
            if strand == "-":
                seq = seq[None, ::-1, ::-1]
            else:
                seq = seq[None, :, :]
            sequence.append(seq)
        sequence = np.concatenate(sequence, axis=1)
    else:
        chrlen_alt_round = chrlen_alt - chrlen_alt % 32000
        wpos = 128000000
        (sequence, normmats) = _retrieve_multi(
            list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
            genome,
            target=False,
            normmat=True,
            normmat_regionlist=[
                [mchr, 0, chrlen_alt_round, "+"],
                [padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
            ],
        )
    # Mark the fused breakpoint with a "double" annotation.
    anno_scaled = process_anno(
        [[mstart, "double"]], base=wpos - window_radius, window_radius=window_radius
    )
    if window_radius == 16000000:
        outputs_alt = genomepredict(
            sequence, mchr, mstart, wpos, models=models, annotation=anno_scaled, use_cuda=use_cuda
        )
        if file is not None:
            genomeplot(outputs_alt, show_coordinates=True, cmap=cmap, file=file + ".alt.pdf")
    else:
        outputs_alt = genomepredict_256Mb(
            sequence,
            mchr,
            normmats,
            chrlen_alt_round,
            mstart,
            wpos,
            models=models,
            annotation=anno_scaled,
            padding_chr=padding_chr,
            use_cuda=use_cuda,
        )
        if file is not None:
            genomeplot_256Mb(
                outputs_alt, show_coordinates=True, file=file + ".alt.256m.pdf",
            )
    return outputs_ref_l, outputs_ref_r, outputs_alt
def process_inv(
mchr,
mstart,
mend,
genome,
file=None,
custom_models=None,
target=True,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
Generate multiscale genome interaction predictions for
an inversion variant.
Parameters
----------
mchr : str
The chromosome name of the first segment
mstart : int
The start coordinate of the inversion.
mend : ind
The end coordinate of the inversion.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius | |
iiiIIIII1iIi
if 8 - 8: o0oOOo0O0Ooo * OoO0O00 % IiII / OoooooooOO * ooOoO0o - i11iIiiIii
if 14 - 14: Oo0Ooo . iII111i
if 50 - 50: iIii1I11I1II1
if 48 - 48: Ii1I - o0oOOo0O0Ooo - Oo0Ooo . iIii1I11I1II1
if 1 - 1: i1IIi % OoooooooOO
if 30 - 30: ooOoO0o % I11i
if 4 - 4: oO0o / OoO0O00
if 90 - 90: I11i . IiII / OoO0O00 . IiII
OoO0OOoooooOO = ( IIII and IIII . accept_more_specifics )
if ( OoO0OOoooooOO ) :
i1iIIiii = lisp_site_eid ( IIII . site )
i1iIIiii . dynamic = True
i1iIIiii . eid . copy_address ( I111IoOo0oOOO0o . eid )
i1iIIiii . group . copy_address ( I111IoOo0oOOO0o . group )
i1iIIiii . parent_for_more_specifics = IIII
i1iIIiii . add_cache ( )
i1iIIiii . inherit_from_ams_parent ( )
IIII . more_specific_registrations . append ( i1iIIiii )
IIII = i1iIIiii
else :
IIII = lisp_site_eid_lookup ( I111IoOo0oOOO0o . eid , I111IoOo0oOOO0o . group ,
True )
if 2 - 2: I11i + I1IiiI . IiII . OoOoOO00 * oO0o - ooOoO0o
if 29 - 29: OoO0O00
oo0ooooO = I111IoOo0oOOO0o . print_eid_tuple ( )
if 78 - 78: iII111i * ooOoO0o + O0 % ooOoO0o + OoO0O00
if ( IIII == None ) :
oOo00 = bold ( "Site not found" , False )
lprint ( " {} for EID {}{}" . format ( oOo00 , green ( oo0ooooO , False ) ,
", matched non-ams {}" . format ( green ( o0OOoOOoo0oo0 , False ) if o0OOoOOoo0oo0 else "" ) ) )
if 41 - 41: II111iiii . oO0o + O0 % i1IIi . Ii1I
if 90 - 90: ooOoO0o * I1IiiI / II111iiii % Oo0Ooo % OoooooooOO
if 78 - 78: OoooooooOO . IiII
if 55 - 55: I11i / I1ii11iIi11i * O0 + IiII % I11i
if 69 - 69: o0oOOo0O0Ooo % iIii1I11I1II1 . OoooooooOO - ooOoO0o
packet = i1iIiII . end_of_rlocs ( packet , I111IoOo0oOOO0o . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
if 94 - 94: iIii1I11I1II1 / Oo0Ooo % IiII * IiII
continue
if 62 - 62: I11i . IiII - OOooOOo - I1Ii111 / OoooooooOO . Ii1I
if 28 - 28: iII111i / I1ii11iIi11i - OoOoOO00 * Oo0Ooo + Ii1I * OoOoOO00
I1ii1I = IIII . site
if 94 - 94: oO0o
if ( OoO0OOoooooOO ) :
I1i11II = IIII . parent_for_more_specifics . print_eid_tuple ( )
lprint ( " Found ams {} for site '{}' for registering prefix {}" . format ( green ( I1i11II , False ) , I1ii1I . site_name , green ( oo0ooooO , False ) ) )
if 95 - 95: ooOoO0o * O0 + OOooOOo
else :
I1i11II = green ( IIII . print_eid_tuple ( ) , False )
lprint ( " Found {} for site '{}' for registering prefix {}" . format ( I1i11II , I1ii1I . site_name , green ( oo0ooooO , False ) ) )
if 11 - 11: i1IIi / OoOoOO00 + OoOoOO00 + I1ii11iIi11i + OOooOOo
if 21 - 21: ooOoO0o
if 28 - 28: OoOoOO00 + OoOoOO00 - OoOoOO00 / ooOoO0o
if 81 - 81: oO0o
if 34 - 34: o0oOOo0O0Ooo * OOooOOo - i1IIi * o0oOOo0O0Ooo * Oo0Ooo
if 59 - 59: iIii1I11I1II1 / Oo0Ooo % II111iiii
if ( I1ii1I . shutdown ) :
lprint ( ( " Rejecting registration for site '{}', configured in " +
"admin-shutdown state" ) . format ( I1ii1I . site_name ) )
packet = i1iIiII . end_of_rlocs ( packet , I111IoOo0oOOO0o . rloc_count )
continue
if 55 - 55: ooOoO0o - IiII + o0oOOo0O0Ooo
if 48 - 48: O0 - iIii1I11I1II1 * OOooOOo
if 33 - 33: I11i
if 63 - 63: Ii1I % II111iiii / OoOoOO00 + Oo0Ooo
if 28 - 28: OoO0O00 + I1IiiI . oO0o + II111iiii - O0
if 32 - 32: oO0o
if 62 - 62: i11iIiiIii + OoooooooOO + IiII - OoO0O00 / oO0o * iIii1I11I1II1
if 91 - 91: o0oOOo0O0Ooo - i11iIiiIii + Oo0Ooo % iIii1I11I1II1
I1 = oOOOoO0 . key_id
if ( I1ii1I . auth_key . has_key ( I1 ) == False ) : I1 = 0
O0O0 = I1ii1I . auth_key [ I1 ]
if 40 - 40: I1Ii111 * OoOoOO00 * Ii1I % iII111i % ooOoO0o . Ii1I
i111II = lisp_verify_auth ( O0ooO00OO , oOOOoO0 . alg_id ,
oOOOoO0 . auth_data , O0O0 )
iiIi1i1i = "dynamic " if IIII . dynamic else ""
if 69 - 69: i11iIiiIii + Oo0Ooo / II111iiii % OoOoOO00
o0OOo0o0 = bold ( "passed" if i111II else "failed" , False )
I1 = "key-id {}" . format ( I1 ) if I1 == oOOOoO0 . key_id else "bad key-id {}" . format ( oOOOoO0 . key_id )
if 4 - 4: II111iiii + ooOoO0o
lprint ( " Authentication {} for {}EID-prefix {}, {}" . format ( o0OOo0o0 , iiIi1i1i , green ( oo0ooooO , False ) , I1 ) )
if 25 - 25: I1IiiI - iIii1I11I1II1
if 11 - 11: I1Ii111 / iII111i - I11i
if 87 - 87: I1Ii111 * i11iIiiIii . OOooOOo . OoooooooOO
if 2 - 2: i11iIiiIii + oO0o
if 40 - 40: i11iIiiIii + oO0o * IiII
if 19 - 19: iII111i / II111iiii . I1Ii111 * I1IiiI - OOooOOo
oO0OoO0 = True
O0o0O0oooo0O = ( lisp_get_eid_hash ( I111IoOo0oOOO0o . eid ) != None )
if ( O0o0O0oooo0O or IIII . require_signature ) :
o000000oOooO = "Required " if IIII . require_signature else ""
oo0ooooO = green ( oo0ooooO , False )
i1IIIIi1Ii111 = lisp_find_sig_in_rloc_set ( packet , I111IoOo0oOOO0o . rloc_count )
if ( i1IIIIi1Ii111 == None ) :
oO0OoO0 = False
lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}, no signature found" ) . format ( o000000oOooO ,
# OOooOOo
bold ( "failed" , False ) , oo0ooooO ) )
else :
oO0OoO0 = lisp_verify_cga_sig ( I111IoOo0oOOO0o . eid , i1IIIIi1Ii111 )
o0OOo0o0 = bold ( "passed" if oO0OoO0 else "failed" , False )
lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}" ) . format ( o000000oOooO , o0OOo0o0 , oo0ooooO ) )
if 88 - 88: OoooooooOO / iII111i + i1IIi
if 64 - 64: IiII % I11i / iIii1I11I1II1
if 66 - 66: Ii1I
if 55 - 55: OOooOOo + I1IiiI + IiII . Ii1I * oO0o
if ( i111II == False or oO0OoO0 == False ) :
packet = i1iIiII . end_of_rlocs ( packet , I111IoOo0oOOO0o . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
if 71 - 71: IiII - iII111i % I1IiiI * iII111i
continue
if 27 - 27: ooOoO0o - OoO0O00
if 83 - 83: iII111i * OoOoOO00 - O0 * Ii1I
if 79 - 79: I11i / iII111i % Ii1I / OoOoOO00 % O0 / IiII
if 32 - 32: IiII * II111iiii . Ii1I
if 68 - 68: I11i / O0
if 6 - 6: oO0o - oO0o . I1IiiI % I1ii11iIi11i
if ( oOOOoO0 . merge_register_requested ) :
iiiIIIII1iIi = IIII
iiiIIIII1iIi . inconsistent_registration = False
if 22 - 22: Ii1I / I1IiiI / II111iiii
if 31 - 31: II111iiii - Ii1I * OOooOOo - i11iIiiIii / OoooooooOO - I1Ii111
if 76 - 76: Oo0Ooo
if 93 - 93: i1IIi - I1IiiI * i11iIiiIii / Ii1I . Ii1I - i1IIi
if 19 - 19: iIii1I11I1II1 * OOooOOo * Oo0Ooo % I1IiiI
if ( IIII . group . is_null ( ) ) :
if ( iiiIIIII1iIi . site_id != oOOOoO0 . site_id ) :
iiiIIIII1iIi . site_id = oOOOoO0 . site_id
iiiIIIII1iIi . registered | |
# Source repository: mrdragonbear/recmetrics
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
import scipy.sparse as sp
from sklearn.metrics import mean_squared_error
from math import sqrt
import itertools
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import random
def novelty(predicted, pop, u, n):
    """
    Compute the novelty of a set of recommendation lists.

    Parameters
    ----------
    predicted : a list of lists
        Ordered predictions, e.g. [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
    pop : dictionary
        Occurrence counts of every item in the training data,
        e.g. {1198: 893, 1270: 876, 593: 876, 2762: 867}
    u : integer
        The number of users in the training data.
    n : integer
        The length of the recommended list per user.

    Returns
    -------
    novelty :
        System-level novelty of the recommendations.
    mean_self_information :
        Per-list novelty (mean self-information of each top-N list).

    -------
    Metric definition:
    Zhou, T. et al. (2010). Solving the apparent diversity-accuracy
    dilemma of recommender systems. Proceedings of the National Academy
    of Sciences, 107(10), 4511-4515.
    """
    mean_self_information = []
    for rec_list in predicted:
        # Self-information of an item is -log2 of its relative popularity.
        info = 0.0
        for item in rec_list:
            info -= np.log2(pop[item] / u)
        mean_self_information.append(info / n)
    novelty = sum(mean_self_information) / len(predicted)
    return novelty, mean_self_information
def prediction_coverage(predicted, catalog):
    """
    Compute the prediction coverage for a set of recommendation lists.

    Parameters
    ----------
    predicted : a list of lists
        Ordered predictions, e.g. [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
    catalog : list
        All unique items in the training data, e.g. ['A', 'B', 'C', 'X', 'Y', 'Z']

    Returns
    -------
    prediction_coverage :
        Percentage of catalog items that appear in at least one
        recommendation list, rounded to 2 decimal places.

    -------
    Metric definition:
    Ge, M., Delgado-Battenfeld, C., & Jannach, D. (2010, September).
    Beyond accuracy: evaluating recommender systems by coverage and serendipity.
    In Proceedings of the fourth ACM conference on Recommender systems (pp. 257-260). ACM.
    """
    recommended_items = {item for rec_list in predicted for item in rec_list}
    return round(100.0 * len(recommended_items) / len(catalog), 2)
def catalog_coverage(predicted, catalog, k):
    """
    Compute the catalog coverage over k sampled recommendation lists.

    Parameters
    ----------
    predicted : a list of lists
        Ordered predictions, e.g. [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
    catalog : list
        All unique items in the training data, e.g. ['A', 'B', 'C', 'X', 'Y', 'Z']
    k : integer
        Number of recommendation lists to sample (with replacement) for
        the offline estimate.

    Returns
    -------
    catalog_coverage :
        Percentage of catalog items covered by the sampled lists,
        rounded to 2 decimal places.

    -------
    Metric definition:
    Ge, M., Delgado-Battenfeld, C., & Jannach, D. (2010, September).
    Beyond accuracy: evaluating recommender systems by coverage and serendipity.
    In Proceedings of the fourth ACM conference on Recommender systems (pp. 257-260). ACM.
    """
    # Sampling with replacement, as in the original offline setup.
    sampled_lists = random.choices(predicted, k=k)
    covered = {item for rec_list in sampled_lists for item in rec_list}
    return round(100.0 * len(covered) / len(catalog), 2)
def _ark(actual, predicted, k=10):
"""
Computes the average recall at k.
Parameters
----------
actual : list
A list of actual items to be predicted
predicted : list
An ordered list of predicted items
k : int, default = 10
Number of predictions to consider
Returns:
-------
score : int
The average recall at k.
"""
if len(predicted)>k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i,p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if not actual:
return 0.0
return score / len(actual)
def mark(actual, predicted, k=10):
    """
    Compute the mean average recall at k (mar@k) over all users.

    Parameters
    ----------
    actual : a list of lists
        Actual items to be predicted, e.g. [['A', 'B', 'X'], ['A', 'B', 'Y']]
    predicted : a list of lists
        Ordered predictions, e.g. [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
    k : int, default = 10
        Number of predictions to consider per user.

    Returns
    -------
    mark : float
        The mean average recall at k (mar@k).
    """
    per_user_scores = [_ark(a, p, k) for a, p in zip(actual, predicted)]
    return np.mean(per_user_scores)
def personalization(predicted):
    """
    Personalization measures recommendation similarity across users.
    A high score indicates good personalization (user's lists of recommendations are different).
    A low score indicates poor personalization (user's lists of recommendations are very similar).
    A model is "personalizing" well if the set of recommendations for each user is different.

    Parameters
    ----------
    predicted : a list of lists
        Ordered predictions
        example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]

    Returns
    -------
    The personalization score for all recommendations
    (1 - mean pairwise cosine similarity between users' binary item vectors).
    """
    def make_rec_matrix(predicted):
        # Melt to (user index, item) pairs, pivot to a user x item table,
        # then binarize: 1 if the user was recommended the item, else 0.
        df = pd.DataFrame(data=predicted).reset_index().melt(
            id_vars='index', value_name='item',
        )
        df = df[['index', 'item']].pivot(index='index', columns='item', values='item')
        df = pd.notna(df)*1
        rec_matrix = sp.csr_matrix(df.values)
        return rec_matrix

    # create matrix for recommendations
    # NOTE: pass the nested lists straight to pandas; converting with
    # np.array() first was unnecessary and raises ValueError for ragged
    # recommendation lists under NumPy >= 1.24.
    rec_matrix_sparse = make_rec_matrix(predicted)

    # calculate similarity for every user's recommendation list
    similarity = cosine_similarity(X=rec_matrix_sparse, dense_output=False)

    # get indicies for upper right triangle w/o diagonal
    upper_right = np.triu_indices(similarity.shape[0], k=1)

    # calculate average similarity
    personalization = np.mean(similarity[upper_right])
    return 1-personalization
def _single_list_similarity(predicted, feature_df, u):
    """
    Compute the intra-list similarity for a single list of recommendations.

    Parameters
    ----------
    predicted : a list
        Ordered predictions, e.g. ['X', 'Y', 'Z']
    feature_df : dataframe
        One-hot encoded or latent features, indexed by the id used in the
        recommendations.
    u : int
        User index, used only for the error message.

    Returns
    -------
    ils_single_user : float
        The intra-list similarity (mean pairwise cosine similarity) of
        the recommended items.
    """
    if not predicted:
        raise Exception('Predicted list is empty, index: {0}'.format(u))
    # Feature rows for the recommended items, as a sparse matrix.
    item_features = sp.csr_matrix(feature_df.loc[predicted].dropna().values)
    pairwise = cosine_similarity(X=item_features, dense_output=False)
    # Mean over the upper triangle (excluding the diagonal self-similarity).
    triangle = np.triu_indices(pairwise.shape[0], k=1)
    return np.mean(pairwise[triangle])
def intra_list_similarity(predicted, feature_df):
    """
    Compute the average intra-list similarity over all recommendation lists.
    Lower values indicate more diverse recommendation lists.

    Parameters
    ----------
    predicted : a list of lists
        Ordered predictions, e.g. [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
    feature_df : dataframe
        One-hot encoded or latent features, indexed by the id used in the
        recommendations.

    Returns
    -------
    The average intra-list similarity of the recommendations.
    """
    feature_df = feature_df.fillna(0)
    scores = [
        _single_list_similarity(rec_list, feature_df, u)
        for u, rec_list in enumerate(predicted)
    ]
    return np.mean(scores)
def mse(y, yhat):
    """
    Compute the mean squared error (MSE).

    Parameters
    ----------
    y : Series or array
        Original true ratings or interaction values.
    yhat : Series or array
        Reconstructed (predicted) ratings or interaction values.

    Returns
    -------
    The mean squared error (MSE).
    """
    return mean_squared_error(y, yhat)
def rmse(y, yhat):
    """
    Compute the root mean squared error (RMSE).

    Parameters
    ----------
    y : Series or array
        Original true ratings or interaction values.
    yhat : Series or array
        Reconstructed (predicted) ratings or interaction values.

    Returns
    -------
    The root mean squared error (RMSE).
    """
    return sqrt(mean_squared_error(y, yhat))
def make_confusion_matrix(y, yhat):
    """
    Calculate and plot a row-normalized confusion matrix.

    Parameters
    ----------
    y : list or array
        Actual interaction values such as ratings.
    yhat : list or array
        Predicted interaction values.

    Returns
    -------
    None. Displays a confusion matrix plot (calls plt.show()).
    """
    # Row-normalize counts to percentages (label order: positive=1, negative=0).
    cm = confusion_matrix(y, yhat, labels=[1,0])
    cm = np.round(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis],4)*100
    fmt = ".2f"
    # Cell labels and colors: diagonal (correct) in green, off-diagonal in red.
    descriptions = np.array([["True Positive", "False Negative"], ["False Positive", "True Negatives"]])
    colors = np.array([["green", "red"], ["red", "green"]])
    # Blank 2x2 background; percentages are drawn as colored text instead.
    plt.imshow([[0,0],[0,0]], interpolation='nearest', cmap=plt.cm.Greys)
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt)+'%\n' + descriptions[i, j],
                 horizontalalignment="center",
                 color=colors[i,j])
    # Grid lines separating the four quadrants.
    plt.axhline(y=0.5, xmin=0, xmax=1, color="black", linewidth=0.75)
    plt.axvline(x=0.5, ymin=0, ymax=1, color="black", linewidth=0.75)
    plt.ylabel('True')
    plt.xlabel('Predicted')
    plt.title("Confusion Matrix")
    plt.xticks([0,1], [1,0], rotation=45)
    plt.yticks([0,1], [1,0])
    plt.show()
def recommender_precision(predicted, actual):
    """
    Compute the precision of each user's recommendation list and average
    the precision over all users.

    Parameters
    ----------
    predicted : a list of lists
        Ordered predictions, e.g. [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
    actual : a list of lists
        Actual items to be predicted, e.g. [['A', 'B', 'X'], ['A', 'B', 'Y']]

    Returns
    -------
    precision : float
        Mean per-user precision, each rounded to 4 decimal places.
    """
    def _user_precision(rec_list, true_list):
        relevant = sum(1 for item in rec_list if item in true_list)
        return np.round(float(relevant) / float(len(rec_list)), 4)

    per_user = [_user_precision(p, a) for p, a in zip(predicted, actual)]
    return np.mean(per_user)
def recommender_recall(predicted, actual):
"""
Computes the recall of each user's list of recommendations, and averages precision over all users.
----------
actual : a list of lists
Actual items to be predicted
example: [['A', 'B', 'X'], ['A', 'B', 'Y']]
predicted : | |
#!/usr/bin/env python3
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# Getters & Setters for AWS EKS Clusters resource tags
# This class supports the main "resources_tags" class
# Included class & methods
# class - eks_clusters_tags
# method - get_eks_clusters_ids
# method - get_eks_clusters_tags
# method - get_eks_clusters_keys
# method - get_eks_clusters_values
# method - set_eks_clusters_tags
# Import AWS module for python
import boto3, botocore
# Import collections to use ordered dictionaries for storage
from collections import OrderedDict
# Import logging module
import logging
# Import Python's regex module to filter Boto3's API responses
import re
# Instantiate logging for this module using its file name
log = logging.getLogger(__name__)
# Define resources_tags class to get/set resources & their assigned tags
class eks_clusters_tags:
# Class constructor
    def __init__(self, resource_type: str, region: str) -> None:
        """Store the Boto3 service name and AWS region used by the tag getters/setters.

        Args:
            resource_type: Boto3 service name (e.g. "eks") passed to boto3.client().
            region: AWS region name passed as region_name to boto3.client().
        """
        self.resource_type = resource_type
        self.region = region
# Returns a filtered list of all resource names & ID's for the resource type specified
def get_eks_clusters_ids(self, filter_tags):
self.filter_tags = filter_tags
tag_key1_state = True if self.filter_tags.get('tag_key1') else False
tag_value1_state = True if self.filter_tags.get('tag_value1') else False
tag_key2_state = True if self.filter_tags.get('tag_key2') else False
tag_value2_state = True if self.filter_tags.get('tag_value2') else False
resource_inventory = dict()
def _intersection_union_invalid(tag_dict, cluster_name, cluster_arn):
resource_inventory['No matching resource'] = 'No matching resource'
if self.filter_tags.get('conjunction') == 'AND':
def _intersection_tfff(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict:
resource_inventory[cluster_arn] = cluster_name
def _intersection_fftf(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key2') in tag_dict:
resource_inventory[cluster_arn] = cluster_name
def _intersection_fftt(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key2') in tag_dict:
if tag_dict.get(self.filter_tags.get('tag_key2')) == self.filter_tags.get('tag_value2'):
resource_inventory[cluster_arn] = cluster_name
def _intersection_ttff(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict:
if tag_dict.get(self.filter_tags.get('tag_key1')) == self.filter_tags.get('tag_value1'):
resource_inventory[cluster_arn] = cluster_name
def _intersection_tftf(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict and self.filter_tags.get('tag_key2') in tag_dict:
resource_inventory[cluster_arn] = cluster_name
def _intersection_tftt(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict and self.filter_tags.get('tag_key2') in tag_dict:
if tag_dict.get(self.filter_tags.get('tag_key2')) == self.filter_tags.get('tag_value2'):
resource_inventory[cluster_arn] = cluster_name
def _intersection_tttf(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict and self.filter_tags.get('tag_key2') in tag_dict:
if tag_dict.get(self.filter_tags.get('tag_key1')) == self.filter_tags.get('tag_value1'):
resource_inventory[cluster_arn] = cluster_name
def _intersection_tttt(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict and self.filter_tags.get('tag_key2') in tag_dict:
if tag_dict.get(self.filter_tags.get('tag_key1')) == self.filter_tags.get('tag_value1'):
if tag_dict.get(self.filter_tags.get('tag_key2')) == self.filter_tags.get('tag_value2'):
resource_inventory[cluster_arn] = cluster_name
def _intersection_ffff(tag_dict, cluster_name, cluster_arn):
resource_inventory[cluster_arn] = cluster_name
# "AND" Truth table check for tag_key1, tag_value1, tag_key2, tag_value2
intersection_combos = {
(False, False, False, True): _intersection_union_invalid,
(False, True, False, False): _intersection_union_invalid,
(False, True, False, True): _intersection_union_invalid,
(True, False, False, True): _intersection_union_invalid,
(True, True, False, True): _intersection_union_invalid,
(False, True, True, False): _intersection_union_invalid,
(False, False, True, False): _intersection_fftf,
(False, False, True, True): _intersection_fftt,
(True, False, False, False): _intersection_tfff,
(True, True, False, False): _intersection_ttff,
(True, False, True, False): _intersection_tftf,
(True, False, True, True): _intersection_tftt,
(True, True, True, False): _intersection_tttf,
(True, True, True, True): _intersection_tttt,
(False, False, False, False): _intersection_ffff
}
try:
client = boto3.client(self.resource_type, region_name=self.region)
# Get all the EKS Clusters in the region
my_clusters = client.list_clusters()
for item in my_clusters['clusters']:
eks_cluster_arn= client.describe_cluster(
name=item
)['cluster']['arn']
try:
# Get all the tags for a given EKS Cluster
response = client.list_tags_for_resource(
resourceArn= eks_cluster_arn
)
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
intersection_combos[(tag_key1_state,
tag_value1_state,
tag_key2_state,
tag_value2_state)](response.get('Tags'), item, eks_cluster_arn )
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
if self.filter_tags.get('conjunction') == 'OR':
def _union_tfff_tftf_fftf(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict or self.filter_tags.get('tag_key2') in tag_dict:
print(cluster_name)
print(self.filter_tags.get('tag_key1'))
print(self.filter_tags.get('tag_key2'))
print(tag_dict)
resource_inventory[cluster_arn] = cluster_name
def _union_tttf(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict:
if tag_dict[self.filter_tags.get('tag_key1')] == self.filter_tags.get('tag_value1'):
resource_inventory[cluster_arn] = cluster_name
elif self.filter_tags.get('tag_key2') in tag_dict:
resource_inventory[cluster_arn] = cluster_name
def _union_tftt(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key2') in tag_dict:
if tag_dict[self.filter_tags.get('tag_key2')] == self.filter_tags.get('tag_value2'):
resource_inventory[cluster_arn] = cluster_name
elif self.filter_tags.get('tag_key1') in tag_dict:
resource_inventory[cluster_arn] = cluster_name
def _union_fftt(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key2') in tag_dict:
if tag_dict[self.filter_tags.get('tag_key2')] == self.filter_tags.get('tag_value2'):
resource_inventory[cluster_arn] = cluster_name
def _union_ttff(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict:
if tag_dict[self.filter_tags.get('tag_key1')] == self.filter_tags.get('tag_value1'):
resource_inventory[cluster_arn] = cluster_name
def _union_tttt(tag_dict, cluster_name, cluster_arn):
if self.filter_tags.get('tag_key1') in tag_dict:
if tag_dict[self.filter_tags.get('tag_key1')] == self.filter_tags.get('tag_value1'):
resource_inventory[cluster_arn] = cluster_name
elif self.filter_tags.get('tag_key2') in tag_dict:
if tag_dict[self.filter_tags.get('tag_key2')] == self.filter_tags.get('tag_value2'):
resource_inventory[cluster_arn] = cluster_name
def _union_ffff(tag_dict, cluster_name, cluster_arn):
resource_inventory[cluster_arn] = cluster_name
# "OR" Truth table check for tag_key1, tag_value1, tag_key2, tag_value2
or_combos = {
(False, False, False, True): _intersection_union_invalid,
(False, True, False, False): _intersection_union_invalid,
(False, True, False, True): _intersection_union_invalid,
(False, True, True, True): _intersection_union_invalid,
(True, True, False, True): _intersection_union_invalid,
(False, False, True, False): _union_tfff_tftf_fftf,
(False, False, True, True): _union_fftt,
(True, False, False, False): _union_tfff_tftf_fftf,
(True, False, True, False): _union_tfff_tftf_fftf,
(True, False, True, True): _union_tftt,
(True, True, False, False): _union_ttff,
(True, True, True, False): _union_tttf,
(True, True, True, True): _union_tttt,
(False, False, False, False): _union_ffff
}
try:
client = boto3.client(self.resource_type, region_name=self.region)
# Get all the EKS Clusters in the region
my_clusters = client.list_clusters()
for item in my_clusters['clusters']:
eks_cluster_arn= client.describe_cluster(
name=item
)['cluster']['arn']
try:
# Get all the tags for a given EKS Cluster
response = client.list_tags_for_resource(
resourceArn=eks_cluster_arn
)
or_combos[(tag_key1_state,
tag_value1_state,
tag_key2_state,
tag_value2_state)](response.get('Tags'), item, eks_cluster_arn)
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
return resource_inventory
# method - get_eks_clusters_tags
# Returns a nested dictionary of every resource & its key:value tags for the chosen resource type
# No input arguments
def get_eks_clusters_tags(self):
# Instantiate dictionaries to hold resources & their tags
tagged_resource_inventory = dict()
try:
client = boto3.client(self.resource_type, region_name=self.region)
# Get all the EKS Clusters in the region
my_clusters = client.list_clusters()
if len(my_clusters['clusters']) == 0:
tagged_resource_inventory["No Resource Found"] = {"No Tags Found": "No Tags Found"}
else:
for item in my_clusters['clusters']:
resource_tags = {}
eks_cluster_arn= client.describe_cluster(
name=item
)['cluster']['arn']
try:
response = client.list_tags_for_resource(
resourceArn= eks_cluster_arn
)
try:
for tag_key, tag_value in response['tags'].items():
if not re.search("^aws:", tag_key):
resource_tags[tag_key]= tag_value
except:
resource_tags[tag_key] = "No tag values found"
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
resource_tags["No Tags Found"] = "No Tags Found"
sorted_resource_tags = OrderedDict(sorted(resource_tags.items()))
tagged_resource_inventory[eks_cluster_arn] = sorted_resource_tags
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
tagged_resource_inventory["No Resource Found"] = {"No Tags Found": "No Tags Found"}
return tagged_resource_inventory
# method - get_eks_clusters_keys
# Getter method retrieves every tag:key for object's resource type
# No input arguments
def get_eks_clusters_keys(self):
tag_keys_inventory = list()
try:
client = boto3.client(self.resource_type, region_name=self.region)
# Get all the EKS clusters in the region
my_clusters = client.list_clusters()
for item in my_clusters['clusters']:
cluster_arn = client.describe_cluster(
name=item
)['cluster']['arn']
try:
# Get all the tags for a given EKS Cluster
response = client.list_tags_for_resource(
resourceArn=eks_cluster_arn
)
try:
# Add all tag keys to the list
for tag_key, _ in response['Tags'].items():
if not re.search("^aws:", tag_key):
tag_keys_inventory.append(tag_key)
except:
tag_keys_inventory.append("No tag keys found")
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
tag_keys_inventory.append("No tag keys found")
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
tag_keys_inventory.append("No tag keys found")
return tag_keys_inventory
# method - get_eks_clusters_values
# Getter method retrieves every tag:value for object's resource type
# No input arguments
def get_eks_clusters_values(self):
tag_values_inventory = list()
try:
client = boto3.client(self.resource_type, region_name=self.region)
# Get all the EKS clusters in the region
my_clusters = client.list_clusters()
for item in my_clusters['clusters']:
cluster_arn = client.describe_cluster(
name=item
)['cluster']['arn']
try:
# Get all the tags for a given EKS Cluster
response = client.list_tags_for_resource(
resourceArn=cluster_arn
)
try:
# Add all tag keys to the list
for tag_key, tag_value in response['Tags'].items():
# Exclude any AWS-applied tags which begin with "aws:"
if not re.search("^aws:", tag_key):
tag_values_inventory.append(tag_value)
except:
tag_values_inventory.append("No tag values found")
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
tag_values_inventory.append("No tag values found")
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
tag_values_inventory.append("No tag values found")
return tag_values_inventory
# method - set_eks_clusters_tags
# Setter method to update tags on user-selected resources
# 2 inputs - list of resource EKS Cluster arns to tag, list of individual tag key:value dictionaries
def set_eks_clusters_tags(self, resources_to_tag, chosen_tags):
resources_updated_tags = dict()
tag_dict = dict()
# for EKS Boto3 API covert list of tags dicts to single key:value tag dict
for tag in chosen_tags:
tag_dict[tag['Key']] = tag['Value']
for resource_arn in resources_to_tag:
try:
client = boto3.client(self.resource_type, region_name=self.region)
try:
response = client.tag_resource(
resourceArn=resource_arn,
tags=tag_dict
)
except botocore.exceptions.ClientError as error:
log.error("Boto3 API returned error: {}".format(error))
resources_updated_tags["No Resources Found"] = "No Tags Applied"
except | |
<reponame>sixpi/ProDy
# -*- coding: utf-8 -*-
"""This module defines MSA analysis functions."""
from numpy import all, zeros, dtype, array, char, cumsum
from .sequence import Sequence, splitSeqLabel
from prody import LOGGER
# Public API of this module.
__all__ = ['MSA', 'refineMSA', 'mergeMSA', 'specMergeMSA']

# On Python 2 rebind the builtin ``range`` to the lazy ``xrange``;
# on Python 3 ``xrange`` does not exist and the NameError is ignored.
try:
    range = xrange
except NameError:
    pass
class MSA(object):
    """Store and manipulate multiple sequence alignments.

    Internal state:
      _msa     -- the underlying Numpy character array (2D when aligned)
      _labels  -- one label (or None) per sequence row
      _mapping -- dict from label (or label prefix) to a row index, or to a
                  list of row indices when a label occurs more than once
      _aligned -- whether the array represents an alignment (2D '|S1')
      _title   -- display title
      _split   -- whether labels are split when iterating/indexing
    """

    def __init__(self, msa, title='Unknown', labels=None, **kwargs):
        """*msa* must be a 2D Numpy character array. *labels* is a list of
        sequence labels (or titles). *mapping* should map label or part of
        label to sequence index in *msa* array. If *mapping* is not given,
        one will be build from *labels*."""

        try:
            ndim, dtype_, shape = msa.ndim, msa.dtype, msa.shape
        except AttributeError:
            raise TypeError('msa is not a Numpy array')

        # Aligned MSAs must be a 2D single-character ('|S1') array;
        # unaligned sequence collections are accepted as given.
        self._aligned = aligned = kwargs.get('aligned', True)
        if aligned:
            if ndim != 2:
                raise ValueError('msa.dim must be 2')
            if dtype_ != dtype('|S1'):
                raise ValueError('msa must be a character array')
        numseq = shape[0]

        if labels and len(labels) != numseq:
            raise ValueError('len(labels) must be equal to number of '
                             'sequences')
        self._labels = labels
        mapping = kwargs.get('mapping')
        if mapping is None:
            if labels is not None:
                # map labels to sequence index
                self._mapping = mapping = {}
                for index, label in enumerate(labels):
                    label = splitSeqLabel(label)[0]
                    try:
                        value = mapping[label]
                    except KeyError:
                        # first occurrence of this label
                        mapping[label] = index
                    else:
                        # repeated label: promote the entry to a list of rows
                        try:
                            value.append(index)
                        except AttributeError:
                            mapping[label] = [value, index]
        elif mapping:
            # duck-type check that the user supplied mapping behaves
            # like a dictionary (supports key lookup / raises KeyError)
            try:
                mapping['isdict']
            except KeyError:
                pass
            except Exception:
                raise TypeError('mapping must be a dictionary')
        self._mapping = mapping
        if labels is None:
            self._labels = [None] * numseq

        self._msa = msa
        self._title = str(title) or 'Unknown'
        self._split = bool(kwargs.get('split', True))

    def __str__(self):
        return 'MSA ' + self._title

    def __repr__(self):
        if self._aligned:
            return ('<MSA: {0} ({1} sequences, {2} residues)>'
                    ).format(self._title, self.numSequences(),
                             self.numResidues())
        else:
            return ('<MSA: {0} ({1} sequences, not aligned)>'
                    ).format(self._title, self.numSequences())

    def __len__(self):
        return len(self._msa)

    def __getitem__(self, index):
        """Index by row number, label, list of labels, or (rows, cols)
        tuple.  Single rows come back as :class:`.Sequence`; everything
        else as a new :class:`MSA`."""

        if isinstance(index, int):
            return Sequence(self, index)

        if isinstance(index, str):
            try:
                rows = self._mapping[index]
            except KeyError:
                raise KeyError('label {0} is not mapped to a sequence'
                               .format(index))
            else:
                msa = self._msa[rows]
            if msa.ndim == 1:
                # label mapped to a single row
                return Sequence(self, rows)
            else:
                # label mapped to multiple rows: return a sub-MSA
                if msa.base is not None:
                    msa = msa.copy()
                labels = self._labels
                return MSA(msa, title='{0}[{1}]'.format(self._title,
                           index), labels=[labels[i] for i in rows])
        elif isinstance(index, tuple):
            if len(index) == 1:
                return self[index[0]]
            elif len(index) == 2:
                rows, cols = index
            else:
                raise IndexError('invalid index: ' + str(index))
        else:
            rows, cols = index, None

        # handle list of labels
        if isinstance(rows, list):
            rows = self.getIndex(rows) or rows
        elif isinstance(rows, int):
            # NOTE(review): ndarray.tostring() was removed in NumPy 1.23 --
            # confirm the pinned NumPy version or migrate to tobytes().
            return Sequence(self._msa[rows, cols].tostring(),
                            self._labels[rows])
        elif isinstance(rows, str):
            try:
                rows = self._mapping[rows]
            except KeyError:
                raise KeyError('label {0} is not mapped to a sequence'
                               .format(index))
            else:
                if isinstance(rows, int):
                    return Sequence(self._msa[rows, cols].tostring(),
                                    self._labels[rows])

        if cols is None:
            msa = self._msa[rows]
        else:
            if isinstance(cols, (slice, int)):
                msa = self._msa[rows, cols]
            else:
                # fancy column selection must go through take()
                try:
                    msa = self._msa[rows].take(cols, 1)
                except TypeError:
                    raise IndexError('invalid index: ' + str(index))

        try:
            lbls = self._labels[rows]
        except TypeError:
            # rows is a list of indices; gather labels one by one
            labels = self._labels
            lbls = [labels[i] for i in rows]
        else:
            if not isinstance(lbls, list):
                lbls = [lbls]

        # normalize result to a 2D array before wrapping in an MSA
        if msa.ndim == 0:
            msa = msa.reshape((1, 1))
        elif msa.ndim == 1:
            msa = msa.reshape((1, len(msa)))
        if msa.base is not None:
            msa = msa.copy()

        return MSA(msa=msa, title=self._title + '\'', labels=lbls,
                   aligned=self._aligned)

    def __iter__(self):
        for i in range(len(self._msa)):
            yield Sequence(self, i)

    def __contains__(self, key):
        try:
            return key in self._mapping
        except Exception:
            # _mapping may be None when no labels were supplied
            pass
        return False

    def __eq__(self, other):
        try:
            other = other._getArray()
        except AttributeError:
            return False

        try:
            return all(other == self._msa)
        except Exception:
            # shape/dtype mismatch -> not equal
            pass
        return False

    def _getSplit(self):
        return self._split

    def _setSplit(self, split):
        self._split = bool(split)

    split = property(_getSplit, _setSplit,
                     doc='Return split label when iterating or indexing.')

    def isAligned(self):
        """Returns **True** if MSA is aligned."""

        return self._aligned

    def numSequences(self):
        """Returns number of sequences."""

        return self._msa.shape[0]

    def numResidues(self):
        """Returns number of residues (or columns in the MSA), if MSA is
        aligned."""

        if self._aligned:
            return self._msa.shape[1]

    def numIndexed(self):
        """Returns number of sequences that are indexed using the identifier
        part or all of their labels.  The return value should be equal to
        number of sequences."""

        count = len(self._mapping)
        if len(self._msa) == count:
            return count
        else:
            # some labels map to several rows (stored as lists); add the
            # extra rows beyond the first for each such label
            count = len(self._mapping)
            for val in self._mapping.values():
                try:
                    count += len(val) - 1
                except TypeError:
                    # single-row entry (plain int)
                    pass
            return count

    def getTitle(self):
        """Returns title of the instance."""

        return self._title

    def setTitle(self, title):
        """Set title of the instance."""

        self._title = str(title)

    def getLabel(self, index, full=False):
        """Returns label of the sequence at given *index*.  Residue numbers will
        be removed from the sequence label, unless *full* is **True**."""

        index = self._mapping.get(index, index)
        if full:
            return self._labels[index]
        else:
            return splitSeqLabel(self._labels[index])[0]

    def getResnums(self, index):
        """Returns starting and ending residue numbers (:term:`resnum`) for the
        sequence at given *index*."""

        index = self._mapping.get(index, index)
        return splitSeqLabel(self._labels[index])[1:]

    def getArray(self):
        """Returns a copy of the MSA character array."""

        return self._msa.copy()

    def _getArray(self):
        """Returns MSA character array."""

        return self._msa

    def getIndex(self, label):
        """Returns index of the sequence that *label* maps onto.  If *label*
        maps onto multiple sequences or *label* is a list of labels, a list
        of indices is returned.  If an index for a label is not found,
        return **None**."""

        try:
            index = self._mapping[label]
        except KeyError:
            return None
        except TypeError:
            # *label* is unhashable, i.e. a list of labels
            mapping = self._mapping
            indices = []
            append, extend = indices.append, indices.extend
            for key in label:
                try:
                    index = mapping[key]
                except KeyError:
                    return None
                try:
                    # entry is a list of rows
                    extend(index)
                except TypeError:
                    # entry is a single row index
                    append(index)
            return indices
        else:
            try:
                return list(index)
            except TypeError:
                return index

    def iterLabels(self, full=False):
        """Yield sequence labels.  By default the part of the label used for
        indexing sequences is yielded."""

        if full:
            for label in self._labels:
                yield label
        else:
            for label in self._labels:
                yield splitSeqLabel(label)[0]

    def countLabel(self, label):
        """Returns the number of sequences that *label* maps onto."""

        try:
            return len(self._mapping[label])
        except KeyError:
            # label not present at all
            return 0
        except TypeError:
            # entry is a single int -> exactly one sequence
            return 1
def refineMSA(msa, label=None, rowocc=None, seqid=None, colocc=None, **kwargs):
"""Refine *msa* by removing sequences (rows) and residues (columns) that
contain gaps.
:arg msa: multiple sequence alignment
:type msa: :class:`.MSA`
:arg label: remove columns that are gaps in the sequence matching label,
``msa.getIndex(label)`` must return a sequence index, a PDB identifier
is also acceptable
:type label: str
:arg rowocc: row occupancy, sequences with less occupancy will be
removed after *label* refinement is applied
:type rowocc: float
:arg seqid: keep unique sequences at specified sequence identity level,
unique sequences are identified using :func:`.uniqueSequences`
:type seqid: float
:arg colocc: column occupancy, residue positions with less occupancy
will be removed after other refinements are applied
:type colocc: float
:arg keep: keep columns corresponding to residues not resolved in the PDB
structure, default is **False**, applies when *label* is a PDB
identifier
:arg type: bool
For Pfam MSA data, *label* is UniProt entry name for the protein. You may
also use PDB structure and chain identifiers, e.g. ``'1p38'`` or
``'1p38A'``, for *label* argument and UniProt entry names will be parsed
using :func:`.parsePDBHeader` function (see also :class:`.Polymer` and
:class:`.DBRef`).
The order of refinements are applied in the order of arguments. If *label*
and *unique* is specified is specified, sequence matching *label* will
be kept in the refined :class:`.MSA` although it may be similar to some
other sequence."""
# if msa is a char array, it will be refined but label won't work
try:
ndim, dtype_ = msa.ndim, msa.dtype
except AttributeError:
try:
arr = msa._getArray()
except AttributeError:
raise TypeError('msa must be a character array or an MSA instance')
ndim, dtype_ = arr.ndim, arr.dtype
else:
arr, msa = msa, None
if dtype('|S1') != dtype_:
raise ValueError('msa must be a character array or an MSA instance')
if ndim != 2:
raise ValueError('msa must be a 2D array or an MSA instance')
title = []
cols = None
index = None
if label is not None:
before = arr.shape[1]
LOGGER.timeit('_refine')
try:
upper, lower = label.upper(), label.lower()
except AttributeError:
raise TypeError('label must be a string')
if msa is None:
raise TypeError('msa must be an MSA instance, '
'label cannot be used')
index = msa.getIndex(label)
if index is None:
index = msa.getIndex(upper)
if index is None:
index = msa.getIndex(lower)
chain = None
if index is None and (len(label) == 4 or | |
<reponame>jonasbgood/StreetTreesOfNYC
# -*- coding: utf-8 -*-
"""
@author: Jonas
Created on Fri Apr 23 11:05:04 2021
To do and nice to have:
- start with empty map for faster load?
- Keep zoom and location of user when filter change
- show empty map of nyc when no data selected or keep latest viewer setting
- create table with district statistics
- create individual links to google maps / street view for each tree?
- export function
. better performance for complete data set
possible analytical questions:
- city improvement priorities
- where are the most sick or dead trees (also in relative terms, e.g. per
   area, borough or street length)
"""
import numpy as np
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
from sodapy import Socrata
import pandas as pd
## predefined display/legend order for the different health options;
## 'Alive' also exists but just once, probably an erroneous entry
health_status_order = ['Good', 'Fair', 'Poor', 'Dead', 'Stump']

# max number of data points fetched from the API. Last time I checked there
# were 683788 data points available in total
data_limit = 20000
def get_data(data_limit=2000):
    """Fetch up to *data_limit* tree records from the NYC OpenData Socrata API.

    Returns a tuple ``(df, record_count_total)`` where *df* is a cleaned
    pandas DataFrame and *record_count_total* is the total number of records
    available on the server (not just the fetched subset).
    """
    # data source
    datasource = "data.cityofnewyork.us"
    dataset = "uvpi-gqnh"
    timeout = 30

    ########################################################################
    # Insert personal app_token here. Alternatively, if you do not want your
    # token published in this file you can create a file app_token.txt in the
    # same directory containing only the app_token string
    token = ''
    ########################################################################

    ## Try to read token from file app_token.txt
    if token == '':
        try:
            token = str(np.loadtxt('app_token.txt', dtype=str, max_rows=1))
        except (OSError, ValueError):
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit. np.loadtxt raises OSError when
            # the file is missing and ValueError when it cannot be parsed;
            # in both cases fall back to anonymous (throttled) access.
            token = ''

    if token != '':
        client = Socrata(datasource, token, timeout=timeout)
    else:
        client = Socrata(datasource, None, timeout=timeout)

    # total number of records on the server (independent of data_limit)
    record_count_total = client.get(dataset, select="COUNT(*)")
    record_count_total = int(record_count_total[0]['COUNT'])
    ## results_meta = client.get_metadata(dataset)
    results = client.get(dataset, limit=data_limit)
    client.close()

    ## Convert to pandas DataFrame
    df = pd.DataFrame.from_dict(results)

    # make data types usable and consistent
    df['tree_id'] = df['tree_id'].astype(int)
    df['latitude'] = df['latitude'].astype(float)
    df['longitude'] = df['longitude'].astype(float)
    df['tree_dbh'] = df['tree_dbh'].astype(float)
    df['stump_diam'] = df['stump_diam'].astype(float)
    df['status'] = df['status'].astype(str)  # in order to handle NaN as 'nan'
    df['health'] = df['health'].astype(str)
    df['spc_latin'] = df['spc_latin'].astype(str)
    df['spc_common'] = df['spc_common'].astype(str)
    df['problems'] = df['problems'].astype(str)

    ## replace small diameter values with higher values for visualization
    ## in a new column (stumps use their own diameter field)
    df['tree_dbh_vis'] = df.tree_dbh
    df.loc[df.status == 'Stump', 'tree_dbh_vis'] = df.stump_diam
    df.loc[df.tree_dbh_vis < 5, 'tree_dbh_vis'] = 5
    ## clipping of extremely large diameter
    df.loc[df.tree_dbh_vis > 25, 'tree_dbh_vis'] = 25

    ## replace values - variant 1, using numpy.where (strange... but it works)
    # df.spc_common = np.where(df.health == 'nan', df.status, df.spc_common)
    ## replace NaN in health by status entries ('Stump' or 'Dead')
    # df['health'] = np.where(df['health'] == 'nan', df['health'], df['health'])

    ## replace nan values with status entries - variant 2, use pandas.where
    df.spc_common = df.status.where(df.spc_common == 'nan', df.spc_common)
    df['health'] = df['status'].where(df['health'] == 'nan', df['health'])

    return df, record_count_total
def create_mapbox_figure(df):
    """Build the scatter-mapbox figure for the given (possibly empty) tree frame.

    Marker size encodes trunk diameter, color encodes health status; the
    user's pan/zoom state is preserved across redraws via ``uirevision``.
    """
    if df.count()[0] > 0:
        present = df['health'].unique().astype(str)

        ## legend entries in the predefined health order, restricted to
        ## the statuses actually present in the data
        ordered_health = [status for status in health_status_order
                          if status in present]

        ## remap the default palette so colors match health semantics
        palette = px.colors.DEFAULT_PLOTLY_COLORS.copy()
        palette[0] = px.colors.DEFAULT_PLOTLY_COLORS[2]  # 'Good' = green
        palette[1] = px.colors.DEFAULT_PLOTLY_COLORS[0]  # 'Fair' = blue
        palette[2] = px.colors.DEFAULT_PLOTLY_COLORS[1]  # 'Poor' = orange
        colors = [palette[idx] for idx, status in enumerate(health_status_order)
                  if status in present]

        ## hover fields. tree_id MUST remain the last entry: the selection
        ## callback identifies single trees via customdata[-1]
        hover_data = {'spc_latin': True,
                      'health': True,
                      'problems': True,
                      'tree_dbh': True,
                      'tree_dbh_vis': False,
                      'latitude': False,
                      'longitude': False,
                      'tree_id': True,
                      }

        fig = px.scatter_mapbox(df,
                                lat="latitude",
                                lon="longitude",
                                hover_name='spc_common',
                                hover_data=hover_data,
                                color='health',
                                category_orders={'health': ordered_health},
                                color_discrete_sequence=colors,
                                size='tree_dbh_vis',
                                size_max=15,
                                mapbox_style="carto-positron",
                                height=1000,
                                )
        fig.update_layout(legend=dict(
            yanchor="top",
            y=0.99,
            xanchor="left",
            x=0.01
        ))
    else:
        ## nothing selected: draw an empty map with the same base style
        fig = px.scatter_mapbox(df,
                                lat="latitude",
                                lon="longitude",
                                hover_name='spc_common',
                                mapbox_style="carto-positron",
                                height=1000)

    ## remember the map's position and zoom between data updates
    fig['layout']['uirevision'] = 'my_setup'

    return fig
def make_borough_options(df, borough_names):
    """Build checklist options for boroughs, labelling each with its row count
    in *df*, e.g. ``{'label': 'Queens (212)', 'value': 'Queens'}``."""
    options = []
    for name in borough_names:
        count = int((df.boroname == name).sum())
        options.append({'label': '{} ({})'.format(name, count), 'value': name})
    return options
def make_health_status_options(df, health_status):
    """Build checklist options for health statuses, labelling each with its
    row count in *df*, e.g. ``{'label': 'Good (1426)', 'value': 'Good'}``."""
    options = []
    for status in health_status:
        count = int((df.health == status).sum())
        options.append({'label': '{} ({})'.format(status, count), 'value': status})
    return options
## get data once at import time; df is the module-wide source of truth
df, record_count_total = get_data(data_limit)
df_count = df.count()[0]

## create health_status filter options
## although 'Alive' exists just once or so, probably erroneous entry
health_status_order = ['Good', 'Fair', 'Poor', 'Dead', 'Stump']
health_status_unique = df['health'].unique().astype(str)
# 1. add known status elements first in order
health_status = [
    val for val in health_status_order if val in health_status_unique]
# 2. add additional unexpected or new status elements at back
health_status.extend(
    [val for val in health_status_unique if val not in health_status_order])

## compare health status lists and give warning if unexpected elements in health_status_unique
if set(health_status_unique) - set(health_status_order) != set():
    print('Warning: Not all health status options covered:', set(health_status_unique) - set(health_status_order))

## create borough filter options (sorted for stable checklist order)
borough_names = df['boroname'].unique()
borough_names.sort()

## set up app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, title='Street Trees', prevent_initial_callbacks=False)
app.layout = html.Div([
html.Div([ # main row
html.Div([ # first column, width 8
html.H1(children='Hello Street Trees of New York City'),
dcc.Markdown('''
The NYC 2015 Street Tree Census counts {} entries in total. Entries shown in this application: {}
Data from here: [NYC OpenData 2015 Street Tree Census](https://data.cityofnewyork.us/Environment/2015-Street-Tree-Census-Tree-Data/uvpi-gqnh)
'''.format(record_count_total, df_count)),
## Map for visualization
dcc.Loading(id='loading_1', type='default',
children = dcc.Graph(id='graph_mapbox')),
html.Div([ # column
html.Div([
## Checklist for selecting Boroughs
html.H3('Borough'),
dcc.Checklist(id='checklist_borough',
# make checklist with total number of elements in each category like, e.g.: Queens (212)
options=make_borough_options(df, borough_names),
value=['Brooklyn']),
], className='three columns'),
html.Div([
## Checklist for selecting health status
html.H3('Health status'),
dcc.Checklist(id='checklist_health',
# make checklist with total number of elements in each category like, e.g.: Good (1426)
options=make_health_status_options(df, health_status),
value=health_status),
## storage variables wrapped in Loading(). This gives a
## lifesign when large data sets are processed
html.Br(),
html.Br(),
dcc.Loading(id='loading_2', type='default',
children=[
dcc.Store(id='store_df_filtered'),
dcc.Store(id='store_df_filtered_borough'),
dcc.Store(id='store_df_filtered_health'),
dcc.Store(id='store_df_graph_select'),]),
], className='three columns'),
html.Div([
## Export section
html.H3('Export data'),
html.H6('Complete data set'),
html.Button("Download CSV", id="btn_all_csv"),
dcc.Download(id="download_dataframe_all_csv"),
html.Button("Download XLSX", id="btn_all_xlsx"),
dcc.Download(id="download_dataframe_all_xlsx"),
html.Br(),
html.Br(),
html.H6('Filtered data set'),
html.Button("Download CSV", id="btn_filtered_csv"),
dcc.Download(id="download_dataframe_filtered_csv"),
html.Button("Download XLSX", id="btn_filtered_xlsx"),
dcc.Download(id="download_dataframe_filtered_xlsx"),
html.Br(),
html.Br(),
html.H6('User selected (graphical selection)'),
html.Button("Download CSV", id="btn_graph_select_csv"),
dcc.Download(id="download_dataframe_graph_select_csv"),
html.Button("Download XLSX", id="btn_graph_select_xlsx"),
dcc.Download(id="download_dataframe_graph_select_xlsx"),
], className='six columns'),
], className='column'),
], className='eight columns'),
html.Div([ # second sub column, width 3 for table on right side
## Table showing details of selected item
html.H3('Selected tree'),
dash_table.DataTable(
id='selectedTreeTable',
columns=[{'name': 'Trait', 'id': 'Trait'},
{'name': 'Value', 'id': 'Value'}],
),
], className='three columns'),
], className='row'),
# ## only for testing and debugging
# html.Div('TEST', id='test_text'),
])
##############################################################################
## call back functions
##############################################################################
## update filtered data
@app.callback(dash.dependencies.Output('store_df_filtered', 'data'),
              dash.dependencies.Output('store_df_filtered_borough', 'data'),
              dash.dependencies.Output('store_df_filtered_health', 'data'),
              dash.dependencies.Input('checklist_borough', 'value'),
              dash.dependencies.Input('checklist_health', 'value'),
              prevent_initial_call=False,)
def update_filtered_data(borough_name, health_status):
    """Recompute the three filtered views of the global ``df`` whenever a
    checklist changes, and return them JSON-serialized for the dcc.Store
    components (combined filter, borough-only filter, health-only filter)."""
    df_filtered_borough = df.loc[df['boroname'].isin(borough_name)]
    df_filtered_health = df.loc[df['health'].isin(health_status)]
    # BUG FIX: the original indexed df_filtered_borough with a boolean mask
    # built from the full ``df`` and relied on pandas index alignment; build
    # the mask from the frame actually being filtered instead.
    df_filtered = df_filtered_borough.loc[df_filtered_borough['health'].isin(health_status)]
    return df_filtered.to_json(date_format='iso', orient='split'), \
        df_filtered_borough.to_json(date_format='iso', orient='split'), \
        df_filtered_health.to_json(date_format='iso', orient='split')
## update mapbox figure
@app.callback(dash.dependencies.Output('graph_mapbox', 'figure'),
              dash.dependencies.Input('store_df_filtered', 'data'),
              prevent_initial_call=True,)
def update_graph_mapbox(jsonified_filtered_data):
    """Redraw the tree map whenever the combined-filter data store changes."""
    frame = pd.read_json(jsonified_filtered_data, orient='split')
    figure = create_mapbox_figure(frame)
    return figure
## update checklist_borough
@app.callback(dash.dependencies.Output('checklist_borough', 'options'),
              dash.dependencies.Input('store_df_filtered_health', 'data'))
def update_borough_options(jsonified_filtered_data):
    """Refresh borough checklist counts from the health-filtered data store."""
    frame = pd.read_json(jsonified_filtered_data, orient='split')
    return make_borough_options(frame, borough_names)
## update checklist_health
@app.callback(dash.dependencies.Output('checklist_health', 'options'),
              dash.dependencies.Input('store_df_filtered_borough', 'data'))
def update_health_status_options(jsonified_filtered_data):
    """Refresh health checklist counts from the borough-filtered data store."""
    frame = pd.read_json(jsonified_filtered_data, orient='split')
    return make_health_status_options(frame, health_status)
## save user selected tree_ids
@app.callback(dash.dependencies.Output('store_df_graph_select', 'data'),
              dash.dependencies.Input('graph_mapbox', 'selectedData'))
def update_user_selected_data(selected_data):
    """Store the tree ids of the points the user lasso/box-selected on the map."""
    if not selected_data:
        return None
    # tree_id is the last entry of each point's customdata
    # (guaranteed by the hover_data ordering in create_mapbox_figure)
    return [point['customdata'][-1] for point in selected_data['points']]
########################
## data export functions
########################
## all data - csv
@app.callback(dash.dependencies.Output("download_dataframe_all_csv", "data"),
              dash.dependencies.Input("btn_all_csv", "n_clicks"),
              prevent_initial_call=True,)
def download_all_csv(n_clicks):
    """Send the complete dataset as a CSV download.

    The visualization-only column ``tree_dbh_vis`` is stripped first.
    """
    exportable = df.drop(columns=['tree_dbh_vis'])
    return dcc.send_data_frame(exportable.to_csv, "StreetTreesOfNYC.csv")
## all data - excel
@app.callback(dash.dependencies.Output("download_dataframe_all_xlsx", "data"),
              dash.dependencies.Input("btn_all_xlsx", "n_clicks"),
              prevent_initial_call=True,)
def download_all_xlsx(n_clicks):
    """Send the complete dataset as an Excel download.

    The visualization-only column ``tree_dbh_vis`` is stripped first.
    """
    exportable = df.drop(columns=['tree_dbh_vis'])
    return dcc.send_data_frame(exportable.to_excel, "StreetTreesOfNYC.xlsx", sheet_name="Sheet_1")
## filtered data - csv
@app.callback(dash.dependencies.Output("download_dataframe_filtered_csv", "data"),
              dash.dependencies.Input("btn_filtered_csv", "n_clicks"),
              dash.dependencies.Input('store_df_filtered', 'data'),
              prevent_initial_call=True,)
def download_filtered_csv(n_clicks, jsonified_filtered_data):
    """Send the currently filtered dataset as a CSV download.

    Fires on both the button click and store updates; only the button
    click actually produces a download.
    """
    from io import StringIO  # local import keeps this fix self-contained
    ## make sure that the button was clicked (we ignore the trigger event from altered data)
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'btn_filtered_csv' in changed_id:
        # StringIO wrapper: raw-string input to pd.read_json is deprecated
        # since pandas 2.1.
        df_filter = pd.read_json(StringIO(jsonified_filtered_data), orient='split')
        df_filter = df_filter.drop(columns=['tree_dbh_vis'])
        return dcc.send_data_frame(df_filter.to_csv, "StreetTreesOfNYC_filtered.csv")
    return
## filtered data - excel
@app.callback(dash.dependencies.Output("download_dataframe_filtered_xlsx", "data"),
dash.dependencies.Input("btn_filtered_xlsx", "n_clicks"),
dash.dependencies.Input('store_df_filtered', 'data'),
prevent_initial_call=True,)
def download_filtered_xlsx(n_clicks, jsonified_filtered_data):
## make sure that the button was clicked (we ignore the trigger event from altered data)
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'btn_filtered_xlsx' in changed_id:
df_filter = pd.read_json(jsonified_filtered_data, orient='split')
df_filter = df_filter.drop(columns = ['tree_dbh_vis'])
| |
`T_i \cdot x`, where `T_i` is the `i`-th generator. This is
coded individually for use in ``x._mul_()``.
EXAMPLES::
sage: R.<q> = QQ[]; H = IwahoriHeckeAlgebra("A2", q).T()
sage: T1, T2 = H.algebra_generators()
sage: [H.product_by_generator(x, 1) for x in [T1,T2]]
[(q-1)*T[1] + q, T[2,1]]
sage: [H.product_by_generator(x, 1, side = "left") for x in [T1,T2]]
[(q-1)*T[1] + q, T[1,2]]
"""
return self.linear_combination((self.product_by_generator_on_basis(w, i, side), c)
for (w,c) in x)
def to_C_basis(self, w):
    r"""
    Express `T_w` as a linear combination of elements of the `C`-basis.

    EXAMPLES::

        sage: R = LaurentPolynomialRing(QQ, 'v')
        sage: v = R.gen(0)
        sage: H = IwahoriHeckeAlgebra('A2', v**2)
        sage: s1,s2 = H.coxeter_group().simple_reflections()
        sage: T = H.T()
        sage: C = H.C()
        sage: T.to_C_basis(s1)
        v*T[1] + v^2
        sage: C(T(s1))
        v*C[1] + v^2
        sage: C(v^-1*T(s1) - v)
        C[1]
        sage: C(T(s1*s2)+T(s1)+T(s2)+1)
        v^2*C[1,2] + (v^3+v)*C[1] + (v^3+v)*C[2] + (v^4+2*v^2+1)
        sage: C(T(s1*s2*s1))
        v^3*C[1,2,1] + v^4*C[1,2] + v^4*C[2,1] + v^5*C[1] + v^5*C[2] + v^6
    """
    # Do the computation in the generic Hecke algebra, then specialize
    # the result back to this algebra.
    algebra = self.realization_of()
    generic_answer = algebra._generic_iwahori_hecke_algebra.T().to_C_basis(w)
    return generic_answer.specialize_to(algebra)
def to_Cp_basis(self, w):
    r"""
    Express `T_w` as a linear combination of elements of the
    `C^{\prime}`-basis.

    EXAMPLES::

        sage: R.<v> = LaurentPolynomialRing(QQ)
        sage: H = IwahoriHeckeAlgebra('A2', v**2)
        sage: s1,s2 = H.coxeter_group().simple_reflections()
        sage: T = H.T()
        sage: Cp = H.Cp()
        sage: T.to_Cp_basis(s1)
        v*Cp[1] + (-1)
        sage: Cp(T(s1))
        v*Cp[1] + (-1)
        sage: Cp(T(s1)+1)
        v*Cp[1]
        sage: Cp(T(s1*s2)+T(s1)+T(s2)+1)
        v^2*Cp[1,2]
        sage: Cp(T(s1*s2*s1))
        v^3*Cp[1,2,1] + (-v^2)*Cp[1,2] + (-v^2)*Cp[2,1] + v*Cp[1] + v*Cp[2] + (-1)
    """
    # Do the computation in the generic Hecke algebra, then specialize
    # the result back to this algebra.
    algebra = self.realization_of()
    generic_answer = algebra._generic_iwahori_hecke_algebra.T().to_Cp_basis(w)
    return generic_answer.specialize_to(algebra)
def bar_on_basis(self, w):
    """
    Return the bar involution of `T_w`, which is `T^{-1}_{w^-1}`.

    EXAMPLES::

        sage: R.<v> = LaurentPolynomialRing(QQ)
        sage: H = IwahoriHeckeAlgebra('A3', v**2)
        sage: W = H.coxeter_group()
        sage: s1,s2,s3 = W.simple_reflections()
        sage: T = H.T()
        sage: b = T.bar_on_basis(s1*s2*s3); b
        (v^-6)*T[1,2,3]
        + (-v^-4+v^-6)*T[1,2]
        + (-v^-4+v^-6)*T[3,1]
        + (-v^-4+v^-6)*T[2,3]
        + (v^-2-2*v^-4+v^-6)*T[1]
        + (v^-2-2*v^-4+v^-6)*T[2]
        + (v^-2-2*v^-4+v^-6)*T[3]
        + (-1+3*v^-2-3*v^-4+v^-6)
        sage: b.bar()
        T[1,2,3]
    """
    # bar(T_w) = (T_{w^{-1}})^{-1}: first take the basis element indexed
    # by the inverse group element, then invert it in the algebra.
    T_w_inverse = self.monomial(w.inverse())
    return T_w_inverse.inverse()
def hash_involution_on_basis(self, w):
    r"""
    Return the hash involution on the basis element ``self[w]``.

    The hash involution `\alpha` is a `\ZZ`-algebra
    involution of the Iwahori-Hecke algebra determined by
    `q^{1/2} \mapsto q^{-1/2}`, and `T_w \mapsto -1^{\ell(w)}
    (q_1 q_2)^{-\ell(w)} T_w`, for `w` an element of the
    corresponding Coxeter group.

    This map is defined in [KL79]_ and it is used to change between
    the `C` and `C^{\prime}` bases because
    `\alpha(C_w) = (-1)^{\ell(w)}C^{\prime}_w`.

    This function is not intended to be called directly. Instead, use
    :meth:`hash_involution`.

    EXAMPLES::

        sage: R.<v> = LaurentPolynomialRing(QQ, 'v')
        sage: H = IwahoriHeckeAlgebra('A3', v**2)
        sage: T=H.T()
        sage: s=H.coxeter_group().simple_reflection(1)
        sage: T.hash_involution_on_basis(s)
        (-v^-2)*T[1]
        sage: T[s].hash_involution()
        (-v^-2)*T[1]
        sage: h = T[1]*T[2] + (v^3 - v^-1 + 2)*T[3,1,2,3]
        sage: h.hash_involution()
        (-v^-7+2*v^-8+v^-11)*T[1,2,3,2] + (v^-4)*T[1,2]
        sage: h.hash_involution().hash_involution() == h
        True
    """
    # alpha(T_w) = (-q_1 q_2)^{-len(w)} * T_w; the sign is absorbed
    # into the negated parameter product.
    algebra = self.realization_of()
    scale = (-algebra._q_prod) ** (-w.length())
    return scale * self.monomial(w)
class Element(CombinatorialFreeModuleElement):
    r"""
    A class for elements of an Iwahori-Hecke algebra in the `T` basis.

    TESTS::

        sage: R.<q> = QQ[]
        sage: H = IwahoriHeckeAlgebra("B3",q).T()
        sage: T1,T2,T3 = H.algebra_generators()
        sage: T1+2*T2*T3
        2*T[2,3] + T[1]
        sage: T1*T1
        (q-1)*T[1] + q

        sage: R.<q1,q2> = QQ[]
        sage: H = IwahoriHeckeAlgebra("A2", q1, q2=q2).T(prefix="x")
        sage: sum(H.algebra_generators())^2
        x[1,2] + x[2,1] + (q1+q2)*x[1] + (q1+q2)*x[2] + (-2*q1*q2)

        sage: H = IwahoriHeckeAlgebra("A2", q1, q2=q2).T(prefix="t")
        sage: t1,t2 = H.algebra_generators()
        sage: (t1-t2)^3
        (q1^2-q1*q2+q2^2)*t[1] + (-q1^2+q1*q2-q2^2)*t[2]

        sage: R.<q> = QQ[]
        sage: H = IwahoriHeckeAlgebra("G2", q).T()
        sage: [T1, T2] = H.algebra_generators()
        sage: T1*T2*T1*T2*T1*T2 == T2*T1*T2*T1*T2*T1
        True
        sage: T1*T2*T1 == T2*T1*T2
        False

        sage: H = IwahoriHeckeAlgebra("A2", 1).T()
        sage: [T1,T2] = H.algebra_generators()
        sage: T1+T2
        T[1] + T[2]
        sage: -(T1+T2)
        -T[1] - T[2]
        sage: 1-T1
        -T[1] + 1
        sage: T1.parent()
        Iwahori-Hecke algebra of type A2 in 1,-1 over Integer Ring in the T-basis
    """
    def inverse(self):
        r"""
        Return the inverse if ``self`` is a basis element.

        An element is a basis element if it is `T_w` where `w` is in
        the Weyl group. The base ring must be a field or Laurent
        polynomial ring. Other elements of the ring have inverses but
        the inverse method is only implemented for the basis elements.

        EXAMPLES::

            sage: R.<q> = LaurentPolynomialRing(QQ)
            sage: H = IwahoriHeckeAlgebra("A2", q).T()
            sage: [T1,T2] = H.algebra_generators()
            sage: x = (T1*T2).inverse(); x
            (q^-2)*T[2,1] + (-q^-1+q^-2)*T[1] + (-q^-1+q^-2)*T[2] + (1-2*q^-1+q^-2)
            sage: x*T1*T2
            1

        TESTS:

        We check some alternative forms of input for inverting
        an element::

            sage: R.<q> = LaurentPolynomialRing(QQ)
            sage: H = IwahoriHeckeAlgebra("A2", q).T()
            sage: T1,T2 = H.algebra_generators()
            sage: ~(T1*T2)
            (q^-2)*T[2,1] + (-q^-1+q^-2)*T[1] + (-q^-1+q^-2)*T[2] + (1-2*q^-1+q^-2)
            sage: (T1*T2)^(-1)
            (q^-2)*T[2,1] + (-q^-1+q^-2)*T[1] + (-q^-1+q^-2)*T[2] + (1-2*q^-1+q^-2)
        """
        if len(self) != 1:
            # Bug fix: the original message string contained no ``%s``
            # placeholder but was formatted with ``% self``, which would
            # raise ``TypeError: not all arguments converted`` instead of
            # the intended NotImplementedError.
            raise NotImplementedError("inverse only implemented for basis"
                                      " elements (monomials in the generators)")
        H = self.parent()
        w = self.support_of_term()
        # T_w^{-1} = T_{i_k}^{-1} ... T_{i_1}^{-1} for a reduced word
        # w = s_{i_1} ... s_{i_k}.
        return H.prod(H.inverse_generator(i) for i in reversed(w.reduced_word()))

    __invert__ = inverse
# Alias: the T basis is exposed as the "standard" basis of the algebra.
standard = T
class _KLHeckeBasis(_Basis):
    r"""
    Abstract class for the common methods for the Kazhdan-Lusztig `C` and
    `C^{\prime}` bases.
    """
    def __init__(self, IHAlgebra, prefix=None):
        r"""
        Return the Kazhdan-Lusztig basis of the Iwahori-Hecke algebra
        ``IHAlgebra``.

        EXAMPLES::

            sage: R.<v> = LaurentPolynomialRing(QQ)
            sage: H = IwahoriHeckeAlgebra('A3', v**2)
            sage: Cp = H.Cp()
            sage: C = H.C()
        """
        # The KL bases need a square root of -q_1*q_2 (stored as _root).
        if IHAlgebra._root is None:
            raise ValueError('The Kazhdan_Lusztig bases are defined only when -q_1*q_2 is a square')

        # This class is shared between the standard and the generic
        # Iwahori-Hecke algebras, so the super() target must be picked
        # explicitly depending on which algebra we belong to.
        if IHAlgebra._is_generic:
            klbasis=IwahoriHeckeAlgebra_nonstandard._KLHeckeBasis
        else:
            klbasis=IwahoriHeckeAlgebra._KLHeckeBasis

        super(klbasis, self).__init__(IHAlgebra, prefix)

        # Define conversion from the KL-basis to the T-basis via
        # specialization from the generic Hecke algebra
        self.module_morphism(self.to_T_basis, codomain=IHAlgebra.T(), category=self.category()
                             ).register_as_coercion()

        # ...and from the T_basis to the KL-basis.
        T = IHAlgebra.T()
        T.module_morphism(getattr(T, 'to_{}_basis'.format(self._basis_name)),
                          codomain=self, category=self.category()
                          ).register_as_coercion()

    def product_on_basis(self, w1, w2):
        r"""
        Return the product of the two Kazhdan-Lusztig basis elements
        indexed by ``w1`` and ``w2``. The computation is actually done by
        converting to the `T`-basis, multiplying and then converting back.

        EXAMPLES::

            sage: R = LaurentPolynomialRing(QQ, 'v')
            sage: v = R.gen(0)
            sage: H = IwahoriHeckeAlgebra('A2', v**2)
            sage: s1,s2 = H.coxeter_group().simple_reflections()
            sage: [H.Cp().product_on_basis(s1,x) for x in [s1,s2]]
            [(v+v^-1)*Cp[1], Cp[1,2]]
            sage: [H.C().product_on_basis(s1,x) for x in [s1,s2]]
            [(-v-v^-1)*C[1], C[1,2]]
        """
        # Round-trip through the T-basis, where multiplication is known.
        return self(self.to_T_basis(w1) * self.to_T_basis(w2))

    def bar_on_basis(self, w):
        r"""
        Return the bar involution on the Kazhdan-Lusztig basis element
        indexed by ``w``. By definition, all Kazhdan-Lusztig basis elements
        are fixed by the bar involution.

        EXAMPLES::

            sage: R.<v> = LaurentPolynomialRing(QQ)
            sage: H = IwahoriHeckeAlgebra('A3', v**2)
            sage: W = H.coxeter_group()
            sage: s1,s2,s3 = W.simple_reflections()
            sage: Cp = H.Cp()
            sage: Cp.bar_on_basis(s1*s2*s1*s3)
            Cp[1,2,3,1]
        """
        return self.monomial(w)

    def to_T_basis(self, w):
        r"""
        Return the Kazhdan-Lusztig basis element ``self[w]`` as a linear
        combination of `T`-basis elements.

        EXAMPLES::

            sage: H=IwahoriHeckeAlgebra("A3",1); Cp=H.Cp(); C=H.C()
            sage: s=H.coxeter_group().simple_reflection(1)
            sage: C.to_T_basis(s)
            T[1] - 1
            sage: Cp.to_T_basis(s)
            T[1] + 1
        """
        # Compute in the generic Hecke algebra and specialize back.
        H = self.realization_of()
        generic_KL = getattr(H._generic_iwahori_hecke_algebra, self._basis_name)()
        return generic_KL.to_T_basis(w).specialize_to(H)
class Cp(_KLHeckeBasis):
r"""
The `C^{\prime}` Kazhdan-Lusztig basis of Iwahori-Hecke algebra.
Assuming the standard quadratic relations of `(T_r-q)(T_r+1)=0`, for
every element `w` in the Coxeter group, there is a unique element
`C^{\prime}_w` in the Iwahori-Hecke algebra which is uniquely determined
by the two properties:
.. MATH::
\begin{aligned}
\overline{ C^{\prime}_w } &= C^{\prime}_w\\
C^{\prime}_w &= q^{-\ell(w)/2}
\sum_{v \leq w} P_{v,w}(q) T_v
\end{aligned}
where `\leq` is the Bruhat order on the underlying Coxeter group and
`P_{v,w}(q) \in \ZZ[q,q^{-1}]` are polynomials in `\ZZ[q]` such that
`P_{w,w}(q) = 1` and if `v < w` then `\deg P_{v,w}(q) \leq
\frac{1}{2}(\ell(w)-\ell(v)-1)`.
More generally, if the quadratic relations are of the form
(T_s-q_1)(T_s-q_2)=0` and `\sqrt{-q_1q_2}` exists then for a simple
reflection `s` then the corresponding Kazhdan-Lusztig basis element is:
.. MATH::
C^{\prime}_s = (-q_1 q_2)^{-1/2} (T_s + 1).
See [KL79]_ for more details.
EXAMPLES::
sage: R = LaurentPolynomialRing(QQ, 'v')
sage: v = R.gen(0)
sage: H = IwahoriHeckeAlgebra('A5', v**2)
sage: W = H.coxeter_group()
sage: s1,s2,s3,s4,s5 = W.simple_reflections()
sage: T = H.T()
sage: Cp = H.Cp()
sage: T(s1)**2
(v^2-1)*T[1] + v^2
sage: T(Cp(s1))
(v^-1)*T[1] + (v^-1)
sage: T(Cp(s1)*Cp(s2)*Cp(s1))
(v^-3)*T[1,2,1] + (v^-3)*T[1,2] + (v^-3)*T[2,1] + (v^-1+v^-3)*T[1] + (v^-3)*T[2] + (v^-1+v^-3)
::
sage: R = LaurentPolynomialRing(QQ, 'v')
sage: v = R.gen(0)
sage: H = IwahoriHeckeAlgebra('A3', v**2)
sage: W = H.coxeter_group()
sage: s1,s2,s3 = W.simple_reflections()
sage: Cp = H.Cp()
sage: Cp(s1*s2*s1)
Cp[1,2,1]
sage: Cp(s1)**2
(v+v^-1)*Cp[1]
sage: Cp(s1)*Cp(s2)*Cp(s1)
Cp[1,2,1] + Cp[1]
sage: Cp(s1)*Cp(s2)*Cp(s3)*Cp(s1)*Cp(s2) # long time
Cp[1,2,3,1,2] + Cp[1,2,1] + Cp[3,1,2]
TESTS::
sage: R.<v> = LaurentPolynomialRing(QQ, 'v')
sage: H = IwahoriHeckeAlgebra('A3', v**2)
sage: W = H.coxeter_group()
sage: T = H.T()
sage: C = H.C()
sage: Cp = H.Cp()
sage: all(Cp(T(Cp[x])) == Cp[x] for x in W) # long time
True
sage: all(Cp(C(Cp[x])) == Cp[x] | |
<gh_stars>1-10
import netket as nk
import numpy as np
import netket.experimental as nkx
from netket.experimental.operator._fermions_2nd import _convert_terms_to_spin_blocks
from netket.experimental.operator.fermion import destroy, create, number
import pytest
# Fixture mapping: test id -> (operator, expected value of ``is_hermitian``).
op_ferm = {}
hi = nkx.hilbert.SpinOrbitalFermions(3)

# A term plus its adjoint term with the conjugate weight -> hermitian.
op_ferm["FermionOperator2nd_hermitian"] = (
    nkx.operator.FermionOperator2nd(
        hi, terms=(((0, 0), (1, 1)), ((1, 0), (0, 1))), weights=(1.0 + 1j, 1 - 1j)
    ),
    True,
)
# Second term is NOT the adjoint of the first (site 2 vs site 1).
op_ferm["FermionOperator2nd_not_hermitian"] = (
    nkx.operator.FermionOperator2nd(
        hi, terms=(((0, 0), (2, 1)), ((1, 0), (0, 1))), weights=(1.0 + 1j, 1 - 1j)
    ),
    False,
)
op_ferm["FermionOperator2nd_hermitian_3term"] = (
    nkx.operator.FermionOperator2nd(
        hi,
        (((0, 0), (1, 1), (2, 1)), ((2, 0), (1, 0), (0, 1))),
        weights=(1.0 - 1j, 1 + 1j),
    ),
    True,
)
op_ferm["FermionOperator2nd_not_hermitian_3term"] = (
    nkx.operator.FermionOperator2nd(
        hi,
        (((0, 0), (1, 1), (2, 1)), ((3, 0), (1, 0), (0, 1))),
        weights=(1.0 - 1j, 2 + 2j),
    ),
    False,
)
# Repeated identical density-density terms (Fermi-Hubbard interaction style);
# each term is self-adjoint, so the sum is hermitian.
op_ferm["fermihubbard_int"] = (
    nkx.operator.FermionOperator2nd(
        hi,
        terms=(
            ((0, 1), (0, 0), (1, 1), (1, 0)),
            ((0, 1), (0, 0), (1, 1), (1, 0)),
            ((0, 1), (0, 0), (1, 1), (1, 0)),
            ((0, 1), (0, 0), (1, 1), (1, 0)),
        ),
        weights=(1.0, 1.0, 1.0, 1.0),
    ),
    True,
)
# Same pair of operators written in two different orderings.
op_ferm["ordering"] = (
    nkx.operator.FermionOperator2nd(
        hi,
        terms=(((0, 1), (0, 0), (1, 1), (1, 0)), ((1, 1), (1, 0), (0, 1), (0, 0))),
        weights=(1.0 - 1j, 1 + 1j),
    ),
    True,
)
@pytest.mark.parametrize(
    "op_ferm, is_hermitian",
    [pytest.param(op, is_herm, id=name) for name, (op, is_herm) in op_ferm.items()],
)
def test_is_hermitian_fermion2nd(op_ferm, is_hermitian):
    """Check ``is_hermitian`` against the expected value for each fixture."""
    # NOTE(review): the parameter deliberately shadows the module-level
    # ``op_ferm`` dict; inside this test it is a single operator.
    print("OPERATOR", op_ferm._operators)
    assert op_ferm.is_hermitian == is_hermitian
def test_fermion_operator_with_strings():
    """Operators built from term tuples equal those built from string specs."""
    hi = nkx.hilbert.SpinOrbitalFermions(3)

    def check_equivalent(term_tuples, term_strings, *weights):
        # build the same operator twice and compare dense matrices
        built_from_tuples = nkx.operator.FermionOperator2nd(hi, term_tuples, *weights)
        built_from_strings = nkx.operator.FermionOperator2nd(hi, term_strings, *weights)
        assert np.allclose(built_from_tuples.to_dense(), built_from_strings.to_dense())

    check_equivalent((((0, 1), (2, 0)),), ("0^ 2",))
    check_equivalent(
        (((0, 1), (1, 0)), ((2, 1), (1, 0))),
        ("0^ 1", "2^ 1"),
        (0.5 - 0.5j, 0.5 + 0.5j),
    )
    check_equivalent(
        (((0, 1), (1, 0), (2, 1)), ((2, 1), (1, 0), (0, 1))),
        ("0^ 1 2^", "2^ 1 0^"),
        (0.5 - 0.5j, 0.5 + 0.5j),
    )
def compare_openfermion_fermions():
    """Cross-check FermionOperator2nd against openfermion's dense matrices.

    NOTE(review): the name does not start with ``test_``, so pytest never
    collects this function. Presumably it was disabled on purpose (or is a
    helper), but worth confirming -- renaming to ``test_...`` would enable it.
    """
    # skip test if openfermion not installed
    pytest.importorskip("openfermion")
    from openfermion import FermionOperator, get_sparse_operator

    # openfermion
    of = FermionOperator("0^ 1", 1.0) + FermionOperator("1^ 0", 1.0)
    of_dense = get_sparse_operator(of).todense()
    # from_openfermion
    fo = nkx.operator.FermionOperator2nd.from_openfermion(of)
    fo_dense = fo.to_dense()
    # FermionOperator2nd
    hi = nkx.hilbert.SpinOrbitalFermions(2)  # two sites
    fermop = nkx.operator.FermionOperator2nd(
        hi, terms=(((0, 1), (1, 0)), ((1, 1), (0, 0))), weights=(1.0, 1.0)
    )
    fermop_dense = fermop.to_dense()
    # compare openfermion vs from_openfermion
    assert np.array_equal(of_dense, fo_dense)
    # compare openfermion vs FermionOperator2nd
    assert np.array_equal(of_dense, fermop_dense)
    # compare from_openfermion vs FermionOperator 2nd
    assert np.array_equal(fo_dense, fermop_dense)
def test_add_fermions():
    """Adding operators merges their terms and sums their constants."""
    hi = nkx.hilbert.SpinOrbitalFermions(5)
    # NOTE(review): ``terms=("1^ 2")`` is a bare string, not a 1-tuple
    # (missing trailing comma). The constructor appears to accept a plain
    # string spec as well -- confirm this is intended.
    op1 = nkx.operator.FermionOperator2nd(hi, terms=("1^ 2"), weights=(1,), constant=2)
    op2 = nkx.operator.FermionOperator2nd(
        hi, terms=("3^ 4"), weights=(1.3,), constant=5.7
    )
    # op3 is the expected value of op1 + op2 (constants 2 + 5.7 = 7.7).
    op3 = nkx.operator.FermionOperator2nd(
        hi, terms=("3^ 4", "1^ 2"), weights=(1.3, 1), constant=7.7
    )
    # op5 is the expected value of op3 * 2 (everything doubled).
    op4 = op3 * 2
    op5 = nkx.operator.FermionOperator2nd(
        hi, terms=("3^ 4", "1^ 2"), weights=(2 * 1.3, 2 * 1), constant=2 * 7.7
    )
    assert np.allclose((op1 + op2).to_dense(), op3.to_dense())
    assert np.allclose(op4.to_dense(), op5.to_dense())
def test_create_annihil_number():
    """Operators built from string specs match products of create/destroy/number."""
    hi = nkx.hilbert.SpinOrbitalFermions(5)
    op1 = nkx.operator.FermionOperator2nd(hi, terms=("0^ 0", "1^ 2"), weights=(0.3, 2))

    # without spin: thin wrappers over the fermion helper factories
    def c(site):
        return destroy(hi, site)

    def cdag(site):
        return create(hi, site)

    def cn(site):
        return number(hi, site)

    op2 = 0.3 * cn(0) + 2 * cdag(1) * c(2)
    assert np.allclose(op1.to_dense(), op2.to_dense())
    op3 = nkx.operator.FermionOperator2nd(
        hi, terms=("0^ 1", "1^ 2"), weights=(1 + 1j, 2 - 2j), constant=2
    )
    op4 = (1 + 1j) * cdag(0) * c(1) + (2 - 2j) * cdag(1) * c(2) + 2
    assert np.allclose(op3.to_dense(), op4.to_dense())

    # same, but with spin
    # NOTE: ``hi`` is rebound here; the helpers above are not used again.
    hi = nkx.hilbert.SpinOrbitalFermions(4, s=1 / 2)
    # flat orbital indices in the string specs map to (site, spin-sector)
    # pairs in the helper calls -- e.g. index 6 <-> destroy(hi, 2, +0.5);
    # assumes down-spin orbitals come first (TODO confirm ordering).
    op1 = nkx.operator.FermionOperator2nd(hi, terms=("0^ 0", "1^ 6"), weights=(0.3, 2))
    op2 = 0.3 * number(hi, 0, -0.5) + 2 * create(hi, 1, -0.5) * destroy(hi, 2, +0.5)
    assert np.allclose(op1.to_dense(), op2.to_dense())
    op3 = nkx.operator.FermionOperator2nd(
        hi, terms=("4^ 1", "1^ 2"), weights=(1 + 1j, 2 - 2j), constant=2
    )
    op4 = (
        (1 + 1j) * create(hi, 0, +0.5) * destroy(hi, 1, -0.5)
        + (2 - 2j) * create(hi, 1, -0.5) * destroy(hi, 2, -0.5)
        + 2
    )
    assert np.allclose(op3.to_dense(), op4.to_dense())
def test_operations_fermions():
    """Exercise copy, +, +=, *, *= (scalar) and conjugate on FermionOperator2nd."""
    hi = nkx.hilbert.SpinOrbitalFermions(5)
    op1 = nkx.operator.FermionOperator2nd(
        hi, terms=("1^ 2",), weights=(1,), constant=2, dtype=complex
    )
    op2 = nkx.operator.FermionOperator2nd(
        hi, terms=("3^ 4"), weights=(1.3,), constant=5.7
    )
    # copy must preserve hilbert space, terms, hermiticity and dense form
    op2copy = op2.copy()
    assert op2copy.hilbert == op2.hilbert
    assert np.allclose(list(op2._operators.keys()), list(op2copy._operators.keys()))
    assert np.allclose(list(op2._operators.values()), list(op2copy._operators.values()))
    assert op2.is_hermitian == op2copy.is_hermitian
    assert np.allclose(op2.to_dense(), op2copy.to_dense())

    # addition: op1 + op2 == op3 (both out-of-place and in-place)
    op3 = nkx.operator.FermionOperator2nd(
        hi, terms=("3^ 4", "1^ 2"), weights=(1.3, 1), constant=7.7
    )
    op12 = op1.copy()
    op12 += op2
    assert np.allclose((op1 + op2).to_dense(), op3.to_dense())
    assert np.allclose(op12.to_dense(), op3.to_dense())

    # scalar multiplication (out-of-place and in-place)
    op4 = op3 * 2
    op5 = nkx.operator.FermionOperator2nd(
        hi, terms=("3^ 4", "1^ 2"), weights=(2 * 1.3, 2 * 1), constant=2 * 7.7
    )
    op4b = op3.copy()
    op4b *= 2
    assert np.allclose(op4.to_dense(), op5.to_dense())
    assert np.allclose(op4b.to_dense(), op5.to_dense())

    # addition with complex weights
    op6 = nkx.operator.FermionOperator2nd(
        hi, terms=("1^ 2", "0^ 1"), weights=(1j, -1.0j), constant=7.7
    )
    op7 = nkx.operator.FermionOperator2nd(
        hi, terms=("1^ 2", "0^ 1"), weights=(1, 1), constant=7.7
    )
    op8 = nkx.operator.FermionOperator2nd(
        hi, terms=("1^ 2", "0^ 1"), weights=(1.0 + 1j, 1 - 1j), constant=2 * 7.7
    )
    op67 = op6.copy()
    op67 += op7
    assert np.allclose((op6 + op7).to_dense(), op8.to_dense())
    assert np.allclose(op67.to_dense(), op8.to_dense())

    # conjugation: adjoint terms with conjugated weights and constant
    op8 = nkx.operator.FermionOperator2nd(
        hi, terms=("0^ 1", "2^ 3"), weights=(1 + 1j, 2 - 0.5j), constant=1.0 + 3j
    )
    op8_trueconj = nkx.operator.FermionOperator2nd(
        hi, terms=("1^ 0", "3^ 2"), weights=(1 - 1j, 2 + 0.5j), constant=1.0 - 3j
    )
    assert np.allclose(op8.conjugate().to_dense(), op8_trueconj.to_dense())
def test_fermion_op_matmul():
    """Operator products (@ and *) match hand-expanded term products."""
    hi = nkx.hilbert.SpinOrbitalFermions(3)
    op1 = nkx.operator.FermionOperator2nd(hi, terms=("0^ 0", "1^ 2"), weights=(0.3, 2))

    # multiply with a real constant
    op_real = nkx.operator.FermionOperator2nd(hi, [], [], constant=2.0)
    assert np.allclose((op1 @ op_real).to_dense(), (op1 * 2).to_dense())
    assert np.allclose((op1 * op_real).to_dense(), (op1 * 2).to_dense())

    # multiply with a real+complex constant
    op_complex = nkx.operator.FermionOperator2nd(hi, [], [], constant=2.0 + 2j)
    assert np.allclose((op1 @ op_complex).to_dense(), (op1 * (2 + 2j)).to_dense())
    assert np.allclose((op1 * op_complex).to_dense(), (op1 * (2 + 2j)).to_dense())

    # multiply with another operator
    op2 = nkx.operator.FermionOperator2nd(
        hi, terms=("1^ 1", "0^ 2"), weights=(1 + 1j, 0.5)
    )
    # op2b is op1 @ op2 expanded by hand: the cartesian product of terms
    # with pairwise-multiplied weights.
    op2b = nkx.operator.FermionOperator2nd(
        hi,
        terms=("0^ 0 1^ 1", "0^ 0 0^ 2", "1^ 2 1^ 1", "1^ 2 0^ 2"),
        weights=(0.3 * (1 + 1j), 0.3 * 0.5, 2 * (1 + 1j), 2 * 0.5),
    )
    assert np.allclose(
        (op1 @ op2).to_dense(),
        op2b.to_dense(),
    )
    assert np.allclose(
        (op1 * op2).to_dense(),
        op2b.to_dense(),
    )

    # multiply with another operator + constant
    op3 = nkx.operator.FermionOperator2nd(
        hi, terms=("1^ 1",), weights=(1 + 1j,), constant=5
    )
    # the constant distributes over op1's terms (weights 5*0.3 and 5*2=10)
    op3b = nkx.operator.FermionOperator2nd(
        hi,
        terms=("0^ 0 1^ 1", "0^ 0", "1^ 2 1^ 1", "1^ 2"),
        weights=(0.3 * (1 + 1j), 5 * 0.3, 2 * (1 + 1j), 10),
        constant=0,
    )
    assert np.allclose(
        (op1 @ op3).to_dense(),
        op3b.to_dense(),
    )
    assert np.allclose(
        (op1 * op3).to_dense(),
        op3b.to_dense(),
    )
def test_fermion_add_sub_mul():
    """Check +, - and scalar * against hand-built expected operators."""
    # check addition
    hi = nkx.hilbert.SpinOrbitalFermions(3)
    op1 = nkx.operator.FermionOperator2nd(
        hi, terms=("0^ 0", "1^ 2"), weights=(0.3, 2), constant=2
    )
    assert np.allclose((op1 + op1).to_dense(), 2 * op1.to_dense())
    op2 = nkx.operator.FermionOperator2nd(
        hi, terms=("0^ 0", "0^ 1"), weights=(0.5, 4j), constant=1
    )
    # shared term "0^ 0" merges: weight 0.3 + 0.5; constants add 2 + 1 = 3
    op2b = nkx.operator.FermionOperator2nd(
        hi, terms=("0^ 0", "1^ 2", "0^ 1"), weights=(0.3 + 0.5, 2, 4j), constant=3
    )
    assert np.allclose((op1 + op2).to_dense(), op2b.to_dense())
    op2c = op2.copy()
    op2c += op1
    assert np.allclose(op2c.to_dense(), op2b.to_dense())

    # check subtraction
    op2d = nkx.operator.FermionOperator2nd(
        hi, terms=("0^ 0", "1^ 2", "0^ 1"), weights=(0.3 - 0.5, 2, -4j), constant=1
    )
    assert np.allclose((op1 - op2).to_dense(), op2d.to_dense())
    op1c = op1.copy()
    op1c -= op2
    assert np.allclose(op1c.to_dense(), op2d.to_dense())

    # check multiplication with scalar (everything scaled by 10)
    op1f = nkx.operator.FermionOperator2nd(
        hi,
        terms=("0^ 0", "1^ 2"),
        weights=(
            3,
            20,
        ),
        constant=20,
    )
    op1c = op1.copy()
    op1c *= 10
    assert np.allclose((op1 * 10).to_dense(), op1f.to_dense())
    assert np.allclose(op1c.to_dense(), op1f.to_dense())
@pytest.mark.parametrize("dtype1", [np.float32, np.float64])
@pytest.mark.parametrize("dtype2", [np.float32, np.float64])
def test_dtype_promotion(dtype1, dtype2):
    """Operator arithmetic must follow numpy's dtype promotion rules."""
    hi = nkx.hilbert.SpinOrbitalFermions(3)
    op1 = nkx.operator.FermionOperator2nd(
        hi, terms=("0^ 0", "1^ 2"), weights=(0.3, 2), constant=2, dtype=dtype1
    )
    op2 = nkx.operator.FermionOperator2nd(
        hi, terms=("0^ 1"), weights=(0.1,), constant=2, dtype=dtype2
    )
    # dtype is preserved by construction, to_dense and negation
    assert op1.dtype == dtype1
    assert op2.dtype == dtype2
    assert op1.to_dense().dtype == dtype1
    assert op2.to_dense().dtype == dtype2
    assert (-op1).dtype == dtype1
    assert (-op2).dtype == dtype2
    # operator-operator ops promote like numpy
    assert (op1 + op2).dtype == np.promote_types(op1.dtype, op2.dtype)
    assert (op1 - op2).dtype == np.promote_types(op1.dtype, op2.dtype)
    assert (op1 @ op2).dtype == np.promote_types(op1.dtype, op2.dtype)
    # operator-scalar ops promote with the scalar's dtype
    a = np.array(0.5, dtype=dtype1)
    assert (op2 + a + op2).dtype == np.promote_types(a.dtype, op2.dtype)
    assert (op2 - a).dtype == np.promote_types(a.dtype, op2.dtype)
    assert (op2 * a).dtype == np.promote_types(a.dtype, op2.dtype)
    a = np.array(0.5, dtype=dtype2)
    assert (op1 + a).dtype == np.promote_types(op1.dtype, a.dtype)
    assert (op1 - a).dtype == np.promote_types(op1.dtype, a.dtype)
| |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info

# SWIG-generated bootstrap: import the compiled extension module
# ``_multiresolutionimageinterface``; the import strategy depends on the
# Python version this wrapper runs under.
if _swig_python_version_info >= (2, 7, 0):
    def swig_import_helper():
        # Prefer a package-relative import; fall back to a top-level one.
        import importlib
        pkg = __name__.rpartition('.')[0]
        mname = '.'.join((pkg, '_multiresolutionimageinterface')).lstrip('.')
        try:
            return importlib.import_module(mname)
        except ImportError:
            return importlib.import_module('_multiresolutionimageinterface')
    _multiresolutionimageinterface = swig_import_helper()
    del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
    def swig_import_helper():
        # Python 2.6: locate the extension next to this file via ``imp``.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_multiresolutionimageinterface', [dirname(__file__)])
        except ImportError:
            import _multiresolutionimageinterface
            return _multiresolutionimageinterface
        try:
            _mod = imp.load_module('_multiresolutionimageinterface', fp, pathname, description)
        finally:
            # always close the file handle opened by find_module
            if fp is not None:
                fp.close()
        return _mod
    _multiresolutionimageinterface = swig_import_helper()
    del swig_import_helper
else:
    import _multiresolutionimageinterface
del _swig_python_version_info

try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.

# Python 2/3 builtins-module compatibility alias.
try:
    import builtins as __builtin__
except ImportError:
    import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG proxy setter: route attribute writes to the C-level setter.

    ``static=1`` forbids adding attributes unknown to the wrapped C++ class.
    """
    if (name == "thisown"):
        # ownership flag lives on the underlying SwigPyObject
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    # Non-static variant: unknown attributes may be added to the instance.
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """SWIG proxy getter: route attribute reads to the C-level getter."""
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
    """repr() for SWIG proxies, showing the wrapped C++ object when present."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except __builtin__.Exception:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Old-/new-style class compatibility shim (only relevant on Python 2).
try:
    _object = object
    _newclass = 1
except __builtin__.Exception:
    class _object:
        pass
    _newclass = 0
class SwigPyIterator(_object):
    """SWIG-generated proxy for C++ iterators over wrapped STL containers.

    Abstract: instances are only obtained from container ``iterator()``
    methods; every method delegates to the compiled extension module.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _multiresolutionimageinterface.delete_SwigPyIterator
    __del__ = lambda self: None

    def value(self) -> "PyObject *":
        return _multiresolutionimageinterface.SwigPyIterator_value(self)

    def incr(self, n: 'size_t'=1) -> "swig::SwigPyIterator *":
        return _multiresolutionimageinterface.SwigPyIterator_incr(self, n)

    def decr(self, n: 'size_t'=1) -> "swig::SwigPyIterator *":
        return _multiresolutionimageinterface.SwigPyIterator_decr(self, n)

    def distance(self, x: 'SwigPyIterator') -> "ptrdiff_t":
        return _multiresolutionimageinterface.SwigPyIterator_distance(self, x)

    def equal(self, x: 'SwigPyIterator') -> "bool":
        return _multiresolutionimageinterface.SwigPyIterator_equal(self, x)

    def copy(self) -> "swig::SwigPyIterator *":
        return _multiresolutionimageinterface.SwigPyIterator_copy(self)

    def next(self) -> "PyObject *":
        return _multiresolutionimageinterface.SwigPyIterator_next(self)

    def __next__(self) -> "PyObject *":
        return _multiresolutionimageinterface.SwigPyIterator___next__(self)

    def previous(self) -> "PyObject *":
        return _multiresolutionimageinterface.SwigPyIterator_previous(self)

    def advance(self, n: 'ptrdiff_t') -> "swig::SwigPyIterator *":
        return _multiresolutionimageinterface.SwigPyIterator_advance(self, n)

    def __eq__(self, x: 'SwigPyIterator') -> "bool":
        return _multiresolutionimageinterface.SwigPyIterator___eq__(self, x)

    def __ne__(self, x: 'SwigPyIterator') -> "bool":
        return _multiresolutionimageinterface.SwigPyIterator___ne__(self, x)

    def __iadd__(self, n: 'ptrdiff_t') -> "swig::SwigPyIterator &":
        return _multiresolutionimageinterface.SwigPyIterator___iadd__(self, n)

    def __isub__(self, n: 'ptrdiff_t') -> "swig::SwigPyIterator &":
        return _multiresolutionimageinterface.SwigPyIterator___isub__(self, n)

    def __add__(self, n: 'ptrdiff_t') -> "swig::SwigPyIterator *":
        return _multiresolutionimageinterface.SwigPyIterator___add__(self, n)

    def __sub__(self, *args) -> "ptrdiff_t":
        return _multiresolutionimageinterface.SwigPyIterator___sub__(self, *args)

    def __iter__(self):
        return self
# Register the proxy class with the C extension so wrapped C++ objects
# are presented as SwigPyIterator instances.
SwigPyIterator_swigregister = _multiresolutionimageinterface.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)

SHARED_PTR_DISOWN = _multiresolutionimageinterface.SHARED_PTR_DISOWN
class vector_int(_object):
    """SWIG-generated proxy for ``std::vector<int>`` with a list-like API.

    Every method delegates to the compiled extension module.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vector_int, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vector_int, name)
    __repr__ = _swig_repr

    def iterator(self) -> "swig::SwigPyIterator *":
        return _multiresolutionimageinterface.vector_int_iterator(self)

    def __iter__(self):
        return self.iterator()

    def __nonzero__(self) -> "bool":
        return _multiresolutionimageinterface.vector_int___nonzero__(self)

    def __bool__(self) -> "bool":
        return _multiresolutionimageinterface.vector_int___bool__(self)

    def __len__(self) -> "std::vector< int >::size_type":
        return _multiresolutionimageinterface.vector_int___len__(self)

    def __getslice__(self, i: 'std::vector< int >::difference_type', j: 'std::vector< int >::difference_type') -> "std::vector< int,std::allocator< int > > *":
        return _multiresolutionimageinterface.vector_int___getslice__(self, i, j)

    def __setslice__(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_int___setslice__(self, *args)

    def __delslice__(self, i: 'std::vector< int >::difference_type', j: 'std::vector< int >::difference_type') -> "void":
        return _multiresolutionimageinterface.vector_int___delslice__(self, i, j)

    def __delitem__(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_int___delitem__(self, *args)

    def __getitem__(self, *args) -> "std::vector< int >::value_type const &":
        return _multiresolutionimageinterface.vector_int___getitem__(self, *args)

    def __setitem__(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_int___setitem__(self, *args)

    def pop(self) -> "std::vector< int >::value_type":
        return _multiresolutionimageinterface.vector_int_pop(self)

    def append(self, x: 'std::vector< int >::value_type const &') -> "void":
        return _multiresolutionimageinterface.vector_int_append(self, x)

    def empty(self) -> "bool":
        return _multiresolutionimageinterface.vector_int_empty(self)

    def size(self) -> "std::vector< int >::size_type":
        return _multiresolutionimageinterface.vector_int_size(self)

    def swap(self, v: 'vector_int') -> "void":
        return _multiresolutionimageinterface.vector_int_swap(self, v)

    def begin(self) -> "std::vector< int >::iterator":
        return _multiresolutionimageinterface.vector_int_begin(self)

    def end(self) -> "std::vector< int >::iterator":
        return _multiresolutionimageinterface.vector_int_end(self)

    def rbegin(self) -> "std::vector< int >::reverse_iterator":
        return _multiresolutionimageinterface.vector_int_rbegin(self)

    def rend(self) -> "std::vector< int >::reverse_iterator":
        return _multiresolutionimageinterface.vector_int_rend(self)

    def clear(self) -> "void":
        return _multiresolutionimageinterface.vector_int_clear(self)

    def get_allocator(self) -> "std::vector< int >::allocator_type":
        return _multiresolutionimageinterface.vector_int_get_allocator(self)

    def pop_back(self) -> "void":
        return _multiresolutionimageinterface.vector_int_pop_back(self)

    def erase(self, *args) -> "std::vector< int >::iterator":
        return _multiresolutionimageinterface.vector_int_erase(self, *args)

    def __init__(self, *args):
        # Attach the new C++ object to this proxy instance.
        this = _multiresolutionimageinterface.new_vector_int(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this

    def push_back(self, x: 'std::vector< int >::value_type const &') -> "void":
        return _multiresolutionimageinterface.vector_int_push_back(self, x)

    def front(self) -> "std::vector< int >::value_type const &":
        return _multiresolutionimageinterface.vector_int_front(self)

    def back(self) -> "std::vector< int >::value_type const &":
        return _multiresolutionimageinterface.vector_int_back(self)

    def assign(self, n: 'std::vector< int >::size_type', x: 'std::vector< int >::value_type const &') -> "void":
        return _multiresolutionimageinterface.vector_int_assign(self, n, x)

    def resize(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_int_resize(self, *args)

    def insert(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_int_insert(self, *args)

    def reserve(self, n: 'std::vector< int >::size_type') -> "void":
        return _multiresolutionimageinterface.vector_int_reserve(self, n)

    def capacity(self) -> "std::vector< int >::size_type":
        return _multiresolutionimageinterface.vector_int_capacity(self)
    __swig_destroy__ = _multiresolutionimageinterface.delete_vector_int
    __del__ = lambda self: None
# Register the proxy class with the C extension so vector<int> values
# returned from C++ are wrapped as vector_int instances on the Python side.
vector_int_swigregister = _multiresolutionimageinterface.vector_int_swigregister
vector_int_swigregister(vector_int)
class vector_uint(_object):
    """SWIG-generated proxy for C++ ``std::vector<unsigned int>``.

    Attribute access is routed through the generated ``_swig_setattr`` /
    ``_swig_getattr`` helpers; every method forwards to the compiled
    ``_multiresolutionimageinterface`` extension module. Do not edit by hand.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, vector_uint, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, vector_uint, name)
    __repr__ = _swig_repr
    def iterator(self) -> "swig::SwigPyIterator *":
        return _multiresolutionimageinterface.vector_uint_iterator(self)
    def __iter__(self):
        return self.iterator()
    def __nonzero__(self) -> "bool":
        return _multiresolutionimageinterface.vector_uint___nonzero__(self)
    def __bool__(self) -> "bool":
        return _multiresolutionimageinterface.vector_uint___bool__(self)
    def __len__(self) -> "std::vector< unsigned int >::size_type":
        return _multiresolutionimageinterface.vector_uint___len__(self)
    def __getslice__(self, i: 'std::vector< unsigned int >::difference_type', j: 'std::vector< unsigned int >::difference_type') -> "std::vector< unsigned int,std::allocator< unsigned int > > *":
        return _multiresolutionimageinterface.vector_uint___getslice__(self, i, j)
    def __setslice__(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_uint___setslice__(self, *args)
    def __delslice__(self, i: 'std::vector< unsigned int >::difference_type', j: 'std::vector< unsigned int >::difference_type') -> "void":
        return _multiresolutionimageinterface.vector_uint___delslice__(self, i, j)
    def __delitem__(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_uint___delitem__(self, *args)
    def __getitem__(self, *args) -> "std::vector< unsigned int >::value_type const &":
        return _multiresolutionimageinterface.vector_uint___getitem__(self, *args)
    def __setitem__(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_uint___setitem__(self, *args)
    def pop(self) -> "std::vector< unsigned int >::value_type":
        return _multiresolutionimageinterface.vector_uint_pop(self)
    def append(self, x: 'std::vector< unsigned int >::value_type const &') -> "void":
        return _multiresolutionimageinterface.vector_uint_append(self, x)
    def empty(self) -> "bool":
        return _multiresolutionimageinterface.vector_uint_empty(self)
    def size(self) -> "std::vector< unsigned int >::size_type":
        return _multiresolutionimageinterface.vector_uint_size(self)
    def swap(self, v: 'vector_uint') -> "void":
        return _multiresolutionimageinterface.vector_uint_swap(self, v)
    def begin(self) -> "std::vector< unsigned int >::iterator":
        return _multiresolutionimageinterface.vector_uint_begin(self)
    def end(self) -> "std::vector< unsigned int >::iterator":
        return _multiresolutionimageinterface.vector_uint_end(self)
    def rbegin(self) -> "std::vector< unsigned int >::reverse_iterator":
        return _multiresolutionimageinterface.vector_uint_rbegin(self)
    def rend(self) -> "std::vector< unsigned int >::reverse_iterator":
        return _multiresolutionimageinterface.vector_uint_rend(self)
    def clear(self) -> "void":
        return _multiresolutionimageinterface.vector_uint_clear(self)
    def get_allocator(self) -> "std::vector< unsigned int >::allocator_type":
        return _multiresolutionimageinterface.vector_uint_get_allocator(self)
    def pop_back(self) -> "void":
        return _multiresolutionimageinterface.vector_uint_pop_back(self)
    def erase(self, *args) -> "std::vector< unsigned int >::iterator":
        return _multiresolutionimageinterface.vector_uint_erase(self, *args)
    def __init__(self, *args):
        # Standard SWIG constructor: bind the new C++ vector to this proxy,
        # falling back to plain assignment when `this` is not yet set up.
        this = _multiresolutionimageinterface.new_vector_uint(*args)
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def push_back(self, x: 'std::vector< unsigned int >::value_type const &') -> "void":
        return _multiresolutionimageinterface.vector_uint_push_back(self, x)
    def front(self) -> "std::vector< unsigned int >::value_type const &":
        return _multiresolutionimageinterface.vector_uint_front(self)
    def back(self) -> "std::vector< unsigned int >::value_type const &":
        return _multiresolutionimageinterface.vector_uint_back(self)
    def assign(self, n: 'std::vector< unsigned int >::size_type', x: 'std::vector< unsigned int >::value_type const &') -> "void":
        return _multiresolutionimageinterface.vector_uint_assign(self, n, x)
    def resize(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_uint_resize(self, *args)
    def insert(self, *args) -> "void":
        return _multiresolutionimageinterface.vector_uint_insert(self, *args)
    def reserve(self, n: 'std::vector< unsigned int >::size_type') -> "void":
        return _multiresolutionimageinterface.vector_uint_reserve(self, n)
    def capacity(self) -> "std::vector< unsigned int >::size_type":
        return _multiresolutionimageinterface.vector_uint_capacity(self)
    # Destruction is owned by the C extension; __del__ is a deliberate no-op.
    __swig_destroy__ = _multiresolutionimageinterface.delete_vector_uint
    __del__ = lambda self: None
# Register the proxy class with the C extension so vector<unsigned int>
# values returned from C++ are wrapped as vector_uint instances.
vector_uint_swigregister = _multiresolutionimageinterface.vector_uint_swigregister
vector_uint_swigregister(vector_uint)
class vector_float(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, vector_float, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, vector_float, name)
__repr__ = _swig_repr
def iterator(self) -> "swig::SwigPyIterator *":
return _multiresolutionimageinterface.vector_float_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self) -> "bool":
return _multiresolutionimageinterface.vector_float___nonzero__(self)
def __bool__(self) -> "bool":
return _multiresolutionimageinterface.vector_float___bool__(self)
def __len__(self) -> "std::vector< float >::size_type":
return _multiresolutionimageinterface.vector_float___len__(self)
def __getslice__(self, i: 'std::vector< float >::difference_type', j: 'std::vector< float >::difference_type') -> "std::vector< float,std::allocator< float > > *":
return _multiresolutionimageinterface.vector_float___getslice__(self, i, j)
def __setslice__(self, *args) -> "void":
return _multiresolutionimageinterface.vector_float___setslice__(self, *args)
def __delslice__(self, i: 'std::vector< float >::difference_type', j: 'std::vector< float >::difference_type') -> "void":
return _multiresolutionimageinterface.vector_float___delslice__(self, i, j)
def __delitem__(self, | |
# Repository: cheshire3/cheshire3
# -*- coding: utf-8 -*-
import os
import re
import types
try:
from zopyx.txng3.ext import stemmer as Stemmer
except ImportError:
Stemmer = None
from cheshire3.baseObjects import Normalizer
from cheshire3.exceptions import (
ConfigFileException,
MissingDependencyException
)
class SimpleNormalizer(Normalizer):
    """Abstract Base Class for Normalizer.

    Simply returns the data and should never be used as it will simply waste
    CPU time.
    """

    def __init__(self, session, config, parent):
        Normalizer.__init__(self, session, config, parent)

    def process_string(self, session, data):
        """Process a string into an alternative form."""
        return data

    def process_hash(self, session, data):
        """Process a hash of values into alternative forms.

        `data` maps term -> info dict with at least a 'text' key and
        optionally 'occurences', 'positions', 'proxLoc', 'charOffsets'.
        Runs process_string over each term and merges the results back into
        a new hash, combining stats when two terms normalize to the same key.
        (Python 2 code: uses iteritems/itervalues.)
        """
        kw = {}
        if not data:
            return kw
        # Bind once; subclasses override process_string, not this loop.
        process = self.process_string
        #items = data.items()
        #prox = items[0][1].has_key('positions')
        for (k, d) in data.iteritems():
            dv = d['text']
            if type(dv) == list:
                new = []
                # Process list backwards so as not to munge character offsets
                for x in range(len(dv) - 1, -1, -1):
                    dvi = dv[x]
                    ndvi = process(session, dvi)
                    if ndvi:
                        new.append(ndvi)
                    else:
                        # Term was normalized away: drop its offset too.
                        try:
                            d['charOffsets'].pop(x)
                        except KeyError:
                            pass
                new.reverse()
                nd = d.copy()
                nd['text'] = new
                kw[k] = nd
                continue
            else:
                new = process(session, d['text'])
            if not new:
                # Normalizer rejected the term entirely.
                continue
            if isinstance(new, dict):
                # From string to hash
                for nv in new.itervalues():
                    txt = nv['text']
                    if txt in kw:
                        # Merge duplicate keys: sum counts, extend locations.
                        kw[txt]['occurences'] += nv['occurences']
                        try:
                            kw[txt]['positions'].extend(nv['positions'])
                        except:
                            pass
                        try:
                            kw[txt]['proxLoc'].extend(nv['proxLoc'])
                        except:
                            pass
                    else:
                        kw[txt] = nv
            else:
                if new is not None:
                    try:
                        # Existing entry for the normalized form: merge stats.
                        kw[new]['occurences'] += d['occurences']
                        try:
                            kw[new]['positions'].extend(d['positions'])
                        except:
                            pass
                        try:
                            kw[new]['proxLoc'].extend(d['proxLoc'])
                        except:
                            pass
                    except KeyError:
                        # First sighting: copy so later merges don't mutate
                        # the caller's dict/lists.
                        d = d.copy()
                        try:
                            d['positions'] = d['positions'][:]
                        except:
                            pass
                        try:
                            d['proxLoc'] = d['proxLoc'][:]
                        except:
                            pass
                        d['text'] = new
                        kw[new] = d
        return kw
class DataExistsNormalizer(SimpleNormalizer):
    """ Return '1' if any data exists, otherwise '0' """

    def process_string(self, session, data):
        # Truthiness covers empty string, None and empty containers alike.
        return "1" if data else "0"
class TermExistsNormalizer(SimpleNormalizer):
    """ Un-stoplist anonymizing normalizer. Eg for use with data mining """
    _possibleSettings = {
        'termlist': {
            'docs': ("'splitChar' (defaulting to space) separated list of "
                     "terms. For each term, if it exists in this list, the "
                     "normalizer returns '1', otherwise '0'"),
            'required': True
        },
        'splitChar': {
            'docs': "Override for the character to split on"
        },
        'frequency': {
            'docs': ("if 1, accumulate total occurences, otherwise add one "
                     "per term"),
            'type': int,
            'options': "0|1"
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        # Build the membership list from the configured, delimited string.
        raw_terms = self.get_setting(session, 'termlist', '')
        delimiter = self.get_setting(session, 'splitChar', ' ')
        self.termlist = raw_terms.split(delimiter)
        self.frequency = self.get_setting(session, 'frequency', 0)

    def process_string(self, session, data):
        # "1" when the term is known, "0" otherwise.
        return "1" if data in self.termlist else "0"

    def process_hash(self, session, data):
        """Count matching terms; returns the total as a string."""
        vals = data.values()
        if not vals:
            return {}
        matched = [d for d in vals
                   if self.process_string(session, d['text']) == "1"]
        if self.frequency:
            # Weight each matching term by its occurrence count.
            total = sum(d['occurences'] for d in matched)
        else:
            total = len(matched)
        return str(total)
class UndocumentNormalizer(SimpleNormalizer):
    """ Take a document as if it were a string and turn into a string """

    def process_string(self, session, data):
        # `data` is a Document object; unwrap it to its raw content.
        raw = data.get_raw(session)
        return raw
class CaseNormalizer(SimpleNormalizer):
    """ Reduce text to lower case """

    def process_string(self, session, data):
        lowered = data.lower()
        return lowered
class ReverseNormalizer(SimpleNormalizer):
    """ Reverse string (eg for left truncation) """

    def process_string(self, session, data):
        # Join the characters back-to-front; equivalent to data[::-1].
        return ''.join(reversed(data))
class SpaceNormalizer(SimpleNormalizer):
    """ Reduce multiple whitespace to single space character """

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        # All strings should be treated as unicode internally; this is the
        # default for lxml, the primary Record implementation, so match
        # unicode whitespace too.
        self.whitespace = re.compile(r"\s+", re.UNICODE)

    def process_string(self, session, data):
        # Trim the ends, then collapse every internal whitespace run.
        return self.whitespace.sub(' ', data.strip())
class ArticleNormalizer(SimpleNormalizer):
    """Remove leading english articles (the, a, an)"""

    def process_string(self, session, data):
        # Compare case-insensitively but slice the original string so the
        # remainder keeps its casing.
        lowered = data.lower()
        for article in ("the ", "an ", "a "):
            if lowered.startswith(article):
                return data[len(article):]
        return data
class NumericEntityNormalizer(SimpleNormalizer):
    """Replaces named XML entities with numeric ones.

    Replace encoded XML entities matching a regular expression with the
    equivalent numeric character entity
    """
    _possibleSettings = {
        'regexp': {
            'docs': ("Regular expression of that matches characters to turn "
                     "into XML numeric character entities")
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        pattern = self.get_setting(session,
                                   'regexp',
                                   '([\x0e-\x1f]|[\x7b-\xff])')
        self.regexp = re.compile(pattern)

        # Render a single matched character as its numeric entity, eg &#123;
        def _to_numeric_entity(match):
            return "&#%s;" % ord(match.group(1))

        self.function = _to_numeric_entity

    def process_string(self, session, data):
        return self.regexp.sub(self.function, data)
# Non printable characters (Printable)
# self.asciiRe = re.compile('([\x0e-\x1f]|[\x7b-\xff])')
# Non useful characters (Stripper)
# self.asciiRe = re.compile('["%#@~!*{}]')
class PointlessCharacterNormalizer(SimpleNormalizer):
    """Replace typographic ligatures and punctuation with ASCII equivalents."""

    def process_string(self, session, data):
        t = data.replace(u'\ufb00', 'ff')   # ff ligature
        t = t.replace(u'\ufb01', 'fi')      # fi ligature
        # NOTE(review): U+00E6 is 'ae' (ash), not a fi ligature; mapping it
        # to 'fi' looks like a typo for 'ae' -- confirm against source data.
        t = t.replace(u'\xe6', 'fi')
        t = t.replace(u'\ufb02', 'fl')      # fl ligature
        t = t.replace(u'\u201c', '"')       # left curly double quote
        t = t.replace(u'\u201d', '"')       # right curly double quote
        t = t.replace(u'\u2019', "'")       # curly apostrophe
        t = t.replace(u'\u2026', " ")       # horizontal ellipsis
        return t
class RegexpNormalizer(SimpleNormalizer):
    """Strip, replace or keep data matching a regular expression."""
    _possibleSettings = {
        'char': {
            'docs': ("Character(s) to replace matches in the regular "
                     "expression with. Defaults to empty string (eg strip "
                     "matches)")
        },
        'regexp': {
            'docs': "Regular expression to match in the data.",
            'required': True
        },
        'keep': {
            'docs': ("Should instead keep only the matches. Boolean, defaults "
                     "to False"),
            'type': int,
            'options': "0|1"
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        self.char = self.get_setting(session, 'char', '')
        self.keep = self.get_setting(session, 'keep', 0)
        regex = self.get_setting(session, 'regexp')
        if regex:
            self.regexp = re.compile(regex)
        else:
            raise ConfigFileException("Missing regexp setting for "
                                      "%s." % (self.id))

    def process_string(self, session, data):
        # Python 2 byte/unicode handling: on a decode error, re-run the
        # regexp against the utf-8 decoded form of the data.
        if self.keep:
            # Keep only the matched substrings, joined by self.char.
            try:
                l = self.regexp.findall(data)
            except UnicodeDecodeError:
                data = data.decode('utf-8')
                l = self.regexp.findall(data)
            return self.char.join(l)
        else:
            # Replace (or strip, when char == '') every match.
            try:
                return self.regexp.sub(self.char, data)
            except UnicodeDecodeError:
                data = data.decode('utf-8')
                try:
                    return self.regexp.sub(self.char, data)
                except:
                    raise
class NamedRegexpNormalizer(RegexpNormalizer):
    """A RegexpNormalizer with templating for named groups.

    As RegexpNormalizer, but allow named groups and reconstruction of token
    using a template and those groups.
    """
    _possibleSettings = {
        'template': {
            'docs': ("Template using group names for replacement, as per % "
                     "substitution. Eg regexp = (?P<word>.+)/(?P<pos>.+) and "
                     "template = --%(pos)s--, cat/NN would generate --NN--")
        }
    }

    def __init__(self, session, config, parent):
        RegexpNormalizer.__init__(self, session, config, parent)
        self.template = self.get_setting(session, 'template', '')

    def process_string(self, session, data):
        # Guard clauses: anything that can't be matched and templated
        # collapses to the empty string.
        match = self.regexp.match(data)
        if not match:
            return ""
        try:
            return self.template % match.groupdict()
        except:
            # Template referenced a group the pattern did not capture.
            return ""
class RegexpFilterNormalizer(SimpleNormalizer):
    """Normalizer to filter data with a regular expression.

    If 'keep' setting is True:
        filters out data that DOES NOT match 'regexp' setting
    If 'keep' setting is False:
        filters out data that DOES match the 'regexp' setting
    """
    _possibleSettings = {
        'regexp': {
            'docs': "Regular expression to match in the data."
        },
        'keep': {
            'docs': ("Should keep only data matching the regexp. Boolean "
                     "setting, defaults to True"),
            'type': int
        }
    }

    def __init__(self, session, config, parent):
        SimpleNormalizer.__init__(self, session, config, parent)
        regex = self.get_setting(session,
                                 'regexp',
                                 '^[a-zA-Z\'][a-zA-Z\'.-]+[?!,;:]?$')
        self.re = re.compile(regex)
        self.keep = self.get_setting(session, 'keep', 1)

    def process_string(self, session, data):
        # Pass the data through exactly when match-ness agrees with `keep`;
        # otherwise signal removal with None.
        matched = self.re.match(data) is not None
        return data if matched == bool(self.keep) else None

    def process_hash(self, session, data):
        filtered = SimpleNormalizer.process_hash(self, session, data)
        # process_string maps rejected terms to None; drop that bucket if
        # anything was filtered.
        filtered.pop(None, None)
        return filtered
class PossessiveNormalizer(SimpleNormalizer):
    """ Remove trailing 's or s' from words """

    def process_string(self, session, data):
        # Linguistically imperfect: eg "it's" == 'it is', not 'of it'.
        tail = data[-2:]
        if tail == "s'":
            # Plural possessive: drop just the apostrophe.
            return data[:-1]
        if tail == "'s":
            # Singular possessive: drop apostrophe and s.
            return data[:-2]
        return data
class IntNormalizer(SimpleNormalizer):
    """ Turn a string into an integer """

    def process_string(self, session, data):
        # Python 2 `long`; returns None for anything unparseable.
        try:
            return long(data)
        except:
            return None
class StringIntNormalizer(SimpleNormalizer):
    """ Turn an integer into a 0 padded string, 12 chrs long """

    def process_string(self, session, data):
        # Zero-pad so string sort order matches numeric order; None when
        # the input is not an integer.
        try:
            d = long(data)
            return "%012d" % (d)
        except:
            return None
class FileAssistedNormalizer(SimpleNormalizer):
    """Base Class for Normalizers configured with an additional file.

    Abstract class for Normalizers that can be configured using an additional
    file e.g. for specifying a stoplist, or a list of acronyms and their
    expansions.
    """

    def _processPath(self, session, path):
        """Resolve the configured path and return its lines as a list."""
        fp = self.get_path(session, path)
        if fp is None:
            raise ConfigFileException("No {0} file specified for object with "
                                      "id '{1}'.".format(path, self.id))
        # Relative paths are anchored at the object's defaultPath.
        if not os.path.isabs(fp):
            fp = os.path.join(self.get_path(session, "defaultPath"), fp)
        try:
            fh = open(fp, 'r')
        except IOError as e:
            # Surface file problems as configuration errors.
            raise ConfigFileException("{0} for object with id '{1}'."
                                      "".format(str(e), self.id))
        try:
            return fh.readlines()
        finally:
            fh.close()
class StoplistNormalizer(FileAssistedNormalizer):
"""Normalizer to remove words that occur in a stopword list."""
stoplist = {}
_possiblePaths = {
'stoplist': {
'docs': | |
async def handle_messages(self) -> None:
    """
    Read messages from the queue. If it matches a pending request to auxin-cli/signal-cli,
    set the result for that request. If said result is being rate limited, retry sending it
    after pausing. Otherwise, concurrently respond to each message.
    """
    metrics_salt = utils.get_secret("METRICS_SALT")
    while True:
        message = await self.inbox.get()
        # Track unique users for metrics via salted hashes, never raw UUIDs.
        if metrics_salt and message.uuid:
            self.seen_users.add(hash_salt(message.uuid, metrics_salt))
        if message.id and message.id in self.pending_requests:
            logging.debug("setting result for future %s: %s", message.id, message)
            self.pending_requests[message.id].set_result(message)
            # A "status: 413" error means the server rate-limited the send:
            # back off briefly, then re-queue the original payload under a
            # fresh rpc id with a fresh pending future.
            if (
                message.error
                and "status: 413" in message.error["data"]
                and message.id in self.pending_messages_sent
            ):
                sent_json_message = self.pending_messages_sent.pop(message.id)
                warn = termcolor.colored(
                    "waiting to retry send after rate limit. message: %s", "red"
                )
                logging.warning(warn, sent_json_message)
                self.backoff = True
                # NOTE(review): fixed 4s pause, not exponential -- confirm
                # this is adequate for the server's rate-limit window.
                await asyncio.sleep(4)
                rpc_id = f"retry-send-{get_uid()}"
                self.pending_messages_sent[rpc_id] = sent_json_message
                self.pending_requests[rpc_id] = asyncio.Future()
                await self.outbox.put(sent_json_message)
                continue
        # Prune finished responder tasks and spawn one for this message so
        # responses are handled concurrently with queue consumption.
        self.pending_response_tasks = [
            task for task in self.pending_response_tasks if not task.done()
        ] + [asyncio.create_task(self.respond_and_collect_metrics(message))]
# maybe this is merged with dispatch_message?
async def respond_and_collect_metrics(self, message: Message) -> None:
    """
    Pass each message to handle_message. Notify an admin if an error happens.
    If that returns a non-empty string, send it as a reply,
    then record how long this took.
    """
    rpc_id = None
    start_time = time.time()
    try:
        response = await self.handle_message(message)
        if response is not None:
            rpc_id = await self.respond(message, response)
    except UserError as e:
        # User-facing errors become the reply text directly.
        rpc_id = await self.respond(message, str(e))
    except:  # pylint: disable=bare-except
        # Anything else is logged and forwarded to the admin channel; the
        # user gets no reply.
        exception_traceback = "".join(traceback.format_exception(*sys.exc_info()))
        logging.info("error handling message %s %s", message, exception_traceback)
        self.pending_response_tasks.append(
            asyncio.create_task(self.admin(f"{message}\n{exception_traceback}"))
        )
    # Wall-clock time spent in Python before the reply was handed off.
    python_delta = round(time.time() - start_time, 3)
    note = message.arg0 or ""
    if rpc_id:
        logging.debug("awaiting future %s", rpc_id)
        result = await self.wait_for_response(rpc_id=rpc_id)
        # End-to-end latency in seconds (timestamps are in milliseconds).
        roundtrip_delta = (result.timestamp - message.timestamp) / 1000
        self.signal_roundtrip_latency.append(
            (message.timestamp, note, roundtrip_delta)
        )
        roundtrip_summary.observe(roundtrip_delta)
        roundtrip_histogram.observe(roundtrip_delta)
        logging.info("noted roundtrip time: %s", roundtrip_delta)
        if utils.get_secret("ADMIN_METRICS"):
            await self.admin(
                f"command: {note}. python delta: {python_delta}s. roundtrip delta: {roundtrip_delta}s",
            )
def mentions_us(self, msg: Message) -> bool:
    """Whether any mention in *msg* targets this bot's own number."""
    # mention dicts look like:
    # {"name": ..., "number": ..., "uuid": ..., "start": 0, "length": 1}
    for mention in msg.mentions:
        if mention.get("number") == self.bot_number:
            return True
    return False
def is_command(self, msg: Message) -> bool:
    """True when the message is addressed to the bot: slash-prefixed or an @-mention."""
    text = msg.full_text
    if not text:
        return False
    return text.startswith("/") or self.mentions_us(msg)
def match_command(self, msg: Message) -> str:
    """Return the name of the do_* command a message is calling for.

    Resolution order: exact match on arg0; then, when magic matching is
    enabled and the message is addressed to us, the closest fuzzy match
    under the typo threshold; then a unique prefix expansion of what was
    typed. Returns "" when nothing matches.
    """
    if not msg.arg0:
        return ""
    # probably wrong
    if self.mentions_us(msg) and msg.full_text:
        msg.parse_text(msg.full_text.lstrip("\N{Object Replacement Character} "))
    # happy path: direct match on the command name
    if hasattr(self, "do_" + msg.arg0):
        return msg.arg0
    # always match in dms, only match /commands or @bot in groups
    if utils.get_secret("ENABLE_MAGIC") and (not msg.group or self.is_command(msg)):
        logging.debug("running magic")
        # don't leak admin commands
        valid_commands = self.commands if is_admin(msg) else self.visible_commands
        # closest match
        score, cmd = string_dist.match(msg.arg0, valid_commands)
        if score < (float(utils.get_secret("TYPO_THRESHOLD") or 0.3)):
            return cmd
        # check if there's a unique expansion of what the user typed.
        # BUG FIX: this previously tested `cmd.startswith(msg.arg0)` -- a
        # loop-invariant condition on the fuzzy match -- so the list was
        # always either empty or every valid command; each candidate must
        # be tested against the typed prefix.
        expansions = [
            expanded_cmd
            for expanded_cmd in valid_commands
            if expanded_cmd.startswith(msg.arg0)
        ]
        if len(expansions) == 1:
            return expansions[0]
    return ""
async def handle_message(self, message: Message) -> Response:
    """Method dispatch to do_x commands and goodies.

    Overwrite this to add your own non-command logic,
    but call super().handle_message(message) at the end"""
    # try to get a direct match, or a fuzzy match if appropriate
    if cmd := self.match_command(message):
        # invoke the matched do_* coroutine and return its response
        return await getattr(self, "do_" + cmd)(message)
    if message.text == "TERMINATE":
        # Signal protocol session reset message.
        return "signal session reset"
    return await self.default(message)
def documented_commands(self) -> str:
    """List user-facing commands: do_* methods that are documented and not hidden."""
    names = []
    for attr in dir(self):
        if not attr.startswith("do_"):
            continue
        handler = getattr(self, attr)
        # Skip @hide-decorated handlers and those without a __doc__ slot.
        if hasattr(handler, "hide") or not hasattr(handler, "__doc__"):
            continue
        names.append(attr.removeprefix("do_"))
    commands = ", ".join(names)
    return f'Documented commands: {commands}\n\nFor more info about a command, try "help" [command]'
async def default(self, message: Message) -> Response:
"Default response. Override in your class to change this behavior"
resp = "That didn't look like a valid command!\n" + self.documented_commands()
# if it messages an echoserver, don't get in a loop (or groups)
if message.text and not (
message.group
or "Documented commands" in message.text
or resp == message.text
):
return resp
return None
async def do_help(self, msg: Message) -> Response:
    """
    help [command]. see the documentation for command, or all commands
    """
    # Don't respond to our own (or another bot's) help text.
    if msg.text and "Documented commands" in msg.text:
        return None
    if msg.arg1:
        try:
            cmd = getattr(self, f"do_{msg.arg1}")
            # Hidden commands are treated as nonexistent.
            if hasattr(getattr(self, f"do_{msg.arg1}"), "hide"):
                raise AttributeError("Pretend this never happened.")
            # allow messages to have a different helptext in groups
            if hasattr(cmd, "__group_doc__") and msg.group:
                return dedent(cmd.__group_doc__).strip()
            doc = cmd.__doc__
            if doc:
                return dedent(doc).strip()
            return f"{msg.arg1} isn't documented, sorry :("
        except AttributeError:
            return f"No such command '{msg.arg1}'"
    else:
        # No argument: list everything that is documented.
        resp = self.documented_commands()
        return resp
@requires_admin
async def do_eval(self, msg: Message) -> Response:
    """Evaluates a few lines of Python. Preface with "return" to reply with result."""

    async def async_exec(stmts: str, env: Optional[dict] = None) -> Any:
        """Compile *stmts* into the body of an async function and await it.

        This lets evaluated code use `await` and `return` at the top level.
        """
        parsed_stmts = ast.parse(stmts)
        fn_name = "_async_exec_f"
        my_fn = f"async def {fn_name}(): pass"
        parsed_fn = ast.parse(my_fn)
        # Shift line numbers so tracebacks point past the synthetic def line.
        for node in parsed_stmts.body:
            ast.increment_lineno(node)
        assert isinstance(parsed_fn.body[0], ast.AsyncFunctionDef)
        # replace the empty async def _async_exec_f(): pass body
        # with the AST parsed from the message
        parsed_fn.body[0].body = parsed_stmts.body
        code = compile(parsed_fn, filename="<ast>", mode="exec")
        exec(code, env or globals())  # pylint: disable=exec-used
        # pylint: disable=eval-used
        return await eval(f"{fn_name}()", env or globals())

    if msg.full_text and msg.tokens and len(msg.tokens) > 1:
        # Strip the command word itself, leaving only the code to run.
        source_blob = msg.full_text.replace(msg.arg0, "", 1).lstrip("/ ")
        try:
            return str(await async_exec(source_blob, globals() | locals()))
        except:  # pylint: disable=bare-except
            exception_traceback = "".join(
                traceback.format_exception(*sys.exc_info())
            )
            return exception_traceback
    return None
def get_recipients(self) -> list[dict[str, str]]:
    """Returns a list of all known recipients by parsing underlying datastore.

    Reads the signal datastore's recipients-store JSON file; returns []
    when the file has no "recipients" key.
    """
    # FIX: use a context manager so the file handle is closed promptly --
    # the previous open(...).read() leaked the handle until GC.
    with open(f"data/{self.bot_number}.d/recipients-store") as store_file:
        return json.load(store_file).get("recipients", [])
def get_uuid_by_phone(self, phonenumber: str) -> Optional[str]:
    """Queries the recipients-store file for a UUID, provided a phone number."""
    # Only E.164-style numbers (leading +) are looked up.
    if not phonenumber.startswith("+"):
        return None
    for recipient in self.get_recipients():
        if recipient.get("number") == phonenumber:
            return recipient["uuid"]
    return None
def get_number_by_uuid(self, uuid_: str) -> Optional[str]:
    """Queries the recipients-store file for a phone number, provided a uuid."""
    # A canonical UUID has exactly four dashes; skip anything else.
    if uuid_.count("-") != 4:
        return None
    for recipient in self.get_recipients():
        if recipient.get("uuid") == uuid_:
            return recipient["number"]
    return None
class ExtrasBot(Bot):
    """Grab-bag of small demo / diagnostic commands layered on top of Bot."""

    async def do_printerfact(self, _: Message) -> str:
        "Learn a fact about printers"
        # FACT_SOURCE lets deployments point at their own fact service.
        async with self.client_session.get(
            utils.get_secret("FACT_SOURCE") or "https://colbyolson.com/printers"
        ) as resp:
            fact = await resp.text()
        return fact.strip()

    async def do_ping(self, message: Message) -> str:
        """replies to /ping with /pong"""
        if message.text:
            return f"/pong {message.text}"
        return "/pong"

    @hide
    async def do_pong(self, message: Message) -> str:
        """Stashes the message in context so it's accessible externally."""
        if message.arg1 and message.arg2:
            self.pongs[message.arg1] = message.arg2
            # NOTE(review): reports len(arg2) -- the size of the stashed
            # payload, not the payload itself; confirm that's intended.
            return f"OK, stashing {len(message.arg2)} at {message.arg1}"
        if message.text:
            self.pongs[message.text] = message.text
            return f"OK, stashing {message.text}"
        return "OK"

    @hide
    async def do_commit_msg(self, _: Message) -> str:
        """Report the commit message the running image was built from."""
        # NOTE(review): open() without close leaks the handle until GC.
        try:
            return f"Commit message: {open('COMMIT_EDITMSG').read()}"
        except FileNotFoundError:
            return "No commit message available"

    async def do_signalme(self, _: Message) -> Response:
        """signalme
        Returns a link to share the bot with friends!"""
        return f"https://signal.me/#p/{self.bot_number}"

    @hide
    async def do_rot13(self, msg: Message) -> Response:
        """rot13 encodes the message.
        > rot13 hello world
        uryyb jbeyq"""
        return codecs.encode(msg.text, "rot13")

    @hide
    async def do_uptime(self, _: Message) -> str:
        """Returns a message containing the bot uptime."""
        # divmod twice: total seconds -> (total minutes, s) -> (h, m).
        tot_mins, sec = divmod(int(time.time() - self.start_time), 60)
        hr, mins = divmod(tot_mins, 60)
        t = "Uptime: "
        t += f"{hr}h" if hr else ""
        t += f"{mins}m" if mins else ""
        t += f"{sec}s"
        return t
class PayBot(ExtrasBot):
@requires_admin
async def do_fsr(self, msg: Message) -> Response:
    """
    Make a request to the Full-Service instance behind the bot. Admin-only.
    ie) /fsr [command] ([arg1] [val1]( [arg2] [val2])...)"""
    if not msg.tokens:
        return "/fsr [command] ([arg1] [val1]( [arg2] [val2]))"
    if len(msg.tokens) == 1:
        # Bare command with no parameters.
        # NOTE(review): this branch calls mobster.req and returns the raw
        # result, while the parameterized branch calls mobster.req_ and
        # stringifies -- confirm the asymmetry is intentional.
        return await self.mobster.req(dict(method=msg.tokens[0]))
    if (len(msg.tokens) % 2) == 1:
        # Odd token count = command followed by key/value pairs.
        fsr_command = msg.tokens[0]
        fsr_keys = msg.tokens[1::2]
        fsr_values = msg.tokens[2::2]
        params = dict(zip(fsr_keys, fsr_values))
        return str(await self.mobster.req_(fsr_command, **params))
    return "/fsr [command] ([arg1] [val1]( [arg2] [val2])...)"
@requires_admin
async def do_setup(self, _: Message) -> str:
    """Publish the bot's MobileCoin payment address on its Signal profile."""
    if not utils.AUXIN:
        return "Can't set payment address without auxin"
    # Convert the b58 wrapper address into the b64 public-address form
    # the Signal profile field expects.
    await self.set_profile_auxin(
        mobilecoin_address=mc_util.b58_wrapper_to_b64_public_address(
            await self.mobster.ensure_address()
        )
    )
    return "OK"
@requires_admin
async def do_balance(self, _: Message) -> Response:
    """Returns bot balance in MOB."""
    # Balance is stored in picoMOB; convert and show 4 decimal places.
    return f"Bot has balance of {mc_util.pmob2mob(await self.mobster.get_balance()).quantize(Decimal('1.0000'))} MOB"
async def handle_message(self, message: Message) -> Response:
    """Intercept payment receipts; delegate everything else to the base class."""
    if message.payment:
        # Fire-and-forget: payment processing must not block message handling.
        asyncio.create_task(self.handle_payment(message))
        return None
    return await super().handle_message(message)
async def get_user_usd_balance(self, account: str) -> float:
    """Return *account*'s ledger balance in USD, rounded to cents."""
    res = await self.mobster.ledger_manager.get_usd_balance(account)
    # First row holds the aggregate balance; round to 2 decimal places.
    return float(round(res[0].get("balance"), 2))
async def get_user_pmob_balance(self, account: str) -> float:
res = | |
# -*- coding: utf-8 -*-
"""
/*------------------------------------------------------*
| Spatial Uncertainty Research Framework |
| |
| Author: <NAME>, UC Berkeley, <EMAIL> |
| |
| Date: 07/11/2019 |
*------------------------------------------------------*/
"""
from __future__ import absolute_import, division, print_function
import os
import json
import pathlib
import random
import numpy as np
import pandas as pd
from scipy import spatial
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from scipy.spatial.distance import squareform, cdist, pdist
# fix random seed for reproducibility
#tf.set_random_seed(1234)
class PrintDot(keras.callbacks.Callback):
    """Keras callback giving minimal progress output during long training runs."""

    def on_epoch_end(self, epoch, logs):
        # Emit the epoch number every 100 epochs, plus a dot each epoch.
        if epoch % 100 == 0:
            print(epoch)
        print('.', end='')
class SpatialNeuralNet:
""" A Neural Net Doing Spatial Predictions. """
def __init__(self, X=None, Y=None, rawData=None, architecture=None, activation=None,modelType='regression', distScaler = 100000., numNei=10, trainFrac=0.8,testFrac=None, writeTmpData=False, workDir='./tmp', saveFigs=True, plotFigs=True):
'''
X: input
Y: output
rawData: [x1,x2,value]
numNei: number of neighbor to be considered
trainFrac: fraction of data used for training
'''
if architecture is None:
# default architecture
self.architecture = [256, 64, 64, 64, 1]
else:
if len(architecture)<2:
print("Length of NN architecture must be greater than 1")
exit()
self.architecture = architecture
self.activation = activation
self.modelType = modelType
self.numNei = numNei
self.distScaler = distScaler
self.writeTmpData = writeTmpData
self.workDir = workDir
self.saveFigs = saveFigs
self.plotFigs = plotFigs
hasInput = True
if rawData is not None:
self.rawData = rawData
self.processRawData()
elif X is not None:
self.X = X
self.Y = Y
else:
print("No input is provided, assuming the model will be used for predicting only. ")
hasInput = False
if hasInput:
if testFrac is not None: # testFrac dominates
self.trainFrac = 1.0 - testFrac
else: self.trainFrac = trainFrac
self.EPOCHS = 5000
n = self.X.shape[0]
ind = random.sample(range(n),n)
indTrain = ind[0:np.floor(n*trainFrac).astype(int)]
indTest = ind[np.floor(n*trainFrac).astype(int):]
self.train_dataset = self.X[indTrain]
self.test_dataset = self.X[indTest]
if self.Y is not None:
self.train_labels = self.Y[indTrain]
self.test_labels = self.Y[indTest]
self.mean_train_dataset = np.mean(self.train_dataset, axis = 0)
self.std_train_dataset = np.std(self.train_dataset, axis = 0)
self.normed_train_data = self.norm(self.train_dataset)
self.normed_test_data = self.norm(self.test_dataset)
# build model
#self.model = self.build_model()
# train model
#self.train()
# test model
#self.test()
if not os.path.exists(workDir):
pathlib.Path(workDir).mkdir(parents=True, exist_ok=True)
if writeTmpData:
if rawData is not None:
np.savetxt(workDir+'/test_dataset.txt', self.rawData[indTest,:])
np.savetxt(workDir+'/train_dataset.txt', self.rawData[indTrain,:])
    def processRawData(self,rawData=None,numColumnsY=1):
        '''
        Convert a raw coordinate/target table into the neighbor-based
        feature matrix used by the network.

        Each output row is the sample's coordinates followed by numNei
        (scaled distance, neighbor target) pairs from its nearest neighbors
        in the table.  Side effects: sets self.X (and self.Y when
        numColumnsY == 1).

        rawData: optional table to featurize; when None, self.rawData is
            used (the normal model-building path).
        numColumnsY: number of trailing target columns; only 0 or 1 are
            supported, anything else aborts the process.

        Returns the feature matrix as a numpy array.
        '''
        numNei = self.numNei
        perNei = 2
        numPre = 2
        # Defining input size, hidden layer size, output size and batch size respectively
        # NOTE(review): n_in, n_h, n_out and batch_size are computed but never
        # used in this method.
        n_in, n_h, n_out, batch_size = numNei * perNei + numPre, 10, 1, 1000
        if rawData is None:# normally built model
            if numColumnsY == 1:
                # split self.rawData into coordinates and the trailing target column
                rawData = self.rawData[:,:0-numColumnsY]
                rawTarget = self.rawData[:,-numColumnsY:]
                self.Y = rawTarget
            elif numColumnsY == 0:# no target
                rawData = self.rawData
            else:
                print('SURF currently can not deal with multi-dimensional targets.')
                exit()
        else:# loaded model
            if numColumnsY == 1:
                # NOTE(review): targets come from self.rawData while the
                # coordinates come from the rawData argument -- this assumes
                # the two tables are row-aligned; confirm at the call sites.
                rawTarget = self.rawData[:,-numColumnsY:]
                self.Y = rawTarget
            elif numColumnsY == 0:# no target
                rawData = rawData
            else:
                print('SURF currently can not deal with multi-dimensional targets.')
                exit()
        # Create data
        coordsAll = np.array(rawData, dtype=np.float32)
        kdTree = spatial.KDTree(coordsAll)
        data = []
        for i in range(rawData.shape[0]):
            # query numNei+1 neighbors and drop the first hit, which is the
            # query point itself (distance 0) since it belongs to the tree
            distance,index = kdTree.query(rawData[i,:],numNei+1) # nearest 10 points
            distance = distance[1:]
            index = index[1:]
            datatmp = rawData[i,:]
            for j in range(numNei):
                if numColumnsY==1:
                    # feature pair: scaled distance to neighbor j, neighbor j's target
                    datatmp = np.append(np.append(datatmp, distance[j]*self.distScaler), rawTarget[index[j]])
                elif numColumnsY==0:
                    datatmp = np.append(datatmp, distance[j]*self.distScaler)
                else:
                    print('SURF currently can not deal with multi-dimensional targets.')
                    exit()
            data.append(datatmp.tolist())
        data = np.array(data)
        self.X = data
        return data
def processRawDataLoad(self,rawData=None):
numNei = self.numNei
perNei = 2
numPre = 2
# Defining input size, hidden layer size, output size and batch size respectively
n_in, n_h, n_out, batch_size = numNei * perNei + numPre, 10, 1, 1000
# Create data
coordsAll = np.array(self.rawData[:,0:-1], dtype=np.float32)
rawTarget = self.rawData[:,-1]
kdTree = spatial.KDTree(coordsAll)
data = []
for i in range(rawData.shape[0]):
distance,index = kdTree.query(rawData[i,:],numNei+1) # nearest 10 points
distance = distance[1:]
index = index[1:]
datatmp = rawData[i,:]
for j in range(numNei):
datatmp = np.append(np.append(datatmp, distance[j]*self.distScaler), rawTarget[index[j]])
data.append(datatmp.tolist())
data = np.array(data)
#self.X = data
return data
def norm(self, v):
#return v
return (v - self.mean_train_dataset) / self.std_train_dataset
# Build the model
def build_model(self,numTypes=None):
print("Building the neural network ...\n")
if self.modelType == "classification":
model = self.build_classification_model(numTypes)
return model
else:
archi = []
archi.append(layers.Dense(self.architecture[0], activation=tf.nn.relu, input_shape=[len(self.train_dataset.T)]))
for i in self.architecture[1:-1]:
archi.append(layers.Dense(i, activation=tf.nn.relu))
if self.activation is None:
archi.append(layers.Dense(self.architecture[-1]))
elif self.activation == "sigmoid":
archi.append(layers.Dense(self.architecture[-1], activation=tf.nn.sigmoid)) # for 0~1
else:#
#TODO: add more activation fuctions
archi.append(layers.Dense(self.architecture[-1]))
model = keras.Sequential(archi)
#optimizer = tf.train.RMSPropOptimizer(0.001)
#optimizer = tf.train.AdamOptimizer(1e-4)
model.compile(loss='mae', optimizer='adam', metrics=['mae', 'mse'])
self.model = model
return model
def load_model(self, modelName):
if os.path.isdir(modelName):
self.modelLoadedModelPath = modelName
else: self.modelLoadedModelPath = self.workDir + '/' + modelName
with open(self.modelLoadedModelPath+'/config.json') as json_file:
m = json.load(json_file)
self.numNei = m['numNei']
self.modelType = m['modelType']
self.model = tf.keras.models.load_model(self.modelLoadedModelPath)
# Check its architecture
self.model.summary()
# Build the classification model
def build_classification_model(self, numTypes):
model = keras.Sequential([
layers.Dense(len(self.train_dataset.T), activation=tf.nn.relu, input_shape=[len(self.train_dataset.T)]),
layers.Dense(len(self.train_dataset.T), activation=tf.nn.relu),
layers.Dense(len(self.train_dataset.T)/2, activation=tf.nn.relu),
layers.Dense(numTypes, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
self.model = model
return model
def train_classification_model(self):
self.model.summary()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = self.model.fit(self.normed_train_data, self.train_labels.astype(int).flatten(), epochs=self.EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
print('\n')
print(hist.tail())
#self.plot_history(history)
#plt.savefig('data/NN_ContinuumWall_TrainingLoss_V1.png')
#loss, mae, mse = self.model.evaluate(self.normed_test_data, self.test_labels, verbose=0)
#print("Testing set Mean Abs Error: {:5.2f} ".format(mae))
'''
# save model
# serialize model to JSON
model_json = self.model.to_json()
with open("data/NNModel_ContinuumWall_V1.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
self.model.save_weights("Data/NNModel_ContinuumWall_V1.h5")
print("Saved model to disk")
'''
def save(self, modelName = 'surf_model'):
modelDir = self.workDir+'/'+modelName
self.model.save(modelDir)
self.model.save(modelDir + '/saved_model.h5')
np.savetxt(modelDir+'/mean_train_dataset.txt',self.mean_train_dataset)
np.savetxt(modelDir+'/std_train_dataset.txt',self.std_train_dataset)
m = {'modelName':modelName,
'numNei':self.numNei,
'modelType':self.modelType}
with open(modelDir+'/config.json', 'w') as outfile:
json.dump(m, outfile)
formatted_data = np.concatenate((self.X,self.Y),axis=1)
np.savetxt(modelDir+'/formatted_data.txt',formatted_data)
formatted_data_train = np.concatenate((self.normed_train_data,self.train_labels),axis=1)
formatted_data_test = np.concatenate((self.normed_test_data,self.test_labels),axis=1)
formatted_data_norm = np.concatenate((formatted_data_train,formatted_data_test),axis=0)
np.savetxt(modelDir+'/formatted_data_norm.txt',formatted_data_norm)
print('model saved at ',modelDir)
def train(self):
if self.modelType == "classification":
model = self.train_classification_model()
else:
print("Training the neural network ... \n")
self.model.summary()
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
print(self.train_labels)
history = self.model.fit(self.normed_train_data, self.train_labels, epochs=self.EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
print('\n')
print(hist.tail())
if self.plotFigs:
self.plot_history(history)
#plt.savefig('data/NN_TrainingLoss.png')
#plt.savefig('data/NN_TrainingLoss.pdf')
loss, mae, mse = self.model.evaluate(self.normed_test_data, self.test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} ".format(mae))
def predictMulti(self,X):
self.mean_train_dataset = np.loadtxt(self.modelLoadedModelPath+'/mean_train_dataset.txt')
self.std_train_dataset = np.loadtxt(self.modelLoadedModelPath+'/std_train_dataset.txt')
X = self.processRawDataLoad(rawData=X)
#print([X.shape,self.mean_train_dataset.shape,self.std_train_dataset.shape])
X = (X - self.mean_train_dataset) / self.std_train_dataset
print(self.modelType)
#X = self.norm(X)[:,0:-1]
if self.modelType == 'classification':
Y = self.model.predict(X)
Y = np.argmax(Y,axis=1)
else: Y = self.model.predict(X).flatten()
np.savetxt(self.modelLoadedModelPath+'/Y.txt', Y)
print("Predictions are saved in ", self.modelLoadedModelPath+'/Y.txt')
return Y
    def plot(self, trueValues, predictValues):
        '''
        Plot regression diagnostics: predicted vs true values (left panel)
        and a histogram of the prediction errors (right panel).

        trueValues / predictValues: flat arrays of equal length.  Figures
        are written under workDir when saveFigs is set; plt.show() at the
        end blocks until the window is closed.
        '''
        print(trueValues.shape, predictValues.shape)
        if self.Y is not None:
            plt.figure(figsize=(20,10))
            plt.subplot(1,2,1)
            #trueValues = self.test_labels.flatten()
            ##predictValues = test_predictions[0::5]
            #predictValues = test_predictions
            print(trueValues)
            print(predictValues)
            plt.scatter(trueValues, predictValues, marker='o', c="red", alpha=0.01)
            plt.xlabel('True Values', fontsize=30)
            plt.ylabel('Predictions', fontsize=30)
            plt.axis('equal')
            plt.axis('square')
            #minV = min([min(predictValues),min(trueValues)])
            #maxV = max([max(predictValues),max(trueValues)])
            # axis limits: true-value range padded by 10% on each side
            minV = min(trueValues)
            maxV = max(trueValues)
            marginV = 0.1 * (maxV - minV)
            plt.xlim(minV-marginV,maxV+marginV)
            plt.ylim(minV-marginV,maxV+marginV)
            plt.tick_params(axis='x', labelsize=25)
            plt.tick_params(axis='y', labelsize=25)
            # diagonal y = x reference line
            # NOTE(review): the first two points are identical; the diagonal
            # still renders, but two points [min, max] were likely intended
            plt.plot([minV-marginV, minV-marginV,maxV+marginV], [minV-marginV, minV-marginV,maxV+marginV],'k-')
            '''
            # year built
            plt.xlim(1875, 2050)
            plt.ylim(1875, 2050)
            '''
            '''
            # num of stories
            plt.xlim([plt.xlim()[0],plt.xlim()[1]])
            plt.ylim([plt.xlim()[0],plt.ylim()[1]])
            plt.plot([0, 2050], [0, 2050],'k-')
            '''
            plt.subplot(1,2,2)
            error = trueValues - predictValues
            # symmetric x range sized by the largest absolute error
            lenV = max([abs(min(error)),abs(max(error))])
            print('errors: ')
            print(error)
            plt.xlim(0.-lenV*1.2, lenV*1.2)
            plt.hist(error, bins=30, facecolor='g')
            #plt.hist(error, bins=25, facecolor='g') #year built
            #plt.xlim(-100, 100) # year built
            #plt.hist(error, bins=36, facecolor='g') #num of stories
            #plt.xlim(-26, 26) # num of stories
            plt.xlabel("Prediction Error", fontsize=30)
            plt.ylabel("Count", fontsize=30)
            plt.tick_params(axis='x', labelsize=25)
            #plt.savefig('data/Predictions_error.pdf')
            #plt.savefig('data/Predictions_error.png')
            if self.saveFigs:
                plt.savefig(self.workDir+'/Prediction_errors.png')
                plt.savefig(self.workDir+'/Prediction_errors.pdf')
                print("Figures are saved in ", self.workDir)
            plt.show()
def test(self):
# test
if self.modelType == "classification":
model = self.test_classification_model()
else:
test_predictions = self.model.predict(self.normed_test_data).flatten()
if self.writeTmpData:
np.savetxt(self.workDir+'/test_predictions.txt', test_predictions)
print("Test predictions are saved in ", self.workDir+'/test_predictions.txt')
trueValues = self.test_labels.flatten()
if self.plotFigs:
self.plot(trueValues, test_predictions)
return trueValues,test_predictions
def test_classification_model(self):
# test
test_predictions = self.model.predict(self.normed_test_data)
if self.writeTmpData:
np.savetxt(self.workDir+'/test_predictions.txt', test_predictions)
print("Results are saved in ", self.workDir+'/test_predictions.txt')
plt.figure(figsize=(20,10))
plt.subplot(1,2,1)
trueValues = self.test_labels.flatten()
#predictValues = test_predictions[0::5]
predictValues = np.argmax(test_predictions,axis=1)
print(trueValues)
print(predictValues)
print(len(predictValues))
plt.scatter(trueValues, predictValues)
plt.xlabel('True Values [label]')
plt.ylabel('Predictions [label]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
plt.subplot(1,2,2)
error = predictValues - trueValues
print(self.train_dataset)
print('errors: ')
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [label]")
_ = plt.ylabel("Count")
if self.saveFigs:
plt.savefig(self.workDir+'/Prediction_errors.png')
plt.savefig(self.workDir+'/Prediction_errors.pdf')
print("Figures are saved in ", self.workDir)
#plt.savefig('data/Predictions_classification_error.png')
plt.show()
def predict(self, pt):
X = self.getX(pt, N=self.numNei)
X = self.norm(X)
Y = self.model.predict([X]).flatten().item()
return Y
def predict_simple(self, pt):
X = [self.norm(pt)]
Y = self.model.predict([X]).flatten().item()
return Y
def |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.