input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import logging
import re
from collections import OrderedDict
import mock
import pytest
from datadog_checks.base import AgentCheck
# Mark every test in this module with the `metadata` pytest marker.
pytestmark = pytest.mark.metadata
# Dotted path of the agent stub function patched throughout these tests.
SET_CHECK_METADATA_METHOD = 'datadog_checks.base.stubs.datadog_agent.set_check_metadata'
# The order is used to derive the display name for the regex tests
NON_STANDARD_VERSIONS = OrderedDict()
class TestAttribute:
    """Behavior of the check's metadata manager attribute."""

    def test_default(self):
        """A freshly constructed check has no metadata manager."""
        check = AgentCheck('test', {}, [{}])

        assert check._metadata_manager is None

    def test_no_check_id_error(self):
        """Submitting metadata without a check ID fails outside the stub aggregator."""
        check = AgentCheck('test', {}, [{}])

        with mock.patch('datadog_checks.base.checks.base.using_stub_aggregator', False), pytest.raises(RuntimeError):
            check.set_metadata('foo', 'bar')
class TestRaw:
    """Raw pass-through submission and custom transformer registration."""

    def test_default(self):
        """A value with no registered transformer is forwarded untouched."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'

        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('foo', 'bar')

        m.assert_called_once_with('test:123', 'foo', 'bar')

    def test_new_transformer(self):
        """Subclasses can register transformers via METADATA_TRANSFORMERS."""

        class NewAgentCheck(AgentCheck):
            METADATA_TRANSFORMERS = {'foo': lambda value, options: value[::-1]}

        check = NewAgentCheck('test', {}, [{}])
        check.check_id = 'test:123'

        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('foo', 'bar')

        m.assert_called_once_with('test:123', 'foo', 'rab')
class TestVersion:
    """Tests for the built-in `version` metadata transformer.

    Covers the supported version schemes (`semver`, `regex`, `parts`),
    transformer overrides, and the error logging emitted on malformed input.
    """

    def test_override_allowed(self):
        """Subclasses may replace the built-in `version` transformer."""

        class NewAgentCheck(AgentCheck):
            METADATA_TRANSFORMERS = {'version': lambda value, options: value[::-1]}

        check = NewAgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', 'bar')
        m.assert_called_once_with('test:123', 'version', 'rab')

    def test_unknown_scheme(self, caplog):
        """An unsupported scheme drops the submission and logs an error."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0.0', scheme='foo')
        assert m.call_count == 0
        expected_message = 'Unable to transform `version` metadata value `1.0.0`: Unsupported version scheme `foo`'
        for _, level, message in caplog.record_tuples:
            if level == logging.ERROR and message == expected_message:
                break
        else:
            raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))

    def test_semver_default(self):
        """`semver` is the default scheme; each version part is submitted."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0.5')
        m.assert_any_call('test:123', 'version.major', '1')
        m.assert_any_call('test:123', 'version.minor', '0')
        m.assert_any_call('test:123', 'version.patch', '5')
        m.assert_any_call('test:123', 'version.raw', '1.0.5')
        m.assert_any_call('test:123', 'version.scheme', 'semver')
        assert m.call_count == 5

    def test_semver_release(self):
        """A semver pre-release component is submitted as `version.release`."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0.5-gke.6', scheme='semver')
        m.assert_any_call('test:123', 'version.major', '1')
        m.assert_any_call('test:123', 'version.minor', '0')
        m.assert_any_call('test:123', 'version.patch', '5')
        m.assert_any_call('test:123', 'version.release', 'gke.6')
        m.assert_any_call('test:123', 'version.raw', '1.0.5-gke.6')
        m.assert_any_call('test:123', 'version.scheme', 'semver')
        assert m.call_count == 6

    def test_semver_release_and_build(self):
        """Semver build metadata is submitted as `version.build`."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0.5-gke.6+3', scheme='semver')
        m.assert_any_call('test:123', 'version.major', '1')
        m.assert_any_call('test:123', 'version.minor', '0')
        m.assert_any_call('test:123', 'version.patch', '5')
        m.assert_any_call('test:123', 'version.release', 'gke.6')
        m.assert_any_call('test:123', 'version.build', '3')
        m.assert_any_call('test:123', 'version.raw', '1.0.5-gke.6+3')
        m.assert_any_call('test:123', 'version.scheme', 'semver')
        assert m.call_count == 7

    def test_semver_invalid(self, caplog):
        """A non-semver string under the semver scheme is dropped with an error log."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0', scheme='semver')
        assert m.call_count == 0
        expected_prefix = 'Unable to transform `version` metadata value `1.0`: '
        for _, level, message in caplog.record_tuples:
            if level == logging.ERROR and message.startswith(expected_prefix):
                break
        else:
            raise AssertionError('Expected ERROR log starting with message: {}'.format(expected_prefix))

    # NOTE: the setdefault calls populate NON_STANDARD_VERSIONS at class
    # definition time; `ids` below relies on that insertion order.
    @pytest.mark.parametrize(
        'version, pattern, expected_parts',
        [
            (
                NON_STANDARD_VERSIONS.setdefault('Docker', '18.03.0-ce, build 0520e24'),
                r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)-(?P<release>\w+), build (?P<build>\w+)',
                {'major': '18', 'minor': '03', 'patch': '0', 'release': 'ce', 'build': '0520e24'},
            ),
            (
                NON_STANDARD_VERSIONS.setdefault('Exchange Server', '2007 SP3 8.3.83.006'),
                r'(?P<major>\d+) SP(?P<minor>\d+) (?P<build>[\w.]+)',
                {'major': '2007', 'minor': '3', 'build': '8.3.83.006'},
            ),
            (NON_STANDARD_VERSIONS.setdefault('Oracle', '19c'), r'(?P<major>\d+)\w*', {'major': '19'}),
            (
                NON_STANDARD_VERSIONS.setdefault('Presto', '0.221'),
                r'(?P<major>\d+).(?P<minor>\d+)',
                {'major': '0', 'minor': '221'},
            ),
            (
                NON_STANDARD_VERSIONS.setdefault('missing subgroup', '02'),
                r'(?P<major>\d+)(\.(?P<minor>\d+))?',
                {'major': '02'},
            ),
            (
                NON_STANDARD_VERSIONS.setdefault('precompiled', '1.2.3'),
                re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'),
                {'major': '1', 'minor': '2', 'patch': '3'},
            ),
        ],
        ids=list(NON_STANDARD_VERSIONS),
    )
    def test_regex(self, version, pattern, expected_parts):
        """Named regex subgroups become version parts; scheme defaults to the check name."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', version, scheme='regex', pattern=pattern)
        for name, value in expected_parts.items():
            m.assert_any_call('test:123', 'version.{}'.format(name), value)
        m.assert_any_call('test:123', 'version.raw', version)
        # Without final_scheme, the reported scheme is the check name.
        m.assert_any_call('test:123', 'version.scheme', 'test')
        assert m.call_count == len(expected_parts) + 2

    def test_regex_final_scheme(self):
        """`final_scheme` overrides the reported scheme for regex matches."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata(
                'version',
                '1.2.3.beta',
                scheme='regex',
                final_scheme='semver',
                pattern=r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+).(?P<release>\w+)',
            )
        m.assert_any_call('test:123', 'version.major', '1')
        m.assert_any_call('test:123', 'version.minor', '2')
        m.assert_any_call('test:123', 'version.patch', '3')
        m.assert_any_call('test:123', 'version.release', 'beta')
        m.assert_any_call('test:123', 'version.raw', '1.2.3.beta')
        m.assert_any_call('test:123', 'version.scheme', 'semver')
        assert m.call_count == 6

    def test_regex_no_pattern(self, caplog):
        """The regex scheme requires a `pattern` option."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0', scheme='regex')
        assert m.call_count == 0
        expected_message = (
            'Unable to transform `version` metadata value `1.0`: Version scheme `regex` requires a `pattern` option'
        )
        for _, level, message in caplog.record_tuples:
            if level == logging.ERROR and message == expected_message:
                break
        else:
            raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))

    def test_regex_no_match(self, caplog):
        """A version that does not match the pattern is dropped with an error log."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0.0', scheme='regex', pattern='foo')
        assert m.call_count == 0
        expected_message = (
            'Unable to transform `version` metadata value `1.0.0`: '
            'Version does not match the regular expression pattern'
        )
        for _, level, message in caplog.record_tuples:
            if level == logging.ERROR and message == expected_message:
                break
        else:
            raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))

    def test_regex_no_subgroups(self, caplog):
        """A pattern without named subgroups cannot produce version parts."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0.0', scheme='regex', pattern=r'\d\.\d\.\d')
        assert m.call_count == 0
        expected_message = (
            'Unable to transform `version` metadata value `1.0.0`: '
            'Regular expression pattern has no named subgroups'
        )
        for _, level, message in caplog.record_tuples:
            if level == logging.ERROR and message == expected_message:
                break
        else:
            raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))

    def test_parts(self):
        """`parts` submits the provided part_map entries verbatim.

        FIX: the version string and the expected `version.raw` value were
        mutually inconsistent (an apparent data-scrubbing artifact); both
        now use '19.15.2.2', matching test_parts_final_scheme below —
        `version.raw` must always echo the submitted version.
        """
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata(
                'version',
                '19.15.2.2',
                scheme='parts',
                part_map={'year': '19', 'major': '15', 'minor': '2', 'patch': '2', 'revision': '56789'},
            )
        m.assert_any_call('test:123', 'version.year', '19')
        m.assert_any_call('test:123', 'version.major', '15')
        m.assert_any_call('test:123', 'version.minor', '2')
        m.assert_any_call('test:123', 'version.patch', '2')
        m.assert_any_call('test:123', 'version.revision', '56789')
        m.assert_any_call('test:123', 'version.raw', '19.15.2.2')
        # Without final_scheme, the reported scheme is the check name.
        m.assert_any_call('test:123', 'version.scheme', 'test')
        assert m.call_count == 7

    def test_parts_final_scheme(self):
        """`final_scheme` overrides the reported scheme for the parts scheme."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata(
                'version',
                '19.15.2.2',
                scheme='parts',
                final_scheme='calver',
                part_map={'year': '19', 'major': '15', 'minor': '2', 'patch': '2', 'revision': '56789'},
            )
        m.assert_any_call('test:123', 'version.year', '19')
        m.assert_any_call('test:123', 'version.major', '15')
        m.assert_any_call('test:123', 'version.minor', '2')
        m.assert_any_call('test:123', 'version.patch', '2')
        m.assert_any_call('test:123', 'version.revision', '56789')
        m.assert_any_call('test:123', 'version.raw', '19.15.2.2')
        m.assert_any_call('test:123', 'version.scheme', 'calver')
        assert m.call_count == 7

    def test_parts_no_part_map(self, caplog):
        """The parts scheme requires a `part_map` option."""
        check = AgentCheck('test', {}, [{}])
        check.check_id = 'test:123'
        with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
            check.set_metadata('version', '1.0', scheme='parts')
        assert m.call_count == 0
        expected_message = (
            'Unable to transform `version` metadata value `1.0`: '
            'Version scheme `parts` requires a `part_map` option'
        )
        for _, level, message in caplog.record_tuples:
            if level == logging.ERROR and message == expected_message:
                break
        else:
            raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
class TestConfig:
def test_no_section(self, caplog):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', {})
assert m.call_count == 0
expected_message = 'Unable to transform `config` metadata: The `section` option is required'
for _, level, message in caplog.record_tuples:
if level == logging.ERROR and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
def test_non_primitive(self, caplog):
check = AgentCheck('test', {}, [{'foo': ['bar']}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['foo'])
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
expected_message = (
'Skipping metadata submission of non-primitive type `list` for field `foo` in section `instance`'
)
for _, level, message in caplog.record_tuples:
if level == logging.WARNING and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
def test_no_whitelist(self):
check = AgentCheck('test', {}, [{'foo': 'bar'}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance')
assert m.call_count == 0
def test_whitelist(self):
check = AgentCheck('test', {}, [{'foo': 'bar'}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['foo'])
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
data = json.loads(args[2])[0]
assert data.pop('is_set', None) is True
assert data.pop('value', None) == 'bar'
assert not data
def test_whitelist_no_field(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['foo'])
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
data = json.loads(args[2])[0]
assert data.pop('is_set', None) is False
assert not data
def test_blacklist(self):
check = AgentCheck('test', {}, [{'product_pw': 'foo'}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['product_pw'], blacklist=['pw'])
assert m.call_count == 0
def test_blacklist_default(self):
check = AgentCheck('test', {}, [{'product_password': '<PASSWORD>'}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['product_password'])
assert m.call_count == 0
def test_whitelist_user_override(self):
check = AgentCheck('test', {}, [{'foo': 'bar', 'bar': 'foo', 'metadata_whitelist': ['bar']}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['foo', 'bar'])
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
data = json.loads(args[2])
assert len(data) == 1
data = data[0]
assert data.pop('is_set', None) is True
assert data.pop('value', None) == 'foo'
assert not data
def test_blacklist_user_override(self):
check = AgentCheck('test', {}, | |
Complie the message
smsg = self.read(timeout = 0.1)
if smsg:
msg = msg+smsg.decode("utf-8")
self.touchDisable() # Turn off touch screen.
self.touchLogEnd=True # Make sure this function marked as ending.
self.flush()
if msg:
lines = msg.split('\r') # Split message into lines with CR
N = len(lines)
values=[touch() for i in range(N)]
#for all the touches
for i in range(N):
v = lines[i].split(';') # Split line into components at ;
# If we have read a touch event.
if '$touch' in v[0]:
#Grab the details of the event into a structure.
values[i].time = float(v[1])
values[i].x = int(float(v[2]))
values[i].y = int(float(v[3]))
if int(float(v[4])) == 1:
values[i].dir = 'touched'
else:
values[i].dir = 'released'
self.touchQ.put(values[i])
elif '#status' in v[0]: # GOt a status event by mistake
warning=("_touchLog found"
" status on input so skipping that")
logging.warning(warning)
else: # Have picked up some other stuff on the input
warning=("_touchLog found"
" unknown data on input so skipping that")
logging.warning(warning)
#============================================================#
# Touch event functions can be used to get a list of more #
# meaningful events following any getTouch commands #
# works a bit like an eye movement gaze detector #
#============================================================#
def setTouchEventParams(self, distance=None, t=None ,type=None):
""" Sets the parameters for touch event detection.
Distance is how far the touch should move to count
as a new touch.
Time is how long should lapse between touches for the
second one to count.
Example:
bits.startTouchLog()
while not event:
#do some processing
continue
bits.stopTouchLog()
res=bits.getTouchLog()
bits.setTouchEventParams(distance=20, t=0.001, type='touched')
events=bits.getTouchEvents()
Will extract touch events (not releases) that are 20 pixels and 1 ms apart.
"""
if distance!=None:
self.touchDistance = distance
if t != None:
self.touchTime = t
if type != None:
self.touchType = type
    def getTouchEvents(self, distance=None, t=None, type=None):
        """ Scans the touch log to extract touch events.
        You need to run getTouchLog before you run this function.
        Returns as list of Dict like structures with members
        time, x, y, and dir
        time is the time stamp of the event.
        x and y are the x and y locations of the event.
        direction is the type of event: 'touched', 'released'
        These values can be read as a structure:
        res=getTouchResponses(3)
        res[0].dir, res[0].x, res[0].time
        or dictionary
        res[0]['dir'], res[0]['x'], res[0]['time']
        Example:
        bits.startTouchLog()
        while not event:
            #do some processing
            continue
        bits.stopTouchLog()
        bits.getTouchLog()
        res=bits.getTouchEvents(distance=20, t=0.001, type='touched')
        print(res[0]['time']
        Will extract touch events (not releases) that are 20 pixels and 1 ms apart.
        """
        # Any parameters supplied here override the stored detection settings.
        self.setTouchEventParams(distance,t,type)
        N=len(self.touchValues)
        self.touchEvents = []
        nEvents = 0
        # Seed the "last recorded event" trackers far away in space and time
        # so the first qualifying log entry always counts as a new event.
        rT = -999999
        rX = -999999
        rY = -999999
        rType = 'None'
        # NOTE(review): nEvents is initialized twice; the second assignment
        # is redundant but harmless.
        nEvents = 0
        for i in range(N):
            # Euclidean distance from the last recorded event.
            dist=(((self.touchValues[i].x - rX)**2.0)
                  +((self.touchValues[i].y - rY)**2.0))**0.5
            # Time elapsed since the last recorded event.
            T = self.touchValues[i].time - rT
            # Only include events that are sufficiently far from
            # last recorded event in time and distance, or if
            # the direction of touch has changed and the new
            # direction is in the looked for type descriptor.
            if ((dist > self.touchDistance
                 and T > self.touchTime)
                or (rType != self.touchValues[i].dir
                    and self.touchValues[i].dir in self.touchType)):
                # `touch()` is assumed to be a dict-like record type defined
                # elsewhere in this module — confirm against its definition.
                self.touchEvents.append(touch())
                self.touchEvents[nEvents].time = self.touchValues[i].time
                self.touchEvents[nEvents].x = self.touchValues[i].x
                self.touchEvents[nEvents].y = self.touchValues[i].y
                self.touchEvents[nEvents].dir = self.touchValues[i].dir
                # This event becomes the new reference for the next iteration.
                rT = self.touchValues[i].time
                rX = self.touchValues[i].x
                rY = self.touchValues[i].y
                rType = self.touchValues[i].dir
                nEvents = nEvents + 1
        # Cache the count alongside the event list for later queries.
        self.touch_nEvents = nEvents
        return self.touchEvents
def getTouchEvent(self, N=0, distance=None, t=None, type=None):
""" Scans the touch log to return the Nth touch event.
You need to run getTouchLog before you run this function.
Returns as list of Dict like structures with members
time, x, y, and dir
time is the time stamp of the event.
x and y are the x and y locations of the event.
direction is the type of event: 'touched', 'released'
These values can be read as a structure:
res=getTouchResponses(3)
res[0].dir, res[0].x, res[0].time
or dictionary
res[0]['dir'], res[0]['x'], res[0]['time']
Example:
bits.startTouchLog()
while not event:
#do some processing
continue
bits.stopTouchLog()
bits.getTouchLog()
res=bits.getTouchEvent(N=10, distance=20, time=0.001, type='touched')
print(res.time)
Will extract the 10th touch events (ingnoreing releases) that are 20 pixels and 1 ms apart.
"""
values = self.getTouchEvents(distance, t, type)
value = values[N]
return value
class Config(object):
    def __init__(self, bits):
        # NOTE(review): the original comment said a weakref is needed to
        # avoid circular references, but a strong reference is stored here —
        # confirm whether `weakref.ref(bits)` was intended.
        self.bits = bits
        self.load()  # try to fetch previous config file
        self.logFile = 0  # replace with a file handle if opened
def load(self, filename=None):
"""If name is None then we'll try to save to
"""
def parseLUTLine(line):
return line.replace('[', '').replace(']', '').split(',')
if filename is None:
from psychopy import prefs
filename = os.path.join(prefs.paths['userPrefsDir'],
'crs_bits.cfg')
if os.path.exists(filename):
config = configparser.RawConfigParser()
with open(filename) as f:
config.readfp(f)
self.os = config.get('system', 'os')
self.gfxCard = config.get('system', 'gfxCard')
self.identityLUT = np.ones([256, 3])
_idLUT = 'identityLUT'
self.identityLUT[:, 0] = parseLUTLine(config.get(_idLUT, 'r'))
self.identityLUT[:, 1] = parseLUTLine(config.get(_idLUT, 'g'))
self.identityLUT[:, 2] = parseLUTLine(config.get(_idLUT, 'b'))
return True
else:
logging.warn('no config file yet for %s' % self.bits)
self.identityLUT = None
self.gfxCard = None
self.os = None
return False
def _getGfxCardString(self):
from pyglet.gl import gl_info
return "%s: %s" % (gl_info.get_renderer(),
gl_info.get_version())
def _getOSstring(self):
import platform
return platform.platform()
def save(self, filename=None):
if filename is None:
from psychopy import prefs
filename = os.path.join(prefs.paths['userPrefsDir'],
'crs_bits.cfg')
logging.info('saved Bits# config file to %r' % filename)
# create the config object
config = configparser.RawConfigParser()
config.add_section('system')
self.os = config.set('system', 'os', self._getOSstring())
self.gfxCard = config.set('system', 'gfxCard',
self._getGfxCardString())
# save the current LUT
config.add_section('identityLUT')
config.set('identityLUT', 'r', list(self.identityLUT[:, 0]))
config.set('identityLUT', 'g', list(self.identityLUT[:, 1]))
config.set('identityLUT', 'b', list(self.identityLUT[:, 2]))
# save it to disk
with open(filename, 'w') as fileObj:
config.write(fileObj)
logging.info("Saved %s configuration to %s" % (self.bits, filename))
def quickCheck(self):
"""Check whether the current graphics card and OS match those of
the last saved LUT
"""
if self._getGfxCardString() != self.gfxCard:
logging.warn("The graphics card or its driver has changed. "
"We'll re-check the identity LUT for the card")
return 0
if self._getOSstring() != self.os:
logging.warn("The OS has been changed/updated. We'll re-check"
" the identity LUT for the card")
return 0
return 1 # all seems the same as before
def testLUT(self, LUT=None, demoMode=False):
"""Apply a LUT to the graphics card gamma table and test whether
we get back 0:255 in all channels.
:params:
LUT: The lookup table to be tested (256x3).
If None then the LUT will not be altered
:returns:
a 256 x 3 array of error values (integers in range 0:255)
"""
bits = self.bits # if you aren't yet in
win = self.bits.win
if LUT is not None:
win.gammaRamp = LUT
# create the patch of stimulus to test
expectedVals = list(range(256))
w, h = win.size
# NB psychopy uses -1:1
testArrLums = np.resize(np.linspace(-1, 1, 256), [256, 256])
stim = visual.ImageStim(win, image=testArrLums, size=[256, h],
pos=[128 - w//2, 0], units='pix')
expected = np.repeat(expectedVals, 3).reshape([-1, 3])
stim.draw()
# make sure the frame buffer was correct (before gamma was applied)
frm = np.array(win.getMovieFrame(buffer='back'))
assert np.alltrue(frm[0, 0:256, 0] == list(range(256)))
win.flip()
# use bits sharp to test
if demoMode:
return [0] * 256
pixels = bits.getVideoLine(lineN=50, nPixels=256)
errs = pixels - expected
if self.logFile:
for ii, channel in enumerate('RGB'):
self.logFile.write(channel)
for pixVal in pixels[:, ii]:
self.logFile.write(', %i' % pixVal)
self.logFile.write('\n')
return errs
def findIdentityLUT(self, maxIterations=1000, errCorrFactor=1.0/5000,
nVerifications=50,
demoMode=True,
logFile=''):
"""Search for the identity LUT for this card/operating system.
This requires that the window being tested is fullscreen on the Bits#
monitor (or at least occupies the first 256 pixels in the top left
corner!)
:params:
LUT: The lookup table to be tested (256 x 3).
If None then the LUT will not be altered
errCorrFactor: amount of correction done for each iteration
number of repeats (successful) to check dithering
has been eradicated
demoMode: generate the screen but don't go into status mode
:returns:
a 256x3 array of error values (integers in range 0:255)
"""
t0 = time.time()
# create standard options
intel = np.linspace(.05, .95, 256)
one = np.linspace(0, 1.0, 256)
fraction = np.linspace(0.0, 65535.0/65536.0, num=256)
LUTs = {'intel': np.repeat(intel, 3).reshape([-1, 3]),
| |
version = "0.3.0",
sha256 =
"b4d4fc7228a5eb575ff440245269cf20e6eddd81d131151d5b1ef0d39d194ea1",
),
"gogol-doubleclick-bids":
struct(
version = "0.3.0",
sha256 =
"0c985e054319520307981d3459d3a443dc01d9880fb4fd996a180ad76deb143f",
),
"gogol-doubleclick-search":
struct(
version = "0.3.0",
sha256 =
"8d94967b535ef3f8a437a4d16e3987403ce761da12dff36b374d32bc1ed89af3",
),
"gogol-drive":
struct(
version = "0.3.0",
sha256 =
"9fae145cf1bf4be57b9f53aba11b26f82b1ae36b4d3e2bf61d2af009f91a65d0",
),
"gogol-firebase-rules":
struct(
version = "0.3.0",
sha256 =
"185f71924141bdcf286a1207e8b0563374879e11c17fca97a7e01c3dee0b3857",
),
"gogol-fitness":
struct(
version = "0.3.0",
sha256 =
"bb866b003882fc932082e60898f4d6261f4ffd26833f6e1682972df2030b94ab",
),
"gogol-fonts":
struct(
version = "0.3.0",
sha256 =
"bb96e0afcc5cf0ae7285f49e3bf8b9d30274f1f315d0632e96f3b56c999d4c5c",
),
"gogol-freebasesearch":
struct(
version = "0.3.0",
sha256 =
"6c0b7d61a05f6ece6e9e7ca9e2653cfeb18127dc70cb37ea146d3769dc65a20a",
),
"gogol-fusiontables":
struct(
version = "0.3.0",
sha256 =
"47cf0cf75946f5ed25bf812e4cd677a946e79c231c70fe000afad1d5fe518a45",
),
"gogol-games":
struct(
version = "0.3.0",
sha256 =
"bf99505fd85be3943ed32f71b0eec554baeb109dbc143321b1dbe0c9d10b1d64",
),
"gogol-games-configuration":
struct(
version = "0.3.0",
sha256 =
"cf93351629177224c8615b2a0f63ec5f3b4e02fd0f116368ffcc5bf87d9ce211",
),
"gogol-games-management":
struct(
version = "0.3.0",
sha256 =
"97acc96a19a038bcd3b232645480fbd36eea741b5a1ff706b6ca6a0b338757a4",
),
"gogol-genomics":
struct(
version = "0.3.0",
sha256 =
"6ef65fc936a005edfbd8ddb0544b3452b9d1ebbcbe696c7c26cc25b0a9243135",
),
"gogol-gmail":
struct(
version = "0.3.0",
sha256 =
"03bd9cc7bd8147901e870dbd5b8e15df85b31392bcec2f8156e0fac656293f41",
),
"gogol-groups-migration":
struct(
version = "0.3.0",
sha256 =
"b3b65424c086c42ee8e77fc90f0c18699fc662989aa36cd16e4b987e6633c730",
),
"gogol-groups-settings":
struct(
version = "0.3.0",
sha256 =
"681cc39a36e82ea2fdc7f084c75ad50f1ee25961f6bae3983e71b19eba31c4f7",
),
"gogol-identity-toolkit":
struct(
version = "0.3.0",
sha256 =
"9f4f3a7cf728eb5d1abb237192ebbcdfc37712d58e00698b53d4a2f54afbb3d5",
),
"gogol-kgsearch":
struct(
version = "0.3.0",
sha256 =
"566c71568fdfdd8f83e263e41511eb36ca315da777b9e62a82e80ae788e18d4e",
),
"gogol-latencytest":
struct(
version = "0.3.0",
sha256 =
"d329a3c92505dfeb97225ae86cb5cf13ef35707782f6910071f96873d9a6b4bd",
),
"gogol-logging":
struct(
version = "0.3.0",
sha256 =
"8f0058d85ebd8eaa459ea9c7ac4ff8abc5033e00c3285488ed3810903116b8c4",
),
"gogol-maps-coordinate":
struct(
version = "0.3.0",
sha256 =
"76734dcea7ce6536bfd5538c3066e1da59e05ec460a1ab20ffa6299f9e704faa",
),
"gogol-maps-engine":
struct(
version = "0.3.0",
sha256 =
"7bb71e90c975d025ede3d6d185d10553f3f56ba498cf1b32f5a3a9dde7f94695",
),
"gogol-mirror":
struct(
version = "0.3.0",
sha256 =
"15bab1544b8beb41e5ecd3cd70c8ad337d601f2f49302fd8f287a126e7127032",
),
"gogol-monitoring":
struct(
version = "0.3.0",
sha256 =
"ea9d36d17913aa98c9acc088a5211a03f38dc85abe79eab37ad66656c2a35a0e",
),
"gogol-oauth2":
struct(
version = "0.3.0",
sha256 =
"b674d7211638a76305d1ea0995f9ce17ef12657dca929744d48c00b8a7c1b239",
),
"gogol-pagespeed":
struct(
version = "0.3.0",
sha256 =
"3d431c1dc6bcefc3b4a4a38e7296fb2091fcda424b69bb8af7ee9c4f3830d6cc",
),
"gogol-partners":
struct(
version = "0.3.0",
sha256 =
"dbb506a04bc7a866364b7c9cce30503dd2f1ffbf2f4f0d0c8ababa4dafae1076",
),
"gogol-people":
struct(
version = "0.3.0",
sha256 =
"dbe9523e3a7b5d0a2128b56a2f5a948c87c2e9bbf5ae4439d859ce940ace4e48",
),
"gogol-play-moviespartner":
struct(
version = "0.3.0",
sha256 =
"ec03c3465bd8435357f231ab227d7312b01aa6b156591288aaa492e483d02c6c",
),
"gogol-plus":
struct(
version = "0.3.0",
sha256 =
"1b6d98a0b9d1498153fc599b37599ba30e3266f6a9b6e387a82882cde4e49a63",
),
"gogol-plus-domains":
struct(
version = "0.3.0",
sha256 =
"71ef298c3ec48cc9f92fcc0a7a254ddf345278ccd8686c76bee90bdab68c0a34",
),
"gogol-prediction":
struct(
version = "0.3.0",
sha256 =
"cfc088e8e8837d65f4f3b3e3c2aea7491e1ae40aac30445924ff79ce29cfd28c",
),
"gogol-proximitybeacon":
struct(
version = "0.3.0",
sha256 =
"5b540108ac241848a83766ee5ce8100d805d368f7f6f11aea1f1d5b4499da4b8",
),
"gogol-pubsub":
struct(
version = "0.3.0",
sha256 =
"335adc19c913d446c226c16c9107ee7e69d85cf7fe4e2146634eae812be658b0",
),
"gogol-qpxexpress":
struct(
version = "0.3.0",
sha256 =
"a00c993c43ca00d141590c203162b923443d9f9a3a1dfc100791d8b984136883",
),
"gogol-replicapool":
struct(
version = "0.3.0",
sha256 =
"54861dacc5e2c299433d784bd0c8a8a6e8e6f8f7001ed9ece7e5d7e9d77153ce",
),
"gogol-replicapool-updater":
struct(
version = "0.3.0",
sha256 =
"8260d9fd9284f5aee5691118c60468177ded3a97cca561613aa015af1c2ff591",
),
"gogol-resourcemanager":
struct(
version = "0.3.0",
sha256 =
"cb50c3f678c5a93d3db6a68dd2b9e06f2da8a0f802e7faa913ed97ce234d5258",
),
"gogol-resourceviews":
struct(
version = "0.3.0",
sha256 =
"77aa60901029f7d9be3e93acbf92a6e9d6fdc7f1bf8931571c7892fa7b938f84",
),
"gogol-script":
struct(
version = "0.3.0",
sha256 =
"01cd14a58d60422083fffe8a1ff7401808f0d930f87924fc27b136fb476a4bd0",
),
"gogol-sheets":
struct(
version = "0.3.0",
sha256 =
"2dd5593fb556dc62237dbec27eef6f195cc3661201df019b504d5161443a41ce",
),
"gogol-shopping-content":
struct(
version = "0.3.0",
sha256 =
"a5c4728c17ee78d63c39770d503115d13e7931fc711d7724d816be3c45470617",
),
"gogol-siteverification":
struct(
version = "0.3.0",
sha256 =
"b0853d9a5cc5d1a6e18966b8b4e525487c8cac9e30a51b0297bd5c20b6a4245e",
),
"gogol-spectrum":
struct(
version = "0.3.0",
sha256 =
"ab387e91fbfb6aa4695f2785383df1991891d900d072432812f01adfa532a92c",
),
"gogol-sqladmin":
struct(
version = "0.3.0",
sha256 =
"7964b65fa87ef3540dc96a9e09ea13f210e361cc7e1213b76ea196e3b1069c98",
),
"gogol-storage":
struct(
version = "0.3.0",
sha256 =
"3b95f472a42d714031d5b553707c403dedc31b86d8dc0760f5e071be567ec4a2",
),
"gogol-storage-transfer":
struct(
version = "0.3.0",
sha256 =
"eb13d8452cff8e356ef40fec0d0333851a367c3a5a76c1db7a242849b5a18a47",
),
"gogol-tagmanager":
struct(
version = "0.3.0",
sha256 =
"f7511a169acb4d04f86f6e7bad9a1a026a66d8642cd18bc6a6afcdedc7a45743",
),
"gogol-taskqueue":
struct(
version = "0.3.0",
sha256 =
"03cbed41c2add112dbddf606cd3d7ea1bc857bd93e6742c296c40a0cd0f63f47",
),
"gogol-translate":
struct(
version = "0.3.0",
sha256 =
"73b3fcc645c6bc55303dd4f5877c289815298eda93feee5c160cf2f063f5952f",
),
"gogol-urlshortener":
struct(
version = "0.3.0",
sha256 =
"9bb9444e5b0a60494751ffba65b1bd887a71d8d1558e613b04dbffeeee98bcc2",
),
"gogol-useraccounts":
struct(
version = "0.3.0",
sha256 =
"298efe783f4dcdab3c181ec1ca12ae3f6e9bf758b05fc010ad7b832e10c0f53b",
),
"gogol-vision":
struct(
version = "0.3.0",
sha256 =
"c99525ab5beec654bbcdbf0d751869971aa915d9b28d4a04f870ecb1d9f94deb",
),
"gogol-webmaster-tools":
struct(
version = "0.3.0",
sha256 =
"b747ffd277b136153717309d97b149c55beba77d0803698a389118b02c977766",
),
"gogol-youtube":
struct(
version = "0.3.0",
sha256 =
"32e812a1d9fd447b23b6dfb3e6f93db64f62fef7ccf6c5001f769c2eb26c67eb",
),
"gogol-youtube-analytics":
struct(
version = "0.3.0",
sha256 =
"70b5a5eac8da50986b23a1e2dcd44ebe0adb6ba394696db5e1579a1295440881",
),
"gogol-youtube-reporting":
struct(
version = "0.3.0",
sha256 =
"8edc2c9f8bcc854e61a89f6dc92732dc92a197d109808f56df79c01733551d49",
),
"google-cloud":
struct(
version = "0.0.4",
sha256 =
"09a77ce6846ea0c5f9d7e5578dcddcbaf4905437445edb45c2da35456324fb9a",
),
"google-oauth2-jwt":
struct(
version = "0.2.2",
sha256 =
"38dc52d516d085c50c4c1771a0364417d6b79aef1caa7558af21feda35c09b2a",
),
"google-translate":
struct(
version = "0.4.1",
sha256 =
"b663f7fd7c72cac2b630b234074b5acf3d1dcdfccef1f87a03db32a1351ef176",
),
"gpolyline":
struct(
version = "0.1.0.1",
sha256 =
"28b3a644853ba6f0a7d6465d8d62646a10c995008a799ae67e728c8cf4a17a05",
),
"graph-core":
struct(
version = "0.3.0.0",
sha256 =
"378f0baa40ebbb78e8c389f79e363eb573cdf182f799684d2f3d6ac51b10e854",
),
"graph-wrapper":
struct(
version = "0.2.5.1",
sha256 =
"8361853fca2d2251bd233e18393053dd391d21ca6f210b2bc861b0e0f4c2e113",
),
"graphs":
struct(
version = "0.7",
sha256 =
"eea656ac6092eac99bafc0b7817efa34529b895408fc1267a5b573fb332f6f4c",
),
"graphviz":
struct(
version = "2999.19.0.0",
sha256 =
"af0a7ff197c9de3f23e6653541446f755c824083ced04b629df6d19523fe04ea",
),
"gravatar":
struct(
version = "0.8.0",
sha256 =
"6f6000acaea47f3fc8711f5a2a62d5fbe96f5bb698fcb997f9f07ffe3102f4d7",
),
"graylog":
struct(
version = "0.1.0.1",
sha256 =
"2d8173e61da8d02c39cb95e6ccea8a167c792f682a496aed5fe4edfd0e6a0082",
),
"groom":
struct(
version = "0.1.2.1",
sha256 =
"a6b4a4d3af1b26f63039f04bd4176493f8dd4f6a9ab281f0e33c0151c20de59d",
),
"groundhog":
struct(
version = "0.8",
sha256 =
"16955dfe46737481400b1accd9e2b4ef3e7318e296c8b4838ba0651f7d51af1c",
),
"groundhog-inspector":
struct(
version = "0.8.0.2",
sha256 =
"bfbad62b62174e24f8fe29ce7d3d232392a23221107a32397d91c22531e87af1",
),
"groundhog-mysql":
struct(
version = "0.8",
sha256 =
"51ad8be513110081fff4333ae532b35e7ac5b35c4673e4c982bc0eca6c485666",
),
"groundhog-postgresql":
struct(
version = "0.8.0.1",
sha256 =
"ad8ef33fb170dc63f97ef2add891d2e20f279f12495a2f56c7086d49c20b95e8",
),
"groundhog-sqlite":
struct(
version = "0.8",
sha256 =
"7dcbbd4bcf9b38408bc29608a514a2b535c85490e4649090c342603c91283092",
),
"groundhog-th":
struct(
version = "0.8.0.2",
sha256 =
"26958d982f2dd17aeacdf22386fd87bf81a5acdc93b28e80b93beaba6c6d3d8f",
),
"group-by-date":
struct(
version = "0.1.0.2",
sha256 =
"b0b863add81e83c817dba93a8ab22c0f4b7e57643fafc630ac73190d9ee2a527",
),
"grouped-list":
struct(
version = "0.2.1.4",
sha256 =
"309d8b5409ef785bd8720658e4fecc233c65f56002741f1e9b5d0f7f584d369c",
),
"groupoids":
struct(
version = "4.0",
sha256 =
"6671953fa0970c13ac8014278fcd6227b4c07e1a69d5a23965e2df1418218a22",
),
"groups":
struct(
version = "0.4.1.0",
sha256 =
"dd4588b71dfff42b9a30cb40304912742b95db964b20f51951aff0eee7f3f33d",
),
"gtk2hs-buildtools":
struct(
version = "0.13.3.1",
sha256 =
"220f2f4aa1e01b8585fddf35bfc9f3a9dd300f2308d3c2b800c621cdd2ce7154",
),
"gym-http-api":
struct(
version = "0.1.0.0",
sha256 =
"1708df8beba2df0cd2d4dfd34f1a138a96930f9713bb22415d11c79ff8b5a845",
),
"h2c":
struct(
version = "1.0.0",
sha256 =
"4be2c9d54084175777624770640850aba33d7e4a31e2dc8096c122f737965499",
),
"hOpenPGP":
struct(
version = "2.5.5",
sha256 =
"1801efa965085572197253eb77bfaf2fc2a20c18d93c43c436d506237871ad54",
),
"hackage-db":
struct(
version = "2.0",
sha256 =
"f8390ab421f89bd8b03df9c3d34c86a82ea26d150dfb5cfb1bdb16f20452bf27",
),
"hackage-security":
struct(
version = "0.5.2.2",
sha256 =
"507a837851264a774c8f4d400f798c3dac5be11dc428fe72d33ef594ca533c41",
),
"hackernews":
struct(
version = "1.3.0.0",
sha256 =
"65944d0feb940d967c6b9823d28550f797cb6bc85f0b5bb06fe588cbe97090a0",
),
"haddock-library":
struct(
version = "1.4.3",
sha256 =
"f764763f8004715431a184a981493781b8380e13fd89ca0075ac426edc5d445b",
),
"hailgun":
struct(
version = "0.4.1.6",
sha256 =
"066c4a4e6362420d7cd60315c3be561ea8ac06058682dda79d5180b68d317f42",
),
"hailgun-simple":
struct(
version = "0.1.0.0",
sha256 =
"30526e6b7ec6083b090e880ef6fe942cc8425d3b2700bac565e4fc6629ec2954",
),
"hakyll":
struct(
version = "4.10.0.0",
sha256 =
"82b7b84c5a45bcac95ba9652205a24c03418c7761d8ac0597816c646237ae57f",
),
"half":
struct(
version = "0.2.2.3",
sha256 =
"85c244c80d1c889a3d79073a6f5a99d9e769dbe3c574ca11d992b2b4f7599a5c",
),
"hamilton":
struct(
version = "0.1.0.2",
sha256 =
"15acc8563f60448621cffc58acf880487cc997e682e8cbc79032d5886bdc1cba",
),
"hamlet":
struct(
version = "1.2.0",
sha256 =
"d1c94b259163cb37f5c02ef3418ebf4caf8d95c8ee00588d4493aa3aae1a8a66",
),
"handwriting":
struct(
version = "0.1.0.3",
sha256 =
"7e1b406d19b2f39b34910462dce214c7ca91bb9d78bf9fafb9f906dd44d5beaa",
),
"hapistrano":
struct(
version = "0.3.5.2",
sha256 =
"845fed8a507e3d1b646f7b93d15d29842add5a142f034572ac1cd191face4b61",
),
"happstack-hsp":
struct(
version = "7.3.7.3",
sha256 =
"63185f6d991acf2bca3a060a40f4ba153e9cbbf8bd0d0db58c7d8cd74cd3f754",
),
"happstack-jmacro":
struct(
version = "7.0.12",
sha256 =
"bab4b4197373cb674c6fcbfe48a7a5d34ec31967b6e3c771f0049d885b74aeae",
),
"happstack-server":
struct(
version = "172.16.17.32",
sha256 =
"8075f6b4e8e1a88365989e64e2ace2dfd9b948b572af394b3eda632233108b15",
),
"happstack-server-tls":
struct(
version = "192.168.127.12",
sha256 =
"782fb8d8bf6b2f63c337a1308710f1611d789f42cedf7571a346c3a73a1fe142",
),
"happy":
struct(
version = "1.19.9",
sha256 =
"3e81a3e813acca3aae52721c412cde18b7b7c71ecbacfaeaa5c2f4b35abf1d8d",
),
"harp":
struct(
version = "0.4.3",
sha256 =
"4749146989a144c430f9aa52b4502570828080adb5b52117f335efc69f8ea99d",
),
"hasbolt":
struct(
version = "0.1.3.0",
sha256 =
"fd6fc49f57e8c03087103f733c130739a046398b5118b078aad2def31059665d",
),
"hashable":
struct(
version = "1.2.6.1",
sha256 =
"94ca8789e13bc05c1582c46b709f3b0f5aeec2092be634b8606dbd9c5915bb7a",
),
"hashable-time":
struct(
version = "0.2.0.1",
sha256 =
"b5752bb9b91d7cb98b01aa68c27d6a9338e1af39763c0157ef8322d0bc15234d",
),
"hashids":
struct(
version = "1.0.2.3",
sha256 =
"ecd74235e8f729514214715b828bf479701aa4b777e4f104ea07534a30822534",
),
"hashmap":
struct(
version = "1.3.3",
sha256 =
"dc06b57cd1bcd656d4602df7705a3f11a54ae65f664e9be472d42a9bdcd64755",
),
"hashtables":
struct(
version = "1.2.2.1",
sha256 =
"7aad530a9acca57ebe58774876c5a32e13f9c4ea37e80aa50c9be9ca88dcf6bc",
),
"haskeline":
struct(
version = "0.7.4.2",
sha256 =
"5543ec8cd932396360a5c707bf0958b39bb99a559fb4fd80ed366a953c6cb0eb",
),
"haskell-gi":
struct(
version = "0.20.3",
sha256 =
"18d9289daa6d5fb7b52463f58f18339791e18644a131d4f3b28926dbc66f4910",
),
"haskell-gi-base":
struct(
version = "0.20.8",
sha256 =
"91a82f170b92ed8374e1f7835c50434568702448f21ba32f0c87a15fb2fed3e1",
),
"haskell-gi-overloading":
struct(
version = "1.0",
sha256 =
"3ed797f8dd8d3535640b1ca99851bbc5968817c25a80fc499af42715d371682a",
),
"haskell-import-graph":
struct(
version = "1.0.3",
sha256 =
"6284909ac8edd0eb3e9ac3fcc606846c3531fbf49dfe5007a346320c89b503ba",
),
"haskell-lexer":
struct(
version = "1.0.1",
sha256 =
"d7d42ab3c4bc2f0232ede8b005fb9de57f862ee4c1c83aa61e1022346fc84366",
),
"haskell-lsp":
struct(
version = "0.2.0.1",
sha256 =
"1ed5ce34cf5ddd86ad7babdde239fc81455a1ebc07b8445270be7c3767a86f77",
),
"haskell-lsp-client":
struct(
version = "1.0.0.1",
sha256 =
"1c9b131e405bd1aec6e98e43f3926061fbe4e5ef4ac64cd08cae38082d40fd1b",
),
"haskell-names":
struct(
version = "0.9.0",
sha256 =
"bd202a3ea66f0ad3ce85fb79eac4ea4aae613b762b965d6708dd20171bf7f684",
),
"haskell-neo4j-client":
struct(
version = "0.3.2.4",
sha256 =
"30eea529b6d8bd4b887cec7a6b210dd80223d97811bb26042b0c1ccfc8c381c2",
),
"haskell-spacegoo":
struct(
version = "0.2.0.1",
sha256 =
"1eb3faa9a7f6a5870337eeb0bb3ad915f58987dfe4643fe95c91cbb2738ddd3c",
),
"haskell-src":
struct(
version = "1.0.2.0",
sha256 =
"2a25ee5729230edddb94af8d9881efbc1d8798bd316097f4646749cb2fa491a6",
),
"haskell-src-exts":
struct(
version = "1.19.1",
sha256 =
"f0f5b2867673d654c7cce8a5fcc69222ea09af460c29a819c23cccf6311ba971",
),
"haskell-src-exts-simple":
struct(
version = "1.19.0.0",
sha256 =
"41bc9166e7d08bb18b5309eb2af00ce122c70eeffd047da47e9e2d9db89a2406",
),
"haskell-src-exts-util":
struct(
version = "0.2.2",
sha256 =
"8f325e89da8b2856d22ddf5199a5ba961fbafc441613c392596e8edf32e33093",
),
"haskell-src-meta":
struct(
version = "0.8.0.2",
sha256 =
"4b7b143b94fcf147b96bb34822c2feeae29daadd3a22796ee36cadd5ca262c8b",
),
"haskell-tools-ast":
struct(
version = "1.0.0.4",
sha256 =
"a9bd5a15b850ed7d4d9c28506bb43b8ed22ec0af71eaeefc3584552cfe570d00",
),
"haskell-tools-backend-ghc":
struct(
version = "1.0.0.4",
sha256 =
"4f78531f87fec816c64e925bf0a40cfff91c5fe99331523143029365674f2031",
),
"haskell-tools-builtin-refactorings":
struct(
version = "1.0.0.4",
sha256 =
"9141196c58f6797729ce5becb31789d74ba89784defdd5792a6ed8699144b4c3",
),
"haskell-tools-cli":
struct(
version = "1.0.0.4",
sha256 =
"d878d8095429711f8426d85a7683e352a93ab781c2fb6ff44a924a109b4f3888",
),
"haskell-tools-daemon":
struct(
version = "1.0.0.4",
sha256 =
"9b2c32d9659b7b0db4fce768c1b432af91c22fd5807e2ac4b102d783ac4d1dbf",
),
"haskell-tools-debug":
struct(
version = "1.0.0.4",
sha256 =
"402ce6c2cf22297a7f8c31f19c90735001de8d0d897eb5c5d4f2b228dd420452",
),
"haskell-tools-demo":
struct(
version = "1.0.0.4",
sha256 =
"d87af00c4f15567ac3f2b1a29bac35337a0b9dda6e7a68e2904cc99739d1126f",
),
"haskell-tools-prettyprint":
struct(
version = "1.0.0.4",
sha256 =
"8905a72281f09927cff4e3426f535ab3201d402231f0a1118d06d0de1c9a3500",
),
"haskell-tools-refactor":
struct(
version = "1.0.0.4",
sha256 =
"6989c55c56547a9bf876bedcde7440a054e431356f05413bdc05e4e19d5456a4",
),
"haskell-tools-rewrite":
struct(
version = "1.0.0.4",
sha256 =
"4fba9235f33e47728fb6a21673778fb452af9ea8f69343e1f9d252a904374fca",
),
"haskintex":
struct(
version = "0.8.0.0",
sha256 =
"9d4974112f33baf47124a56f87b96892a0a37c10587098f851c71256d15cddd8",
),
"hasmin":
struct(
version = "1.0.1",
sha256 =
"53ae47c97b56590dd7b80c2d72a7c30d14c4bfa810a2b21c86abfe8a137dbec0",
),
"hasql":
struct(
version = "1.1.1",
sha256 =
"262514375a08afac2445e725ebb2d749537ce676984c2ae74f737aea931d088b",
),
"hasql-migration":
struct(
version = "0.1.3",
sha256 =
"2d49e3b7a5ed775150abf2164795b10d087d2e1c714b0a8320f0c0094df068b3",
),
"hasql-optparse-applicative":
struct(
version = "0.2.4",
sha256 =
"796b6522469fe0d7f437c82b1f833b87591572b26e16a8bcc0314bc73ee4ab3d",
),
"hasql-pool":
struct(
version = "0.4.3",
sha256 =
"124481643c6ba9a6150d1cc7ba9b9393b5a1a14cd70815d1a55a75163c80df21",
),
"hasql-transaction":
struct(
version = "0.5.2",
sha256 =
"d557161241449e9743e2a13fa2b5bdcc68b5fe97e9c6db8d9997b08777319e70",
),
"hastache":
struct(
version = "0.6.1",
sha256 =
"8c8f89669d6125201d7163385ea9055ab8027a69d1513259f8fbdd53c244b464",
),
"hasty-hamiltonian":
struct(
version = "1.3.0",
sha256 =
"15f713bc72cda97a5efad2c75d38915f3b765966142710f069db72ff49eefb31",
),
"haxl":
struct(
version = "0.5.1.0",
sha256 =
"49d485041646d3210385c312d34b0cc0c61d130e95ad935e06a695515f24a827",
),
"haxl-amazonka":
struct(
version = | |
# utils/pandafsm.py
# Copyright (c) 2020 NVIDIA Corporation
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""Class to represent grasp process as finite state machine."""
import copy
import numpy as np
from isaacgym import gymapi
from isaacgym import gymtorch
from utils import panda_fk
from utils import tet_based_metrics
DEBUG = False
class PandaFsm:
"""FSM for control of Panda hand for the grasp tests."""
def __init__(self, cfg, gym_handle, sim_handle, env_handles, franka_handle,
platform_handle, object_cof,
grasp_transform, obj_name, env_id, hand_origin, viewer,
envs_per_row, env_dim, youngs, density, directions, mode):
"""Initialize attributes of grasp evaluation FSM.
Args: gym_handle (gymapi.Gym): Gym object.
sim_handle (gymapi.Sim): Simulation object.
env_handles (list of gymapi.Env): List of all environments.
franka_handle (int): Handle of Franka panda hand actor.
platform_handle (int): Handle of support plane actor.
state (str): Name of initial FSM state.
object_cof (float): Coefficient of friction.
grasp_transform (isaacgym.gymapi.Transform): Initial pose of Franka panda hand.
obj_name (str): Name of object to be grasped.
env_id (int): Index of environment from env_handles.
hand_origin (gymapi.Transform): Pose of the hand origin (at its base).
viewer (gymapi.Viewer): Graphical display object.
envs_per_row (int): Number of environments to be placed in a row.
env_dim (float): Size of each environment.
youngs (str): Elastic modulus of the object, eg '3e5'.
density (str): Density of the object, eg. '1000'.
directions (np.ndarray): Array of directions to be evaluated in this env.
mode (str): Name of grasp test {e.g., 'pickup', 'reorient', 'lin_acc', 'ang_acc'}.
"""
self.mode = mode
self.started = False
self.state = 'open'
self.cfg = cfg
# Simulation handles
self.gym_handle = gym_handle
self.sim_handle = sim_handle
self.env_handles = env_handles
self.env_id = env_id
self.env_handle = self.env_handles[self.env_id]
self.viewer = viewer
# Sim params
self.sim_params = gymapi.SimParams()
self.sim_params = self.gym_handle.get_sim_params(self.sim_handle)
self.envs_per_row = envs_per_row
self.env_dim = env_dim
self.env_x_offset = 2. * self.env_dim * (self.env_id %
self.envs_per_row)
self.env_z_offset = 2. * self.env_dim * int(
self.env_id / self.envs_per_row)
# Actors
self.franka_handle = franka_handle
self.platform_handle = platform_handle
num_franka_bodies = self.gym_handle.get_actor_rigid_body_count(
self.env_handle, self.franka_handle)
num_platform_bodies = self.gym_handle.get_actor_rigid_body_count(
self.env_handle, self.platform_handle)
total_num_bodies = num_franka_bodies + num_platform_bodies
self.finger_indices = [
total_num_bodies * self.env_id + num_franka_bodies - 2,
total_num_bodies * self.env_id + num_franka_bodies - 1
] # [left, right]
self.hand_indices = range(
total_num_bodies * self.env_id,
total_num_bodies * self.env_id + num_franka_bodies)
self.platform_indices = [
total_num_bodies * self.env_id + num_franka_bodies + 1
]
self.left_finger_handle = self.gym_handle.get_actor_rigid_body_handle(
self.env_handle, self.franka_handle, self.finger_indices[-2])
self.right_finger_handle = self.gym_handle.get_actor_rigid_body_handle(
self.env_handle, self.franka_handle, self.finger_indices[-1])
self.running_saved_franka_state = []
# Object material and mesh values
self.obj_name = obj_name
self.object_cof = object_cof
self.particle_state_tensor = gymtorch.wrap_tensor(
self.gym_handle.acquire_particle_state_tensor(self.sim_handle))
self.previous_particle_state_tensor = None
self.state_tensor_length = 0
self.youngs = float(youngs)
self.density = float(density)
# Contacts and force control
self.contacts = np.array([])
self.particles_contacting_gripper = np.zeros(2)
self.FOS = 1 + np.log10(self.youngs) / 10.
self.initial_desired_force = 0.0
self.corrected_desired_force = 0.0
self.F_history, self.stress_history, self.F_on_nodes_history = [], [], []
# Low pass filtering of physics
self.lp_running_window_size = self.cfg['lp_filter']['running_window_size']
self.filtered_forces, self.filtered_stresses, self.filtered_f_on_nodes = [], [], []
self.f_moving_average, self.stress_moving_average = [], []
self.f_on_nodes_moving_average = []
self.f_errs = np.ones(10, dtype=np.float32)
# Gripper positions
self.gripper_positions_under_gravity = np.zeros(2)
self.squeeze_min_gripper_width = 0.0
# 16 vector directions for reorient and acceleration tests
self.directions = np.column_stack(
(directions[:, 1], directions[:, 2], directions[:, 0]))
self.direction = self.directions[0]
# Linear and angular acceleration testing
self.lin_acc_vel, self.ang_acc_vel, self.travel_speed = 0.0, 0.0, 0.0
# Franka Panda hand kinematics
self.grasp_transform = grasp_transform
self.franka_dof_states = None
self.hand_origin = hand_origin
self.mid_finger_origin = np.array([
self.hand_origin.p.x, self.hand_origin.p.y,
self.hand_origin.p.z + self.cfg['franka']['gripper_tip_z_offset'], 1
])
self.mid_finger_position_transformed = np.zeros(3)
self.left_finger_position_origin = np.array([
self.hand_origin.p.x, self.hand_origin.p.y,
self.hand_origin.p.z, 1
])
self.left_finger_position = np.array([
self.hand_origin.p.x, self.hand_origin.p.y + self.cfg['franka']['gripper_tip_y_offset'],
self.hand_origin.p.z + self.cfg['franka']['gripper_tip_z_offset'], 1
])
self.right_finger_position_origin = np.array([
self.hand_origin.p.x, self.hand_origin.p.y,
self.hand_origin.p.z, 1
])
self.right_finger_position = np.array([
self.hand_origin.p.x, self.hand_origin.p.y - self.cfg['franka']['gripper_tip_y_offset'],
self.hand_origin.p.z + self.cfg['franka']['gripper_tip_z_offset'], 1
])
self.mid_finger_position = np.array([
self.hand_origin.p.x, self.hand_origin.p.y,
self.hand_origin.p.z + self.cfg['franka']['gripper_tip_z_offset'], 1
])
self.left_normal = self.grasp_transform.transform_vector(
gymapi.Vec3(1.0, 0., 0.))
self.right_normal = self.grasp_transform.transform_vector(
gymapi.Vec3(-1.0, 0., 0.))
# Franka Panda hand control outputs
self.vel_des = np.zeros(self.cfg['franka']['num_joints'])
self.pos_des = np.zeros(self.cfg['franka']['num_joints'])
self.torque_des = np.zeros(self.cfg['franka']['num_joints'])
self.running_torque = [-0.1, -0.1]
# FSM: Close state
self.close_fails = 0
self.left_has_contacted = False
self.right_has_contacted = False
self.franka_positions_at_contact = np.zeros(self.cfg['franka']['num_joints'])
self.desired_closing_gripper_pos = [0.0, 0.0]
self.grippers_pre_squeeze = [-1, -1]
# FSM: Squeeze state
self.squeeze_counter = 0
self.squeeze_holding_counter = 0
self.squeeze_no_gravity_counter = 0
self.squeeze_no_gravity_max_force = 0
self.squeeze_no_gravity_force_increase_fails = 0
self.squeeze_no_gravity_contact_fails = 0
self.squeeze_no_gravity_failed_to_increase = False
self.squeeze_no_gravity_lost_contact = False
self.squeeze_lost_contact_counter = 0
self.squeeze_intensity = 0
self.squeezing_close_fails = 0
self.squeezing_no_grasp = 0
self.squeezed_until_force = False
self.num_dp = self.cfg['squeeze_no_gravity']['num_dp']
# FSM: Hang state
self.reached_hang = False
self.hang_stresses = []
self.hang_separations = []
# FSM: Pickup
self.inferred_rot_force = False
# FSM: Accelerations
self.reached_ang_acc_location = False
self.lin_acc_counter, self.ang_acc_counter, self.ang_acc_travel_counter = 0, 0, 0
self.reached_lin_acc_speed = False
self.reached_ang_acc_speed = False
# Success flags
self.pickup_success = False
self.timed_out = False
# Counters
self.full_counter = 0
self.inferred_rot_force_counter = 0
self.hang_counter = 0
self.reorient_counter = 0
self.open_counter = 0
self.close_soft_counter = 0
def init_metrics_and_features(self):
"""Initialize attributes to store metrics and features."""
# Tet structure
self.num_nodes = self.state_tensor_length
(tet_particles,
tet_stresses) = self.gym_handle.get_sim_tetrahedra(self.sim_handle)
num_envs = self.gym_handle.get_env_count(self.sim_handle)
num_tets_per_env = int(len(tet_stresses) / num_envs)
self.undeformed_mesh = np.zeros((self.num_nodes, 3))
# Pre contact
self.pre_contact_stresses = np.zeros((num_envs, num_tets_per_env))
self.pre_contact_se = 0.0
# Metrics at target squeeze force
self.stresses_at_force = np.zeros(num_tets_per_env)
self.se_at_force = 0.0
self.positions_at_force = np.zeros((self.num_nodes, 3))
self.gripper_force_at_force = 0.0
self.gripper_distance_at_force = 0.0
# Squeeze no gravity
self.squeeze_torque = np.ones(2) * -0.1
self.squeeze_stress = np.zeros(num_tets_per_env)
self.squeeze_stresses_window = [np.zeros(num_tets_per_env)] * 10
self.running_left_node_contacts, self.running_right_node_contacts = [], []
self.running_stresses, self.running_positions = [], []
self.running_gripper_positions, self.running_forces = [], []
self.running_forces_on_nodes = []
self.stacked_left_node_contacts, self.stacked_right_node_contacts = np.zeros(
(self.num_dp, self.num_nodes)), np.zeros((self.num_dp, self.num_nodes))
self.stacked_left_node_contacts, self.stacked_right_node_contacts = np.zeros(
(self.num_dp, self.num_nodes, 6)), np.zeros((self.num_dp, self.num_nodes, 6))
self.stacked_forces_on_nodes = np.zeros((self.num_dp, self.num_nodes))
self.running_l_gripper_contacts, self.running_r_gripper_contacts = [], []
self.stacked_left_gripper_contact_points = np.zeros((self.num_dp, self.num_nodes, 3))
self.stacked_right_gripper_contact_points = np.zeros((self.num_dp, self.num_nodes, 3))
self.stacked_forces = np.zeros(self.num_dp)
self.stacked_stresses, self.stacked_positions = np.zeros(
(self.num_dp, num_tets_per_env)), np.zeros(
(self.num_dp, self.num_nodes, 3))
self.stacked_gripper_positions = np.zeros((self.num_dp, 2))
# Metrics after pickup
self.stresses_under_gravity = np.zeros(num_tets_per_env)
self.se_under_gravity = 0.0
self.positions_under_gravity = np.zeros((self.num_nodes, 3))
self.gripper_force_under_gravity = 0.0
# Reorientation metrics
self.reorientation_meshes = np.zeros((4, self.num_nodes, 3))
self.reorientation_stresses = np.zeros((4, num_tets_per_env))
# Linear and angular accelerations at fail
self.lin_acc_fail_acc, self.ang_acc_fail_acc = 0, 0
# Geometry metrics
self.pure_distances = np.zeros(2)
self.perp_distances = np.zeros(2)
self.edge_distances = np.zeros(2)
self.num_gripper_contacts = np.zeros(2)
self.left_gripper_node_contacts = np.zeros(self.num_nodes)
self.right_gripper_node_contacts = np.zeros(self.num_nodes)
self.left_gripper_node_contacts_initial = np.zeros(self.num_nodes)
self.right_gripper_node_contacts_initial = np.zeros(self.num_nodes)
def get_force_based_torque(self, F_des, F_curr):
"""Torque-based control with target gripper force F_des."""
total_F_curr = self.f_moving_average[
-1] # Use the LP and averaged value instead of raw readings
if np.sum(F_curr) == 0.0 and np.all(
self.particles_contacting_gripper == 0):
total_F_curr = 0
total_F_err = np.sum(F_des) - total_F_curr
# Compute error values for state transitions
F_curr_mag = (np.abs(F_curr[0]) + np.abs(F_curr[1])) / 2.0
Kp = self.cfg['force_control']['Kp']
min_torque = self.cfg['force_control']['min_torque']
self.running_torque[0] -= min(total_F_err * Kp, 3 * Kp)
self.running_torque[1] -= min(total_F_err * Kp, 3 * Kp)
self.running_torque[0] = min(min_torque, self.running_torque[0])
self.running_torque[1] = min(min_torque, self.running_torque[1])
if DEBUG:
print(self.running_torque, total_F_curr, self.desired_force)
return self.running_torque, F_curr_mag, total_F_err
def get_grasp_F_curr(self, body_index, debug=False):
"""Get current forces acting on fingerpads, as sum of nodal forces."""
net_hor_force_left = 0.0
net_hor_force_right = 0.0
left_contacts = []
left_force_mags = []
left_barys = []
right_contacts = []
forces_on_nodes = np.zeros(self.state_tensor_length)
for contact in self.contacts:
curr_body_index = contact[4]
# If the rigid body (identified by body_index) is in contact
if curr_body_index in body_index:
curr_force_dir = contact[6].view(
(np.float32, len(
contact[6].dtype.names)))
curr_force_mag = contact[7]
normal_to_gripper = self.grasp_transform.transform_vector(
gymapi.Vec3(1., | |
# Copyright 2002-2011 Nick Mathewson. See LICENSE for licensing information.
"""mixminion.server.EventStats
Classes to gather time-based server statistics"""
__all__ = [ 'EventLog', 'NilEventLog' ]
import os
from threading import RLock
from time import time
from mixminion.Common import formatTime, LOG, previousMidnight, floorDiv, \
createPrivateDir, MixError, readPickled, tryUnlink, writePickled
# _EVENTS: a list of all recognized event types.
_EVENTS = [ 'ReceivedPacket',
'ReceivedConnection',
'AttemptedConnect', 'SuccessfulConnect', 'FailedConnect',
'AttemptedRelay', 'SuccessfulRelay',
'FailedRelay', 'UnretriableRelay',
'AttemptedDelivery', 'SuccessfulDelivery',
'FailedDelivery', 'UnretriableDelivery',
]
class NilEventLog:
"""Null implementation of EventLog interface: ignores all events and
logs nothing.
"""
def __init__(self):
pass
def save(self, now=None):
"""Flushes this eventlog to disk."""
pass
def rotate(self, now=None):
"""Move the pending events from this EventLog into a
summarized text listing, and start a new pool. Requires
that it's time to rotate.
"""
pass
def getNextRotation(self):
"""Return a time after which it's okay to rotate the log."""
return 0
def _log(self, event, arg=None):
"""Notes that an event has occurred.
event -- the type of event to note
arg -- an optional topic of the event.
"""
pass
def receivedPacket(self, arg=None):
"""Called whenever a packet is received via MMTP."""
self._log("ReceivedPacket", arg)
def receivedConnection(self, arg=None):
"""Called whenever we get an incoming MMTP connection."""
self._log("ReceivedConnection", arg)
def attemptedConnect(self, arg=None):
"""Called whenever we try to connect to an MMTP server."""
self._log("AttemptedConnect", arg)
def successfulConnect(self, arg=None):
"""Called whenever we successfully connect to an MMTP server."""
self._log("SuccessfulConnect", arg)
def failedConnect(self, arg=None):
"""Called whenever we fail to connect to an MMTP server."""
self._log("FailedConnect", arg)
def attemptedRelay(self, arg=None):
"""Called whenever we attempt to relay a packet via MMTP."""
self._log("AttemptedRelay", arg)
def successfulRelay(self, arg=None):
"""Called whenever packet delivery via MMTP succeeds"""
self._log("SuccessfulRelay", arg)
def failedRelay(self, arg=None):
"""Called whenever packet delivery via MMTP fails retriably"""
self._log("FailedRelay", arg)
def unretriableRelay(self, arg=None):
"""Called whenever packet delivery via MMTP fails unretriably"""
self._log("UnretriableRelay", arg)
def attemptedDelivery(self, arg=None):
"""Called whenever we attempt to deliver a message via an exit
module.
"""
self._log("AttemptedDelivery", arg)
def successfulDelivery(self, arg=None):
"""Called whenever we successfully deliver a message via an exit
module.
"""
self._log("SuccessfulDelivery", arg)
def failedDelivery(self, arg=None):
"""Called whenever an attempt to deliver a message via an exit
module fails retriably.
"""
self._log("FailedDelivery", arg)
def unretriableDelivery(self, arg=None):
"""Called whenever an attempt to deliver a message via an exit
module fails unretriably.
"""
self._log("UnretriableDelivery", arg)
BOILERPLATE = """\
# Mixminion server statistics
#
# NOTE: These statistics _do not_ necessarily cover the current interval
# of operation. To see pending statistics that have not yet been flushed
# to this file, run 'mixminion server-stats'.
"""
class EventLog(NilEventLog):
"""An EventLog records events, aggregates them according to some time
periods, and logs the totals to disk.
Currently we retain two log files: one holds an interval-by-interval
human-readable record of past intervals; the other holds a pickled
record of events in the current interval.
We take some pains to avoid flushing the statistics when too
little time has passed. We only rotate an aggregated total to disk
when:
- An interval has passed since the last rotation time
AND
- We have accumulated events for at least 75% of an interval's
worth of time.
The second requirement prevents the following unpleasant failure mode:
- We set the interval to '1 day'. At midnight on Monday,
we rotate. At 00:05, we go down. At 23:55 we come back
up. At midnight at Tuesday, we noticing that it's been one
day since the last rotation, and rotate again -- thus making
a permanent record that reflects 10 minutes worth of traffic,
potentially exposing more about individual users than we should.
"""
### Fields:
# count: a map from event name -> argument|None -> total events received.
# lastRotation: the time at which we last flushed the log to disk and
# reset the log.
# filename, historyFile: Names of the pickled and long-term event logs.
# rotateInterval: Interval after which to flush the current statistics
# to disk.
# _lock: a threading.RLock object that must be held when modifying this
# object.
# accumulatedTime: number of seconds since last rotation that we have
# been logging events.
# lastSave: last time we saved the file.
### Pickled format:
# Map from {"count","lastRotation","accumulatedTime"} to the values
# for those fields.
def __init__(self, filename, historyFile, interval):
"""Initializes an EventLog that caches events in 'filename', and
periodically writes to 'historyFile' every 'interval' seconds."""
NilEventLog.__init__(self)
if os.path.exists(filename):
self.__dict__.update(readPickled(filename))
assert self.count is not None
assert self.lastRotation is not None
assert self.accumulatedTime is not None
for e in _EVENTS:
if not self.count.has_key(e):
self.count[e] = {}
else:
self.count = {}
for e in _EVENTS:
self.count[e] = {}
self.lastRotation = time()
self.accumulatedTime = 0
self.filename = filename
self.historyFilename = historyFile
for fn in filename, historyFile:
parent = os.path.split(fn)[0]
createPrivateDir(parent)
self.rotateInterval = interval
self.lastSave = time()
self._setNextRotation()
self._lock = RLock()
self.save()
def save(self, now=None):
"""Write the statistics in this log to disk, rotating if necessary."""
try:
self._lock.acquire()
self._save(now)
finally:
self._lock.release()
def _save(self, now=None):
"""Implements 'save' method. For internal use. Must hold self._lock
to invoke."""
LOG.debug("Syncing statistics to disk")
if not now: now = time()
tmpfile = self.filename + "_tmp"
tryUnlink(tmpfile)
self.accumulatedTime += int(now-self.lastSave)
self.lastSave = now
writePickled(self.filename, { 'count' : self.count,
'lastRotation' : self.lastRotation,
'accumulatedTime' : self.accumulatedTime,
})
def _log(self, event, arg=None):
try:
self._lock.acquire()
try:
self.count[event][arg] += 1
except KeyError:
try:
self.count[event][arg] = 1
except KeyError:
raise KeyError("No such event: %r" % event)
finally:
self._lock.release()
def getNextRotation(self):
return self.nextRotation
def rotate(self,now=None):
if now is None: now = time()
if now < self.nextRotation:
raise MixError("Not ready to rotate event stats")
try:
self._lock.acquire()
self._rotate(now)
finally:
self._lock.release()
def _rotate(self, now=None):
"""Flush all events since the last rotation to the history file,
and clears the current event log."""
# Must hold lock
LOG.debug("Flushing statistics log")
if now is None: now = time()
starting = not os.path.exists(self.historyFilename)
f = open(self.historyFilename, 'a')
if starting:
f.write(BOILERPLATE)
self.dump(f, now)
f.close()
self.count = {}
for e in _EVENTS:
self.count[e] = {}
self.lastRotation = now
self._save(now)
self.accumulatedTime = 0
self._setNextRotation(now)
def dump(self, f, now=None):
"""Write the current data to a file handle 'f'."""
if now is None: now = time()
try:
self._lock.acquire()
startTime = self.lastRotation
endTime = now
print >>f, "========== From %s to %s:" % (formatTime(startTime,1),
formatTime(endTime,1))
for event in _EVENTS:
count = self.count[event]
if len(count) == 0:
print >>f, " %s: 0" % event
continue
elif len(count) == 1 and count.keys()[0] is None:
print >>f, " %s: %s" % (event, count[None])
continue
print >>f, " %s:" % event
total = 0
args = count.keys()
args.sort()
length = max([ len(str(arg)) for arg in args ])
length = max((length, 10))
fmt = " %"+str(length)+"s: %s"
for arg in args:
v = count[arg]
if arg is None: arg = "{Unknown}"
print >>f, fmt % (arg, v)
total += v
print >>f, fmt % ("Total", total)
finally:
self._lock.release()
def _setNextRotation(self, now=None):
"""Helper function: calculate the time when we next rotate the log."""
# ???? Lock to 24-hour cycle
# This is a little weird. We won't save *until*:
# - .75 * rotateInterval seconds are accumulated.
# AND - rotateInterval seconds have elapsed since the last
# rotation.
#
# IF the rotation interval is divisible by one hour, we also
# round to the hour, up to 5 minutes down and 55 up.
if not now: now = time()
accumulatedTime = self.accumulatedTime + (now - self.lastSave)
secToGo = max(0, self.rotateInterval * 0.75 - accumulatedTime)
self.nextRotation = max(self.lastRotation + self.rotateInterval,
now + secToGo)
if self.nextRotation < now:
self.nextRotation = now
if (self.rotateInterval % 3600) == 0:
mid = previousMidnight(self.nextRotation)
rest = self.nextRotation - mid
self.nextRotation = mid + 3600 * floorDiv(rest+55*60, 3600)
def configureLog(config):
    """Given a configuration file, set up the log. May replace the log global
       variable.

       If [Server]LogStats is enabled, 'log' becomes an EventLog that
       accumulates counts in a work file and rotates them into the
       configured stats file every StatsInterval seconds; otherwise 'log'
       becomes a no-op NilEventLog.
    """
    global log
    if config['Server']['LogStats']:
        LOG.info("Enabling statistics logging")
        statsfile = config.getStatsFile()
        if not os.path.exists(os.path.split(statsfile)[0]):
            # create parent if needed (mode 0700: owner-only access).
            os.makedirs(os.path.split(statsfile)[0], 0700)
        # In-progress counts accumulate in the work dir; EventLog appends
        # them to 'statsfile' on rotation.
        workfile = os.path.join(config.getWorkDir(), "stats.tmp")
        log = EventLog(
            workfile, statsfile, config['Server']['StatsInterval'].getSeconds())
        # Enable client-side event counting too (imported lazily to avoid a
        # circular import at module load).
        import mixminion.MMTPClient
        mixminion.MMTPClient.useEventStats()
        LOG.info("Statistics logging enabled")
    else:
        log = NilEventLog()
        LOG.info("Statistics logging disabled")
# Global | |
: {}".format(key,kwargs[key],rlist[key]))
st.log("\n")
for key in common_key_list:
if key in kwargs:
dict1[key] = kwargs[key]
del kwargs[key]
if no_common_key > 0 and ret_val1 is False:
st.error("DUT {} -> Match Not Found {}".format(dut,dict1))
return ret_val1
ret_val = "True"
#Converting all kwargs to list type to handle single or list of instances
for key in kwargs:
if type(kwargs[key]) is list:
kwargs[key] = list(kwargs[key])
else:
kwargs[key] = [kwargs[key]]
#convert kwargs into list of dictionary
input_dict_list =[]
for i in range(len(kwargs[kwargs.keys()[0]])):
temp_dict = {}
for key in kwargs.keys():
temp_dict[key] = kwargs[key][i]
input_dict_list.append(temp_dict)
for input_dict in input_dict_list:
entries = filter_and_select(output,None,match=input_dict)
if entries:
st.log("DUT {} -> Match Found {} ".format(dut,input_dict))
else:
st.error("DUT {} -> Match Not Found {}".format(dut,input_dict))
ret_val = False
return ret_val
def verify_bgp_l2vpn_evpn_vni(dut,**kwargs):
    ### NOT USED
    """
    Author: <NAME> (<EMAIL>)
    verify_bgp_l2vpn_evpn_vni(dut=dut1,vni="100",rd="11:11",type="L2",tenant_vrf="default",import_rt='20:20',export_rt='20:20',gw_macip="Enabled")
    verify_bgp_l2vpn_evpn_vni(dut=dut1,vni="200",rd="15:15",type="L2",tenant_vrf="default",import_rt='5:5',export_rt='6:6',gw_macip="Enabled")
    verify_bgp_l2vpn_evpn_vni(dut=dut1,vni=["100","200"],rd=["11:11","15:15"],type=["L2","L2"],tenant_vrf=["default","default"],import_rt=['20:20','5:5'],export_rt=['20:20','6:6'])
    To verify bgp l2vpn evpn vni
    :param dut:
    :param vni:
    :param type:
    :param tenant_vrf:
    :param rd:
    :param bum_flooding:
    :param all_vni_flag:
    :param no_l2vni:
    :param no_l3vni:
    :param gw_macip:
    :param import_rt:
    :param export_rt:
    :return: True when every requested field matches the output, else False
    """
    output = st.show(dut,"show bgp l2vpn evpn vni",type="vtysh")
    if len(output) == 0:
        st.error("Output is Empty")
        return False
    no_common_key = 0
    ret_val1 = False
    dict1 = {}
    # Keys that appear once in the command header rather than per-VNI row.
    common_key_list = ['gw_macip','all_vni_flag','bum_flooding','no_l2vni','no_l3vni']
    for key in kwargs:
        if key in common_key_list:
            no_common_key = no_common_key + 1
    if no_common_key > 0:
        rlist = output[0]
        count = 0
        for key in kwargs:
            # Check membership before indexing rlist so a per-row kwarg that
            # is absent from the header row cannot raise KeyError.
            if key in common_key_list and rlist[key] == kwargs[key]:
                count = count + 1
        if no_common_key == count:
            ret_val1 = True
            for key in kwargs:
                if key in common_key_list:
                    st.log("Match: Match key {} found => {} : {}".format(key,kwargs[key],rlist[key]))
        else:
            for key in kwargs:
                if key in common_key_list:
                    if rlist[key] == kwargs[key]:
                        st.log("Match: Match key {} found => {} : {}".format(key,kwargs[key],rlist[key]))
                    else:
                        st.log("No-Match: Match key {} NOT found => {} : {}".format(key,kwargs[key],rlist[key]))
    st.log("\n")
    # Remove already-verified header keys before the per-row matching below.
    for key in common_key_list:
        if key in kwargs:
            dict1[key] = kwargs[key]
            del kwargs[key]
    if no_common_key > 0 and ret_val1 is False:
        st.error("DUT {} -> Match Not Found {}".format(dut,dict1))
        return ret_val1
    if not kwargs:
        # Only header keys were requested and they all matched.  (Previously
        # this fell through and raised an IndexError on an empty kwargs.)
        return True
    ret_val = True  # was the string "True"; use a real boolean
    # Normalize every remaining value to a list so a single instance and a
    # list of instances are handled uniformly.
    for key in kwargs:
        if not isinstance(kwargs[key], list):
            kwargs[key] = [kwargs[key]]
    # Transpose {key: [v1, v2]} into [{key: v1}, {key: v2}].  list() makes
    # this work on Python 3, where dict.keys() is a non-indexable view.
    input_dict_list = []
    for i in range(len(kwargs[list(kwargs.keys())[0]])):
        temp_dict = {}
        for key in kwargs:
            temp_dict[key] = kwargs[key][i]
        input_dict_list.append(temp_dict)
    for input_dict in input_dict_list:
        entries = filter_and_select(output,None,match=input_dict)
        if entries:
            st.log("DUT {} -> Match Found {} ".format(dut,input_dict))
        else:
            st.error("DUT {} -> Match Not Found {}".format(dut,input_dict))
            ret_val = False
    return ret_val
def verify_bgp_l2vpn_evpn_rd(dut,**kwargs):
    """
    Author: <NAME> (<EMAIL>)
    verify_bgp_l2vpn_evpn_rd(dut=dut1,evpn_type_5_prefix="[5]:[0]:[24]:[192.168.127.12]",rd="13:1",rd_name="as2",status_code="*>",metric="0",next_hop="0.0.0.0",weight="32768",origin_code="i",displayed_prefixes="1")
    To verify bgp l2vpn evpn rd <rd-value>
    :param dut:
    :param evpn_type_2_prefix:
    :param evpn_type_3_prefix:
    :param evpn_type_4_prefix:
    :param evpn_type_5_prefix:
    :param rd: (mandatory)
    :param rd_name:
    :param status_code:
    :param metric:
    :param next_hop:
    :param origin_code:
    :param displayed_prefixes:
    :param total_prefixes:
    :return: True when every requested field matches the output, else False
    """
    if 'rd' not in kwargs:
        st.error("Mandetory arg rd is not present")
        return False
    output = st.show(dut,"show bgp l2vpn evpn rd {}".format(kwargs['rd']),type="vtysh")
    if len(output) == 0:
        st.error("Output is Empty")
        return False
    # Strip surrounding whitespace from every parsed field, mutating each
    # row directly (replaces the old O(n^2) output.index() lookups).
    for row in output:
        for key in row:
            row[key] = row[key].strip()
    no_common_key = 0
    ret_val1 = False
    dict1 = {}
    # Keys parsed once from the command header rather than from each route.
    common_key_list = ['rd_name','rd','displayed_prefixes','total_prefixes']
    for key in kwargs:
        if key in common_key_list:
            no_common_key = no_common_key + 1
    if no_common_key > 0:
        rlist = output[0]
        count = 0
        for key in kwargs:
            # Check membership before indexing rlist so a per-route kwarg
            # absent from the header row cannot raise KeyError.
            if key in common_key_list and rlist[key] == kwargs[key]:
                count = count + 1
        if no_common_key == count:
            ret_val1 = True
            for key in kwargs:
                if key in common_key_list:
                    st.log("Match: Match key {} found => {} : {}".format(key,kwargs[key],rlist[key]))
        else:
            for key in kwargs:
                if key in common_key_list:
                    if rlist[key] == kwargs[key]:
                        st.log("Match: Match key {} found => {} : {}".format(key,kwargs[key],rlist[key]))
                    else:
                        st.log("No-Match: Match key {} NOT found => {} : {}".format(key,kwargs[key],rlist[key]))
    st.log("\n")
    # Remove already-verified header keys before the per-route matching.
    for key in common_key_list:
        if key in kwargs:
            dict1[key] = kwargs[key]
            del kwargs[key]
    if no_common_key > 0 and ret_val1 is False:
        st.error("DUT {} -> Match Not Found {}".format(dut,dict1))
        return ret_val1
    if not kwargs:
        # Only header keys were requested and they all matched.  (Previously
        # this fell through and raised an IndexError on an empty kwargs.)
        return True
    ret_val = True  # was the string "True"; use a real boolean
    # Normalize every remaining value to a list so a single instance and a
    # list of instances are handled uniformly.
    for key in kwargs:
        if not isinstance(kwargs[key], list):
            kwargs[key] = [kwargs[key]]
    # Transpose {key: [v1, v2]} into [{key: v1}, {key: v2}].  list() makes
    # this work on Python 3, where dict.keys() is a non-indexable view.
    input_dict_list = []
    for i in range(len(kwargs[list(kwargs.keys())[0]])):
        temp_dict = {}
        for key in kwargs:
            temp_dict[key] = kwargs[key][i]
        input_dict_list.append(temp_dict)
    for input_dict in input_dict_list:
        entries = filter_and_select(output,None,match=input_dict)
        if entries:
            st.log("DUT {} -> Match Found {} ".format(dut,input_dict))
        else:
            st.error("DUT {} -> Match Not Found {}".format(dut,input_dict))
            ret_val = False
    return ret_val
def verify_bgp_l2vpn_evpn_route_type_prefix(dut,**kwargs):
    """
    Author: <NAME> (<EMAIL>)
    verify_bgp_l2vpn_evpn_route_type_prefix(dut=dut1,evpn_type_5_prefix="[5]:[0]:[24]:[192.168.127.12]",rd="13:1",rd_name="as2",status_code="*>",metric="0",next_hop="0.0.0.0",weight="32768",origin_code="i",displayed_prefixes="1")
    evpn.verify_bgp_l2vpn_evpn_route_type_prefix(dut=data.dut1,evpn_type_5_prefix="[5]:[0]:[24]:[192.168.127.12]",rd="13:2",status_code="*>",metric="0",next_hop="0.0.0.0",weight="0",path="20",origin_code="i",displayed_prefixes="4",no_of_paths="6")
    To verify bgp l2vpn evpn route type prefix
    :param dut:
    :param evpn_type_5_prefix:
    :param rd:
    :param path:
    :param status_code:
    :param weight:
    :param metric:
    :param next_hop:
    :param origin_code:
    :param displayed_prefixes:
    :param no_of_paths:
    :return: True when every requested field matches the output, else False
    """
    output = st.show(dut,"show bgp l2vpn evpn route type prefix",type="vtysh")
    if len(output) == 0:
        st.error("Output is Empty")
        return False
    # Strip surrounding whitespace from every parsed field, mutating each
    # row directly (replaces the old O(n^2) output.index() lookups).
    for row in output:
        for key in row:
            row[key] = row[key].strip()
    no_common_key = 0
    ret_val1 = False
    dict1 = {}
    # Keys parsed once from the command header rather than from each route.
    common_key_list = ['bgp_version','router_id','displayed_prefixes','no_of_paths']
    for key in kwargs:
        if key in common_key_list:
            no_common_key = no_common_key + 1
    if no_common_key > 0:
        rlist = output[0]
        count = 0
        for key in kwargs:
            # Check membership before indexing rlist so a per-route kwarg
            # absent from the header row cannot raise KeyError.
            if key in common_key_list and rlist[key] == kwargs[key]:
                count = count + 1
        if no_common_key == count:
            ret_val1 = True
            for key in kwargs:
                if key in common_key_list:
                    st.log("Match: Match key {} found => {} : {}".format(key,kwargs[key],rlist[key]))
        else:
            for key in kwargs:
                if key in common_key_list:
                    if rlist[key] == kwargs[key]:
                        st.log("Match: Match key {} found => {} : {}".format(key,kwargs[key],rlist[key]))
                    else:
                        st.log("No-Match: Match key {} NOT found => {} : {}".format(key,kwargs[key],rlist[key]))
    st.log("\n")
    # Remove already-verified header keys before the per-route matching.
    for key in common_key_list:
        if key in kwargs:
            dict1[key] = kwargs[key]
            del kwargs[key]
    if no_common_key > 0 and ret_val1 is False:
        st.error("DUT {} -> Match Not Found {}".format(dut,dict1))
        return ret_val1
    if not kwargs:
        # Only header keys were requested and they all matched.  (Previously
        # this fell through and raised an IndexError on an empty kwargs.)
        return True
    ret_val = True  # was the string "True"; use a real boolean
    # Normalize every remaining value to a list so a single instance and a
    # list of instances are handled uniformly.
    for key in kwargs:
        if not isinstance(kwargs[key], list):
            kwargs[key] = [kwargs[key]]
    # Transpose {key: [v1, v2]} into [{key: v1}, {key: v2}].  list() makes
    # this work on Python 3, where dict.keys() is a non-indexable view.
    input_dict_list = []
    for i in range(len(kwargs[list(kwargs.keys())[0]])):
        temp_dict = {}
        for key in kwargs:
            temp_dict[key] = kwargs[key][i]
        input_dict_list.append(temp_dict)
    for input_dict in input_dict_list:
        entries = filter_and_select(output,None,match=input_dict)
        if entries:
            st.log("DUT {} -> Match Found {} ".format(dut,input_dict))
        else:
            st.error("DUT {} -> Match Not Found {}".format(dut,input_dict))
            ret_val = False
    return ret_val
def create_overlay_intf(dut, vtep_name, ip_addr, config='yes', skip_error=False, cli_type=''):
    """
    purpose:
            This definition is used to create overlay interface
    Arguments:
    :param dut: device to be configured
    :type dut: string
    :param vtep_name: VTEP name to be created
    :type vtep_name: string
    :param ip_addr: ip address to be bound to overlay gateway
    :type ip_addr: string
    :param config: it takes value as 'yes' or 'no' to configure or remove overlay respectively
    :type config: string
    :param skip_error: when True, CLI errors are not treated as test failures
    :param cli_type: UI type to use ('click'/'klish'/'rest-put'/'rest-patch')
    :return: True/False for the REST flows; st.config() result for CLI flows
    usage:
            create_overlay_intf(dut1, "dut1VTEP", "1.1.1.1", cli_type='click')
            create_overlay_intf(dut1, "dut1VTEP", "1.1.1.1", config='no', cli_type='klish')
    Created by: Julius <<EMAIL>
    """
    cli_type = st.get_ui_type(dut,cli_type=cli_type)
    if config == 'yes':
        conf_str = ''
        action = 'add'
    else:
        conf_str = 'no'
        ip_addr = ''
        action = 'del'
    if cli_type == 'click':
        command = "config vxlan {} {} {}".format(action, vtep_name, ip_addr)
    elif cli_type == 'klish':
        command = []
        command.append('interface vxlan {}'.format(vtep_name))
        command.append('{} source-ip {}'.format(conf_str, ip_addr))
        command.append('exit')
    elif cli_type in ["rest-put", "rest-patch"]:
        rest_urls = st.get_datastore(dut, "rest_urls")
        if config == 'yes':
            url = rest_urls['config_vxlan_with_ip']
            payload = { "openconfig-interfaces:interface":
                          [ { "name": vtep_name,
                              "config": { "name": vtep_name, "type": "IF_NVE" },
                              "openconfig-vxlan:vxlan-if": { "config": { "source-vtep-ip": ip_addr } }
                            } ]
                      }
            ### PUT and PATCH doesn't work for this URI hence use POST
            ### PUT and PATCH URIs does config similar to klish clis
            if not config_rest(dut, http_method='post', rest_url=url, json_data=payload):
                st.banner('FAIL-OCYANG: Create Vxlan Interface with src vtep IP failed')
                return False
            # Bug fix: success previously fell through to st.debug(command)
            # with 'command' unbound, raising UnboundLocalError.
            return True
        else:
            url = rest_urls['delete_vxlan_ip'].format(vtep_name)
            if not delete_rest(dut, rest_url=url):
                st.banner('FAIL-OCYANG')
            url = rest_urls['delete_vxlan'].format(vtep_name)
            if not delete_rest(dut, rest_url=url):
                st.banner('FAIL-OCYANG')
                # Bug fix: was a bare 'return' (None); keep the result falsy
                # but explicit.
                return False
            # Bug fix: success previously fell through to st.debug(command)
            # with 'command' unbound, raising UnboundLocalError.
            return True
    else:
        st.error("Invalid CLI type - {}".format(cli_type))
        return False
    st.debug(command)
    return st.config(dut, command, type=cli_type, skip_error_check=skip_error)
def create_evpn_instance(dut, nvo_name, vtep_name, config='yes', skip_error=False, cli_type=''):
"""
purpose:
This definition is used to create EVPN instance
Arguments:
:param dut: device to be configured
:type | |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The new artman CLI with the following syntax.
artman [Options] generate|publish <artifact_name>
.. note::
Only local execution is supported at this moment. The CLI syntax is
beta, and might have changes in the future.
"""
from __future__ import absolute_import
from logging import DEBUG, INFO
import argparse
from distutils.dir_util import copy_tree
import io
import os
import pprint
import subprocess
import sys
from ruamel import yaml
from taskflow import engines
from artman.config import converter, loader
from artman.config.proto.config_pb2 import Artifact, Config
from artman.cli import support
from artman.pipelines import pipeline_factory
from artman.utils import config_util
from artman.utils.logger import logger, setup_logging
ARTMAN_DOCKER_IMAGE = 'googleapis/artman:0.4.16'
RUNNING_IN_ARTMAN_DOCKER_TOKEN = 'RUNNING_IN_ARTMAN_DOCKER'
def main(*args):
    """Main method of artman."""
    # No explicit arguments means we were invoked via the entry point;
    # fall back to the process command line.
    if not args:
        args = sys.argv[1:]
    flags = parse_args(*args)
    user_config = read_user_config(flags)
    _adjust_input_dir(flags.input_dir)
    pipeline_name, pipeline_kwargs = normalize_flags(flags, user_config)
    if not flags.local:
        support.check_docker_requirements(flags.image)
        # Note: artman currently won't work if input directory doesn't contain
        # shared configuration files (e.g. gapic/packaging/dependencies.yaml).
        # This will make artman less useful for non-Google APIs.
        # TODO(ethanbao): Fix that by checking the input directory and
        # pulling the shared configuration files if necessary.
        logger.info('Running artman command in a Docker instance.')
        _run_artman_in_docker(flags)
        return
    pipeline = pipeline_factory.make_pipeline(
        pipeline_name, False, **pipeline_kwargs)
    # Hardcoded to run pipeline in serial engine, though not necessarily.
    engine = engines.load(
        pipeline.flow, engine='serial', store=pipeline.kwargs)
    engine.run()
    _change_owner(flags, pipeline_name, pipeline_kwargs)
def _adjust_input_dir(input_dir):
    """Adjust the input directory to use versioned common config and protos.

    The codegen is coupled to shared configuration yaml under
    {googleapis repo}/gapic/[core,lang,packaging], so a breaking change to
    those shared files can fail library generation even though the user's
    API protos did not change.  Similarly, some common protos are needed
    during protoc compilation but are not always provided by users.  When
    running inside the Artman Docker container, copy the versioned copies
    from /googleapis into the input directory (protos only when missing).

    TODO(ethanbao): Remove the config copy once
    https://github.com/googleapis/toolkit/issues/1450 is fixed.
    """
    if not os.getenv(RUNNING_IN_ARTMAN_DOCKER_TOKEN):
        # Only adjust when running inside the Artman Docker container.
        return
    config_subdirs = ('gapic/core', 'gapic/lang', 'gapic/packaging')
    proto_subdirs = (
        'google/api',
        'google/iam/v1',
        'google/longrunning',
        'google/rpc',
        'google/type',
    )
    # /googleapis is the root of the versioned googleapis repo inside the
    # Artman Docker image.
    for subdir in config_subdirs:
        copy_tree(os.path.join('/googleapis', subdir),
                  os.path.join(input_dir, subdir))
    for subdir in proto_subdirs:
        if not os.path.exists(os.path.join(input_dir, subdir)):
            copy_tree(os.path.join('/googleapis', subdir),
                      os.path.join(input_dir, subdir))
def parse_args(*args):
    """Parse the artman command line into an argparse.Namespace.

    Defines the global options plus the `generate` and `publish`
    sub-commands.

    Args:
        *args (str): The command-line arguments, typically sys.argv[1:].

    Returns:
        argparse.Namespace: The parsed flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        type=str,
        default='artman.yaml',
        help='[Optional] Specify path to artman config yaml, which can be '
             'either an absolute path, or a path relative to the input '
             'directory (specified by `--input-dir` flag). Default to '
             '`artman.yaml`', )
    parser.add_argument(
        '--output-dir',
        type=str,
        default='./artman-genfiles',
        help='[Optional] Directory to store output generated by artman. '
             'Default to `./artman-genfiles`', )
    parser.add_argument(
        '--input-dir',
        type=str,
        default='',
        help='[Optional] Directory with all input that is needed by artman, '
             'which include but not limited to API protos, service config '
             'yaml, and GAPIC config yaml. Default to the settings from user '
             'configuration.',
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_const',
        const=10,
        default=None,
        dest='verbosity',
        help='Show verbose / debug output.', )
    parser.add_argument(
        '--user-config',
        default='~/.artman/config.yaml',
        help='[Optional] User configuration file to stores credentials like '
             'GitHub credentials. Default to `~/.artman/config.yaml`', )
    parser.add_argument(
        '--local',
        dest='local',
        action='store_true',
        help='[Optional] If specified, running the artman on the local host '
             'machine instead of artman docker instance that have all binaries '
             'installed. Note: one will have to make sure all binaries get '
             'installed on the local machine with this flag, a full list can '
             'be found at '
             'https://github.com/googleapis/artman/blob/master/Dockerfile', )
    parser.set_defaults(local=False)
    # Bug fix: the original statement ended with a stray trailing comma
    # after the closing parenthesis, turning it into a discarded
    # one-element tuple expression.
    parser.add_argument(
        '--image',
        default=ARTMAN_DOCKER_IMAGE,
        help=('[Optional] Specify docker image used by artman when running in '
              'a Docker instance. Default to `%s`' % ARTMAN_DOCKER_IMAGE))
    # Add sub-commands.
    subparsers = parser.add_subparsers(
        dest='subcommand', help='Support [generate|publish] sub-commands')
    # `generate` sub-command.
    parser_generate = subparsers.add_parser(
        'generate', help='Generate artifact')
    parser_generate.add_argument(
        'artifact_name',
        type=str,
        help='[Required] Name of the artifact for artman to generate. Must '
             'match an artifact in the artman config yaml.')
    # `publish` sub-command.
    parser_publish = subparsers.add_parser('publish', help='Publish artifact')
    parser_publish.add_argument(
        'artifact_name',
        type=str,
        help='[Required] Name of the artifact for artman to generate. Must '
             'match an artifact in the artman config yaml.')
    parser_publish.add_argument(
        '--target',
        type=str,
        default=None,
        required=True,
        help='[Required] Specify where the generated artifact should be '
             'published to. It is defined as publishing targets in artman '
             'config at artifact level.', )
    parser_publish.add_argument(
        '--github-username',
        default=None,
        help='[Optional] The GitHub username. Must be set if publishing the '
             'artifact to github, but can come from the user config file.', )
    parser_publish.add_argument(
        '--github-token',
        default=None,
        help='[Optional] The GitHub personal access token. Must be set if '
             'publishing the artifact to github, but can come from the user '
             'config file.', )
    parser_publish.add_argument(
        '--dry-run',
        dest='dry_run',
        action='store_true',
        help='[Optional] When specified, artman will skip the remote '
             'publishing step.', )
    parser_publish.set_defaults(dry_run=False)
    return parser.parse_args(args=args)
def read_user_config(flags):
    """Read the user config from disk and return it.

    Args:
        flags (argparse.Namespace): The flags from sys.argv.

    Returns:
        dict: The user config.  Exits the process with status 64 when no
        configuration can be found.
    """
    config_path = os.path.realpath(os.path.expanduser(flags.user_config))
    user_config = {}
    if os.path.isfile(config_path):
        with io.open(config_path) as ucf:
            user_config = yaml.load(ucf.read(), Loader=yaml.Loader) or {}
    if user_config:
        return user_config
    # Sanity check failed: there is no configuration.  Point the user at the
    # bootstrap command and abort.
    setup_logging(INFO)
    logger.critical('No user configuration found.')
    logger.warn('This is probably your first time running Artman.')
    logger.warn('Run `configure-artman` to get yourself set up.')
    sys.exit(64)
def normalize_flags(flags, user_config):
"""Combine the argparse flags and user configuration together.
Args:
flags (argparse.Namespace): The flags parsed from sys.argv
user_config (dict): The user configuration taken from
~/.artman/config.yaml.
Returns:
tuple (str, dict): 2-tuple containing:
- pipeline name
- pipeline arguments
"""
if flags.input_dir:
flags.input_dir = os.path.abspath(flags.input_dir)
flags.output_dir = os.path.abspath(flags.output_dir)
flags.config = os.path.abspath(flags.config)
pipeline_args = {}
# Determine logging verbosity and then set up logging.
verbosity = support.resolve('verbosity', user_config, flags, default=INFO)
setup_logging(verbosity)
# Save local paths, if applicable.
# This allows the user to override the path to api-client-staging or
# toolkit on his or her machine.
pipeline_args['local_paths'] = support.parse_local_paths(
user_config, flags.input_dir)
# Save the input directory back to flags if it was not explicitly set.
if not flags.input_dir:
flags.input_dir = pipeline_args['local_paths']['googleapis']
artman_config_path = flags.config
if not os.path.isfile(artman_config_path):
logger.error(
'Artman config file `%s` doesn\'t exist.' % artman_config_path)
sys.exit(96)
try:
artifact_config = loader.load_artifact_config(
artman_config_path, flags.artifact_name, flags.input_dir)
except ValueError as ve:
logger.error('Artifact config loading failed with `%s`' % ve)
sys.exit(96)
# If we were given just an API or BATCH, then expand it into the --config
# syntax.
shared_config_name = 'common.yaml'
if artifact_config.language in (Artifact.RUBY, Artifact.NODEJS,):
shared_config_name = 'doc.yaml'
legacy_config_dict = converter.convert_to_legacy_config_dict(
artifact_config, flags.input_dir, flags.output_dir)
logger.debug('Below is the legacy config after conversion:\n%s' %
pprint.pformat(legacy_config_dict))
tmp_legacy_config_yaml = '%s.tmp' % artman_config_path
with io.open(tmp_legacy_config_yaml, 'w') as outfile:
yaml.dump(legacy_config_dict, outfile, default_flow_style=False)
googleapis = os.path.realpath(
os.path.expanduser(
pipeline_args['local_paths']['googleapis'], ))
config = ','.join([
'{artman_config_path}',
'{googleapis}/gapic/lang/{shared_config_name}',
]).format(
artman_config_path=tmp_legacy_config_yaml,
googleapis=googleapis,
shared_config_name=shared_config_name,
)
language = Artifact.Language.Name(
artifact_config.language).lower()
# Set the pipeline as well as package_type and packaging
artifact_type = artifact_config.type
if artifact_type in (Artifact.GAPIC, Artifact.GAPIC_ONLY):
pipeline_name = 'GapicClientPipeline'
pipeline_args['language'] = language
elif artifact_type in (Artifact.GRPC, Artifact.GRPC_COMMON):
pipeline_name = 'GrpcClientPipeline'
pipeline_args['language'] = language
elif artifact_type == Artifact.GAPIC_CONFIG:
pipeline_name = 'GapicConfigPipeline'
else:
raise ValueError('Unrecognized artifact.')
# Parse out the full configuration.
# Note: the var replacement is still needed because they are still being
# used in some shared/common config yamls.
config_sections = ['common']
for config_spec in config.split(','):
config_args = config_util.load_config_spec(
config_spec=config_spec,
config_sections=config_sections,
repl_vars={
k.upper(): v
for k, v in pipeline_args['local_paths'].items()
},
language=language, )
pipeline_args.update(config_args)
# Setup publishing related config if needed.
if flags.subcommand | |
"warmness": 1.5,
"flunkers": -1.6,
"kindnesses": 2.3,
"tout": -0.5,
"disappointment": -2.3,
"dwellers": -0.3,
"sunnier": 2.3,
"cornered": -1.1,
"despaired": -2.7,
"}:": -2.1,
"accusation": -1.0,
"grants": 0.9,
"luckless": -1.3,
"freebooter": -1.7,
"cancel": -1.0,
"disliking": -1.3,
"intimidators": -1.6,
"capable": 1.6,
"intimidatory": -1.1,
"joyfuller": 2.4,
"killjoys": -1.7,
"adorations": 2.2,
"vitalization": 1.6,
"devilish": -2.1,
"optimality": 1.9,
"freethinking": 1.1,
"isolator": -0.4,
"apologised": 0.4,
"diviner": 0.3,
"divines": 0.8,
"invigorate": 1.9,
"inadequacies": -1.7,
"sobbed": -1.9,
"divined": 0.8,
"apologises": 0.8,
"promising": 1.7,
"cancelled": -1.0,
"cock": -0.6,
"huckster": -0.9,
"rotgl": 2.9,
"blamelessly": 0.9,
"devotees": 0.5,
"sincere": 1.7,
"dynamos": 0.3,
"strain": -0.2,
"sillily": -0.1,
"dangerously": -2.0,
"obnoxious": -2.0,
"abhors": -2.9,
"pissers": -1.4,
"funnelled": -0.1,
"harsh": -1.9,
"pay": -0.4,
"nicety": 1.2,
"heartbreak": -2.7,
"arguments": -1.7,
"struggling": -1.8,
"exhaust": -1.2,
"inadequatenesses": -1.6,
"bittered": -1.8,
"repressurized": 0.1,
"sullen": -1.7,
"killock": -0.3,
"repressurizes": 0.1,
"bitterer": -1.9,
"dumpers": -0.8,
"amazes": 2.2,
"disregards": -1.4,
"weirdies": -1.0,
"solve": 0.8,
"easing": 1.0,
"amazed": 2.2,
"diffident": -1.0,
"fume": -1.2,
"pissing": -1.7,
"shying": -0.9,
"gratins": 0.2,
"clueless": -1.5,
"cancer": -3.4,
"grating": -0.4,
"dullards": -1.8,
"tenses": -0.9,
"stinking": -2.4,
"defectively": -2.1,
"inhibition": -2,
"disadvantageous": -1.8,
"disappointed": -2.1,
"grim": -2.7,
"grin": 2.1,
"distrustful": -2.1,
"possessive": -0.9,
"gossipy": -1.3,
"terrorize": -3.3,
"gossips": -1.3,
"gratifyingly": 2.0,
"laughing": 2.2,
"hallelujah": 3.0,
"passionate": 2.4,
"amortizing": 0.8,
"niceties": 1.5,
"hysterics": -1.8,
"obsessions": -0.9,
"smiles": 2.1,
"short-sightedness": -1.1,
"absolve": 1.2,
"ecstatics": 2.9,
"smarting": -0.7,
"gross": -2.1,
"honestest": 3.0,
"skepticisms": -1.2,
"honorifics": 1.7,
"critical": -1.3,
"mwah": 2.5,
"apocalyptic": -3.4,
"securement": 1.1,
":'-(": -2.4,
":'-)": 2.7,
"broken": -2.1,
"deviling": -2.2,
"sillimanites": 0.2,
"strangled": -2.5,
"giggle": 1.8,
"trivialization": -0.9,
"contagion": -2.0,
"smear": -1.5,
"violence": -3.1,
"avoidances": -1.1,
"giggly": 1.0,
"blessed": 2.9,
"blesses": 2.6,
"blesser": 2.6,
"pleasantry": 2.0,
"sadder": -2.4,
"|o:": -0.9,
"distracted": -1.4,
"totalitarian": -2.1,
"madder": -1.2,
"decisive": 0.9,
"hysterical": -0.1,
"obsessional": -1.5,
"pretends": -0.4,
"hesitance": -0.9,
"unsettled": -1.3,
"stressors": -2.1,
"strikes": -1.5,
"sophisticated": 2.6,
"shakers": 0.3,
"hesitancy": -0.9,
"harmfulness": -2.6,
"overreacts": -2.2,
"freers": 1.0,
"crazinesses": -1.0,
"eagers": 1.6,
"grouch": -2.2,
"deadlock": -1.4,
"powerless": -2.2,
"affection": 2.4,
"pessimisms": -2.0,
"leaked": -1.3,
"resolving": 1.6,
"censor": -2.0,
"ignorami": -1.9,
"casualty": -2.4,
"ignoramuses": -2.3,
"strikers": -0.6,
"pensive": 0.3,
"goddam": -2.5,
"short-sighted": -1.2,
"effin": -2.3,
"winnowers": -0.2,
"tragics": -2.2,
"victimize": -2.5,
"lousiest": -2.6,
"obsessed": -0.7,
"lowlanders": -0.3,
"rigidify": -0.3,
"pukes": -1.9,
"chucklers": 1.2,
"important": 0.8,
"puked": -1.8,
"awkwardly": -1.3,
"pitifully": -1.2,
"assets": 0.7,
"inhibited": -0.4,
"alright": 1.0,
"funnyman": 1.4,
"burdening": -1.4,
"liar": -2.3,
"forget": -0.9,
"vile": -3.1,
"forbidden": -1.8,
"bravest": 2.3,
"postponing": -0.5,
"forbidder": -1.6,
"ignorantly": -1.6,
"reluctant": -1.0,
"surprisals": 0.7,
"shattered": -2.1,
"lowlihead": -0.3,
"tranquillity": 1.8,
"determinableness": 0.2,
"enjoying": 2.4,
"distorting": -1.1,
"carefree": 1.7,
"elated": 3.2,
"trustful": 2.1,
"benevolent": 2.7,
"sluttishnesses": -2.0,
"mistaking": -1.1,
"enrapture": 3.0,
"vigors": 1.0,
"defeatist": -1.7,
"appreciating": 1.9,
"defence": 0.4,
"sweetly": 2.1,
"skeptic": -0.9,
"harmonizes": 1.5,
"harmonizer": 1.6,
"worrisome": -1.7,
"shakedowns": -1.4,
"harmonized": 1.6,
"outgoing": 1.2,
"solemnization": 0.7,
"disastrous": -2.9,
"gga": 1.7,
"affectionately": 2.2,
"peacekeepers": 1.6,
"mourning": -1.9,
"uglification": -2.2,
"antagonizing": -2.7,
"vigours": 0.4,
"falsified": -1.6,
"fwb": 2.5,
"misery": -2.7,
"pleasantnesses": 2.3,
"misers": -1.5,
"whimsical": 0.3,
"cowardly": -1.6,
"emptied": -0.7,
"unapproved": -1.4,
"empties": -0.7,
"emptier": -0.7,
"arrogance": -2.4,
"deprival": -2.1,
"libertine": -0.9,
"puke": -2.4,
"impressionable": 0.2,
"diamond": 1.4,
"tenderize": 0.1,
"handsomely": 1.9,
"brisk": 0.6,
"woeful": -1.9,
"apprehensions": -0.9,
"magnific": 2.3,
"reeker": -1.7,
"dazes": -0.3,
"sucky": -1.9,
"maniac": -2.1,
"sucks": -1.5,
"discomforted": -1.6,
"sarcasms": -0.9,
"dazed": -0.7,
"abandon": -1.9,
"stubborn": -1.7,
"comedones": -0.8,
"gravels": -0.5,
"bravely": 2.3,
"virtuous": 2.4,
"gravely": -1.5,
"share": 1.2,
"collision": -1.5,
"gracefuller": 2.2,
"tolerant": 1.1,
"unappreciated": -1.7,
"fatiguingly": -1.5,
"wisely": 1.8,
"drained": -1.5,
"lousy": -2.5,
"needy": -1.4,
"comfort": 1.5,
"0;^)": 1.6,
"profiting": 1.6,
"amusingness": 1.8,
"charms": 1.9,
"divinities": 1.8,
"questioning": -0.4,
"punishabilities": -1.7,
"champac": -0.2,
"bitchiest": -3.0,
"dumbhead": -2.6,
"gravelly": -0.9,
"toughening": 0.9,
"gracefullest": 2.8,
"spiteful": -1.9,
"pleasure": 2.7,
"playing": 0.8,
"idealisms": 0.8,
"heartfelt": 2.5,
"invigoratingly": 2.0,
"importancies": 0.4,
"defencemen": 0.6,
"suffer": -2.5,
"energetics": 0.3,
"arrogant": -2.2,
"destructively": -2.4,
"messed": -1.4,
"comedic": 1.7,
"thrilling": 2.1,
"energies": 0.9,
"resentments": -1.9,
"impressiveness": 1.7,
"worshiping": 1.0,
"triumphantly": 2.3,
"hurting": -1.7,
"good": 1.9,
"egotistically": -1.8,
"boldfacing": 0.1,
"strongmen": 0.5,
"<3": 1.9,
"smothering": -1.4,
"invigoration": 1.5,
"prejudices": -1.8,
"wickeder": -2.2,
"promisors": 0.4,
"prejudiced": -1.9,
"complain": -1.5,
"easily": 1.4,
"dominances": -0.1,
"satisfactoriness": 1.5,
"heroines": 1.8,
"honoraries": 1.5,
"harm": -2.5,
"dumbbells": -0.2,
"energization": 1.6,
"energy": 1.1,
"hard": -0.4,
"winnow": -0.3,
"champing": 0.7,
"engaging": 1.4,
"suspecting": -0.7,
"creationists": 0.5,
"gloominess": -1.8,
"worshipers": 0.9,
"lovers": 2.4,
"creditor": -0.1,
"childish": -1.2,
"discouraging": -1.9,
"numbingly": -1.3,
"stinkier": -1.5,
"popularizers": 1.0,
"bully": -2.2,
"pathetic": -2.7,
"commits": 0.1,
"motherfucker": -3.6,
"popularization": 1.3,
"pleasant": 2.3,
"difficulty": -1.4,
"mmk": 0.6,
"backed": 0.1,
"laughable": 0.2,
"devilishly": -1.6,
"profitabilities": 1.1,
"benefits": 1.6,
"trickly": -0.3,
"n00b": -1.6,
"ruination": -2.7,
"dissatisfy": -2.2,
"excitonic": 0.2,
"stabs": -1.9,
"partiers": 0.7,
"mistakenly": -1.2,
"disregarded": -1.6,
"humourous": 2.0,
"fysa": 0.4,
"inhibits": -0.9,
"neglects": -2.2,
"hatefulness": -3.6,
"bullied": -3.1,
"faultlessly": 2.0,
"kinder": 2.2,
"killie": -0.1,
"revive": 1.4,
"182": -2.9,
"teasers": -0.7,
"187": -3.1,
"pains": -1.8,
"favorably": 1.6,
"beneficences": 1.5,
"excels": 2.5,
"favorable": 2.1,
"vigorousness": 0.4,
"compliments": 1.7,
"resentencing": 0.2,
"spirited": 1.3,
"adverseness": -0.6,
"tensionless": 0.6,
"hooligans": -1.1,
"traumatise": -2.8,
"solemnizing": -0.6,
"((-:": 2.1,
"indecisivenesses": -0.9,
"sluttishness": -2.5,
"traumatism": -2.4,
"maniacally": -1.7,
"uninvolving": -2.0,
"traumatically": -2.8,
"disgustful": -2.6,
"amorality": -1.5,
"honors": 2.3,
"thrillingly": 2.0,
"serene": 2.0,
"easygoing": 1.3,
"fumeless": 0.3,
"dumpiness": -1.2,
"popularising": 1.2,
"charm": 1.7,
"significant": 0.8,
"engagements": 0.6,
"giggling": 1.5,
"squelched": -1.0,
"kk": 1.5,
"bastardizing": -2.3,
"loners": -0.9,
"charitably": 1.4,
"faithless": -1.0,
"charitable": 1.7,
"dodging": -0.4,
"rebels": -0.8,
"disturbances": -1.4,
"apathies": -0.6,
"hugging": 1.8,
"wasted": -2.2,
"acceptability": 1.1,
"carefully": 0.5,
"fine": 0.8,
"devilries": -1.6,
"nervous": -1.1,
"ruin": -2.8,
"rotten": -2.3,
"unhappy": -1.8,
"freedoms": 1.2,
"spitefully": -2.3,
"amorino": 1.2,
"petrifaction": -1.9,
"worships": 1.4,
"devastate": -3.1,
"express": 3.1,
"grinners": 1.6,
"bitterish": -1.6,
"shylock": -2.1,
"destructiveness": -2.4,
"resolve": 1.6,
"kissable": 2.0,
"contagious": -1.4,
"jollying": 2.3,
"safeguarding": 1.1,
"beautified": 2.1,
"cynical": -1.6,
"devils": -2.7,
"weirdy": -0.9,
"amorphously": 0.1,
"beautifier": 1.7,
"beautifies": 1.8,
"lowish": -0.9,
"brilliants": 1.9,
"petrify": -1.7,
"feared": -2.2,
"interestedly": 1.5,
"pitifuller": -1.8,
"positivity": 2.3,
"please": 1.3,
"weirds": -0.6,
"beautification": 1.9,
"easeful": 1.5,
"distraught": -2.6,
"forbiddingly": -1.9,
"vindicated": 1.8,
"egotist": -2.3,
"pisser": -2.0,
"pisses": -1.4,
"defeaters": -0.9,
"devilments": -1.1,
"trustingly": 1.6,
"vindicates": 1.6,
"inspirators": 1.2,
"egotism": -1.4,
"pissed": -3.2,
"glorification": 2.0,
"idealless": -1.7,
"carelessness": -1.4,
"alol": 2.8,
"spiritless": -1.3,
"weirdo": -1.8,
"faggots": -3.2,
"humoured": 1.1,
"smother": -1.8,
"panicking": -1.9,
"eviction": -2.0,
"unsuccessful": -1.5,
"openness": 1.4,
"cheerfuller": 1.9,
"create": 1.1,
"appall": -2.4,
"laoj": 1.3,
"faithfully": 1.8,
"splendidness": 2.3,
"freelancing": 0.4,
"honorary": 1.4,
"cruelty": -2.9,
"boreens": 0.2,
"hesitancies": -1.0,
"weakeners": -1.3,
"gag": -1.4,
"attractors": 1.2,
"misunderstands": -1.3,
"solid": 0.6,
"fud": -1.1,
"teaspoonful": 0.2,
"sobs": -2.5,
"fun": 2.3,
"wiselier": 0.9,
"joyousness": 2.8,
"heroically": 2.4,
"violently": -2.8,
"(8": 2.6,
"stubbornly": -1.4,
"perverseness": -2.1,
"solemnify": 0.3,
"profiteered": -0.5,
"disillusioning": -1.3,
"tranquilness": 1.5,
"perpetrator": -2.2,
"tenser": -1.5,
"engrossed": 0.6,
"benign": 1.3,
"freebasing": -0.4,
"lenient": 1.1,
"brutalised": -2.9,
"inhibitive": -1.4,
"absolves": 1.3,
"sillimanite": 0.1,
"praising": 2.5,
")-':": -2.1,
"absolved": 1.5,
"brutalises": -3.2,
"timid": -1.0,
"whores": -3.0,
"smothers": -1.9,
"tensed": -1.0,
"trustiest": 2.2,
"smothery": -1.1,
"whored": -2.8,
"landmark": 0.3,
"trivialising": -1.4,
"absolving": 1.6,
"destructions": -2.3,
"ludicrous": -1.5,
"adventuress": 0.8,
"distracting": -1.2,
"irritable": -2.1,
"miserliness": -2.6,
"cut": -1.1,
"pleasuring": 2.8,
"hated": -3.2,
"excites": 2.1,
"exciter": 1.9,
"passionless": -1.9,
"livelily": 1.8,
"scarers": -1.3,
"tremulous": -1.0,
"excited": 1.4,
"hates": -1.9,
"hater": -1.8,
"ridiculously": -1.4,
"adversaries": -1.0,
"jerked": -0.8,
"matters": 0.1,
"wisenheimer": -1.0,
"loyalism": 1.0,
"darling": 2.8,
"gossiper": -1.1,
"neatest": 1.7,
"loyalist": 1.5,
"foolish": -1.1,
"grossest": -2.1,
"triumphalists": 0.9,
"humorous": 1.6,
"magnificent": 2.9,
"strongest": 1.9,
"insincere": -1.8,
"triumphing": 2.3,
"positivenesses": 2.2,
"jaded": -1.6,
"killifishes": -0.1,
"depressions": -2.2,
"peculiarly": -0.4,
"virtues": 1.5,
"optimize": 2.2,
"blameworthiness": -1.6,
"idealess": -1.9,
"seriousness": -0.2,
"gratings": -0.8,
"freakout": -1.8,
"heartbreakers": -2.1,
"funniness": 1.8,
"harassed": -2.5,
"abusive": -3.2,
"comedienne": 0.6,
"harasses": -2.5,
"harasser": -2.4,
"trustingness": 1.6,
"aug-00": 0.3,
"calming": 1.7,
"bores": -1.3,
"borer": -0.4,
"infected": -2.2,
"bored": -1.1,
"grimier": -1.6,
"weaker": -1.9,
"invite": 0.6,
"partyer": 1.2,
"||-:": -2.3,
"hoped": 1.6,
"generosities": 2.6,
"hopes": 1.8,
"lowlight": -2.0,
"freemasonries": 0.7,
"laziest": -2.7,
"rejection": -2.5,
"exploration": 0.9,
"aversion": -1.9,
"repulse": -2.8,
"devastating": -3.3,
"lowrider": -0.2,
"deprive": -2.1,
"nbif": -0.5,
"lya": 3.3,
"lyb": 3.0,
"lyl": 3.1,
"criticizers": -1.6,
| |
"nsf_plan_good": {
"description": "Is the plan good? Only used if agency is nsf",
"required": False,
"type": "list",
},
"nsf_pot_to_advance_knowledge": {
"description": "Answer to the question how the work will advance"
"knowledge. Only used if agency is nsf",
"required": False,
"type": "list",
},
"nsf_pot_to_benefit_society": {
"description": "Answer to the question how the work has the potential"
"to benefit society. Only used if agency is nsf",
"required": False,
"type": "list",
},
"requester": {
"description": "Name of the program officer who requested the review",
"required": True,
"type": "string",
},
"reviewer": {
"description": "short name of the reviewer. Will be used in the "
"filename of the resulting text file",
"required": True,
"type": "string",
},
"status": {
"description": "the status of the review",
"type": "string",
"eallowed": REVIEW_STATI,
},
"summary": {
"description": "Summary statement",
"required": True,
"type": "string",
},
"title": {
"description": "The title of the proposal",
"required": True,
"type": "string",
},
"year": {
"description": "The year the review was submitted",
"required": True,
"type": "integer",
},
},
"proposals": {
"_description": {
"description": "This collection represents proposals that have "
"been submitted by the group."
},
"_id": {
"description": "short representation, such as this-is-my-name",
"required": True,
"type": ("string", "integer", "float"),
},
"amount": {
"description": "value of award",
"required": True,
"type": ("integer", "float"),
},
"authors": {
"description": "other investigator names",
"required": False,
"anyof_type": ["list", "string"],
},
"begin_date": {
"description": "start date of the proposed grant in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"]
},
"begin_day": {
"description": "start day of the proposed grant",
"required": False,
"type": "integer",
},
"begin_month": {
"description": "start month of the proposed grant",
"required": False,
"anyof_type": ["string", "integer"]
},
"begin_year": {
"description": "start year of the proposed grant",
"required": False,
"type": "integer",
},
"call_for_proposals": {
"description": "",
"required": False,
"type": "string",
},
"cpp_info": {
"description": "extra information needed for building current and "
"pending form ",
"required": False,
"schema": {
"cppflag": {"required": False, "type": "boolean"},
"other_agencies_submitted": {"required": False,
"anyof_type": ["string", "boolean"]},
"institution": {"required": False, "type": "string",
"description": "place where the proposed grant will be located"},
"person_months_academic": {"required": False,
"anyof_type": ["float", "integer"]},
"person_months_summer": {"required": False,
"anyof_type": ["float", "integer"]},
"project_scope": {"required": False, "type": "string"},
"single_pi": {"required": False, "type": "boolean",
"description": "set to true if there are no co-pi's"},
},
"type": "dict",
},
"currency": {
"description": "typically '$' or 'USD'",
"required": True,
"type": "string",
},
"due_date": {
"description": "day that the proposal is due",
"required": False,
"anyof_type": ["string", "date"],
},
"duration": {
"description": "number of years",
"required": False,
"type": ("integer", "float"),
},
"end_date": {
"description": "end date of the proposed grant in format YYYY-MM-DD",
"required": False,
"anyof_type": ["string", "date"]
},
"end_day": {
"description": "end day of the proposed grant",
"required": False,
"type": ("string", "integer"),
},
"end_month": {
"description": "end month of the proposed grant",
"required": False,
"anyof_type": ["string", "integer"]
},
"end_year": {
"description": "end year of the proposed grant",
"required": False,
"type": "integer",
},
"funder": {
"description": "who will fund the proposal"
"as funder in grants",
"required": False,
"type": "string",
},
"full": {
"description": "full body of the proposal",
"required": False,
"type": "dict",
},
"notes": {
"description": "anything you want to note",
"required": False,
"anyof_type": ["string", "list"],
},
"pi": {
"description": "principal investigator name",
"required": True,
"type": "string",
},
"pre": {
"description": "Information about the pre-proposal",
"required": False,
"type": "dict",
},
"status": {
"description": "e.g. 'pending', 'accepted', 'declined'",
"required": True,
"type": "string",
"eallowed": PROPOSAL_STATI,
},
"submitted_date": {
"description": "date that the proposal was submitted",
"required": False,
"anyof_type": ["string", "date"],
},
"submitted_day": {
"description": "day that the proposal was submitted",
"required": False,
"type": "integer",
},
"submitted_month": {
"description": "month that the proposal was submitted",
"required": False,
"anyof_type": ["string", "integer"]
},
"submitted_year": {
"description": "Year that the proposal was submitted",
"required": False,
"type": "integer",
},
"team": {
"description": "information about the team members participating "
"in the grant.",
"required": False,
"schema": {
"schema": {
"cv": {"required": False, "type": "string"},
"email": {"required": False, "type": "string"},
"institution": {"required": False, "type": "string"},
"name": {"required": False, "type": "string"},
"position": {"required": False, "type": "string"},
"subaward_amount": {
"required": False,
"type": ("integer", "float"),
},
},
"type": "dict",
},
"type": "list",
},
"title": {
"description": "actual title of proposal",
"required": True,
"type": "string",
},
"title_short": {
"description": "short title of proposal",
"required": False,
"type": "string",
},
},
"reading_lists": {
"_description": {
"description": "Reading lists consist of doi's or urls of items and "
"a brief synopsis of why they are interesting"
},
"_id": {
"description": "Unique identifier for the reading list.",
"required": True,
"type": "string"
},
"date": {"description": "date the list was edited",
"required": False,
"anyof_type": ["date", "string"]},
"day": {
"description": "The day the list was edited",
"required": False,
"type": "integer"
},
"month": {
"description": "The month the list was edited",
"required": False,
"anyof_type": [
"integer",
"string"
]
},
"year": {
"description": "The day the list was edited",
"required": False,
"type": "integer"
},
"papers": {
"description": "The list of items that are in the list",
"required": True,
"type": "list",
"schema": {
"type": "dict",
"schema": {
"doi": {
"description": "the doi of the paper. If it doesn't have one put 'na'",
"required": False,
"type": "string"
},
"text": {
"description": "the description of why the item is important or interesting",
"required": True,
"type": "string"
},
"url": {
"description": "the url of the item if it has one",
"required": False,
"type": "string"
}
}
}
},
"purpose": {
"description": "The purpose or target audience for the list",
"required": False,
"type": "string"
},
"title": {
"description": "The title of the list",
"required": True,
"type": "string"
}
},
"refereeReports": {
"_description": {
"description": "This is a collection of information that will be "
"be used to build a referee report. This should probably be private."
},
"_id": {"description": "the ID", "required": True, "type": "string"},
"claimed_found_what": {
"description": "What the authors claim to have found",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"claimed_why_important": {
"description": "What importance the authors claim",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"did_how": {
"description": "How the study was done",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"did_what": {
"description": "What the study was",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"due_date": {
"description": "date the review is due in ISO format",
"required": True,
"anyof_type": ["string", "date"],
},
"editor_eyes_only": {
"description": "Comments you don't want passed to the author",
"required": False,
"type": "string",
},
"final_assessment": {
"description": "Summary of impressions of the study",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"first_author_last_name": {
"description": "Last name of first author will be referred to "
"with et al.",
"required": True,
"type": "string",
},
"freewrite": {
"description": "Things that you want to add that don't fit into "
"any category above",
"required": False,
"type": "string",
},
"institutions": {
"description": "the institutions of the pi and co-pis",
"required": False,
"anyof_type": ["string", "list"]
},
"journal": {
"description": "name of the journal",
"required": True,
"type": "string",
},
"month": {
"description": "The month the review was requested",
"required": False,
"anyof_type": [
"integer",
"string"
]
},
"recommendation": {
"description": "Your publication recommendation",
"required": True,
"type": "string",
"eallowed": REVIEW_RECOMMENDATIONS,
},
"requester": {
"description": "Name of the program officer who requested the review",
"required": True,
"type": "string",
},
"reviewer": {
"description": "name of person reviewing the paper",
"required": True,
"type": "string",
},
"status": {
"description": "Where you are with the review",
"required": True,
"type": "string",
"eallowed": REVIEW_STATI,
},
"submitted_date": {
"description": "submitted date in ISO YYYY-MM-DD format.",
"required": False,
"anyof_type": ["string", "date"],
},
"title": {
"description": "title of the paper under review",
"required": True,
"type": "string",
},
"validity_assessment": {
"description": "List of impressions of the validity of the claims",
"required": True,
"schema": {"type": "string", "required": True},
"type": "list",
},
"year": {
"description": "year when the review is being done",
"required": True,
"anyof_type": ["string", "integer"],
},
},
"students": {
"_description": {
"description": "This | |
if wrap_around:
bnds[np.isclose(bnds, limits[0], rtol(), atol())] = limits[1]
error = RuntimeError(f"{name} dimension bounds not directed positively")
if bnds.ndim > 0:
space_diff = np.diff(bnds, axis=0)
if wrap_around:
if np.any(space_diff <= 0):
# add one full rotation to first and second negative
# differences to assume it is wrapping around (since
# positive direction is required, and cross-over
# can happen at most once without domain wrapping on
# itself)
neg = space_diff[space_diff <= 0]
neg[0:2] += limits[1] - limits[0]
space_diff[space_diff <= 0] = neg
else:
# it is a scalar, set difference to one to pass next check
space_diff = 1
if not np.all(space_diff > 0):
raise error
if bnds.ndim > 1:
space_diff = np.diff(bnds, axis=1)
if wrap_around:
if np.any(space_diff <= 0):
# add one full rotation to first negative difference
# to assume it is wrapping around (since positive
# direction is required, and cross-over can happen
# at most once without domain wrapping on itself)
neg = space_diff[space_diff <= 0]
neg[0] += limits[1] - limits[0]
space_diff[space_diff <= 0] = neg
else:
# it is a scalar, set difference to one to pass next check
space_diff = 1
if not np.all(space_diff > 0):
raise error
@staticmethod
def _check_dimension_bounds_regularity(bounds, name, limits, wrap_around):
    """Check that the spacing of *bounds* is constant across the region.

    Two spacings are checked: the gap between successive cells
    (along axis 0) and, for 2D bounds, the width of each cell
    (along axis 1).

    :param bounds: numpy array of dimension bounds; 0D/1D for a single
        cell, or 2D with one (lower, upper) pair per cell
    :param name: dimension name, used only in the error message
    :param limits: (lower, upper) extent of the valid domain
    :param wrap_around: True for a cyclic dimension whose bounds may
        cross the domain limits (e.g. longitude)
    :raises RuntimeError: if the bounds spacing is not constant

    **Examples:**

    >>> import numpy
    >>> Grid._check_dimension_bounds_regularity(  # 1D, not cyclic
    ... numpy.array([0., 1.]), 'test', (-2, 2), False)
    >>> Grid._check_dimension_bounds_regularity(  # 1D, cyclic, no wrap around
    ... numpy.array([0., 1.]), 'test', (-2, 2), True)
    >>> Grid._check_dimension_bounds_regularity(  # 2D, cyclic, wrap around
    ... numpy.array([0., -1.]), 'test', (-2, 2), True)
    >>> Grid._check_dimension_bounds_regularity(  # 2D, not cyclic
    ... numpy.array([[-1., 0.], [0., 1.], [1., 2.]]), 'test', (-3, 3), False)
    >>> Grid._check_dimension_bounds_regularity(  # 2D, cyclic
    ... numpy.array([[-1., 0.], [0., 1.], [1., 2.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_regularity(  # 2D, cyclic, wrap around, bound across
    ... numpy.array([[1.5, 2.5], [2.5, -2.5], [-2.5, -1.5]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_regularity(  # 2D, cyclic, wrap around, bound edging, sign case 1
    ... numpy.array([[1., 2.], [2., 3.], [3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_regularity(  # 2D, cyclic, wrap around, bound edging, sign case 2
    ... numpy.array([[1., 2.], [2., 3.], [-3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_regularity(  # 2D, cyclic, wrap around, bound edging, sign case 3
    ... numpy.array([[1., 2.], [2., -3.], [3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_regularity(  # 2D, cyclic, wrap around, bound edging, sign case 4
    ... numpy.array([[1., 2.], [2., -3.], [-3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_regularity(  # irregular (not cyclic)
    ... numpy.array([[0., .9], [.9, 2.], [2., 3.]]), 'test', (0, 3), False)
    Traceback (most recent call last):
    ...
    RuntimeError: test bounds space gap not constant across region
    >>> Grid._check_dimension_bounds_regularity(  # irregular (cyclic)
    ... numpy.array([[0., .9], [.9, 2.], [2., 3.]]), 'test', (-2, 2), True)
    Traceback (most recent call last):
    ...
    RuntimeError: test bounds space gap not constant across region
    >>> Grid._check_dimension_bounds_regularity(  # not cyclic, no wrap around
    ... numpy.array([[1., 2.], [2., -1.], [-1., 0.]]), 'test', (-2, 2), False)
    Traceback (most recent call last):
    ...
    RuntimeError: test bounds space gap not constant across region
    >>> Grid._check_dimension_bounds_regularity(  # gap
    ... numpy.array([[-1., 0.], [0., 1.], [2., 3.]]), 'test', (-3, 3), False)
    Traceback (most recent call last):
    ...
    RuntimeError: test bounds space gap not constant across region
    >>> Grid._check_dimension_bounds_regularity(  # inverted direction
    ... numpy.array([[1., 2.], [-2., -1.], [-1., -2]]), 'test', (-2, 2), True)
    Traceback (most recent call last):
    ...
    RuntimeError: test bounds space gap not constant across region
    """
    rtol_ = rtol()
    atol_ = atol()
    # replace lower limit by upper limit to acknowledge it is same
    # location (e.g. -180degE same as +180degE, so replace -180degE
    # by +180degE)
    bnds = deepcopy(bounds)
    if wrap_around:
        bnds[np.isclose(bnds, limits[0], rtol_, atol_)] = limits[1]
    error = RuntimeError(f"{name} bounds space gap not constant across region")
    if bnds.ndim > 0:
        # gaps between successive cells along the dimension
        space_diff = np.diff(bnds, axis=0)
        if wrap_around:
            if np.any(space_diff < 0):
                # add one full rotation to first and second negative
                # differences to assume it is wrapping around (since
                # positive direction is required, and cross-over
                # can happen at most once without domain wrapping on
                # itself)
                neg = space_diff[space_diff < 0]
                neg[0:2] += limits[1] - limits[0]
                space_diff[space_diff < 0] = neg
    else:
        # it is a scalar, use a zero spread so the next check passes
        space_diff = 0
    if not np.isclose(np.amin(space_diff), np.amax(space_diff),
                      rtol_, atol_):
        raise error
    if bnds.ndim > 1:
        # width of each cell (lower bound to upper bound)
        space_diff = np.diff(bnds, axis=1)
        if wrap_around:
            if np.any(space_diff < 0):
                # add one full rotation to first negative difference
                # to assume it is wrapping around (since positive
                # direction is required, and cross-over can happen
                # at most once without domain wrapping on itself)
                neg = space_diff[space_diff < 0]
                neg[0] += limits[1] - limits[0]
                space_diff[space_diff < 0] = neg
    else:
        # not a 2D array, use a zero spread so the next check passes
        space_diff = 0
    if not np.isclose(np.amin(space_diff), np.amax(space_diff),
                      rtol_, atol_):
        raise error
@staticmethod
def _check_dimension_bounds_contiguity(bounds, name, limits, wrap_around):
    """Check that successive cells in *bounds* share an edge, i.e. each
    cell's lower bound equals the previous cell's upper bound.

    :param bounds: numpy array of cell bounds; 0D/1D for a single cell,
        or 2D with one (lower, upper) pair per cell
    :param name: dimension name, used only in the error message
    :param limits: (lower, upper) extent of the valid domain
    :param wrap_around: True for a cyclic dimension whose bounds may
        cross the domain limits (e.g. longitude)
    :raises RuntimeError: if the bounds are not contiguous

    **Examples:**

    >>> import numpy
    >>> Grid._check_dimension_bounds_contiguity(  # 1D, not cyclic
    ... numpy.array([0., 1.]), 'test', (-2, 2), False)
    >>> Grid._check_dimension_bounds_contiguity(  # 1D, cyclic, no wrap around
    ... numpy.array([0., 1.]), 'test', (-2, 2), True)
    >>> Grid._check_dimension_bounds_contiguity(  # 2D, cyclic, wrap around
    ... numpy.array([0., -1.]), 'test', (-2, 2), True)
    >>> Grid._check_dimension_bounds_contiguity(  # 2D, not cyclic
    ... numpy.array([[-1., 0.], [0., 1.], [1., 2.]]), 'test', (-3, 3), False)
    >>> Grid._check_dimension_bounds_contiguity(  # 2D, cyclic
    ... numpy.array([[-1., 0.], [0., 1.], [1., 2.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_contiguity(  # 2D, cyclic, wrap around, bound across
    ... numpy.array([[1.5, 2.5], [2.5, -2.5], [-2.5, -1.5]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_contiguity(  # 2D, cyclic, wrap around, bound edging, sign case 1
    ... numpy.array([[1., 2.], [2., 3.], [3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_contiguity(  # 2D, cyclic, wrap around, bound edging, sign case 2
    ... numpy.array([[1., 2.], [2., 3.], [-3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_contiguity(  # 2D, cyclic, wrap around, bound edging, sign case 3
    ... numpy.array([[1., 2.], [2., -3.], [3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_contiguity(  # 2D, cyclic, wrap around, bound edging, sign case 4
    ... numpy.array([[1., 2.], [2., -3.], [-3., -2.], [-2., -1.]]), 'test', (-3, 3), True)
    >>> Grid._check_dimension_bounds_contiguity(  # gaps (not cyclic)
    ... numpy.array([[0.0, 0.9], [1.0, 1.9], [2.0, 2.9]]), 'test', (-3, 3), False)
    Traceback (most recent call last):
    ...
    RuntimeError: test bounds not contiguous across region
    >>> Grid._check_dimension_bounds_contiguity(  # gaps (cyclic)
    ... numpy.array([[0.0, 0.9], [1.0, 1.9], [-2.0, -1.1]]), 'test', (-2, 2), False)
    Traceback (most recent call last):
    ...
    RuntimeError: test bounds not contiguous across region
    """
    rel_tol = rtol()
    abs_tol = atol()
    # Work on a copy: for cyclic dimensions, fold the lower domain limit
    # onto the upper one, since both describe the same location
    # (e.g. -180degE is the same as +180degE).
    folded = deepcopy(bounds)
    if wrap_around:
        at_lower_limit = np.isclose(folded, limits[0], rel_tol, abs_tol)
        folded[at_lower_limit] = limits[1]
    if folded.ndim > 1:
        # gap between each cell's upper bound and the next cell's lower bound
        gaps = folded[1:, 0] - folded[:-1, 1]
    else:
        # a single cell is contiguous by definition
        gaps = 0
    if not np.allclose(gaps, 0, rel_tol, abs_tol):
        raise RuntimeError(f"{name} bounds not contiguous across region")
@staticmethod
def _check_dimension_in_bounds(dimension, bounds, name, limits, wrap_around):
"""**Examples:**
>>> import numpy
>>> Grid._check_dimension_in_bounds( # 1 coord, not cyclic
... numpy.array(.5), numpy.array([0., 1.]), 'test', (0, 2), False)
>>> Grid._check_dimension_in_bounds( # 1 coord, cyclic, no wrap around
... numpy.array(.5), numpy.array([0., 1.]), 'test', (0, 2), True)
>>> Grid._check_dimension_in_bounds( # x coords, not cyclic
... numpy.array([0.5, 1.5, 2.5]),
... numpy.array([[0., 1.], [1., 2.], [2., 3.]]),
... 'test', (0, 3), False)
>>> Grid._check_dimension_in_bounds( # x coords, cyclic, no wrap around
... numpy.array([0.5, 1.5, 2.5]),
... numpy.array([[0., 1.], [1., 2.], [2., 3.]]),
... 'test', (0, 3), True)
>>> Grid._check_dimension_in_bounds( # x coords, cyclic, wrap around
... numpy.array([0.5, 1.5, -1.5]),
... numpy.array([[0., 1.], [1., 2.], [2., -1.]]),
... 'test', (-2, 2), True)
>>> Grid._check_dimension_in_bounds( # x coords, cyclic, wrap around, bound across, sign case 1
... numpy.array([2., 3., -2.]),
... numpy.array([[1.5, 2.5], [2.5, -2.5], [-2.5, -1.5]]),
... 'test', (-3, 3), True)
>>> Grid._check_dimension_in_bounds( # x coords, cyclic, wrap around, bound across, sign case 2
... numpy.array([2., -3., -2.]),
... numpy.array([[1.5, 2.5], [2.5, -2.5], [-2.5, -1.5]]),
... 'test', (-3, 3), | |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 <NAME> <<EMAIL>>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""Classes for constructing CLY grammars."""
from builtins import map
from builtins import str
from past.builtins import basestring
from builtins import object
import re
import os
import posixpath
from xml.dom import minidom
from inspect import isclass
from cly.exceptions import *
__all__ = ['Node', 'Alias', 'Group', 'Action', 'Variable', 'Grammar', 'Help',
'LazyHelp', 'Word', 'String', 'URI', 'LDAPDN', 'Integer', 'Float', 'IP',
'Hostname', 'Host', 'EMail', 'File', 'Boolean']
__docformat__ = 'restructuredtext en'
class Node(object):
    """The base class for all grammar nodes.

    Constructor arguments are:

    ``help``: string or callable returning a list of (key, help) tuples
        A help string or a callable returning an iterable of (key, help)
        pairs. There is a useful class called Help which can be used for
        this purpose.

    ``name=None``: string
        The name of the node. If omitted the key used by the parent Node
        is used. The node name also defines the node path:

        >>> Node('Something', name='something')
        <Node:/something>

    The following constructor arguments are also class variables, and as
    such can be overridden at the class level by subclasses of Node. Useful
    if you find yourself using a particular pattern repeatedly.

    ``pattern=None``: regular expression string
        The regular expression used to match user input. If not provided,
        the node name is used:

        >>> a = Node('Something', name='something')
        >>> a.pattern == a.name
        True

    ``separator=r'\s+|\s*$'``: regular expression string
        A regular expression used to match the text separating this node
        and the next.

    ``group=0``: integer
        Nodes can be grouped together to provide visual cues. Groups are
        ordered ascending numerically.

    ``order=0``: integer
        Within a group, nodes are normally ordered alphabetically. This can
        be overridden by setting this to a value other than 0.

    ``match_candidates=False``: boolean
        The candidates() method returns a list of words that match at the
        current token, which are then used for completion, but can also be
        used to constrain the allowed matches if match_candidates=True.
        Useful for situations where you have a general regex pattern (eg. a
        pattern matching files) but a known set of matches at this point (eg.
        files in the current directory).

    ``traversals=1``: integer
        The number of times this node can match in any parse context. Alias
        nodes allow for multiple traversal.
        If ``traversals=0`` the node will match an infinite number of times.
    """
    pattern = None              # token regex; derived from name when None
    separator = r'\s+|\s*$'     # regex separating this token from the next
    order = 0                   # tie-break ordering within a group
    group = 0                   # visual grouping; lower groups sort first
    match_candidates = False    # if True, token must also be in candidates()
    traversals = 1              # max matches per parse context (0 = unlimited)
def __init__(self, help='', *args, **kwargs):
    """Create a node; see the class docstring for argument details."""
    self._children = {}
    # ``help`` may be a plain string (wrapped in a LazyHelp) or a
    # callable returning (key, help) pairs
    if isinstance(help, basestring):
        self.help = LazyHelp(self, help)
    elif callable(help):
        self.help = help
    else:
        raise InvalidHelp('help must be a callable or a string')
    # pop per-instance pattern/separator overrides before compiling, so
    # they take precedence over the class-level defaults
    if 'pattern' in kwargs:
        self.pattern = kwargs.pop('pattern')
    if 'separator' in kwargs:
        self.separator = kwargs.pop('separator')
    if self.pattern is not None:
        self._pattern = re.compile(self.pattern)
    self._separator = re.compile(self.separator)
    if self.pattern is not None and self.separator is not None:
        # combined token+separator pattern, used by advance() to consume
        # the whole matched span
        self._full_match = re.compile('(?:%s)(?:%s)' %
                                      (self.pattern, self.separator))
    # assigning ``name`` goes through the property setter, which may
    # derive the match pattern from the name
    self.name = kwargs.pop('name', None)
    self.parent = None
    # counter used by __call__ to label anonymous children
    self.__anonymous_children = 0
    # remaining positional/keyword arguments attach children and options
    self(*args, **kwargs)
def _set_name(self, name):
    """Set the name of this node. If the Node does not have an existing
    matching pattern associated with it, a pattern will be created using
    the name."""
    self._name = name
    if isinstance(name, basestring) and self.pattern is None:
        # no explicit pattern: match the literal name
        self.pattern = name
        self._pattern = re.compile(name)
    if self.pattern is not None and self.separator is not None:
        # keep the combined token+separator pattern in sync with the
        # (possibly just derived) pattern
        self._full_match = re.compile('(?:%s)(?:%s)' %
                                      (self.pattern, self.separator))

# ``name`` is a property so renaming a node keeps its patterns in sync
name = property(lambda self: self._name, _set_name)
def __call__(self, *anonymous, **options):
    """Update or add options and child nodes.

    Positional arguments are treated as anonymous child nodes, while
    keyword arguments can either be named child nodes or attribute updates
    for this node. See __init__ for more information on attributes.

    As a special case, if a positional argument is a `Grammar` object, its
    *children* will be merged.

    >>> top = Node('Top', name='top')
    >>> top(subnode=Node('Subnode'))
    <Node:/top>
    >>> top.find('subnode')
    <Node:/top/subnode>
    """
    for node in anonymous:
        if isinstance(node, Grammar):
            # merge the grammar's children, not the grammar node itself
            children = dict([(n.name, n) for n in node])
            self(**children)
            continue
        if not isinstance(node, Node):
            raise InvalidAnonymousNode('Anonymous node is not a Node object')
        # TODO Convert help to name instead of __anonymous_<n>
        node.name = '__anonymous_%i' % self.__anonymous_children
        node.parent = self
        self._children[node.name] = node
        self.__anonymous_children += 1
    for k, v in options.items():
        if isinstance(v, Node):
            # NOTE(review): a trailing underscore appears to let callers
            # use keys that would clash with Python keywords (e.g.
            # ``pass_``) -- inferred from the strip-one-underscore
            # behaviour; confirm against project docs
            if k.endswith('_'):
                k = k[:-1]
            v.name = k
            v.parent = self
            self._children[k] = v
        else:
            # any non-Node keyword is an attribute update on this node
            setattr(self, k, v)
    return self
def __iter__(self):
"""Iterate over child nodes, ignoring context.
>>> tree = Node('One')(two=Node('Two'), three=Node('Three'))
>>> list(tree)
[<Node:/three>, <Node:/two>]
"""
children = sorted(list(self._children.values()),
key=lambda i: (i.group, i.order, i.name))
for child in children:
yield child
def __setitem__(self, key, child):
    """Attach *child* under *key*, emulating dictionary assignment.

    >>> node = Node('One')
    >>> node['two'] = Node('Two')
    >>> list(node.walk())
    [<Node:/>, <Node:/two>]
    """
    options = {key: child}
    self(**options)
def __getitem__(self, key):
"""Emulate dictionary get.
>>> node = Node('One')(two=Node('Two'))
>>> node['two']
<Node:/two>
"""
return self._children[key]
def __delitem__(self, key):
"""Emulate dictionary delete.
>>> node = Node('One')(two=Node('Two'), three=Node('Three'))
>>> list(node.walk())
[<Node:/>, <Node:/three>, <Node:/two>]
>>> del node['two']
>>> list(node.walk())
[<Node:/>, <Node:/three>]
"""
child = self._children.pop(key)
child.parent = None
def __contains__(self, key):
"""Emulate dictionary key existence test.
>>> node = Node('One')(two=Node('Two'), three=Node('Three'))
>>> 'two' in node
True
"""
return key in self._children
def walk(self, predicate=None):
"""Perform a recursive walk of the grammar tree.
>>> tree = Node('One')(two=Node('Two', three=Node('Three'),
... four=Node('Four')))
>>> list(tree.walk())
[<Node:/>, <Node:/two>, <Node:/two/four>, <Node:/two/three>]
"""
if predicate is None:
predicate = lambda node: True
def walk(root):
if not predicate(root):
return
yield root
for node in root._children.values():
for subnode in walk(node):
yield subnode
for node in walk(self):
yield node
def children(self, context, follow=False):
"""Iterate over child nodes, optionally follow()ing branches.
>>> from cly.parser import Context
>>> tree = Node('One')(two=Node('Two', three=Node('Three'),
... four=Node('Four')), five=Alias('../two/*'))
>>> context = Context(None, None)
>>> list(tree.children(context))
[<Alias:/five for /two/*>, <Node:/two>]
>>> list(tree.children(context, follow=True))
[<Node:/two/four>, <Node:/two/three>, <Node:/two>]
"""
for child in self:
if child.valid(context):
if follow:
for branch in child.follow(context):
if branch.valid(context):
yield branch
else:
yield child
def follow(self, context):
    """Return alternative Nodes to traverse.

    The children() method calls this method when follow=True to expand
    aliased nodes, although it could be used for other purposes.
    """
    # base behaviour: a concrete node stands only for itself
    yield self
def selected(self, context, match):
    """This node was selected by the parser.

    :param context: the active parse context
    :param match: the regex match for the consumed token (unused here)

    By default, informs the context that the node has been traversed.
    """
    context.selected(self)
def next(self, context):
    """Return an iterable over the set of next candidate nodes."""
    # NOTE: this is grammar traversal, not the Python 2 iterator
    # protocol, despite the method name.
    for child in self.children(context, follow=True):
        yield child
def match(self, context):
    """Does this node match the current token?

    Must return a regex match object or None for no match. If
    ``match_candidates`` is true the token must also match one of the values
    returned by ``candidates()``.

    Must include separator in determining whether a match was
    successful."""
    # valid() is defined elsewhere in this class -- presumably it
    # enforces the ``traversals`` limit; confirm against full source
    if not self.valid(context):
        return None
    match = self._pattern.match(context.command, context.cursor)
    if match:
        # Check that separator matches as well
        if not self._separator.match(context.command, context.cursor +
                                     len(match.group())):
            return None
        # candidates() yields completions with a trailing space, so the
        # token is compared with one appended
        if self.match_candidates and match.group() + ' ' not in \
                self.candidates(context, match.group()):
            return None
    return match
def advance(self, context):
"""Advance context cursor based on this nodes match."""
match = self._full_match.match(context.command, context.cursor)
context.advance(len(match.group()))
def visible(self, context):
"""Should this node be visible?"""
return True
def terminal(self, context):
    """This node was selected as a terminal.

    A base node cannot legally end a command, so reaching end of line
    here is an error; subclasses that may terminate input override this.
    """
    raise UnexpectedEOL(context)
def depth(self):
"""The depth of this node in the grammar.
>>> grammar = Grammar(one=Node('One'), two=Node('Two'))
>>> grammar.depth()
0
>>> grammar.find('two').depth()
1
"""
return self.parent and self.parent.depth() + 1 or 0
def path(self):
"""The full grammar path to this node. Path components are separated
by a forward slash.
>>> grammar = Grammar(one=Node('One'), two=Node('Two'))
>>> grammar.find('two').path()
'/two'
"""
names = []
node = self
while node is not None:
if node.name is not None:
names.insert(0, node.name)
node = node.parent
return '/' + '/'.join(names)
def candidates(self, context, text):
"""Return an iterable of completion candidates for the given text. The
default is to use the content of self.help().
>>> grammar = Grammar(one=Node('One'), two=Node('Two'))
>>> list(grammar.find('one').candidates(None, 'o'))
['one ']
>>> list(grammar.find('one').candidates(None, 't'))
[]
"""
for key, help in self.help(context):
if key[0] != '<' and key.startswith(text):
yield key + ' '
def find(self, path):
"""Find a Node by path | |
# Repository: chamisfum/ML_Service_Production
"""
DOCUMENTATION:
The config layer provides the helper and configuration services needed by the service layer.
It acts as a configuration provider and controller, exposing configuration functions to the service layer.
It builds each configuration service on top of functions from the infrastructure layer.
"""
# python package
from keras.models import load_model
from keras.models import model_from_json
# internal package
from src.infra import infra
# Initialize global aliases: re-export the infrastructure-layer helpers so the
# rest of this module can call them without the ``infra.`` prefix.
# List / string utilities.
_appendListElement = infra._appendListElement
_getSplitedStringByIndex = infra._getSplitedStringByIndex
_getElementByIndex = infra._getElementByIndex
# File-system utilities.
_getFilesFromFolder = infra._getFilesFromFolder
_getFilePathAndName = infra._getFilePathAndName
# Image loading / preprocessing utilities.
_openImageFile = infra._openImageFile
_imageToNumpyArray = infra._imageToNumpyArray
_renderRGBImage = infra._renderRGBImage
_renderRGBtoGrayImage = infra._renderRGBtoGrayImage
_resizeImageByModelInputShape = infra._resizeImageByModelInputShape
_normalizeImage = infra._normalizeImage
_reshapeGrayImage = infra._reshapeGrayImage
_expandRGBImageDimensions = infra._expandRGBImageDimensions
def _buildDictModel(list_model) -> tuple:
    """Derive dictionary keys (model names) and values (model paths).

    Handles both entry shapes produced by :func:`_buildListModel`:
    a ``[architecture_path, weight_path]`` list for JSON models, or a
    plain path string for h5/hdf5 models.

    ACCEPT list of model entries, either JSON pairs or h5 paths
    RETURN (keys, values) — note the previous ``-> list`` annotation was
    wrong for this 2-tuple
    RETURN EXAMPLE :
        * KEYS   : ['BALANCE_model', 'IMBALANCE_model', 'SPLIT_AUGMENTATION_model']
        * VALUES : ['static/model/BALANCE_model.h5', 'static/model/IMBALANCE_model.h5',
                    'static/model/SPLIT_AUGMENTATION_model.h5']
    """
    keys = []
    values = []
    for model in list_model:
        # For a JSON entry the name comes from the architecture file (first
        # element); for an h5 entry the entry itself is the path. The stored
        # value is always the original entry.
        source_path = _getElementByIndex(model, 0) if isinstance(model, list) else model
        file_name = _getSplitedStringByIndex(source_path, "/", -1)      # e.g. BALANCE_model.h5
        model_name = _getSplitedStringByIndex(file_name, ".", 0)        # e.g. BALANCE_model
        _appendListElement(keys, model_name)
        _appendListElement(values, model)
    return keys, values
def _buildListModel(path):
    """Scan *path* and collect model files into two groups.

    Files are classified by name pattern: ``model.json`` (architecture),
    ``weights.h5``/``weights.hdf5`` (weights for a JSON architecture) and
    ``model.h5``/``model.hdf5`` (self-contained models).

    ACCEPT path of the model directory
    RETURN (json_model, hdf5_model)
    RETURN EXAMPLE :
        * JSON_MODEL : [['static/model/BALANCE_model.json', 'static/model/BALANCE_weight.h5'],
                        ['static/model/SPLIT_AUGMENTATION_model.json', 'static/model/SPLIT_AUGMENTATION_weight.h5']]
        * HDF5_MODEL : ['static/model/BALANCE_model.h5', 'static/model/SPLIT_AUGMENTATION_model.h5']
    """
    json_arch = []
    json_weight = []
    hdf5_model = []
    json_model = []
    for entry in _getFilesFromFolder(path):
        if "model.json" in entry:
            _appendListElement(json_arch, _getFilePathAndName(path, entry))
        elif "weights.h5" in entry or "weights.hdf5" in entry:
            _appendListElement(json_weight, _getFilePathAndName(path, entry))
        elif "model.h5" in entry or "model.hdf5" in entry:
            _appendListElement(hdf5_model, _getFilePathAndName(path, entry))
    # A JSON model is only usable when both an architecture file and a
    # weight file were found; pair them up.
    if json_arch and json_weight:
        json_model = _getJsonModel(json_arch, json_weight)
    return json_model, hdf5_model
def _getDictModel(path):
    """Build the model dictionary used by the service and application layers.

    Combines the JSON models and the h5/hdf5 models found under *path*.
    BUGFIX: the previous version let the h5 results *overwrite* the JSON
    results when both kinds were present; they are now merged.

    ACCEPT path of the model directory
    RETURN (dicts, keys, values) where keys are model names and values are
    model paths (or [architecture, weight] pairs for JSON models)
    RETURN EXAMPLE :
        * DICTS : {'BALANCE_model': 'static/model/BALANCE_model.h5',
                   'IMBALANCE_model': 'static/model/IMBALANCE_model.h5'}
        * KEYS  : ['BALANCE_model', 'IMBALANCE_model']
        * VALUES: ['static/model/BALANCE_model.h5', 'static/model/IMBALANCE_model.h5']
    """
    keys = []
    values = []
    json_model, hdf5_model = _buildListModel(path)
    if json_model:
        json_keys, json_values = _buildDictModel(json_model)
        keys.extend(json_keys)
        values.extend(json_values)
    if hdf5_model:
        h5_keys, h5_values = _buildDictModel(hdf5_model)
        keys.extend(h5_keys)
        values.extend(h5_values)
    dicts = dict(zip(keys, values))
    return dicts, keys, values
def _loadSelectModel(model, path):
    """Load the selected model into a Keras model object.

    Supports both a JSON architecture + separate weight file and a
    self-contained h5/hdf5 file.

    ACCEPT model: name of the model to load (a key of _getDictModel),
           path: model directory
    RETURN a keras model instance (e.g. keras.engine.sequential.Sequential)
    RAISES ValueError when *model* is not found under *path* (this case
           previously crashed with an UnboundLocalError)
    """
    model_dict, _, _ = _getDictModel(path)
    if model not in model_dict:
        raise ValueError("model '%s' not found in '%s'" % (model, path))
    model_and_weight = _getElementByIndex(model_dict, model)
    if isinstance(model_and_weight, list) and model_and_weight:
        # JSON model: [architecture_path, weight_path]
        arch_path = _getElementByIndex(model_and_weight, 0)
        weight_path = _getElementByIndex(model_and_weight, 1)
        with open(arch_path, 'r') as json_file:  # closed even on error
            loaded_model = model_from_json(json_file.read())
        loaded_model.load_weights(weight_path)
    else:
        # Self-contained h5/hdf5 model.
        loaded_model = load_model(model_and_weight)
    return loaded_model
def _loadCompareModel(list_model, path):
    """Load every selected model into a list of Keras model objects.

    Supports both JSON (architecture + weight) and self-contained h5/hdf5
    models, mirroring :func:`_loadSelectModel`.

    ACCEPT list_model: names of the models to load, path: model directory
    RETURN list of keras model instances, in the order the names appear in
    *list_model*; names that are not in the model dictionary are skipped.
    RETURN EXAMPLE :
        * LIST_OFMODEL : [<keras.engine.sequential.Sequential object ...>,
                          <keras.engine.sequential.Sequential object ...>]
    """
    model_dict, _, _ = _getDictModel(path)
    list_ofModel = []
    for name in list_model:
        if name not in model_dict:
            # BUGFIX: the lookup used to happen *before* this membership
            # check, which could fail outright on an unknown name.
            continue
        model_and_weight = _getElementByIndex(model_dict, name)
        if isinstance(model_and_weight, list) and model_and_weight:
            # JSON model: [architecture_path, weight_path]
            arch_path = _getElementByIndex(model_and_weight, 0)
            weight_path = _getElementByIndex(model_and_weight, 1)
            with open(arch_path, 'r') as json_file:  # closed even on error
                loaded_model = model_from_json(json_file.read())
            loaded_model.load_weights(weight_path)
        else:
            # Self-contained h5/hdf5 model.
            loaded_model = load_model(model_and_weight)
        _appendListElement(list_ofModel, loaded_model)
    return list_ofModel
def _getJsonModel(models, weights) -> list:
    """Pair every JSON architecture file with its weight file.

    *models* is sorted descending and consumed from the end while *weights*
    is sorted ascending, so both lists are walked in the same alphabetical
    order. NOTE: both input lists are mutated in place (*models* ends up
    emptied, *weights* sorted).

    ACCEPT models: architecture file paths, weights: weight file paths
    RETURN list of ``[architecture_path, weight_path]`` pairs
    """
    paired = []
    models.sort(reverse=True)   # descending, so pop() yields ascending order
    weights.sort()              # ascending
    for weight_path in weights:
        arch_path = models.pop()
        arch_file = _getSplitedStringByIndex(arch_path, "/", -1)  # e.g. VGG19_model.json
        prefix = _getSplitedStringByIndex(arch_file, "_", 0)      # e.g. VGG19
        # Only pair when the architecture prefix occurs in the weight path.
        if prefix in weight_path:
            _appendListElement(paired, [arch_path, weight_path])
    return paired
def _rgbImageProcessing(image_file, keras_model):
    """Preprocess a raw query image for an RGB model.

    The image is decoded, converted from BGR to RGB, resized to the model's
    input shape, normalized and given a leading batch dimension.

    ACCEPT image_file: raw uploaded image, keras_model: target model
    RETURN a numpy array with a leading batch dimension, ready to feed to
    the model's ``predict``
    """
    image = _openImageFile(image_file)
    pixels = _imageToNumpyArray(image)
    rgb_pixels = _renderRGBImage(pixels)  # BGR -> RGB
    resized, _, _ = _resizeImageByModelInputShape(rgb_pixels, keras_model)
    normalized = _normalizeImage(resized)
    # Add the batch axis expected by ``predict``.
    return _expandRGBImageDimensions(normalized, 0)
def _grayImageProcessing(image_file, model):
"""
_grayImageProcessing() : Provide a Grayscale image preprocessing for raw query image based on model input volume information
ACCEPT raw image file and keras | |
# Repository: iosifidisvasileios/CumulativeCostBoosting
import pickle
import time
import warnings
import matplotlib
from matplotlib.colors import ListedColormap
from imblearn.datasets import make_imbalance
from sklearn.datasets import make_moons, make_blobs, make_circles, make_classification
from sklearn.preprocessing import StandardScaler
from Competitors.AdaMEC import AdaMEC
from Competitors.CGAda_Cal import CGAda_Cal
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA, SparsePCA
from Competitors.AdaMEC_Cal import AdaMEC_Cal
from DataPreprocessing.load_diabetes import load_diabetes
from DataPreprocessing.load_electricity import load_electricity
from DataPreprocessing.load_phoneme import load_phoneme
from DataPreprocessing.load_speed_dating import load_speed_dating
warnings.filterwarnings("ignore")
from Competitors.RareBoost import RareBoost
from DataPreprocessing.load_mat_data import load_mat_data
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit
import numpy as np
import joblib
from plot_functions import plot_decision_boundary, retrieve_n_class_color_cubic
from collections import defaultdict, Counter
import os, sys
import operator
from multiprocessing import Process
from imblearn import datasets
from sklearn.metrics import f1_score, balanced_accuracy_score, recall_score
from AdaCC import AdaCC
from Competitors.CostBoostingAlgorithms import CostSensitiveAlgorithms
from DataPreprocessing.load_adult import load_adult
from DataPreprocessing.load_wilt import load_wilt
from DataPreprocessing.load_mushroom import load_mushroom
from DataPreprocessing.load_eeg_eye import load_eeg_eye
from DataPreprocessing.load_spam import load_spam
from DataPreprocessing.load_skin import load_skin
from DataPreprocessing.load_credit import load_credit
from DataPreprocessing.load_kdd import load_kdd
from DataPreprocessing.load_bank import load_bank
sys.path.insert(0, 'DataPreprocessing')
def print_stats(names, stats):
    """Print each name next to its statistic, one pair per line."""
    for index, name in enumerate(names):
        print(f"{name} {stats[index]}")
def get_dataset(dataset):
    """Resolve a dataset name to ``(X, y, cl_names)``.

    Known names map to the project's dedicated loaders; a handful of names
    are .mat files served by ``load_mat_data``; anything else (except the
    synthetic 'bloob'/'circle'/'moon' sets, which are generated by the
    callers themselves) is fetched from the imblearn benchmark collection.

    RETURN X: features, y: binary labels (positives stay 1, everything
           else becomes 0 in the imblearn branch), cl_names: column names
    RAISES ValueError for the synthetic dataset names, which have no
           loader here (previously this fell through and crashed with a
           NameError on the return statement)
    """
    loaders = {
        "wilt": load_wilt,
        "adult": load_adult,
        "diabetes": load_diabetes,
        "phoneme": load_phoneme,
        "mushroom": load_mushroom,
        "electricity": load_electricity,
        "speeddating": load_speed_dating,
        "credit": load_credit,
        "eeg_eye": load_eeg_eye,
        "spam": load_spam,
        "skin": load_skin,
        "bank": load_bank,
        "kdd": load_kdd,
    }
    mat_datasets = {"landsatM", "musk2", "spliceM", "semeion_orig", "waveformM"}
    if dataset in loaders:
        X, y, cl_names = loaders[dataset]()
    elif dataset in mat_datasets:
        X, y, cl_names = load_mat_data(dataset)
    elif dataset in ('bloob', 'circle', 'moon'):
        raise ValueError("synthetic dataset %r is generated by the caller, not loaded here" % dataset)
    else:
        # Fall back to the imblearn benchmark collection; binarize labels.
        data = datasets.fetch_datasets()[dataset]
        cl_names = ["feature_" + str(i) for i in range(0, data['data'].shape[1])]
        X = data['data']
        y = data['target']
        y[y != 1] = 0
    return X, y, cl_names
def run_eval(dataset, base_list, methods):
    """Sweep noise/imbalance settings for a synthetic dataset and plot
    decision boundaries for settings where an AdaCC variant wins.

    For each (cluster_std, imbalance) pair the data is generated (only
    'moon' and 'bloob' are handled here) and down-sampled with
    make_imbalance, then every method in *methods* is trained in its own
    process for each ensemble size in *base_list*; trained models are
    passed back through files under 'boundary_temp_preds/'. If the method
    with the best training recall is not an AdaCC variant the setting is
    abandoned; otherwise, once the largest ensemble size (200) is reached,
    the boundaries are rendered via draw_results().

    NOTE(review): for any *dataset* other than 'moon'/'bloob', X and y are
    never assigned and the make_imbalance call raises NameError — confirm
    callers only pass these two names.
    """
    for cluster_std in range(1, 11):
        for imbalance in range(1, 11):
            if dataset == 'moon':
                X, y = make_moons(n_samples=5000, noise=cluster_std / 10., random_state=int(time.time()))
            elif dataset == 'bloob':
                X, y = make_blobs(n_samples=5000, centers=2, cluster_std=cluster_std, random_state=int(time.time()))
            # Keep 1000 negatives and `ratio` positives (ratio = imbalance/10).
            ratio = int(1000 * imbalance / 10.)
            print("----- init ----- for cluster_std", cluster_std, 'and imbalance ratio', imbalance / 10., 'positives', ratio)
            X, y = make_imbalance(X, y, sampling_strategy={0: 1000, 1: ratio})
            data_list_of_predictions = []
            for baseL in base_list:
                list_of_predictors = []
                processes = []
                # One training process per method; results are exchanged via
                # joblib files named after the method.
                for method in methods:
                    p = Process(target=train_and_predict, args=(X, y, baseL, method))
                    p.start()
                    processes.append(p)
                for p in processes:
                    p.join()
                for index, method in enumerate(methods):
                    with open('boundary_temp_preds/' + method, 'rb') as filehandle:
                        list_of_predictors.append(joblib.load(filehandle))
                data_list_of_predictions.append(list_of_predictors)
                temp_methods = list(methods)
                scores = []
                # Rank the methods by recall on the (training) data.
                for clf in list_of_predictors:
                    scores.append(recall_score(y, clf.predict(X)))
                zipped_list = zip(scores, temp_methods)
                sorted_pairs = sorted(zipped_list, reverse=True)
                # Abandon this setting unless an AdaCC variant is on top.
                if 'AdaCC' not in sorted_pairs[0][1]:
                    # print("failed for cluster_std", cluster_std, 'and imbalance ratio', imbalance, "best method=", sorted_pairs[0][1], sorted_pairs)
                    break
                if (baseL == 200):
                    print("SUCCESS for cluster_std", cluster_std, 'and imbalance ratio', imbalance)
                    draw_results(dataset, base_list, data_list_of_predictions, methods, cluster_std, imbalance, X,y)
def draw_results(dataset, base_list, data_list_of_predictions, methods, cluster_std, imbalance, X,y):
    """Render a grid of decision boundaries and save it as a PNG.

    One row per ensemble size in *base_list*, one column per method plus a
    leading "Training data" column; each method panel is annotated with its
    recall (in %). The figure is written to
    Images/Boundaries/<dataset>_noise=<cluster_std>_imb=<imbalance>.png.

    NOTE(review): the same X, y serve as both "training" and "testing"
    data here (datasets_list repeats them) — scores are training recall.
    """
    datasets_list = [X,y,X,y]
    figure = plt.figure(figsize=(30, 15))
    plt.rcParams.update({'font.size': 16})
    i = 1  # 1-based, row-major subplot index
    for ds_cnt, ds in enumerate(base_list):
        # just plot the dataset first
        cm = plt.cm.RdBu
        cm_bright = ListedColormap(['#FF0000', '#00FF00'])
        ax = plt.subplot(len(base_list), len(data_list_of_predictions[ds_cnt]) + 1, i)
        if ds_cnt == 0:
            ax.set_title("Training data")
        # Mesh over the data range, used both for limits and contour plots.
        x_min, x_max = datasets_list[0][:, 0].min() - 1, datasets_list[0][:, 0].max() + 1
        y_min, y_max = datasets_list[0][:, 1].min() - 1, datasets_list[0][:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
        ax.scatter(datasets_list[0][:, 0], datasets_list[0][:, 1], c=datasets_list[1], cmap=cm_bright, alpha=0.6, edgecolors='k')
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        # Row labels assume base_list == [25, 50, 100, 200].
        if ds_cnt == 0:
            ax.set_ylabel('25 weak learners')
        elif ds_cnt == 1:
            ax.set_ylabel('50 weak learners')
        elif ds_cnt == 2:
            ax.set_ylabel('100 weak learners')
        elif ds_cnt == 3:
            ax.set_ylabel('200 weak learners')
        # i += 1
        # ax = plt.subplot(len(base_list), len(data_list_of_predictions[ds_cnt]) + 2, i)
        # if ds_cnt == 0:
        #     ax.set_title("Testing data")
        # xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
        # ax.scatter(datasets_list[2][:, 0], datasets_list[2][:, 1], c=datasets_list[3], cmap=cm_bright, alpha=1,edgecolors='k')
        # ax.set_xlim(xx.min(), xx.max())
        # ax.set_ylim(yy.min(), yy.max())
        # ax.set_xticks(())
        # ax.set_yticks(())
        i += 1
        for name, clf in zip(methods, data_list_of_predictions[ds_cnt]):
            # score = balanced_accuracy_score(datasets_list[3], clf.predict(datasets_list[2]))*100
            score = recall_score(datasets_list[3], clf.predict(datasets_list[2])) * 100
            # Predict over the mesh to draw the decision surface.
            Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            ax = plt.subplot(len(base_list), len(data_list_of_predictions[ds_cnt]) + 1, i)
            # ax = plt.subplot(len(base_list), len(data_list_of_predictions[ds_cnt]) + 2, i)
            ax.contourf(xx, yy, Z, cmap=cm_bright, alpha=.9)
            # Plot the testing points
            ax.scatter(datasets_list[2][:, 0], datasets_list[2][:, 1], c=datasets_list[3], cmap=cm_bright,
                       edgecolors='k', alpha=.9)
            ax.set_xlim(xx.min(), xx.max())
            ax.set_ylim(yy.min(), yy.max())
            ax.set_xticks(())
            ax.set_yticks(())
            if ds_cnt == 0:
                ax.set_title(name)
            # Recall annotation in the bottom-right corner of the panel.
            ax.text(xx.max() - .3, yy.min() + .3, ('%.1f' % score).lstrip('0'), size=20, weight='bold', horizontalalignment='right')
            i += 1
    plt.tight_layout()
    plt.show()
    plt.savefig("Images/Boundaries/" + dataset + "_noise=" + str(cluster_std) + "_imb=" + str(imbalance) + ".png",
                bbox_inches='tight', dpi=200)
def train_and_predict(X_train, y_train, base_learners, method):
    """Train one method and persist the fitted classifier to
    'boundary_temp_preds/<method>' (joblib) for the parent process.

    AdaBoost/AdaCC/RareBoost train directly; AdaMEC variants grid-search a
    class-weight cost on training F1; all remaining methods fan out one
    subprocess per candidate cost ratio (train_competitors) and keep the
    best-scoring pickled result. Intended to run inside a
    multiprocessing.Process.

    NOTE(review): `'AdaMEC' in method` also matches 'AdaMEC_Cal', so the
    dedicated AdaMEC_Cal branch below appears unreachable — confirm the
    method naming used by callers.
    NOTE(review): `minority` is picked with itemgetter(0) (largest label),
    not by the smallest count — verify this is intentional for the binary
    0/1 labels used here.
    """
    # The first ratios list is immediately superseded by the second.
    ratios = [1., 2., 3., 4., 5., 6., 7, 8., 9., 10.]
    ratios = [ 2., 4., 6., 8., 10.]
    if method == 'AdaBoost':
        clf = CostSensitiveAlgorithms(algorithm='AdaBoost', n_estimators=base_learners)
        clf.fit(X_train, y_train)
    elif 'AdaCC' in method:
        clf = AdaCC(n_estimators=base_learners, algorithm=method)
        clf.fit(X_train, y_train)
    elif 'AdaMEC' in method:
        counter_dict = Counter(list(y_train))
        majority = max(counter_dict.items(), key=operator.itemgetter(1))[0]
        minority = max(counter_dict.items(), key=operator.itemgetter(0))[0]
        # ratios = [ 2., 4., 6., 8., 1.]
        clf = AdaMEC(n_estimators=base_learners, algorithm=method)
        clf.fit(X_train, y_train)
        # Grid-search the majority-class cost on training F1.
        best_score = -1
        best_idx = 0
        for idx, cost in enumerate(ratios):
            class_weight = {minority: 1, majority: cost / 10.}
            clf.set_costs(class_weight)
            score = f1_score(y_train, clf.predict(X_train))
            # score = balanced_accuracy_score(y_train, clf.predict(X_train))
            if best_score < score:
                best_idx = idx
                best_score = score
        # Re-apply the best cost before persisting.
        class_weight = {minority: 1, majority: ratios[best_idx] / 10.}
        clf.set_costs(class_weight)
    elif 'AdaMEC_Cal' in method:
        counter_dict = Counter(list(y_train))
        majority = max(counter_dict.items(), key=operator.itemgetter(1))[0]
        minority = max(counter_dict.items(), key=operator.itemgetter(0))[0]
        # ratios = [ 2., 4., 6., 8., 10.]
        clf = AdaMEC_Cal(n_estimators=base_learners, algorithm=method)
        clf.fit(X_train, y_train)
        # Same cost grid-search as AdaMEC, but set_costs also takes y_train.
        best_score = -1
        best_idx = 0
        for idx, cost in enumerate(ratios):
            class_weight = {minority: 1, majority: cost / 10.}
            clf.set_costs(y_train, class_weight)
            score = f1_score(y_train, clf.predict(X_train))
            # score = balanced_accuracy_score(y_train, clf.predict(X_train))
            if best_score < score:
                best_idx = idx
                best_score = score
        class_weight = {minority: 1, majority: ratios[best_idx] / 10.}
        clf.set_costs(y_train, class_weight)
    elif 'RareBoost' in method:
        clf = RareBoost(n_estimators=base_learners)
        clf.fit(X_train, y_train)
    else:
        counter_dict = Counter(list(y_train))
        majority = max(counter_dict.items(), key=operator.itemgetter(1))[0]
        minority = max(counter_dict.items(), key=operator.itemgetter(0))[0]
        # ratios = [ 2., 4., 6., 8., 9.9]
        # Train one subprocess per cost ratio; each writes a pickled
        # [score, classifier] file that is compared below.
        processes = []
        for ratio in ratios:
            p = Process(target=train_competitors,
                        args=(X_train, y_train, base_learners, method, majority, minority, ratio))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        best_ratio = -1
        predictor = None
        for ratio in ratios:
            # Missing files mean that ratio's training failed (best-effort).
            if os.path.exists('boundary_temp_preds/' + method + str(ratio)):
                with open('boundary_temp_preds/' + method + str(ratio), 'rb') as filehandle:
                    temp = pickle.load(filehandle)
                if temp[0] > best_ratio:
                    best_ratio = temp[0]
                    predictor = temp[1]
                if os.path.exists('boundary_temp_preds/' + method + str(ratio)):
                    os.remove('boundary_temp_preds/' + method + str(ratio))
        with open('boundary_temp_preds/' + method, 'wb') as filehandle:
            joblib.dump(predictor, filehandle)
        return
    with open('boundary_temp_preds/' + method, 'wb') as filehandle:
        joblib.dump(clf, filehandle)
def train_competitors(X_train, y_train, base_learners, method, maj, min, ratio):
    """Train one cost-sensitive competitor for a single cost *ratio*.

    Fits the classifier, scores it with training F1 and pickles
    ``[score, classifier]`` to ``boundary_temp_preds/<method><ratio>`` so
    the parent process can pick the best ratio. Meant to run inside a
    multiprocessing.Process; failures are swallowed on purpose because the
    parent treats a missing result file as "this ratio failed".

    NOTE: the *min*/*maj* parameter names shadow builtins but are kept for
    caller compatibility (callers pass them positionally).
    """
    try:
        out = []
        if method == 'CGAda_Cal':
            clf = CGAda_Cal(n_estimators=base_learners, algorithm=method, class_weight={min: 1, maj: ratio / 10.})
        else:
            clf = CostSensitiveAlgorithms(n_estimators=base_learners, algorithm=method,
                                          class_weight={min: 1, maj: ratio / 10.})
        clf.fit(X_train, y_train)
        out.append(f1_score(y_train, clf.predict(X_train)))
        out.append(clf)
        with open('boundary_temp_preds/' + method + str(ratio), 'wb') as filehandle:
            pickle.dump(out, filehandle)
    except Exception:
        # Deliberate best-effort: a failed competitor simply produces no
        # result file. BUGFIX: the previous bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        return
def run_eval_circle(dataset, base_list, methods):
for cluster_std in range(1, 11):
for fact in range(1, 10):
for imbalance in range(1, 11):
X, y = make_circles(n_samples=2000, noise=cluster_std/10., factor=fact/10., shuffle=True, random_state=100)
ratio = int(1000 * imbalance / 10.)
print("----- init ----- for cluster_std", cluster_std, 'and factor', fact,'and imbalance ratio', imbalance / 10., 'positives',
ratio)
X, y = make_imbalance(X, y, sampling_strategy={0: 1000, 1: ratio})
data_list_of_predictions = []
for baseL in base_list:
list_of_predictors = []
processes = []
for method in methods:
p = Process(target=train_and_predict, args=(X, y, baseL, method))
p.start()
processes.append(p)
for p in processes:
p.join()
for index, method in enumerate(methods):
with open('boundary_temp_preds/' + method, 'rb') as filehandle:
list_of_predictors.append(joblib.load(filehandle))
data_list_of_predictions.append(list_of_predictors)
temp_methods = list(methods)
scores = []
for clf in list_of_predictors:
scores.append(recall_score(y, clf.predict(X)))
zipped_list = zip(scores, temp_methods)
sorted_pairs = sorted(zipped_list, reverse=True)
if 'AdaCC' | |
from math import pi, cos, sin
import torch
from torch.autograd import Variable
import shapely
from shapely.geometry import Polygon, MultiPoint
import numpy as np
from mmdet.ops import box_iou_rotated_differentiable
import matplotlib.pyplot as plt
class Vector:
    """Minimal 2-D vector used by the polygon-clipping routines."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        # Added to make clipping results debuggable.
        return 'Vector(%r, %r)' % (self.x, self.y)

    def __add__(self, v):
        """Component-wise sum; NotImplemented for non-Vector operands."""
        if not isinstance(v, Vector):
            return NotImplemented
        return Vector(self.x + v.x, self.y + v.y)

    def __sub__(self, v):
        """Component-wise difference; NotImplemented for non-Vector operands."""
        if not isinstance(v, Vector):
            return NotImplemented
        return Vector(self.x - v.x, self.y - v.y)

    def cross(self, v):
        """2-D cross product (z component of the 3-D cross product)."""
        if not isinstance(v, Vector):
            return NotImplemented
        return self.x * v.y - self.y * v.x
class Line:
    """Infinite line through two points, stored as ax + by + c = 0."""

    def __init__(self, v1, v2):
        # Coefficients of the implicit line equation through v1 and v2.
        self.a = v2.y - v1.y
        self.b = v1.x - v2.x
        self.c = v2.cross(v1)

    def __call__(self, p):
        """Evaluate ax + by + c at point *p*; the sign tells the side."""
        return self.a * p.x + self.b * p.y + self.c

    def intersection(self, other):
        """Intersection point with *other* as a Vector.

        Uses the homogeneous-coordinate formula, see e.g.
        https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Using_homogeneous_coordinates
        """
        if not isinstance(other, Line):
            return NotImplemented
        det = self.a * other.b - self.b * other.a
        x = (self.b * other.c - self.c * other.b) / det
        y = (self.c * other.a - self.a * other.c) / det
        return Vector(x, y)
def rectangle_vertices(rotated_rectangle, radian=False):
    """Return the four corner Vectors of a rotated rectangle.

    ACCEPT rotated_rectangle = (cx, cy, w, h, r) with r in degrees by
    default, or radians when ``radian`` is True. Torch trigonometry is
    used so the corners stay differentiable w.r.t. the inputs.

    Corners are produced in order: the (-w/2,-h/2), (+w/2,-h/2),
    (+w/2,+h/2), (-w/2,+h/2) corners rotated about the center.
    """
    cx, cy, w, h, r = rotated_rectangle
    angle = r if radian else pi * r / 180  # degrees -> radians by default
    dx = w / 2
    dy = h / 2
    dxcos = dx * torch.cos(angle)
    dxsin = dx * torch.sin(angle)
    dycos = dy * torch.cos(angle)
    dysin = dy * torch.sin(angle)
    center = Vector(cx, cy)
    return (
        center + Vector(-dxcos - -dysin, -dxsin + -dycos),
        center + Vector( dxcos - -dysin,  dxsin + -dycos),
        center + Vector( dxcos -  dysin,  dxsin +  dycos),
        center + Vector(-dxcos -  dysin, -dxsin +  dycos),
    )
def rectangle_vertices_angle(rotated_rectangle):
    """Corner Vectors of a (cx, cy, w, h, angle_in_radians) rectangle.

    Kept for backward compatibility: it is exactly
    ``rectangle_vertices(rotated_rectangle, radian=True)``; the previous
    duplicated body is gone.
    """
    return rectangle_vertices(rotated_rectangle, radian=True)
def intersection_area(r1, r2, radian=False):
    """Area of the intersection polygon of two rotated rectangles.

    Clips r1's polygon against each edge of r2 (Sutherland-Hodgman style)
    and returns the shoelace area of what remains; returns the int 0 when
    fewer than three vertices survive.

    r1/r2 are either (cx, cy, w, h, rotation) 5-value boxes or flat
    8-value corner tensors; both arguments must use the same form.
    NOTE(review): mixed lengths (e.g. 5 and 8) leave rect1/rect2 unbound
    and raise below — confirm callers never mix representations.
    """
    # r1 and r2 are in (center, width, height, rotation) representation
    # First convert these into a sequence of vertices
    if len(r1) == 5 and len(r2) == 5:
        rect1 = rectangle_vertices(r1, radian)
        rect2 = rectangle_vertices(r2, radian)
    elif len(r1) == 8 and len(r2) == 8:
        rect1 = [Vector(vertex[0],vertex[1]) for vertex in r1.view(-1,2)]
        rect2 = [Vector(vertex[0],vertex[1]) for vertex in r2.view(-1,2)]
    # Use the vertices of the first rectangle as
    # starting vertices of the intersection polygon.
    intersection = rect1
    # Loop over the edges of the second rectangle
    for p, q in zip(rect2, rect2[1:] + rect2[:1]):
        if len(intersection) <= 2:
            break # No intersection
        line = Line(p, q)
        # Any point p with line(p) <= 0 is on the "inside" (or on the boundary),
        # any point p with line(p) > 0 is on the "outside".
        # Loop over the edges of the intersection polygon,
        # and determine which part is inside and which is outside.
        new_intersection = []
        line_values = [line(t) for t in intersection]
        for s, t, s_value, t_value in zip(
                intersection, intersection[1:] + intersection[:1],
                line_values, line_values[1:] + line_values[:1]):
            if s_value <= 0:
                new_intersection.append(s)
            if s_value * t_value < 0:
                # Points are on opposite sides.
                # Add the intersection of the lines to new_intersection.
                intersection_point = line.intersection(Line(s, t))
                new_intersection.append(intersection_point)
        intersection = new_intersection
    # Calculate the remaining polygon's area with the shoelace formula.
    if len(intersection) <= 2:
        return 0
    return 0.5 * sum(p.x*q.y - p.y*q.x for p, q in
                     zip(intersection, intersection[1:] + intersection[:1]))
def convex_hull(points):
    """Return the convex hull of a set of 2-D points.

    Input: an iterable of (x, y) pairs. Output: the hull vertices in
    counter-clockwise order, starting from the lexicographically smallest
    point. Implements Andrew's monotone chain algorithm, O(n log n).
    """
    # Deduplicate and sort lexicographically (tuple comparison).
    pts = sorted(set(points))
    if len(pts) <= 1:
        # No points, or one unique point (possibly repeated).
        return pts

    def clockwise_or_collinear(o, a, b):
        # z-component of cross(OA, OB): <= 0 means no counter-clockwise turn.
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0]) <= 0

    def half_hull(sequence):
        hull = []
        for point in sequence:
            while len(hull) >= 2 and clockwise_or_collinear(hull[-2], hull[-1], point):
                hull.pop()
            hull.append(point)
        # Drop the last vertex: it is the first vertex of the other half.
        return hull[:-1]

    # Lower hull (left-to-right) followed by upper hull (right-to-left).
    return half_hull(pts) + half_hull(reversed(pts))
def convex_hull_area(points):
    """Return the area of the convex hull of *points*.

    The hull is counter-clockwise, so the shoelace sum is non-negative.
    """
    hull = convex_hull(points)
    rotated = hull[1:] + hull[:1]
    return 0.5 * sum(x1 * y2 - y1 * x2
                     for (x1, y1), (x2, y2) in zip(hull, rotated))
# Import-time sanity check: the hull of a filled 10x10 grid is its 4 corners.
assert convex_hull([(i//10, i%10) for i in range(100)]) == [(0, 0), (9, 0), (9, 9), (0, 9)]
def validate_clockwise_points(points):
    """Check that the 4 points delimiting a polygon are in clockwise order.

    Uses the shoelace-style edge sum over the quadrilateral; with the
    orientation convention used here, a positive sum means the points run
    counter-clockwise, so the order is accepted when the sum is <= 0.

    ACCEPT points: flat sequence of 8 coordinates (x1, y1, ..., x4, y4)
    RETURN True when the order is clockwise, False otherwise
    RAISES ValueError when *points* does not hold exactly 8 values
           (previously a generic Exception)
    """
    if len(points) != 8:
        raise ValueError("Points list not valid." + str(len(points)))
    xs = [int(points[i]) for i in range(0, 8, 2)]
    ys = [int(points[i]) for i in range(1, 8, 2)]
    # Sum of (x2-x1)*(y2+y1) over the four edges, wrapping around.
    total = sum((xs[(i + 1) % 4] - xs[i]) * (ys[(i + 1) % 4] + ys[i])
                for i in range(4))
    return total <= 0
def clockwise_sort(points):
    """Sort a flat tensor of 2-D points into clockwise order.

    Points are ordered by their angle around the centroid, with the sweep
    starting from the -135 degree direction.

    ACCEPT points: 1-D torch tensor (x1, y1, x2, y2, ...)
    RETURN 1-D torch tensor holding the same coordinates, reordered
    """
    # BUGFIX(cleanup): the previous version also imported functools.reduce
    # and operator here without using them.
    import math

    coords = points.reshape((-1, 2))
    center = torch.mean(coords, dim=0).reshape((1, 2))

    def angle_key(coord):
        # Angle of the point around the centroid, shifted so the sweep
        # starts at -135 degrees; the offset is computed once per point.
        offset = coord - center
        return -(-135 - math.degrees(math.atan2(offset[0, 1], offset[0, 0]))) % 360

    ordered = sorted(coords, key=angle_key)
    # assert validate_clockwise_points(ordered_flat)
    return torch.cat(ordered).reshape(-1)
def GIoU_Rotated_Rectangle(Box1,Box2, radian=False):
    """Differentiable Generalized IoU between two rotated rectangles.

    GIoU(b1, b2) = IoU(b1, b2) - area(C minus union) / area(C), with
    IoU = area(intersection) / area(union),
    union = area(b1) + area(b2) - area(intersection),
    and C the area of the convex hull enclosing both boxes.

    Input: Box1, Box2 — either (cx, cy, w, h, angle) 5-value tensors or
    flat 8-value corner tensors (which are clockwise-sorted here);
    typically requires_grad=True so the result stays differentiable.
    Output: GIoU as a torch scalar.
    NOTE(review): lengths other than 5/8 leave the vertex lists unbound
    and raise below — confirm callers only pass these two forms.
    """
    if len(Box1) == 5:
        area_box1 = Box1[2]*Box1[3] # for rectangle
        area_box2 = Box2[2]*Box2[3]
        box1_vertex = [(vertex.x,vertex.y)for vertex in rectangle_vertices(Box1, radian)]
        box2_vertex = [(vertex.x,vertex.y)for vertex in rectangle_vertices(Box2, radian)]
    elif len(Box1) == 8:
        # if not validate_clockwise_points(Box1):
        Box1 = clockwise_sort(Box1)
        # if not validate_clockwise_points(Box2):
        Box2 = clockwise_sort(Box2)
        box1_vertex = [(vertex[0],vertex[1]) for vertex in Box1.view(-1,2)]
        box2_vertex = [(vertex[0],vertex[1]) for vertex in Box2.view(-1,2)]
        # Shoelace areas of the clockwise-sorted quadrilaterals.
        area_box1 = 0.5 * sum(px*qy - py*qx for (px,py), (qx,qy) in
                              zip(box1_vertex, box1_vertex[1:] + box1_vertex[:1]))
        area_box2 = 0.5 * sum(px*qy - py*qx for (px,py), (qx,qy) in
                              zip(box2_vertex, box2_vertex[1:] + box2_vertex[:1]))
    area_box1_intersection_box2 = intersection_area(Box1, Box2, radian)
    all_vertex = box1_vertex + box2_vertex
    # C: area of the smallest convex region enclosing both boxes.
    C = convex_hull_area(all_vertex)
    area_box1_union_box2 = area_box1 + area_box2 - area_box1_intersection_box2
    GIoU = area_box1_intersection_box2/area_box1_union_box2 - (C-area_box1_union_box2)/C
    return GIoU
def IoU_Rotated_Rectangle(Box1,Box2, radian=False):
"""Compute the differentiable giou between two arbitrary rotated rectangles.
GIoU(b1,b2) = IoU(b1,b2) - area(C\(b1 union b2))/area(C) = area(b1 intersection b2)/area(b1 union b2) - area(C\(b1 union b2))/area(C)
area(b1 union b2) = area(b1) + area(b2) - area(b1 intersection b2)
area(C) = area(convex_hull)
Input: Box1, Box2: (cx,cy,w,h,angle), torch.tensor, requires_grad = true
Output: GIoU: torch.tensor
"""
if len(Box1) == 5:
area_box1 = Box1[2]*Box1[3] # for rectangle
area_box2 = Box2[2]*Box2[3]
box1_vertex = [(vertex.x,vertex.y)for vertex in rectangle_vertices(Box1, radian)]
box2_vertex = [(vertex.x,vertex.y)for vertex in rectangle_vertices(Box2, radian)]
elif len(Box1) == 8:
# if not validate_clockwise_points(Box1):
Box1 = clockwise_sort(Box1)
# if not validate_clockwise_points(Box2):
Box2 = clockwise_sort(Box2)
box1_vertex = [(vertex[0],vertex[1]) for vertex in Box1.view(-1,2)]
box2_vertex = [(vertex[0],vertex[1]) for vertex in Box2.view(-1,2)]
area_box1 = 0.5 * sum(px*qy - py*qx for (px,py), (qx,qy) in
zip(box1_vertex, box1_vertex[1:] + box1_vertex[:1]))
area_box2 = 0.5 * sum(px*qy - py*qx for (px,py), (qx,qy) in
zip(box2_vertex, box2_vertex[1:] + box2_vertex[:1]))
area_box1_intersection_box2 = intersection_area(Box1, Box2, | |
+ 3080*m.b168 + 8360*m.b169 + 8360*m.b170 + 8360*m.b171 + 8360*m.b172 + 8360*m.b173
+ 8360*m.b174 + 8360*m.b175 + 8360*m.b176 + 760*m.b177 + 760*m.b178 + 760*m.b179 + 760*m.b180
+ 760*m.b181 + 760*m.b182 + 760*m.b183 + 760*m.b184 + 1500*m.b185 + 1500*m.b186 + 1500*m.b187
+ 1500*m.b188 + 1500*m.b189 + 1500*m.b190 + 1500*m.b191 + 1500*m.b192 + 3750*m.b193
+ 3750*m.b194 + 3750*m.b195 + 3750*m.b196 + 3750*m.b197 + 3750*m.b198 + 3750*m.b199
+ 3750*m.b200 + 4620*m.b201 + 4620*m.b202 + 4620*m.b203 + 4620*m.b204 + 4620*m.b205
+ 4620*m.b206 + 4620*m.b207 + 4620*m.b208 + 770*m.b209 + 770*m.b210 + 770*m.b211 + 770*m.b212
+ 770*m.b213 + 770*m.b214 + 770*m.b215 + 770*m.b216 - 0.15*m.x409 - 0.4*m.x410 - 0.65*m.x411
+ 0.1406*m.x420 + 0.1406*m.x421 + 0.1406*m.x422 == 0)
# NOTE(review): auto-generated Pyomo constraint rows. Each row nets binary
# variables (m.b*) against continuous variables (m.x*) to zero — presumably
# flow-conservation/balance equations of a network model; confirm against
# the generator that produced this file before editing by hand.
m.c2 = Constraint(expr= m.b1 - m.b8 + m.b73 + m.b81 - m.b96 - m.b112 + m.x289 - m.x296 == 0)
m.c3 = Constraint(expr= - m.b1 + m.b2 + m.b74 + m.b82 - m.b89 - m.b105 - m.x289 + m.x290 == 0)
m.c4 = Constraint(expr= - m.b2 + m.b3 + m.b75 + m.b83 - m.b90 - m.b106 - m.x290 + m.x291 == 0)
m.c5 = Constraint(expr= - m.b3 + m.b4 + m.b76 + m.b84 - m.b91 - m.b107 - m.x291 + m.x292 == 0)
m.c6 = Constraint(expr= - m.b4 + m.b5 + m.b77 + m.b85 - m.b92 - m.b108 - m.x292 + m.x293 == 0)
m.c7 = Constraint(expr= - m.b5 + m.b6 + m.b78 + m.b86 - m.b93 - m.b109 - m.x293 + m.x294 == 0)
m.c8 = Constraint(expr= - m.b6 + m.b7 + m.b79 + m.b87 - m.b94 - m.b110 - m.x294 + m.x295 == 0)
m.c9 = Constraint(expr= - m.b7 + m.b8 + m.b80 + m.b88 - m.b95 - m.b111 - m.x295 + m.x296 == 0)
m.c10 = Constraint(expr= m.b25 - m.b32 - m.b80 + m.b89 + m.b97 - m.b120 + m.x297 - m.x304 == 0)
m.c11 = Constraint(expr= - m.b25 + m.b26 - m.b73 + m.b90 + m.b98 - m.b113 - m.x297 + m.x298 == 0)
m.c12 = Constraint(expr= - m.b26 + m.b27 - m.b74 + m.b91 + m.b99 - m.b114 - m.x298 + m.x299 == 0)
m.c13 = Constraint(expr= - m.b27 + m.b28 - m.b75 + m.b92 + m.b100 - m.b115 - m.x299 + m.x300 == 0)
m.c14 = Constraint(expr= - m.b28 + m.b29 - m.b76 + m.b93 + m.b101 - m.b116 - m.x300 + m.x301 == 0)
m.c15 = Constraint(expr= - m.b29 + m.b30 - m.b77 + m.b94 + m.b102 - m.b117 - m.x301 + m.x302 == 0)
m.c16 = Constraint(expr= - m.b30 + m.b31 - m.b78 + m.b95 + m.b103 - m.b118 - m.x302 + m.x303 == 0)
m.c17 = Constraint(expr= - m.b31 + m.b32 - m.b79 + m.b96 + m.b104 - m.b119 - m.x303 + m.x304 == 0)
m.c18 = Constraint(expr= m.b49 - m.b56 - m.b88 - m.b104 + m.b105 + m.b113 + m.x305 - m.x312 == 0)
m.c19 = Constraint(expr= - m.b49 + m.b50 - m.b81 - m.b97 + m.b106 + m.b114 - m.x305 + m.x306 == 0)
m.c20 = Constraint(expr= - m.b50 + m.b51 - m.b82 - m.b98 + m.b107 + m.b115 - m.x306 + m.x307 == 0)
m.c21 = Constraint(expr= - m.b51 + m.b52 - m.b83 - m.b99 + m.b108 + m.b116 - m.x307 + m.x308 == 0)
m.c22 = Constraint(expr= - m.b52 + m.b53 - m.b84 - m.b100 + m.b109 + m.b117 - m.x308 + m.x309 == 0)
m.c23 = Constraint(expr= - m.b53 + m.b54 - m.b85 - m.b101 + m.b110 + m.b118 - m.x309 + m.x310 == 0)
m.c24 = Constraint(expr= - m.b54 + m.b55 - m.b86 - m.b102 + m.b111 + m.b119 - m.x310 + m.x311 == 0)
m.c25 = Constraint(expr= - m.b55 + m.b56 - m.b87 - m.b103 + m.b112 + m.b120 - m.x311 + m.x312 == 0)
m.c26 = Constraint(expr= m.b9 - m.b16 + m.b121 + m.b129 - m.b144 - m.b160 + m.x313 - m.x320 == 0)
m.c27 = Constraint(expr= - m.b9 + m.b10 + m.b122 + m.b130 - m.b137 - m.b153 - m.x313 + m.x314 == 0)
m.c28 = Constraint(expr= - m.b10 + m.b11 + m.b123 + m.b131 - m.b138 - m.b154 - m.x314 + m.x315 == 0)
m.c29 = Constraint(expr= - m.b11 + m.b12 + m.b124 + m.b132 - m.b139 - m.b155 - m.x315 + m.x316 == 0)
m.c30 = Constraint(expr= - m.b12 + m.b13 + m.b125 + m.b133 - m.b140 - m.b156 - m.x316 + m.x317 == 0)
m.c31 = Constraint(expr= - m.b13 + m.b14 + m.b126 + m.b134 - m.b141 - m.b157 - m.x317 + m.x318 == 0)
m.c32 = Constraint(expr= - m.b14 + m.b15 + m.b127 + m.b135 - m.b142 - m.b158 - m.x318 + m.x319 == 0)
m.c33 = Constraint(expr= - m.b15 + m.b16 + m.b128 + m.b136 - m.b143 - m.b159 - m.x319 + m.x320 == 0)
m.c34 = Constraint(expr= m.b33 - m.b40 - m.b128 + m.b137 + m.b145 - m.b168 + m.x321 - m.x328 == 0)
m.c35 = Constraint(expr= - m.b33 + m.b34 - m.b121 + m.b138 + m.b146 - m.b161 - m.x321 + m.x322 == 0)
m.c36 = Constraint(expr= - m.b34 + m.b35 - m.b122 + m.b139 + m.b147 - m.b162 - m.x322 + m.x323 == 0)
m.c37 = Constraint(expr= - m.b35 + m.b36 - m.b123 + m.b140 + m.b148 - m.b163 - m.x323 + m.x324 == 0)
m.c38 = Constraint(expr= - m.b36 + m.b37 - m.b124 + m.b141 + m.b149 - m.b164 - m.x324 + m.x325 == 0)
m.c39 = Constraint(expr= - m.b37 + m.b38 - m.b125 + m.b142 + m.b150 - m.b165 - m.x325 + m.x326 == 0)
m.c40 = Constraint(expr= - m.b38 + m.b39 - m.b126 + m.b143 + m.b151 - m.b166 - m.x326 + m.x327 == 0)
m.c41 = Constraint(expr= - m.b39 + m.b40 - m.b127 + m.b144 + m.b152 - m.b167 - m.x327 + m.x328 == 0)
m.c42 = Constraint(expr= m.b57 - m.b64 - m.b136 - m.b152 + m.b153 + m.b161 + m.x329 - m.x336 == 0)
m.c43 = Constraint(expr= - m.b57 + m.b58 - m.b129 - m.b145 + m.b154 + m.b162 - m.x329 + m.x330 == 0)
m.c44 = Constraint(expr= - m.b58 + m.b59 - m.b130 - m.b146 + m.b155 + m.b163 - m.x330 + m.x331 == 0)
m.c45 = Constraint(expr= - m.b59 + m.b60 - m.b131 - m.b147 + m.b156 + m.b164 - m.x331 + m.x332 == 0)
m.c46 = Constraint(expr= - m.b60 + m.b61 - m.b132 - m.b148 + m.b157 + m.b165 - m.x332 + m.x333 == 0)
m.c47 = Constraint(expr= - m.b61 + m.b62 - m.b133 - m.b149 + m.b158 + m.b166 - m.x333 + m.x334 == 0)
m.c48 = Constraint(expr= - m.b62 + m.b63 - m.b134 - m.b150 + m.b159 + m.b167 - m.x334 + m.x335 == 0)
m.c49 = Constraint(expr= - m.b63 + m.b64 - m.b135 - m.b151 + m.b160 + m.b168 - m.x335 + m.x336 == 0)
m.c50 = Constraint(expr= m.b17 - m.b24 + m.b169 + m.b177 - m.b192 - m.b208 + m.x337 - m.x344 == 0)
m.c51 = Constraint(expr= - m.b17 + m.b18 + m.b170 + m.b178 - m.b185 - m.b201 - m.x337 + m.x338 == 0)
m.c52 = Constraint(expr= - m.b18 + m.b19 + m.b171 + m.b179 - m.b186 - m.b202 - m.x338 + m.x339 == 0)
m.c53 = Constraint(expr= - m.b19 + m.b20 + m.b172 + m.b180 - m.b187 - m.b203 - m.x339 + m.x340 == 0)
m.c54 = Constraint(expr= - m.b20 + m.b21 + m.b173 + m.b181 - m.b188 - m.b204 - m.x340 + m.x341 == 0)
m.c55 = Constraint(expr= - m.b21 + m.b22 + m.b174 + m.b182 - m.b189 - m.b205 - m.x341 + m.x342 == 0)
m.c56 = Constraint(expr= - m.b22 + m.b23 + m.b175 + m.b183 - m.b190 - m.b206 - m.x342 + m.x343 == 0)
m.c57 = Constraint(expr= - m.b23 + m.b24 + m.b176 + m.b184 - m.b191 - m.b207 - m.x343 + m.x344 == 0)
m.c58 = Constraint(expr= m.b41 - m.b48 - m.b176 + m.b185 + m.b193 - m.b216 + m.x345 - m.x352 == 0)
m.c59 = Constraint(expr= - m.b41 + m.b42 - m.b169 + m.b186 + m.b194 - m.b209 - m.x345 + m.x346 == 0)
m.c60 = Constraint(expr= - m.b42 + m.b43 - m.b170 + m.b187 + m.b195 - m.b210 - m.x346 | |
import arrow
import calendar
import copy
import datetime
from dateutil import parser
import math
import random
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from data_handler import DataHandler
from date_unit import DateUnit
# Shared data-access helper used by every chart builder in this module.
data_handler = DataHandler()
# Korean display labels for internal metric/column names; passed to
# plotly express via its `labels=` argument.
labels={
    "happy_j": "행복",
    "happy_score": "행복",
    "happy_score_j": "행복",
    "attention_j": "집중",
    "attention_score": "집중",
    "attention_score_j": "집중",
    "working_hours": "작업시간",
    "productive_score": "생산성",
    "repeat_task_score": "반복작업",
    "sleep_score": "수면",
    "total_score": "종합점수",
    "task_hour": "작업시간",
    "task_count": "작업수",
    "category": "카테고리",
    "start_hour": "시작시간",
    "sleep_time": "수면시간",
    "year": "연도",
    "month": "월",
    "weekday": "요일",
    "year-month": "Date",
}
def _make_daily_schedule_fig(date):
    """Build a Gantt chart of one day's tasks with happiness markers and
    per-task annotations.

    :param date: date string parseable by arrow (interpreted in Asia/Seoul);
        must be today or earlier.
    :return: plotly figure.
    :raises ValueError: if `date` is in the future. (Previously this case
        fell through and crashed later with a NameError on `record_data`.)
    """
    timedelta = arrow.now() - arrow.get(date, tzinfo="Asia/Seoul")
    days_diff = timedelta.days
    if days_diff < 0:
        raise ValueError(f"Can't handle future date: {date}")
    elif days_diff == 0:
        # Today's record may still change, so always redownload it.
        record_data = data_handler.read_record(redownload=True)
    else:
        record_data = data_handler.read_record(days=-days_diff)
    activity_data = record_data.get("activity", {})
    task_data = activity_data.get("task", [])
    toggl_projects = [data["project"] for data in task_data]
    colors = {"Empty": "#DEDEDE"}
    for data in task_data:
        colors[data["project"]] = data["color"]
    base_date = date
    tomorrow_base_date = arrow.get(base_date).shift(days=+1).format("YYYY-MM-DD")
    DUMMY_RESOURCE = "Empty"
    # Zero-length dummy entries pin the five score rows on the y axis.
    df = [  # Labeling scores
        dict(Task=5, Start=base_date, Finish=base_date, Resource=DUMMY_RESOURCE),
        dict(Task=4, Start=base_date, Finish=base_date, Resource=DUMMY_RESOURCE),
        dict(Task=3, Start=base_date, Finish=base_date, Resource=DUMMY_RESOURCE),
        dict(Task=2, Start=base_date, Finish=base_date, Resource=DUMMY_RESOURCE),
        dict(Task=1, Start=base_date, Finish=base_date, Resource=DUMMY_RESOURCE),
    ]
    # Labeling projects: zero-length entries register each project (and its
    # color) in the chart legend.
    for project in toggl_projects:
        df.append(
            dict(
                Task=1,
                Start=tomorrow_base_date,
                Finish=tomorrow_base_date,
                Resource=project,
            )
        )
    for data in task_data:
        task = {
            "Task": data.get("score", 3),
            "Start": arrow.get(data["start_time"]).format("YYYY-MM-DD HH:mm"),
            "Finish": arrow.get(data["end_time"]).format("YYYY-MM-DD HH:mm"),
            "Resource": data["project"],
            "Description": data["description"],
        }
        df.append(task)
    fig = ff.create_gantt(
        df,
        colors=colors,
        index_col="Resource",
        title="Daily Schedule",
        group_tasks=True,
        show_colorbar=True,
        bar_width=0.3,
        showgrid_x=True,
        showgrid_y=True,
        width=800,
        height=500,
    )
    happy_data = activity_data.get("happy", [])
    if len(happy_data) > 0:
        # Overlay happiness samples; scores shift down by one so they line
        # up with the 0-based gantt score rows.
        xs = [arrow.get(d["time"]).format("YYYY-MM-DD HH:mm:ss") for d in happy_data]
        ys = [d["score"] - 1 for d in happy_data]
        scatter_trace = dict(
            type="scatter",
            mode="markers",
            marker=dict(size=10, color="#439C59", line=dict(width=2)),
            name="Happy",
            x=xs,
            y=ys,
        )
        fig.add_trace(scatter_trace)
    # Annotations: alternate arrow offsets above/below the bars so the
    # labels do not pile up on top of each other.
    up_index, down_index = -1, -1
    up_ays = [i for i in range(30, 200, 20)]
    down_ays = [-i for i in up_ays]
    annotations = []
    for d in fig["data"]:
        if d["text"] is None:
            continue
        data_count = len(d["x"])
        # Gantt bars come as (start, end) point pairs, hence the stride of 2.
        for i in range(0, data_count, 2):
            text = d["text"][i]
            if text is None:
                continue
            start_date = d["x"][i]
            end_date = d["x"][i + 1]
            start_score = d["y"][i]
            end_score = d["y"][i + 1]
            if start_date == end_date or start_score != end_score:
                continue
            description = d["text"][i]
            project_names = list(colors.keys())
            project_name = "Empty"
            for p_name in project_names:
                if description.startswith(p_name):
                    project_name = p_name
                    break
            if type(start_date) != datetime.datetime:
                start_date = parser.parse(start_date)
            if type(end_date) != datetime.datetime:
                end_date = parser.parse(end_date)
            if start_score > 2:  # i.e. 1-based score of 3 or higher
                ays = down_ays
                down_index += 1
                index = down_index
            else:
                ays = up_ays
                up_index += 1
                index = up_index
            ay = ays[index % len(ays)]
            annotations.append(
                go.layout.Annotation(
                    x=start_date + (end_date - start_date) / 2,
                    y=start_score,
                    xref="x",
                    yref="y",
                    text=description,
                    font=dict(family="Courier New, monospace", size=12, color="#fff"),
                    bgcolor=colors.get(project_name, "#DEDEDE"),
                    bordercolor="#666",
                    borderpad=2,
                    arrowhead=7,
                    ax=0,
                    ay=ay,
                    opacity=0.7,
                )
            )
    fig.update_layout(annotations=annotations)
    return fig
def _make_task_stacked_bar_fig(start_date, end_date, date_unit=DateUnit.DAILY):
    """Build a stacked bar chart of task hours per category over time.

    Hover text shows each period's hours plus the delta from the previous
    period, e.g. "3.5 (+0.5)".

    :param start_date: range start, forwarded to the data handler.
    :param end_date: range end, forwarded to the data handler.
    :param date_unit: aggregation bucket (DateUnit), defaults to daily.
    :return: plotly figure.
    """
    colors = {"Empty": "#DEDEDE"}
    base_dates, task_reports = data_handler.make_task_reports(
        start_date,
        end_date,
        colors=colors,
        date_unit=date_unit,
        return_base_dates=True,
    )
    data = []
    for category, task_report in task_reports.items():
        if not task_report:
            # Guard: an empty report previously raised IndexError on
            # `task_report[0]` below.
            continue
        differ_with_last_date = [f"{task_report[0]} (0)"]
        for i in range(1, len(task_report)):
            last_week_task_time = task_report[i - 1]
            task_time = task_report[i]
            differ_time = round(task_time - last_week_task_time, 2)
            # Negative deltas already carry a "-" sign; only prepend "+".
            plus_and_minus = "+" if differ_time >= 0 else ""
            differ_with_last_date.append(
                f"{round(task_time, 2)} ({plus_and_minus}{differ_time})"
            )
        data.append(
            go.Bar(
                x=base_dates,
                y=task_report,
                name=category,
                marker=dict(
                    color=colors.get(category, "#DEDEDE"),
                    line=dict(color="#222", width=1),
                ),
                hovertext=differ_with_last_date,
                opacity=0.8,
            )
        )
    layout = go.Layout(
        autosize=True,
        barmode="stack",
        title=f"{date_unit.value} Task Report (Stack Bar)",
        xaxis={
            "tickformat": '%m-%d %a'
        }
    )
    fig = go.Figure(data=data, layout=layout)
    return fig
def _make_pie_chart_fig(start_date, end_date):
    """Build one donut pie of task-hours-per-category for every week between
    `start_date` and `end_date` (inclusive, Asia/Seoul), laid out on a grid.
    """
    start_date = arrow.get(start_date).replace(tzinfo='Asia/Seoul')
    end_date = arrow.get(end_date).replace(tzinfo='Asia/Seoul')
    categories = copy.deepcopy(data_handler.TASK_CATEGORIES)
    categories.append("Empty")
    task_reports = {}
    colors = {"Empty": "#DEDEDE"}
    # One pie per week; weeks are anchored on BASE_WEEKDAY dates.
    sunday_dates = data_handler.get_weekly_base_of_range(start_date, end_date, weekday_value=data_handler.BASE_WEEKDAY)
    for c in categories:
        task_reports[c] = [0] * len(sunday_dates)
    weekly_index = 0
    for r in arrow.Arrow.range("day", start_date, end_date):
        offset_day = (arrow.now() - r).days
        record_data = data_handler.read_record(days=-offset_day)
        # Find which weekly bucket this day belongs to (break leaves
        # `weekly_index` pointing at the matching week).
        for weekly_index, base_date in enumerate(sunday_dates):
            days_diff = (base_date - r).days
            if days_diff < 7 and days_diff >= 0:
                break
        activity_data = record_data.get("activity", {})
        task_data = activity_data.get("task", [])
        for t in task_data:
            project = t["project"]
            duration = (arrow.get(t["end_time"]) - arrow.get(t["start_time"])).seconds
            duration_hours = round(duration / 60 / 60, 1)
            task_reports[project][weekly_index] += duration_hours
            # Color
            if project not in colors and "color" in t:
                colors[project] = t["color"]
    pie_chart_count = weekly_index + 1
    if start_date.date() == end_date.date():
        COL_COUNT = 1
    else:
        # Default COL_COUNT
        COL_COUNT = 4
    ROW_COUNT = math.ceil(pie_chart_count / COL_COUNT)
    pie_values = []
    for i in range(pie_chart_count):
        pie_values.append([])
    subplots_specs = []
    for r in range(ROW_COUNT):
        row_specs = []
        for c in range(COL_COUNT):
            # "domain"-type cells are required for pie traces in subplots.
            row_specs.append({"type": "domain"})
        subplots_specs.append(row_specs)
    fig = make_subplots(rows=ROW_COUNT, cols=COL_COUNT, specs=subplots_specs)
    pie_colors = []
    for category, task_values in task_reports.items():
        for i, v in enumerate(task_values):
            pie_values[i].append(v)
            # NOTE(review): this appends one color per (category, week) pair,
            # so len(pie_colors) == categories * weeks while each pie only has
            # `categories` slices — looks like it should append once per
            # category; confirm intended slice colors for multi-week ranges.
            pie_colors.append(colors.get(category, "#DEDEDE"))
    for i, pie_value in enumerate(pie_values):
        col_index = int((i % COL_COUNT)) + 1
        row_index = int((i / COL_COUNT)) + 1
        fig.add_trace(
            go.Pie(
                labels=categories,
                values=pie_value,
                name=sunday_dates[i].format("MMM D"),
            ),
            row=row_index,
            col=col_index,
        )
    # Use `hole` to create a donut-like pie chart
    fig.update_traces(
        hole=.3, hoverinfo="label+percent+name", marker={"colors": pie_colors}
    )
    return fig
def _make_summary_line_fig(start_date, end_date):
    """Draw one line trace per daily summary score between two dates.

    :param start_date: range start (any arrow-parseable value, Asia/Seoul).
    :param end_date: range end (inclusive).
    :return: plotly figure with one lines+markers trace per score name.
    """
    start_date = arrow.get(start_date).replace(tzinfo='Asia/Seoul')
    end_date = arrow.get(end_date).replace(tzinfo='Asia/Seoul')
    summary_data = []
    for day in arrow.Arrow.range("day", start_date, end_date):
        offset_day = (arrow.now() - day).days
        record_data = data_handler.read_record(days=-offset_day)
        if "summary" not in record_data or "total" not in record_data["summary"]:
            # Cached record lacks a complete summary; force a fresh download.
            record_data = data_handler.read_record(days=-offset_day, redownload=True)
        summary_data.append(record_data.get("summary", {}))
    dates = [d.format("YYYY-MM-DD")
             for d in data_handler.get_daily_base_of_range(start_date, end_date)]
    names = ["attention", "happy", "productive", "sleep", "repeat_task", "total"]
    traces = []
    for name in names:
        scores = [day_summary.get(name, 0) for day_summary in summary_data]
        traces.append(go.Scatter(x=dates, y=scores, mode="lines+markers", name=name))
    layout = go.Layout(
        autosize=True,
        title="Summary Chart",
        xaxis={
            "tickformat": '%m-%d %a'
        }
    )
    return go.Figure(data=traces, layout=layout)
def _make_calendar_heatmap_fig(start_date, end_date):
    """Render a per-day habit heatmap with one row per habit category plus
    an aggregate "All" row.

    :param start_date: range start (any arrow-parseable value, Asia/Seoul).
    :param end_date: range end (inclusive).
    :return: plotly heatmap figure.
    """
    start_date = arrow.get(start_date).replace(tzinfo='Asia/Seoul')
    end_date = arrow.get(end_date).replace(tzinfo='Asia/Seoul')
    categories = ["BAT", "Blog", "Diary", "Exercise"]
    dates = []
    z = [[] for _ in categories]
    for day in arrow.Arrow.range("day", start_date, end_date):
        offset_day = (arrow.now() - day).days
        habit_data = data_handler.read_habit(days=-offset_day)
        for row, category in zip(z, categories):
            key = category.lower()
            do_key = f"do_{key}"
            if do_key in habit_data:  # metric_v0 format
                habit_point = int(habit_data[do_key])
            elif key in habit_data:  # metric_v1 format
                habit_point = int(habit_data[key])
            else:
                habit_point = 0
            row.append(habit_point)
        dates.append(day.format("YYYY-MM-DD"))
    # Aggregate row: sum of all categories for each day.
    categories.append("All")
    z.append([sum(day_points) for day_points in zip(*z)])
    fig = go.Figure(
        data=go.Heatmap(
            z=z,
            text=z,
            x=dates,
            y=categories,
            colorscale=[[0, "#FFFFFF"], [1, "#19410a"]],
            xgap=7,
            ygap=7,
        )
    )
    fig.update_layout(
        title="BAT, Diary, Exercise per day",
        height=300,
        xaxis={
            "tickformat": "%a-%m-%d",
            "tickangle": 75,
            "showticklabels": True,
            "dtick": 86400000.0 * 1,  # 1 day
        },
    )
    return fig
def _make_summary_chart_and_corr(df):
    """Return (monthly total-score bar chart, correlation heatmap) for a
    summary dataframe.

    NOTE(review): relies on `groupby(...).mean()` and `df.corr()` silently
    dropping the non-numeric year/month/day columns — true for older pandas;
    pandas >= 2.0 raises without `numeric_only=True`. Confirm the pinned
    pandas version.
    """
    df["year"] = df["time"].apply(lambda ts: str(arrow.get(ts).year))
    df["month"] = df["time"].apply(lambda ts: arrow.get(ts).format("MM"))
    df["day"] = df["time"].apply(lambda ts: arrow.get(ts).format("DDD"))
    # Keep a single record per calendar day.
    df = df.drop_duplicates(subset=["year", "day"])
    monthly = df.groupby(["year", "month"]).mean().reset_index()
    bar_fig = px.bar(
        monthly,
        x="month",
        y="total_score",
        labels=labels,
        barmode="group",
        color="year",
        category_orders={
            "category": data_handler.TASK_CATEGORIES,
            "year": ["2017", "2018", "2019", "2020"],
            "weekday": list(calendar.day_name),
        },
    )
    months = list(range(1, 13))
    bar_fig.update_xaxes({
        "tickmode": "array",
        "tickvals": months,
        "ticktext": [f"{m}월" for m in months],
    })
    corr_df = df.corr()
    corr_fig = px.imshow(
        corr_df.to_numpy(),
        x=list(corr_df.columns),
        y=list(corr_df.columns),
        color_continuous_scale=px.colors.sequential.Viridis,
        range_color=[-1, 1],
    )
    return bar_fig, corr_fig
def _make_summary_line_chart(df):
    """Line chart of every monthly-averaged summary score over time.

    Removes a leftover debug `print(summary_month_df)` and a stale
    commented-out year filter from the original.

    :param df: summary dataframe with a "time" column and *_score columns.
    :return: plotly express line figure, one trace per score column.
    """
    df["year"] = df["time"].apply(lambda x: str(arrow.get(x).year))
    df["month"] = df["time"].apply(lambda x: arrow.get(x).format("MM"))
    df["day"] = df["time"].apply(lambda x: arrow.get(x).format("DDD"))
    # Keep a single record per calendar day.
    df = df.drop_duplicates(subset=["year", "day"])
    summary_month_df = df.groupby(["year", "month"]).mean().reset_index()
    summary_month_df["year-month"] = summary_month_df.apply(lambda x: f"{x.year}-{int(x.month):02d}", axis=1)
    fig = px.line(
        summary_month_df,
        x="year-month",
        y=["attention_score", "happy_score", "sleep_score", "productive_score", "total_score", "repeat_task_score"],
        labels=labels,
        category_orders={
            "category": data_handler.TASK_CATEGORIES,
            "year": ["2017", "2018", "2019", "2020"],
            "weekday": list(calendar.day_name),
        },
    )
    return fig
def _make_summary_exercise_all_score_bar_charts(df):
df["year"] = df["time"].apply(lambda x: str(arrow.get(x).year))
df["month"] = df["time"].apply(lambda x: arrow.get(x).format("MM"))
df["day"] = df["time"].apply(lambda x: arrow.get(x).format("DDD"))
df = df.drop_duplicates(subset=["year", "day"])
# df = df.loc[df["year"] == "2019"]
summary_month_df = df.groupby(["year", "month", "exercise"]).mean().reset_index()
summary_month_df["year-month"] = summary_month_df.apply(lambda x: f"{x.year}-{int(x.month):02d}", axis=1)
figs = []
for score_name in ["attention_score", "happy_score"]:
fig = | |
result1), (time2, result2)
self.report_4n.report(fn_name(), n, results, (net1, net2))
def test_ipv4network_hash(self):
    """Benchmark IPv4Network.__hash__ in both implementations."""
    n = 10**6
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    time1, _ = timefn(n, net.__hash__)
    time2, _ = timefn(n, enet.__hash__)
    # Hash values legitimately differ between implementations, so only
    # the timings are reported.
    self.report_4n.report(fn_name(), n, ((time1, None), (time2, None)), net)
def test_ipv4network_str(self):
    """Benchmark IPv4Network.__str__ in both implementations."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, net.__str__), timefn(n, enet.__str__))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_repr(self):
    """Benchmark IPv4Network.__repr__ in both implementations."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, net.__repr__), timefn(n, enet.__repr__))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_compressed(self):
    """Benchmark the IPv4Network.compressed property."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, lambda: net.compressed),
               timefn(n, lambda: enet.compressed))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_exploded(self):
    """Benchmark the IPv4Network.exploded property."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, lambda: net.exploded),
               timefn(n, lambda: enet.exploded))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_reduce(self):
    """Benchmark the IPv4Network.__reduce__ method."""
    n = 10**5
    addr = ip.IPv4Network('1.2.3.4')
    eaddr = eip.IPv4Network('1.2.3.4')
    time1, _ = timefn(n, addr.__reduce__)
    time2, _ = timefn(n, eaddr.__reduce__)
    # __reduce__ payloads differ between implementations, so only the
    # timings are reported.
    self.report_4n.report(fn_name(), n, ((time1, None), (time2, None)), addr)
def test_ipv4network_reverse_pointer(self):
    """Benchmark the IPv4Network.reverse_pointer property."""
    n = 10**5
    addr = ip.IPv4Network('1.2.3.4')
    eaddr = eip.IPv4Network('1.2.3.4')
    results = (timefn(n, lambda: addr.reverse_pointer),
               timefn(n, lambda: eaddr.reverse_pointer))
    self.report_4n.report(fn_name(), n, results, addr)
def test_ipv4network_max_prefixlen(self):
    """Benchmark the IPv4Network.max_prefixlen property."""
    n = 10**6
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, lambda: net.max_prefixlen),
               timefn(n, lambda: enet.max_prefixlen))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_num_addresses(self):
    """Benchmark the IPv4Network.num_addresses property."""
    n = 10**6
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, lambda: net.num_addresses),
               timefn(n, lambda: enet.num_addresses))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_network_address(self):
    """Benchmark IPv4Network.network_address for cold and hot access.

    Networks are rebuilt inside the loop so the n == 1 pass measures a
    first (uncached) property access.
    """
    for n in (10**0, 10**6):
        net = ip.IPv4Network('1.2.3.0/24')
        enet = eip.IPv4Network('1.2.3.0/24')
        results = (timefn(n, lambda: net.network_address),
                   timefn(n, lambda: enet.network_address))
        self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_broadcast_address(self):
    """Benchmark IPv4Network.broadcast_address for cold and hot access."""
    for n in (10**0, 10**6):
        # Fresh networks per pass so n == 1 measures an uncached access.
        net = ip.IPv4Network('1.2.3.0/24')
        enet = eip.IPv4Network('1.2.3.0/24')
        results = (timefn(n, lambda: net.broadcast_address),
                   timefn(n, lambda: enet.broadcast_address))
        self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_hostmask(self):
    """Benchmark IPv4Network.hostmask for cold and hot access."""
    for n in (10**0, 10**6):
        # Fresh networks per pass so n == 1 measures an uncached access.
        net = ip.IPv4Network('1.2.3.0/24')
        enet = eip.IPv4Network('1.2.3.0/24')
        results = (timefn(n, lambda: net.hostmask),
                   timefn(n, lambda: enet.hostmask))
        self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_netmask(self):
    """Benchmark IPv4Network.netmask for cold and hot access."""
    for n in (10**0, 10**6):
        # Fresh networks per pass so n == 1 measures an uncached access.
        net = ip.IPv4Network('1.2.3.0/24')
        enet = eip.IPv4Network('1.2.3.0/24')
        results = (timefn(n, lambda: net.netmask),
                   timefn(n, lambda: enet.netmask))
        self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_init_network_address(self):
    """Benchmark constructing a network plus n accesses of network_address."""
    def build_and_access(repeat, net_class, *args):
        # Construction and repeated property access timed as one unit.
        net = net_class(*args)
        for _ in range(repeat):
            value = net.network_address
        return value
    for n in (1, 10, 51):
        results = (timefn(1, build_and_access, n, ip.IPv4Network, 33),
                   timefn(1, build_and_access, n, eip.IPv4Network, 33))
        self.report_4n.report(fn_name(), n, results, n)
def test_ipv4network_init_broadcast_address(self):
    """Benchmark constructing a network plus n accesses of broadcast_address."""
    def build_and_access(repeat, net_class, *args):
        # Construction and repeated property access timed as one unit.
        net = net_class(*args)
        for _ in range(repeat):
            value = net.broadcast_address
        return value
    for n in (1, 100, 184):
        results = (timefn(1, build_and_access, n, ip.IPv4Network, 33),
                   timefn(1, build_and_access, n, eip.IPv4Network, 33))
        self.report_4n.report(fn_name(), n, results, n)
def test_ipv4network_init_hostmask(self):
    """Benchmark constructing a network plus n accesses of hostmask."""
    def build_and_access(repeat, net_class, *args):
        # Construction and repeated property access timed as one unit.
        net = net_class(*args)
        for _ in range(repeat):
            value = net.hostmask
        return value
    for n in (1, 40, 80):
        results = (timefn(1, build_and_access, n, ip.IPv4Network, 33),
                   timefn(1, build_and_access, n, eip.IPv4Network, 33))
        self.report_4n.report(fn_name(), n, results, n)
def test_ipv4network_init_netmask(self):
    """Benchmark constructing a network plus n accesses of netmask."""
    def build_and_access(repeat, net_class, *args):
        # Construction and repeated property access timed as one unit.
        net = net_class(*args)
        for _ in range(repeat):
            value = net.netmask
        return value
    for n in (1, 30, 54):
        results = (timefn(1, build_and_access, n, ip.IPv4Network, 33),
                   timefn(1, build_and_access, n, eip.IPv4Network, 33))
        self.report_4n.report(fn_name(), n, results, n)
def test_ipv4network_with_prefixlen(self):
    """Benchmark the IPv4Network.with_prefixlen property."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, lambda: net.with_prefixlen),
               timefn(n, lambda: enet.with_prefixlen))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_with_netmask(self):
    """Benchmark the IPv4Network.with_netmask property."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, lambda: net.with_netmask),
               timefn(n, lambda: enet.with_netmask))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_with_hostmask(self):
    """Benchmark the IPv4Network.with_hostmask property."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, lambda: net.with_hostmask),
               timefn(n, lambda: enet.with_hostmask))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_getitem(self):
    """Benchmark IPv4Network.__getitem__ (indexing into the network)."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, net.__getitem__, 11),
               timefn(n, enet.__getitem__, 11))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_iter(self):
    """Benchmark iterating every address of a /24 via __iter__."""
    n = 10**3
    net = ip.IPv4Network('1.2.0.0/24')
    enet = eip.IPv4Network('1.2.0.0/24')
    results = (timelist(n, net.__iter__), timelist(n, enet.__iter__))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_hosts(self):
    """Benchmark materializing IPv4Network.hosts for a /24."""
    n = 10**3
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timelist(n, net.hosts), timelist(n, enet.hosts))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_subnets(self):
    """Benchmark materializing IPv4Network.subnets for a /24."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timelist(n, net.subnets), timelist(n, enet.subnets))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_supernet(self):
    """Benchmark the IPv4Network.supernet method."""
    n = 10**5
    net = ip.IPv4Network('1.2.3.0/24')
    enet = eip.IPv4Network('1.2.3.0/24')
    results = (timefn(n, net.supernet), timefn(n, enet.supernet))
    self.report_4n.report(fn_name(), n, results, net)
def test_ipv4network_contains(self):
    """Benchmark IPv4Network.__contains__ across hit and miss cases."""
    n = 10**6
    cases = [
        ('1.2.3.0/24', '1.2.3.4'),
        ('1.2.3.4/30', '1.2.3.0'),
        ('3.2.3.0/24', '1.2.3.4'),
    ]
    for net_str, addr_str in cases:
        net = ip.IPv4Network(net_str)
        addr = ip.IPv4Address(addr_str)
        enet = eip.IPv4Network(net_str)
        eaddr = eip.IPv4Address(addr_str)
        results = (timefn(n, net.__contains__, addr),
                   timefn(n, enet.__contains__, eaddr))
        self.report_4n.report(fn_name(), n, results, '%s %s' % (net, addr))
def test_ipv4network_overlaps(self):
    """Benchmark IPv4Network.overlaps for overlapping and disjoint pairs."""
    n = 10**5
    cases = [
        ('1.2.3.0/24', '1.2.3.4/30'),
        ('1.2.3.4/30', '1.2.3.0/24'),
        ('3.2.3.0/24', '1.2.3.4/30'),
    ]
    for first, second in cases:
        net1 = ip.IPv4Network(first)
        net2 = ip.IPv4Network(second)
        enet1 = eip.IPv4Network(first)
        enet2 = eip.IPv4Network(second)
        results = (timefn(n, net1.overlaps, net2),
                   timefn(n, enet1.overlaps, enet2))
        self.report_4n.report(fn_name(), n, results, '%s %s' % (net1, net2))
def test_ipv4network_address_exclude(self):
    """Benchmark materializing IPv4Network.address_exclude."""
    n = 10**3
    net1 = ip.IPv4Network('1.2.3.0/24')
    net2 = ip.IPv4Network('1.2.3.4/30')
    enet1 = eip.IPv4Network('1.2.3.0/24')
    enet2 = eip.IPv4Network('1.2.3.4/30')
    time1, _ = timelist(n, net1.address_exclude, net2)
    time2, _ = timelist(n, enet1.address_exclude, enet2)
    # The excluded-subnet lists differ in type between implementations, so
    # only the timings are reported.
    self.report_4n.report(fn_name(), n, ((time1, None), (time2, None)),
                          '%s %s' % (net1, net2))
def test_ipv4network_subnet_of(self):
"""Test the IPv4Network.subnet_of method."""
n = 10**5
data = [
('10.0.0.0/8', '10.0.0.0/8'),
('10.0.0.0/8', '10.1.0.0/16'),
('10.1.0.0/16', '10.0.0.0/8'),
]
for args in data:
net1 = ip.IPv4Network(args[0])
net2 = ip.IPv4Network(args[1])
time1, result1 = timefn(n, net1.subnet_of, net2)
enet1 = eip.IPv4Network(args[0])
enet2 = eip.IPv4Network(args[1])
time2, result2 = timefn(n, enet1.subnet_of, enet2)
results = (time1, result1), (time2, result2)
self.report_4n.report(fn_name(), n, results, args)
def test_ipv4network_supernet_of(self):
"""Test the IPv4Network.supernet_of method."""
n = 10**5
data = [
('10.0.0.0/8', '10.0.0.0/8'),
('10.0.0.0/8', '10.1.0.0/16'),
('10.1.0.0/16', '10.0.0.0/8'),
]
for args in data:
net1 = ip.IPv4Network(args[0])
net2 = ip.IPv4Network(args[1])
time1, result1 | |
now.")
self._schedule_now(self._EVT_CREATE)
return create_id
else:
logger.warning("Request was rejected by scheduler.")
self.issue_err(err=self.ERR_REJECTED)
return None
except Exception as err:
logger.exception("Failed to issue create, due to error {}".format(err))
self.issue_err(err=self.ERR_CREATE)
def _get_next_create_id(self):
"""
Returns the next create ID and increments the counter
:return:
"""
create_id = self.next_creation_id
self.next_creation_id = self.next_creation_id + 1
logger.debug("Assigned creation id {} to request".format(create_id))
return create_id
def check_supported_request(self, creq):
"""
Performs resource and fulfillment checks to see if the provided request can be satisfied by the EGP.
:param creq: obj `~qlinklayer.toolbox.CQC_EPR_request_tuple
The EGP Request that we want to check
:return: Error code if request fails a check, otherwise 0
"""
# TODO other ID should be checked before the request is handed to the EGP
if creq.other_id == self.node.nodeID:
logger.error("Attempted to submit request for entanglement with self!")
return self.ERR_CREATE
if creq.other_id != self.get_otherID():
logger.error("Attempted to submit request for entanglement with unknown ID!")
return self.ERR_CREATE
# Check if we can achieve the minimum fidelity
if self.feu.get_max_fidelity() < creq.min_fidelity:
logger.error("Requested fidelity {} is too high to be satisfied, maximum achievable is {}"
.format(creq.min_fidelity, self.feu.get_max_fidelity()))
return self.ERR_UNSUPP
# Check if we can satisfy the request within the given time frame
attempt_latency = self.mhp_service.get_cycle_time(self.node)
# Minimum amount of time for measure directly corresponds to the cycle time, attempts can overlap
if creq.measure_directly:
min_time = attempt_latency * creq.num_pairs
# Minimum amount of time for stored entanglement requires obtaining info from midpoint, requests cannot overlap
else:
min_time = self.mhp.conn.full_cycle * creq.num_pairs
# Check against request, max_time of 0 means will wait indefinitely
if (min_time > creq.max_time or self.dqp.comm_delay > creq.max_time) and (creq.max_time != 0):
logger.error("Requested max time is too short")
return self.ERR_UNSUPP
return 0
# Queues a request
def _add_to_queue(self, egp_request, create_id):
"""
Stores the request in the distributed queue
:param egp_request: `~qlinklayer.egp.EGPRequest`
The request we want to store in the distributed queue
:param create_id: int
The assigned create ID of this request
:return: bool
Whether the add was successful.
"""
success = self.scheduler.add_request(egp_request, create_id)
return success
def _add_to_queue_callback(self, result):
"""
Callback to be given to the DQP since adding an item takes a round of communication. Issues an
error to higher layer protocols if there was a failure
:param result: tuple
Result of performing add of request, contains add error code, absolute qid and the request
:return:
"""
try:
# Store the request locally if DQP ADD was successful
status, qid, qseq, creq = result
if status == WFQDistributedQueue.DQ_OK:
logger.debug("Completed adding item to Distributed Queue, got result: {}".format(result))
# Otherwise bubble up the DQP error
else:
logger.error("Error occurred adding request to distributed queue!")
self.issue_err(err=status, create_id=creq.create_id)
except Exception:
logger.exception("Error occurred processing DQP add callback!")
self.issue_err(err=self.ERR_OTHER)
    # Handler to be given to MHP as a stateProvider
    def trigger_pair_mhp(self):
        """
        Handler to be given to the MHP service that allows it to ask the scheduler
        if there are any requests and to receive a request to process

        :return: bool
            The scheduler's gen.flag; False when no attempt should be made
            this cycle or when an error occurred.
        """
        try:
            # Request memory update when out of resources
            if not self.scheduler.other_has_resources():
                self.request_other_free_memory()
            self.scheduler.inc_cycle()
            # Get scheduler's next gen task
            gen = self.scheduler.next()
            if gen.flag:
                # Keep track of used MHP cycles per request (data collection)
                qid, qseq = self.scheduler.curr_aid
                request = self.scheduler.distQueue.local_peek(qid, qseq).request
                create_id = request.create_id
                self._current_create_id = create_id
                if create_id not in self._used_MHP_cycles:
                    self._used_MHP_cycles[create_id] = 1
                else:
                    self._used_MHP_cycles[create_id] += 1
                if gen.storage_q != gen.comm_q:
                    # Check that storage qubit is already initialized
                    if self._memory_needs_initialization(gen.storage_q):
                        # Initialization suspends generation, so skip this attempt
                        self.initialize_storage(gen.storage_q)
                        return False
                # If we are storing the qubit prevent additional attempts until we have a reply or have timed out
                if not self.scheduler.is_handling_measure_directly():
                    suspend_time = self.scheduler.mhp_full_cycle
                    logger.debug("Next generation attempt after {}".format(suspend_time))
                    self.scheduler.suspend_generation(suspend_time)
                # Store the gen for pickup by mhp
                self.mhp_service.put_ready_data(self.node.nodeID, gen)
            else:
                # Keep track of used MHP cycles per request (data collection)
                # Suspended/busy cycles still count against the in-flight request
                if self._current_create_id is not None:
                    if self.scheduler.suspended() or self.scheduler.qmm.is_busy():
                        self._used_MHP_cycles[self._current_create_id] += 1
            return gen.flag
        except Exception:
            logger.exception("Error occurred when triggering MHP!")
            self.issue_err(err=self.ERR_OTHER)
            return False
    def initialize_storage(self, qubit_id):
        """
        Initializes the qubit if possible, if so, suspends the scheduler
        :param qubit_id: int
            Position of the storage qubit in the node's quantum memory
        :return:
        """
        logger.debug("Node {} : Memory qubit {} needs initalization".format(self.node.name, qubit_id))
        # Only start an initialization when nothing else is using the device
        if self.scheduler.suspended():
            logger.debug("Node {} : Scheduler is suspended".format(self.node.name))
            return
        elif self.qmm.is_busy():
            logger.debug("Node {} : QMM is busy".format(self.node.name))
            return
        elif self.qmm.reserved_qubits[qubit_id]:
            logger.debug("Node {} : Qubit ID {} is reserved".format(self.node.name, qubit_id))
            return
        else:
            logger.debug("Node {} : Initializing qubit {} in cycle {}".format(self.node.name, qubit_id,
                                                                              self.scheduler.mhp_cycle_number))
            # Schedule the next re-initialization cycle from the qubit's
            # decoherence budget, wrapping around the MHP cycle counter
            if self._cycles_per_initialization[qubit_id] is not None:
                this_cycle = self.scheduler.mhp_cycle_number
                init_delay_cycles = ceil(self.max_memory_init_delay / self.scheduler.mhp_cycle_period)
                dec_cycles = self._cycles_per_initialization[qubit_id]
                self._next_init_cycle[qubit_id] = ((this_cycle + init_delay_cycles + dec_cycles)
                                                   % self.scheduler.max_mhp_cycle_number)
            else:
                # Infinite decoherence time: the initialization never expires
                self._next_init_cycle[qubit_id] = None
            # self.init_info = qubit_id
            # self._last_memory_init[qubit_id] = None
            # Build and run a one-qubit INIT program on the storage position
            prgm = QuantumProgram()
            q = prgm.get_qubit_indices(1)[0]
            prgm.apply(INSTR_INIT, q)
            # Block new generation attempts while the INIT program runs
            self.scheduler.suspend_generation(self.max_memory_init_delay)
            # self.reset_program_callback()
            self._current_prgm = prgm
            self._current_prgm_name = self.OP_INIT
            self.node.qmem.execute_program(prgm, qubit_mapping=[qubit_id])
def _handle_program_failure(self):
"""
Just prints the error of a program failed
:return:
"""
logger.error("Node {} : QuantumProgram failed because {}".format(self.node.name,
self.node.qmem.failure_exception))
def _handle_program_done(self):
"""
Handles the finish of a measure, init or swap program
:param operation: str
"meas", "move" or "init"
:return:
"""
if self._current_prgm_name == self.OP_MEAS:
logger.debug("Node {} : Handling meas program done".format(self.node.name))
self._handle_measurement_outcome()
elif self._current_prgm_name == self.OP_MOVE:
logger.debug("Node {} : Handling move program done".format(self.node.name))
self._handle_move_completion()
elif self._current_prgm_name == self.OP_INIT:
logger.debug("Node {} : Handling init program done".format(self.node.name))
self._handle_init_completion()
else:
raise ValueError("Unknown operation")
def _handle_init_completion(self):
"""
Handles the completion of an initialization
:return:
"""
pass
def _memory_needs_initialization(self, qubit_id):
"""
Checks if qubit should be initialized to be ready to be used.
:param qubit_id:
:return:
"""
# Has the qubit been initialized and not used
if qubit_id not in self._next_init_cycle:
return True
else:
# Does this qubit have infinite decoherence time?
if self._cycles_per_initialization[qubit_id] is None:
return False
else:
curr_cycle = self.scheduler.mhp_cycle_number
if self.scheduler._compare_mhp_cycle(self._next_init_cycle[qubit_id], curr_cycle) <= 0:
return True
else:
return False
# Callback handler to be given to MHP so that EGP updates when request is satisfied
def handle_reply_mhp(self, result):
"""
Handler for processing the reply from the MHP service
:param result: tuple
Contains the processing result information from attempting entanglement generation
"""
try:
logger.debug("Node {} : Handling MHP Reply: {} in cycle {}".format(self.node.name, result,
self.scheduler.mhp_cycle_number))
# Otherwise we are ready to process the reply now
midpoint_outcome, mhp_seq, aid, proto_err = self._extract_mhp_reply(result=result)
self._remove_old_measurement_results(aid)
# Check if an error occurred while processing a request
if proto_err:
logger.error("Protocol error occured in MHP: {}".format(proto_err))
self._handle_mhp_err(result)
# Check if this aid may have been expired or timed out while awaiting reply
elif not self.scheduler.has_request(aid=aid):
self.clear_if_handling_emission(aid)
# Update the MHP Sequence number as necessary
if midpoint_outcome in [1, 2]:
logger.debug("Updating MHP Seq")
self._process_mhp_seq(mhp_seq, aid)
if self.scheduler.previous_request(aid=aid):
logger.debug("Got MHP Reply containing aid {} a previous request!".format(aid))
else:
# If we have never seen this aid before we should throw a warning
logger.warning("Got MHP reply containing aid {} for an unknown request".format(aid))
# Check if the reply came in before our emission handling completed
elif self.emission_handling_in_progress == self.EMIT_HANDLER_CK:
raise RuntimeError("Shouldn't be handling CK emit")
# if midpoint_outcome == 0:
# self.clear_if_handling_emission(aid)
# else:
# print("suspended cycles = {}".format(self.scheduler.num_suspended_cycles))
# self.mhp_reply = result
# Check if we have results for this aid
elif self.emission_handling_in_progress == self.EMIT_HANDLER_MD and len(self.measurement_results[aid]) == 0:
self.mhp_reply = result
# Otherwise this response is associated with a generation attempt where emission handling is finished
# and we are ready to process
else:
# No entanglement generated
if midpoint_outcome == 0:
logger.debug("Failed to produce entanglement with other node")
creq = self.scheduler.get_request(aid)
if creq is None:
logger.error("Request not found!")
self.issue_err(err=self.ERR_OTHER)
else:
# Resume generation
self.scheduler.resume_generation()
# Free the resources for the next attempt
self.scheduler.free_gen_resources(aid)
# If handling a measure directly request we need to throw away the measurement result
if creq.measure_directly and self.scheduler.has_request(aid):
try:
ecycle, m, basis = self.measurement_results[aid].pop(0)
logger.debug("Removing measurement outcome {} in basis {} for aid {} (failed attempt)"
.format(m, basis, aid))
except IndexError:
pass
elif midpoint_outcome in [1, 2]:
# Check if we need to time out this request
logger.debug("Processing MHP SEQ {}".format(mhp_seq))
valid_mhp = self._process_mhp_seq(mhp_seq, aid)
if valid_mhp:
logger.debug("Handling reply corresponding to absolute queue id {}".format(aid))
self._handle_generation_reply(midpoint_outcome, mhp_seq, aid)
logger.debug("Finished handling MHP | |
replicas[0]),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('sku.tier', result['sku']['tier']),
JMESPathCheck('sku.name', result['sku']['name']),
JMESPathCheck('replicationRole', 'Replica'),
JMESPathCheck('sourceServerId', result['id']),
JMESPathCheck('replicaCapacity', '0')])
time.sleep(20 * 60)
def _test_flexible_server_replica_list(self, database_engine, resource_group, master_server):
self.cmd('{} flexible-server replica list -g {} --name {}'
.format(database_engine, resource_group, master_server),
checks=[JMESPathCheck('length(@)', 1)])
    def _test_flexible_server_replica_stop(self, database_engine, resource_group, master_server, replicas):
        """Stop replication on the first replica; both servers become standalone."""
        # Master must currently be a replication source; keep its JSON for later checks
        result = self.cmd('{} flexible-server show -g {} --name {} '
                          .format(database_engine, resource_group, master_server),
                          checks=[JMESPathCheck('replicationRole', 'Source')]).get_output_in_json()
        self.cmd('{} flexible-server replica stop-replication -g {} --name {} --yes'
                 .format(database_engine, resource_group, replicas[0]),
                 checks=[
                     JMESPathCheck('name', replicas[0]),
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('replicationRole', 'None'),
                     JMESPathCheck('sourceServerId', ''),
                     JMESPathCheck('replicaCapacity', result['replicaCapacity'])])
        # test show server with replication info, master becomes normal server
        self.cmd('{} flexible-server show -g {} --name {}'
                 .format(database_engine, resource_group, master_server),
                 checks=[
                     JMESPathCheck('replicationRole', 'None'),
                     JMESPathCheck('sourceServerId', ''),
                     JMESPathCheck('replicaCapacity', result['replicaCapacity'])])
    def _test_flexible_server_replica_delete_source(self, database_engine, resource_group, master_server, replicas):
        """Create a second replica, delete its source, and check the replica ends up standalone."""
        # Master is standalone again after the preceding stop-replication step
        result = self.cmd('{} flexible-server show -g {} --name {} '
                          .format(database_engine, resource_group, master_server),
                          checks=[JMESPathCheck('replicationRole', 'None')]).get_output_in_json()
        self.cmd('{} flexible-server replica create -g {} --replica-name {} --source-server {}'
                 .format(database_engine, resource_group, replicas[1], master_server),
                 checks=[
                     JMESPathCheck('name', replicas[1]),
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('sku.name', result['sku']['name']),
                     JMESPathCheck('replicationRole', 'Replica'),
                     JMESPathCheck('sourceServerId', result['id']),
                     JMESPathCheck('replicaCapacity', '0')])
        self.cmd('{} flexible-server delete -g {} --name {} --yes'
                 .format(database_engine, resource_group, master_server), checks=NoneCheck())
        # The replica should survive deletion of its source as a normal server
        self.cmd('{} flexible-server show -g {} --name {}'
                 .format(database_engine, resource_group, replicas[1]),
                 checks=[
                     JMESPathCheck('replicationRole', 'None'),
                     JMESPathCheck('sourceServerId', ''),
                     JMESPathCheck('replicaCapacity', result['replicaCapacity'])])
def _test_flexible_server_replica_delete(self, database_engine, resource_group, replicas):
self.cmd('{} flexible-server delete -g {} --name {} --yes'
.format(database_engine, resource_group, replicas[0]), checks=NoneCheck())
self.cmd('{} flexible-server delete -g {} --name {} --yes'
.format(database_engine, resource_group, replicas[1]), checks=NoneCheck())
self.cmd('az group delete --name {} --yes --no-wait'.format(resource_group), checks=NoneCheck())
class FlexibleServerVnetMgmtScenarioTest(ScenarioTest):
    """Scenario tests for provisioning flexible servers into virtual networks,
    covering subnet IDs / vnet names that may or may not already exist,
    including subnets in a different resource group."""

    def _test_flexible_server_vnet_mgmt_existing_supplied_subnetid(self, database_engine, resource_group):
        """Provision into an existing, non-delegated subnet given by ID; delegation should be added."""
        # flexible-server create
        if self.cli_ctx.local_context.is_on:
            self.cmd('local-context off')
        if database_engine == 'postgres':
            location = self.postgres_location
        elif database_engine == 'mysql':
            location = self.mysql_location
        server = 'testvnetserver10' + database_engine
        # Scenario : Provision a server with supplied Subnet ID that exists, where the subnet is not delegated
        # NOTE(review): relies on {rg}/{vnet} being present in self.kwargs (set by a preparer/decorator) — confirm
        subnet_id = self.cmd('network vnet subnet show -g {rg} -n default --vnet-name {vnet}').get_output_in_json()['id']
        # create server - Delegation should be added.
        self.cmd('{} flexible-server create -g {} -n {} --subnet {} -l {}'
                 .format(database_engine, resource_group, server, subnet_id, location))
        # flexible-server show to validate delegation is added to both the created server
        show_result_1 = self.cmd('{} flexible-server show -g {} -n {}'
                                 .format(database_engine, resource_group, server)).get_output_in_json()
        self.assertEqual(show_result_1['delegatedSubnetArguments']['subnetArmResourceId'], subnet_id)
        # delete server
        self.cmd('{} flexible-server delete -g {} -n {} --yes'.format(database_engine, resource_group, server),
                 checks=NoneCheck())
        # This is required because the delegations cannot be removed until the server is completely deleted. In the current implementation, there is a delay. Hence, the wait
        time.sleep(20 * 60)

    def _test_flexible_server_vnet_mgmt_non_existing_supplied_subnetid(self, database_engine, resource_group):
        """Provision with a subnet ID whose vnet exists but subnet does not; both should be created."""
        # flexible-server create
        if self.cli_ctx.local_context.is_on:
            self.cmd('local-context off')
        if database_engine == 'postgres':
            location = self.postgres_location
        elif database_engine == 'mysql':
            location = self.mysql_location
        vnet_name_2 = 'clitestvnet1'
        subnet_name_2 = 'clitestsubnet1'
        server = 'testvnetserver2' + database_engine
        # Scenario : Provision a server with supplied Subnet ID whose vnet exists, but subnet does not exist and the vnet does not contain any other subnet
        # The subnet name is the default created one, not the one in subnet ID
        self.cmd('{} flexible-server create -g {} -n {} -l {} --subnet {}'
                 .format(database_engine, resource_group, server, location, '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(self.get_subscription_id(), resource_group, vnet_name_2, subnet_name_2)))
        # flexible-server show to validate delegation is added to both the created server
        show_result = self.cmd('{} flexible-server show -g {} -n {}'.format(database_engine, resource_group, server)).get_output_in_json()
        self.assertEqual(show_result['delegatedSubnetArguments']['subnetArmResourceId'],
                         '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(
                             self.get_subscription_id(), resource_group, vnet_name_2, subnet_name_2))
        # Cleanup
        self.cmd('{} flexible-server delete -g {} -n {} --yes'.format(database_engine, resource_group, server), checks=NoneCheck())
        # This is required because the delegations cannot be removed until the server is completely deleted. In the current implementation, there is a delay. Hence, the wait
        time.sleep(20 * 60)

    def _test_flexible_server_vnet_mgmt_supplied_vnet(self, database_engine, resource_group):
        """Provision with a vnet name that exists (case 1) and one that does not (case 2)."""
        # flexible-server create
        if self.cli_ctx.local_context.is_on:
            self.cmd('local-context off')
        if database_engine == 'postgres':
            location = self.postgres_location
        elif database_engine == 'mysql':
            location = self.mysql_location
        vnet_name = 'clitestvnet2'
        address_prefix = '10.0.0.0/16'
        subnet_prefix_1 = '10.0.0.0/24'
        vnet_name_2 = 'clitestvnet3'
        # flexible-servers
        servers = ['testvnetserver3' + database_engine, 'testvnetserver4' + database_engine]
        # Case 1 : Provision a server with supplied Vname that exists.
        # create vnet and subnet. When vnet name is supplied, the subnet created will be given the default name.
        vnet_result = self.cmd('network vnet create -n {} -g {} -l {} --address-prefix {} --subnet-name {} --subnet-prefix {}'
                               .format(vnet_name, resource_group, location, address_prefix, 'Subnet' + servers[0], subnet_prefix_1)).get_output_in_json()
        # create server - Delegation should be added.
        self.cmd('{} flexible-server create -g {} -n {} --vnet {} -l {}'
                 .format(database_engine, resource_group, servers[0], vnet_result['newVNet']['name'], location))
        # Case 2 : Provision a server with a supplied Vname that does not exist.
        self.cmd('{} flexible-server create -g {} -n {} --vnet {} -l {}'
                 .format(database_engine, resource_group, servers[1], vnet_name_2, location))
        # flexible-server show to validate delegation is added to both the created server
        show_result_1 = self.cmd('{} flexible-server show -g {} -n {}'
                                 .format(database_engine, resource_group, servers[0])).get_output_in_json()
        show_result_2 = self.cmd('{} flexible-server show -g {} -n {}'
                                 .format(database_engine, resource_group, servers[1])).get_output_in_json()
        self.assertEqual(show_result_1['delegatedSubnetArguments']['subnetArmResourceId'],
                         '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(
                             self.get_subscription_id(), resource_group, vnet_name, 'Subnet' + servers[0]))
        self.assertEqual(show_result_2['delegatedSubnetArguments']['subnetArmResourceId'],
                         '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(
                             self.get_subscription_id(), resource_group, vnet_name_2, 'Subnet' + servers[1]))
        # delete all servers
        self.cmd('{} flexible-server delete -g {} -n {} --yes'.format(database_engine, resource_group, servers[0]),
                 checks=NoneCheck())
        self.cmd('{} flexible-server delete -g {} -n {} --yes'.format(database_engine, resource_group, servers[1]),
                 checks=NoneCheck())
        time.sleep(20 * 60)

    def _test_flexible_server_vnet_mgmt_supplied_vname_and_subnetname(self, database_engine, resource_group, virtual_network):
        """Provision with vnet+subnet names that exist (case 1) and a vnet name that does not (case 2)."""
        # flexible-server create
        if self.cli_ctx.local_context.is_on:
            self.cmd('local-context off')
        vnet_name_2 = 'clitestvnet6'
        if database_engine == 'postgres':
            location = self.postgres_location
        elif database_engine == 'mysql':
            location = self.mysql_location
        # flexible-servers
        servers = ['testvnetserver5' + database_engine, 'testvnetserver6' + database_engine]
        # Case 1 : Provision a server with supplied Vname and subnet name that exists.
        # create vnet and subnet. When vnet name is supplied, the subnet created will be given the default name.
        # NOTE(review): relies on {rg}/{vnet} being present in self.kwargs (set by a preparer/decorator) — confirm
        subnet_id = self.cmd('network vnet subnet show -g {rg} -n default --vnet-name {vnet}').get_output_in_json()[
            'id']
        # create server - Delegation should be added.
        self.cmd('{} flexible-server create -g {} -n {} --vnet {} -l {} --subnet default'
                 .format(database_engine, resource_group, servers[0], virtual_network, location))
        # Case 2 : Provision a server with a supplied Vname and subnet name that does not exist.
        self.cmd('{} flexible-server create -g {} -n {} -l {} --vnet {}'
                 .format(database_engine, resource_group, servers[1], location, vnet_name_2))
        # flexible-server show to validate delegation is added to both the created server
        show_result_1 = self.cmd('{} flexible-server show -g {} -n {}'
                                 .format(database_engine, resource_group, servers[0])).get_output_in_json()
        show_result_2 = self.cmd('{} flexible-server show -g {} -n {}'
                                 .format(database_engine, resource_group, servers[1])).get_output_in_json()
        self.assertEqual(show_result_1['delegatedSubnetArguments']['subnetArmResourceId'], subnet_id)
        self.assertEqual(show_result_2['delegatedSubnetArguments']['subnetArmResourceId'],
                         '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(
                             self.get_subscription_id(), resource_group, vnet_name_2, 'Subnet' + servers[1]))
        # delete all servers
        self.cmd('{} flexible-server delete -g {} -n {} --yes'.format(database_engine, resource_group, servers[0]),
                 checks=NoneCheck())
        self.cmd('{} flexible-server delete -g {} -n {} --yes'.format(database_engine, resource_group, servers[1]),
                 checks=NoneCheck())
        time.sleep(20 * 60)

    def _test_flexible_server_vnet_mgmt_supplied_subnet_id_in_different_rg(self, database_engine, resource_group_1, resource_group_2):
        """Provision with a subnet ID in another resource group, existing (case 1) and non-existing (case 2)."""
        # flexible-server create
        if self.cli_ctx.local_context.is_on:
            self.cmd('local-context off')
        if database_engine == 'postgres':
            location = self.postgres_location
        elif database_engine == 'mysql':
            location = self.mysql_location
        vnet_name = 'clitestvnet7'
        subnet_name = 'clitestsubnet7'
        address_prefix = '172.16.17.32/16'
        subnet_prefix_1 = '172.16.17.32/24'
        vnet_name_2 = 'clitestvnet8'
        subnet_name_2 = 'clitestsubnet8'
        # flexible-servers
        servers = ['testvnetserver7' + database_engine, 'testvnetserver8' + database_engine]
        # Case 1 : Provision a server with supplied subnetid that exists in a different RG
        # create vnet and subnet.
        vnet_result = self.cmd(
            'network vnet create -n {} -g {} -l {} --address-prefix {} --subnet-name {} --subnet-prefix {}'
            .format(vnet_name, resource_group_1, location, address_prefix, subnet_name,
                    subnet_prefix_1)).get_output_in_json()
        # create server - Delegation should be added.
        self.cmd('{} flexible-server create -g {} -n {} --subnet {} -l {}'
                 .format(database_engine, resource_group_2, servers[0], vnet_result['newVNet']['subnets'][0]['id'], location))
        # Case 2 : Provision a server with supplied subnetid that has a different RG in the ID but does not exist. The vnet and subnet is then created in the RG of the server
        self.cmd('{} flexible-server create -g {} -n {} -l {} --subnet {}'
                 .format(database_engine, resource_group_2, servers[1], location, '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(
                     self.get_subscription_id(), resource_group_1, vnet_name_2, subnet_name_2)))
        # flexible-server show to validate delegation is added to both the created server
        show_result_1 = self.cmd('{} flexible-server show -g {} -n {}'
                                 .format(database_engine, resource_group_2, servers[0])).get_output_in_json()
        show_result_2 = self.cmd('{} flexible-server show -g {} -n {}'
                                 .format(database_engine, resource_group_2, servers[1])).get_output_in_json()
        self.assertEqual(show_result_1['delegatedSubnetArguments']['subnetArmResourceId'],
                         '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(
                             self.get_subscription_id(), resource_group_1, vnet_name, subnet_name))
        self.assertEqual(show_result_2['delegatedSubnetArguments']['subnetArmResourceId'],
                         '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}'.format(
                             self.get_subscription_id(), resource_group_1, vnet_name_2, subnet_name_2))
        # delete all servers
        self.cmd('{} flexible-server delete -g {} -n {} --yes'.format(database_engine, resource_group_2, servers[0]),
                 checks=NoneCheck())
        self.cmd('{} flexible-server delete -g {} -n {} --yes'.format(database_engine, resource_group_2, servers[1]),
                 checks=NoneCheck())
        time.sleep(20 * 60)
class FlexibleServerPublicAccessMgmtScenarioTest(ScenarioTest):
def _test_flexible_server_public_access_mgmt(self, database_engine, resource_group):
# flexible-server create
if self.cli_ctx.local_context.is_on:
self.cmd('local-context off')
if database_engine == 'postgres':
| |
left=True, right=True)
axs[0,1].set_ylim(-.2,.2)
axs[0,1].set_xlim(1.5,22.5)
axs[0,1].set_title('60$\degree$N-40$\degree$N', fontsize=14)
###########################################################################################################################
axs[0,2].plot(lt_hours, ERA_5_cycles[-3] - np.nanmean(ERA_5_cycles[-3]), linewidth=4, color='black')
axs[0,2].plot(lt_hours, ERA_5_colocation_cycles[-3] - np.nanmean(ERA_5_colocation_cycles[-3]), linewidth=2, color='red')
axs[0,2].plot(lt_hours, ERA_5_colocations_no_removal[-3] - np.nanmean(ERA_5_colocations_no_removal[-3]), linewidth=2, color='blue', label='Colocations no mean removal')
axs[0,2].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,2].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,2].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,2].set_xticks(ticks=[3, 9, 15, 21])
axs[0,2].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,2].set_ylim(-.2,.2)
axs[0,2].set_xlim(1.5,22.5)
axs[0,2].set_title('40$\degree$N-20$\degree$N', fontsize=14)
###########################################################################################################################
axs[0,3].plot(lt_hours, ERA_5_cycles[-4] - np.nanmean(ERA_5_cycles[-4]), linewidth=4, color='black')
axs[0,3].plot(lt_hours, ERA_5_colocation_cycles[-4] - np.nanmean(ERA_5_colocation_cycles[-4]), linewidth=2, color='red')
axs[0,3].plot(lt_hours, ERA_5_colocations_no_removal[-4] - np.nanmean(ERA_5_colocations_no_removal[-4]), linewidth=2, color='blue', label='Colocations no mean removal')
axs[0,3].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,3].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,3].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,3].set_xticks(ticks=[3, 9, 15, 21])
axs[0,3].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,3].set_ylim(-.2,.2)
axs[0,3].set_xlim(1.5,22.5)
axs[0,3].set_title('20$\degree$N-10$\degree$N', fontsize=14)
###########################################################################################################################
axs[0,4].plot(lt_hours, ERA_5_cycles[-5] - np.nanmean(ERA_5_cycles[-5]), linewidth=4, color='black')
axs[0,4].plot(lt_hours, ERA_5_colocation_cycles[-5] - np.nanmean(ERA_5_colocation_cycles[-5]), linewidth=2, color='red')
axs[0,4].plot(lt_hours, ERA_5_colocations_no_removal[-5] - np.nanmean(ERA_5_colocations_no_removal[-5]), linewidth=2, color='blue', label='Colocations no mean removal')
axs[0,4].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,4].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,4].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,4].set_xticks(ticks=[3, 9, 15, 21])
axs[0,4].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,4].set_ylim(-.2,.2)
axs[0,4].set_xlim(1.5,22.5)
axs[0,4].set_title('10$\degree$N-Equator', fontsize=14)
###########################################################################################################################
axs[1,0].plot(lt_hours, ERA_5_cycles[-6] - np.nanmean(ERA_5_cycles[-6]), linewidth=4, color='black')
axs[1,0].plot(lt_hours, ERA_5_colocation_cycles[-6] - np.nanmean(ERA_5_colocation_cycles[-6]), linewidth=2, color='red')
axs[1,0].plot(lt_hours, ERA_5_colocations_no_removal[-6] - np.nanmean(ERA_5_colocations_no_removal[-6]), linewidth=2, color='blue', label='Colocations no mean removal')
axs[1,0].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[1,0].xaxis.set_minor_locator(MultipleLocator(3))
axs[1,0].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[1,0].set_xticks(ticks=[3, 9, 15, 21])
axs[1,0].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[1,0].set_ylim(-.2,.2)
axs[1,0].set_xlim(1.5,22.5)
axs[1,0].set_title('Equator-10$\degree$S', fontsize=14)
axs[1,0].set_ylabel('Diurnal Anomaly ($\degree$K)', fontsize=15)
axs[1,0].set_xlabel('Local Time Hour', fontsize=15)
###########################################################################################################################
axs[1,1].plot(lt_hours, ERA_5_cycles[-7] - np.nanmean(ERA_5_cycles[-7]), linewidth=4, color='black')
axs[1,1].plot(lt_hours, ERA_5_colocation_cycles[-7] - np.nanmean(ERA_5_colocation_cycles[-7]), linewidth=2, color='red')
axs[1,1].plot(lt_hours, ERA_5_colocations_no_removal[-7] - np.nanmean(ERA_5_colocations_no_removal[-7]), linewidth=2, color='blue', label='Colocations no mean removal')
axs[1,1].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[1,1].xaxis.set_minor_locator(MultipleLocator(3))
axs[1,1].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[1,1].set_xticks(ticks=[3, 9, 15, 21])
axs[1,1].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[1,1].set_ylim(-.2,.2)
axs[1,1].set_xlim(1.5,22.5)
axs[1,1].set_title('10$\degree$S-20$\degree$S', fontsize=14)
axs[1,1].set_xlabel('Local Time Hour', fontsize=15)
###########################################################################################################################
axs[1,2].plot(lt_hours, ERA_5_cycles[-8] - np.nanmean(ERA_5_cycles[-8]), linewidth=4, color='black')
axs[1,2].plot(lt_hours, ERA_5_colocation_cycles[-8] - np.nanmean(ERA_5_colocation_cycles[-8]), linewidth=2, color='red')
axs[1,2].plot(lt_hours, ERA_5_colocations_no_removal[-8] - np.nanmean(ERA_5_colocations_no_removal[-8]), linewidth=2, color='blue', label='Colocations no mean removal')
axs[1,2].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[1,2].xaxis.set_minor_locator(MultipleLocator(3))
axs[1,2].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[1,2].set_xticks(ticks=[3, 9, 15, 21])
axs[1,2].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[1,2].set_ylim(-.2,.2)
axs[1,2].set_xlim(1.5,22.5)
axs[1,2].set_title('20$\degree$S-40$\degree$S', fontsize=14)
axs[1,2].set_xlabel('Local Time Hour', fontsize=15)
###########################################################################################################################
axs[1,3].plot(lt_hours, ERA_5_cycles[-9] - np.nanmean(ERA_5_cycles[-9]), linewidth=4, color='black')
axs[1,3].plot(lt_hours, ERA_5_colocation_cycles[-9] - np.nanmean(ERA_5_colocation_cycles[-9]), linewidth=2, color='red')
axs[1,3].plot(lt_hours, ERA_5_colocations_no_removal[-9] - np.nanmean(ERA_5_colocations_no_removal[-9]), linewidth=2, color='blue', label='Colocations no mean removal')
axs[1,3].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[1,3].xaxis.set_minor_locator(MultipleLocator(3))
axs[1,3].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[1,3].set_xticks(ticks=[3, 9, 15, 21])
axs[1,3].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[1,3].set_ylim(-.2,.2)
axs[1,3].set_xlim(1.5,22.5)
axs[1,3].set_title('40$\degree$S-60$\degree$S', fontsize=14)
axs[1,3].set_xlabel('Local Time Hour', fontsize=15)
###########################################################################################################################
axs[1,4].plot(lt_hours, ERA_5_cycles[-10] - np.nanmean(ERA_5_cycles[-10]), linewidth=4, color='black')
axs[1,4].plot(lt_hours, ERA_5_colocation_cycles[-10] - np.nanmean(ERA_5_colocation_cycles[-10]), linewidth=2, color='red')
axs[1,4].plot(lt_hours, ERA_5_colocations_no_removal[-10] - np.nanmean(ERA_5_colocations_no_removal[-10]), linewidth=2, color='blue', label='Colocations no mean removal')
axs[1,4].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[1,4].xaxis.set_minor_locator(MultipleLocator(3))
axs[1,4].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[1,4].set_xticks(ticks=[3, 9, 15, 21])
axs[1,4].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[1,4].set_ylim(-.2,.2)
axs[1,4].set_xlim(1.5,22.5)
axs[1,4].set_title('60$\degree$S-90$\degree$S', fontsize=14)
axs[1,4].set_xlabel('Local Time Hour', fontsize=15)
return(fig)
def land_ocean_stats(land_ocean_chart):
    """Score each model's diurnal cycle against GPS-RO for 16 region/season subsets.

    Parameters
    ----------
    land_ocean_chart: list
        16 entries (NH/SH x land/ocean x season), each a 4-item sequence of
        diurnal cycles ordered [GPS-RO, ERA-5, WACCM6, CCM3].

    Returns
    -------
    list
        One entry per subset: [[era5_mse, era5_r], [waccm6_mse, waccm6_r],
        [ccm3_mse, ccm3_r]], where mse is the mean squared error against
        GPS-RO and r is the Pearson correlation. A report is also printed.
    """
    names = ['NH Land DJF', 'NH Ocean DJF', 'NH Land MAM', 'NH Ocean MAM',
             'NH Land JJA', 'NH Ocean JJA', 'NH Land SON', 'NH Ocean SON',
             'SH Land DJF','SH Ocean DJF', 'SH Land MAM', 'SH Ocean MAM',
             'SH Land JJA', 'SH Ocean JJA', 'SH Land SON','SH Ocean SON']
    stats_holder = []
    for idx, set_of_dcs in enumerate(land_ocean_chart):
        gpsro, era5, waccm6, ccm3 = (np.array(cycles) for cycles in set_of_dcs)

        def _fit(model):
            # mean squared error and Pearson correlation vs the GPS-RO cycle
            return np.nanmean((model - gpsro)**2), stats.pearsonr(model, gpsro)[0]

        era5_mse, era5_r = _fit(era5)
        waccm6_mse, waccm6_r = _fit(waccm6)
        ccm3_mse, ccm3_r = _fit(ccm3)
        print(f'{names[idx]}\n========= \nERA5: ',
              np.around(era5_mse, 5), np.around(era5_r, 5),
              '\nWACCM6: ', np.around(waccm6_mse, 5), np.around(waccm6_r, 5),
              '\nCCM3: ', np.around(ccm3_mse, 5), np.around(ccm3_r, 5), '\n')
        stats_holder.append([[era5_mse, era5_r],
                             [waccm6_mse, waccm6_r],
                             [ccm3_mse, ccm3_r]])
    return stats_holder
def land_ocean_plotting(nh_land_gpsro_djf, nh_land_era5_djf, nh_land_waccm6_djf, nh_land_ccm3_djf,
nh_ocean_gpsro_djf, nh_ocean_era5_djf, nh_ocean_waccm6_djf, nh_ocean_ccm3_djf,
nh_land_gpsro_mam, nh_land_era5_mam, nh_land_waccm6_mam, nh_land_ccm3_mam,
nh_ocean_gpsro_mam, nh_ocean_era5_mam, nh_ocean_waccm6_mam, nh_ocean_ccm3_mam,
nh_land_gpsro_jja, nh_land_era5_jja, nh_land_waccm6_jja, nh_land_ccm3_jja,
nh_ocean_gpsro_jja, nh_ocean_era5_jja, nh_ocean_waccm6_jja, nh_ocean_ccm3_jja,
nh_land_gpsro_son, nh_land_era5_son, nh_land_waccm6_son, nh_land_ccm3_son,
nh_ocean_gpsro_son, nh_ocean_era5_son, nh_ocean_waccm6_son, nh_ocean_ccm3_son,
sh_land_gpsro_djf, sh_land_era5_djf, sh_land_waccm6_djf, sh_land_ccm3_djf,
sh_ocean_gpsro_djf, sh_ocean_era5_djf, sh_ocean_waccm6_djf, sh_ocean_ccm3_djf,
sh_land_gpsro_mam, sh_land_era5_mam, sh_land_waccm6_mam, sh_land_ccm3_mam,
sh_ocean_gpsro_mam, sh_ocean_era5_mam, sh_ocean_waccm6_mam, sh_ocean_ccm3_mam,
sh_land_gpsro_jja, sh_land_era5_jja, sh_land_waccm6_jja, sh_land_ccm3_jja,
sh_ocean_gpsro_jja, sh_ocean_era5_jja, sh_ocean_waccm6_jja, sh_ocean_ccm3_jja,
sh_land_gpsro_son, sh_land_era5_son, sh_land_waccm6_son, sh_land_ccm3_son,
sh_ocean_gpsro_son, sh_ocean_era5_son, sh_ocean_waccm6_son, sh_ocean_ccm3_son
):
land_ocean_chart = [[nh_land_gpsro_djf, nh_land_era5_djf, nh_land_waccm6_djf, nh_land_ccm3_djf],
[nh_ocean_gpsro_djf, nh_ocean_era5_djf, nh_ocean_waccm6_djf, nh_ocean_ccm3_djf],
[nh_land_gpsro_mam, nh_land_era5_mam, nh_land_waccm6_mam, nh_land_ccm3_mam],
[nh_ocean_gpsro_mam, nh_ocean_era5_mam, nh_ocean_waccm6_mam, nh_ocean_ccm3_mam],
[nh_land_gpsro_jja, nh_land_era5_jja, nh_land_waccm6_jja, nh_land_ccm3_jja],
[nh_ocean_gpsro_jja, nh_ocean_era5_jja, nh_ocean_waccm6_jja, nh_ocean_ccm3_jja],
[nh_land_gpsro_son, nh_land_era5_son, nh_land_waccm6_son, nh_land_ccm3_son],
[nh_ocean_gpsro_son, nh_ocean_era5_son, nh_ocean_waccm6_son, nh_ocean_ccm3_son],
[sh_land_gpsro_djf, sh_land_era5_djf, sh_land_waccm6_djf, sh_land_ccm3_djf],
[sh_ocean_gpsro_djf, sh_ocean_era5_djf, sh_ocean_waccm6_djf, sh_ocean_ccm3_djf],
[sh_land_gpsro_mam, sh_land_era5_mam, sh_land_waccm6_mam, sh_land_ccm3_mam],
[sh_ocean_gpsro_mam, sh_ocean_era5_mam, sh_ocean_waccm6_mam, sh_ocean_ccm3_mam],
[sh_land_gpsro_jja, sh_land_era5_jja, sh_land_waccm6_jja, sh_land_ccm3_jja],
[sh_ocean_gpsro_jja, sh_ocean_era5_jja, sh_ocean_waccm6_jja, sh_ocean_ccm3_jja],
[sh_land_gpsro_son, sh_land_era5_son, sh_land_waccm6_son, sh_land_ccm3_son],
[sh_ocean_gpsro_son, sh_ocean_era5_son, sh_ocean_waccm6_son, sh_ocean_ccm3_son]]
stats_holder = land_ocean_stats(land_ocean_chart)
fig, axs = plt.subplots(2, 8, figsize=(18, 7))
lt_hours = np.linspace(1.5, 22.5, 8)
axs[0,0].plot(lt_hours,nh_land_gpsro_djf, label='GPS-RO', color='black', linewidth=3)
axs[0,0].plot(lt_hours,nh_land_era5_djf, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[0,0].plot(lt_hours,nh_land_waccm6_djf, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[0,0].plot(lt_hours,nh_land_ccm3_djf, label='CCM3', color='seagreen', marker='o', linewidth=2)
fig.text(0.12, 0.97, 'Land Ocean Contrast', fontsize=30, verticalalignment='top')
fig.legend(bbox_to_anchor=(.4,0.87,.4,0.2), loc="lower left", mode="expand",
borderaxespad=0, ncol=4, frameon=False, prop={'size': 15})
axs[0,0].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,0].yaxis.set_major_locator(MultipleLocator(0.15))
axs[0,0].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,0].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,0].set_xticks(ticks=[3, 9, 15, 21])
axs[0,0].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,0].set_ylim(-.3, .3)
axs[0,0].set_xlim(1.5,22.5)
axs[0,0].set_ylabel('Equator-20$\degree$N \nDiurnal Amonamly ($\degree$K)', fontsize=14)
axs[0,0].set_title('DJF Land', fontsize=14)
###############################################################################################
axs[0,1].plot(lt_hours,nh_ocean_gpsro_djf, label='GPS-RO', color='black', linewidth=3)
axs[0,1].plot(lt_hours,nh_ocean_era5_djf, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[0,1].plot(lt_hours,nh_ocean_waccm6_djf, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[0,1].plot(lt_hours,nh_ocean_ccm3_djf, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[0,1].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,1].yaxis.set_major_locator(MultipleLocator(0.15))
axs[0,1].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,1].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,1].set_xticks(ticks=[3, 9, 15, 21])
axs[0,1].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,1].set_ylim(-.3, .3)
axs[0,1].set_xlim(1.5,22.5)
axs[0,1].set_yticklabels([])
axs[0,1].set_title('DJF Ocean', fontsize=14)
##############################################################################################
axs[0,2].plot(lt_hours,nh_land_gpsro_mam, label='GPS-RO', color='black', linewidth=3)
axs[0,2].plot(lt_hours,nh_land_era5_mam, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[0,2].plot(lt_hours,nh_land_waccm6_mam, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[0,2].plot(lt_hours,nh_land_ccm3_mam, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[0,2].yaxis.set_major_locator(MultipleLocator(0.15))
axs[0,2].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,2].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,2].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,2].set_xticks(ticks=[3, 9, 15, 21])
axs[0,2].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,2].set_ylim(-.3, .3)
axs[0,2].set_xlim(1.5,22.5)
axs[0,2].set_yticklabels([])
axs[0,2].set_title('MAM Land', fontsize=14)
###############################################################################################
axs[0,3].plot(lt_hours,nh_ocean_gpsro_mam, label='GPS-RO', color='black', linewidth=3)
axs[0,3].plot(lt_hours,nh_ocean_era5_mam, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[0,3].plot(lt_hours,nh_ocean_waccm6_mam, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[0,3].plot(lt_hours,nh_ocean_ccm3_mam, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[0,3].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,3].yaxis.set_major_locator(MultipleLocator(0.15))
axs[0,3].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,3].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,3].set_xticks(ticks=[3, 9, 15, 21])
axs[0,3].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,3].set_ylim(-.3, .3)
axs[0,3].set_xlim(1.5,22.5)
axs[0,3].set_yticklabels([])
axs[0,3].set_title('MAM Ocean', fontsize=14)
###############################################################################################
axs[0,4].plot(lt_hours,nh_land_gpsro_jja, label='GPS-RO', color='black', linewidth=3)
axs[0,4].plot(lt_hours,nh_land_era5_jja, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[0,4].plot(lt_hours,nh_land_waccm6_jja, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[0,4].plot(lt_hours,nh_land_ccm3_jja, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[0,4].yaxis.set_major_locator(MultipleLocator(0.15))
axs[0,4].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,4].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,4].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,4].set_xticks(ticks=[3, 9, 15, 21])
axs[0,4].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,4].set_ylim(-.3, .3)
axs[0,4].set_xlim(1.5,22.5)
axs[0,4].set_yticklabels([])
#axs[0,4].set_xticklabels([])
axs[0,4].set_title('JJA land', fontsize=14)
###############################################################################################
axs[0,5].plot(lt_hours,nh_ocean_gpsro_jja, label='GPS-RO', color='black', linewidth=3)
axs[0,5].plot(lt_hours,nh_ocean_era5_jja, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[0,5].plot(lt_hours,nh_ocean_waccm6_jja, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[0,5].plot(lt_hours,nh_ocean_ccm3_jja, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[0,5].yaxis.set_major_locator(MultipleLocator(0.15))
axs[0,5].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,5].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,5].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,5].set_xticks(ticks=[3, 9, 15, 21])
axs[0,5].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,5].set_ylim(-.3, .3)
axs[0,5].set_xlim(1.5,22.5)
axs[0,5].set_yticklabels([])
#axs[0,5].set_xticklabels([])
axs[0,5].set_title('JJA Ocean', fontsize=14)
###############################################################################################
axs[0,6].plot(lt_hours,nh_land_gpsro_son, label='GPS-RO', color='black', linewidth=3)
axs[0,6].plot(lt_hours,nh_land_era5_son, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[0,6].plot(lt_hours,nh_land_waccm6_son, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[0,6].plot(lt_hours,nh_land_ccm3_son, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[0,6].yaxis.set_major_locator(MultipleLocator(0.15))
axs[0,6].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,6].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,6].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,6].set_xticks(ticks=[3, 9, 15, 21])
axs[0,6].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,6].set_ylim(-.3, .3)
axs[0,6].set_xlim(1.5,22.5)
axs[0,6].set_yticklabels([])
axs[0,6].set_title('SON Land', fontsize=14)
###############################################################################################
axs[0,7].plot(lt_hours,nh_ocean_gpsro_son, label='GPS-RO', color='black', linewidth=3)
axs[0,7].plot(lt_hours,nh_ocean_era5_son, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[0,7].plot(lt_hours,nh_ocean_waccm6_son, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[0,7].plot(lt_hours,nh_ocean_ccm3_son, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[0,7].yaxis.set_major_locator(MultipleLocator(0.15))
axs[0,7].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[0,7].xaxis.set_minor_locator(MultipleLocator(3))
axs[0,7].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[0,7].set_xticks(ticks=[3, 9, 15, 21])
axs[0,7].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[0,7].set_ylim(-.3, .3)
axs[0,7].set_xlim(1.5,22.5)
axs[0,7].set_yticklabels([])
axs[0,7].set_title('SON Ocean', fontsize=14)
###############################################################################################
axs[1,0].plot(lt_hours,sh_land_gpsro_djf, label='GPS-RO', color='black', linewidth=3)
axs[1,0].plot(lt_hours,sh_land_era5_djf, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[1,0].plot(lt_hours,sh_land_waccm6_djf, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[1,0].plot(lt_hours,sh_land_ccm3_djf, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[1,0].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[1,0].xaxis.set_minor_locator(MultipleLocator(3))
axs[1,0].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[1,0].yaxis.set_major_locator(MultipleLocator(0.15))
axs[1,0].set_xticks(ticks=[3, 9, 15, 21])
axs[1,0].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[1,0].set_ylim(-.3, .3)
axs[1,0].set_xlim(1.5,22.5)
axs[1,0].set_ylabel('20$\degree$S-Equator \nDiurnal Amonamly ($\degree$K)', fontsize=14)
axs[1,0].set_xlabel('LTH', fontsize=15)
###############################################################################################
axs[1,1].plot(lt_hours,sh_ocean_gpsro_djf, label='GPS-RO', color='black', linewidth=3)
axs[1,1].plot(lt_hours,sh_ocean_era5_djf, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[1,1].plot(lt_hours,sh_ocean_waccm6_djf, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[1,1].plot(lt_hours,sh_ocean_ccm3_djf, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[1,1].yaxis.set_major_locator(MultipleLocator(0.15))
axs[1,1].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[1,1].xaxis.set_minor_locator(MultipleLocator(3))
axs[1,1].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, top=True,
left=True, right=True, labelsize=15)
axs[1,1].set_xticks(ticks=[3, 9, 15, 21])
axs[1,1].tick_params(which='minor', direction='in', length=4, width=1.5, left=True, right=True)
axs[1,1].set_ylim(-.3, .3)
axs[1,1].set_xlim(1.5,22.5)
axs[1,1].set_yticklabels([])
axs[1,1].set_xlabel('LTH', fontsize=15)
###############################################################################################
axs[1,2].plot(lt_hours,sh_land_gpsro_mam, label='GPS-RO', color='black', linewidth=3)
axs[1,2].plot(lt_hours,sh_land_era5_mam, label='ERA-5', color='firebrick', marker='s', linewidth=2)
axs[1,2].plot(lt_hours,sh_land_waccm6_mam, label='WACCM6', color='dodgerblue', marker='^', linewidth=2)
axs[1,2].plot(lt_hours,sh_land_ccm3_mam, label='CCM3', color='seagreen', marker='o', linewidth=2)
axs[1,2].yaxis.set_minor_locator(MultipleLocator(0.05))
axs[1,2].xaxis.set_minor_locator(MultipleLocator(3))
axs[1,2].yaxis.set_major_locator(MultipleLocator(0.15))
axs[1,2].tick_params(direction='in', length=8, width=2, colors='black', bottom=True, | |
<gh_stars>1-10
#!/usr/bin/env python
"""
Reference implementation of ExtrudedPolygon
"""
import numpy
from icecube.phys_services import Surface
from icecube.dataclasses import make_pair
def convex_hull(points):
    """Computes the convex hull of a set of 2D points.

    Input: an iterable sequence of (x, y) pairs representing the points.
    Output: the vertices of the convex hull in counter-clockwise order,
    starting from the vertex with the lexicographically smallest coordinates,
    as a numpy array (or a plain list for degenerate, <= 1 point inputs).

    Implements Andrew's monotone chain algorithm. O(n log n) complexity.
    Lifted from http://code.icecube.wisc.edu/svn/sandbox/ckopper/eventinjector/python/util/__init__.py
    """
    # deduplicate and sort lexicographically (tuple comparison)
    unique_pts = sorted(set((p[0], p[1]) for p in points))

    # degenerate case: no points, or one point possibly repeated
    if len(unique_pts) <= 1:
        return unique_pts

    def turn(o, a, b):
        # z-component of cross(OA, OB): positive for a counter-clockwise
        # turn, negative for clockwise, zero when collinear
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

    def monotone_chain(seq):
        # build one half-hull, dropping points that would create a
        # clockwise (or collinear) turn
        chain = []
        for pt in seq:
            while len(chain) >= 2 and turn(chain[-2], chain[-1], pt) <= 0:
                chain.pop()
            chain.append(pt)
        # last point is repeated at the start of the other half-hull
        return chain[:-1]

    lower = monotone_chain(unique_pts)
    upper = monotone_chain(reversed(unique_pts))
    return numpy.array(lower + upper)
def hull_to_normals(points):
    """Unit normals for each edge of the closed polygon defined by *points*.

    Each normal is the edge direction rotated by -90 degrees in the x-y
    plane, padded with a zero z-component; returned as an (n, 3) array.
    """
    # close the ring by repeating the first vertex at the end
    ring = numpy.append(points, [points[0]], axis=0)
    edges = ring[1:] - ring[:-1]
    norms = numpy.sqrt(edges[:, 0]**2 + edges[:, 1]**2)
    return numpy.column_stack((edges[:, 1] / norms,
                               -edges[:, 0] / norms,
                               numpy.zeros(norms.shape)))
def hull_to_lengths(points):
    """Edge lengths of the closed polygon defined by *points*, as a 1-D array."""
    # close the ring by repeating the first vertex at the end
    ring = numpy.append(points, [points[0]], axis=0)
    edges = ring[1:] - ring[:-1]
    return numpy.sqrt((edges**2).sum(axis=1))
def signed_area(points):
    """Returns the signed area of a given simple (i.e. non-intersecting) polygon.

    Positive if points are sorted counter-clockwise (shoelace formula).
    """
    # close the ring by repeating the first vertex at the end
    ring = numpy.append(points, [points[0]], axis=0)
    x = ring[:, 0]
    y = ring[:, 1]
    return numpy.sum(x[:-1]*y[1:] - x[1:]*y[:-1]) / 2.
class ExtrudedPolygon(Surface):
"""
A convex polygon in the x-y plane, extruded in the z direction
"""
def __init__(self, xy_points, z_range):
"""
:param xy_points: a list of x-y coordinate pairs. The convex hull of
these points will form the edge of the surface in the
x-y plane
:param z_range: a pair giving the lower and upper boundary of the
surface in z.
"""
super(ExtrudedPolygon, self).__init__()
assert len(xy_points) >= 3, "Need at least 3 points to form a closed polygon"
hull = convex_hull(xy_points)
# hull points, in counterclockwise order
self._x = hull
# next neighbor in the hull
self._nx = numpy.roll(hull, -1, axis=0)
# vector connecting each pair of points in the hull
self._dx = self._nx - self._x
self._z_range = z_range
self.length = z_range[1] - z_range[0]
self._side_lengths = hull_to_lengths(hull)
side_normals = hull_to_normals(hull)
side_areas = self._side_lengths*self.length
cap_area = [signed_area(hull)]*2
cap_normals = numpy.array([[0., 0., 1.], [0., 0., -1.]])
self._areas = numpy.concatenate((side_areas, cap_area))
self._normals = numpy.concatenate((side_normals, cap_normals))
assert self._areas.size == self._normals.shape[0]
def expand(self, padding):
"""
Expand the x-y footprint by moving each edge out by a distance *padding*.
"""
# A convex polygon can be offset by moving each vertex parallel to the
# edges by a distance that is inversely proportional to the sine of the
# counterclockwise angle between the edges that meet at each vertex.
# This breaks down for edges that are [anti]parallel or, but neither
# case should occur for maximally simplified polygons.
# normalized vector connecting each vertex to the next one
d = self._dx/self._side_lengths[:,None]
# and the one connecting the previous vertex
prev_d = numpy.roll(d, 1, axis=0)
# sine of the inner angle of each vertex
det = prev_d[:,0]*d[:,1] - prev_d[:,1]*d[:,0]
assert (det != 0.).all(), "Edges can't be [anti]parallel"
points = self._x + (padding/det[:,None])*(prev_d - d)
z_range = [self._z_range[0]-padding, self._z_range[1]+padding]
return type(self)(points, z_range)
@classmethod
def from_I3Geometry(cls, i3geo, padding=0):
from collections import defaultdict
strings = defaultdict(list)
for omkey, omgeo in i3geo.omgeo:
if omgeo.omtype != omgeo.IceTop:
strings[omkey.string].append(list(omgeo.position))
mean_xy = [numpy.mean(positions, axis=0)[0:2] for positions in strings.values()]
zmax = max(max(p[2] for p in positions) for positions in strings.values())
zmin = min(min(p[2] for p in positions) for positions in strings.values())
self = cls(mean_xy, [zmin, zmax])
if padding != 0:
return self.expand(padding)
else:
return self
@classmethod
def from_file(cls, fname, padding=0):
from icecube import icetray, dataio, dataclasses
f = dataio.I3File(fname)
fr = f.pop_frame(icetray.I3Frame.Geometry)
f.close()
return cls.from_I3Geometry(fr['I3Geometry'], padding)
def area(self, dir):
"""
Return projected area in the given direction
:param dir: an I3Direction
"""
# inner product with component normals
inner = numpy.dot(self._normals, numpy.asarray((dir.x, dir.y, dir.z)))
# only surfaces that face the requested direction count towards the area
mask = inner < 0
return -(inner*self._areas*mask).sum(axis=0)
def partial_area(self, dir):
inner = numpy.dot(self._normals, numpy.asarray((dir.x, dir.y, dir.z)))
# only surfaces that face the requested direction count towards the area
mask = inner < 0
return -(inner*self._areas*mask)
def azimuth_averaged_area(self, cos_theta):
"""
Return projected area at the given zenith angle, averaged over all
azimuth angles.
:param cos_theta: cosine of the zenith angle
"""
cap = self._areas[-1]
sides = self._side_lengths.sum()*self.length/numpy.pi
return cap*abs(cos_theta) + sides*numpy.sqrt(1-cos_theta**2)
@staticmethod
def _integrate_area(a, b, cap, sides):
return numpy.pi*(cap*(b**2-a**2) + sides*(numpy.arccos(a) - numpy.arccos(b) - numpy.sqrt(1-a**2)*a + numpy.sqrt(1-b**2)*b))
def entendue(self, cosMin=-1., cosMax=1.):
"""
Integrate A * d\Omega over the given range of zenith angles
:param cosMin: cosine of the maximum zenith angle
:param cosMax: cosine of the minimum zenith angle
:returns: a product of area and solid angle. Divide by
2*pi*(cosMax-cosMin) to obtain the average projected area in
this zenith angle range
"""
# First, integrate over all azimuthal angles, exploiting the fact that
# the projected area of a plane, averaged over a 2\pi rotation that
# passes through the normal, is
# A*\int_0^\pi \Theta(\sin\alpha)\sin\alpha d\alpha / 2\pi = A/\pi
sides = self._side_lengths.sum()*self.length/numpy.pi
# The projected area of the cap is independent of azimuth
cap = self._areas[-1]
if (cosMin >= 0 and cosMax >= 0):
return self._integrate_area(cosMin, cosMax, cap, sides)
elif (cosMin < 0 and cosMax <= 0):
return self._integrate_area(-cosMax, -cosMin, cap, sides)
elif (cosMin < 0 and cosMax > 0):
return self._integrate_area(0, -cosMin, cap, sides) \
+ self._integrate_area(0, cosMax, cap, sides)
else:
raise ValueError("Can't deal with zenith range [%.1e, %.1e]" % (cosMin, cosMax))
return numpy.nan
    def _point_in_hull(self, point):
        """
        Test whether point is inside the 2D hull by ray casting

        Shoots a horizontal ray from the point toward +x and counts edge
        crossings; an odd count means the point is inside (even-odd rule).

        :param point: indexable with at least (x, y); extra components ignored
        :returns: numpy bool, True if inside the footprint polygon
        """
        x, y = point[0:2]
        # Find segments whose y range spans the current point
        mask = ((self._x[:,1] > y)&(self._nx[:,1] <= y))|((self._x[:,1] <= y)&(self._nx[:,1] > y))
        # x coordinate where each edge's line crosses the ray's height y.
        # NOTE(review): this is evaluated for *all* edges before masking, so a
        # horizontal edge (self._dx[:,1] == 0) divides by zero here; the bogus
        # value is masked out below, but numpy may emit a RuntimeWarning.
        xc = self._x[:,0] + (y-self._x[:,1])*self._dx[:,0]/self._dx[:,1]
        # Count crossings to the right of the current point
        crossings = (x < xc[mask]).sum()
        inside = (crossings % 2) == 1
        return inside
def _distance_to_hull(self, point, vec):
"""
Calculate the most extreme displacements from x,y along dx,dy to points
on the 2D hull
"""
# calculate the distance along the ray to each line segment
x, y = (self._x - point[:2]).T
dx, dy = self._dx.T
dirx, diry = vec[0:2]
assert dirx+diry != 0, "Direction vector may not have zero length"
# proportional distance along edge to intersection point
# NB: if diry/dirx == dy/dx, the ray is parallel to the line segment
nonparallel = diry*dx != dirx*dy
alpha = numpy.where(nonparallel, (dirx*y - diry*x)/(diry*dx - dirx*dy), numpy.nan)
# check whether the intersection is actually in the segment
mask = (alpha >= 0)&(alpha < 1)
# distance along ray to | |
"""This module is meant to contain the Solscan class"""
from messari.dataloader import DataLoader
from messari.utils import validate_input
from string import Template
from typing import Union, List, Dict
from .helpers import unpack_dataframe_of_dicts
import pandas as pd
#### Block endpoints
BLOCK_LAST_URL = 'https://public-api.solscan.io/block/last'
BLOCK_TRANSACTIONS_URL = 'https://public-api.solscan.io/block/transactions'
BLOCK_BLOCK_URL = Template('https://public-api.solscan.io/block/$block')
#### Transaction endpoints
TRANSACTION_LAST_URL = 'https://public-api.solscan.io/transaction/last'
TRANSACTION_SIGNATURE_URL = Template('https://public-api.solscan.io/transaction/$signature')
#### Account endpoints
ACCOUNT_TOKENS_URL = 'https://public-api.solscan.io/account/tokens'
ACCOUNT_TRANSACTIONS_URL = 'https://public-api.solscan.io/account/transactions'
ACCOUNT_STAKE_URL = 'https://public-api.solscan.io/account/stakeAccounts'
ACCOUNT_SPL_TXNS_URL = 'https://public-api.solscan.io/account/splTransfers'
ACCOUNT_SOL_TXNS_URL = 'https://public-api.solscan.io/account/solTransfers'
ACCOUNT_EXPORT_TXNS_URL = 'https://public-api.solscan.io/account/exportTransactions'
ACCOUNT_ACCOUNT_URL = Template('https://public-api.solscan.io/account/$account')
#### Token endpoints
TOKEN_HOLDERS_URL = 'https://public-api.solscan.io/token/holders'
TOKEN_META_URL = 'https://public-api.solscan.io/token/meta'
TOKEN_LIST_URL = 'https://public-api.solscan.io/token/list'
#### Market endpoints
MARKET_INFO_URL = Template('https://public-api.solscan.io/market/token/$tokenAddress')
#### Chain Information
CHAIN_INFO_URL = 'https://public-api.solscan.io/chaininfo'
# TODO: make this clean / not hardcoded -- look into how this works.
# Browser-like User-Agent, presumably so the public API does not reject the
# requests as coming from a bot -- TODO confirm what the API actually requires.
HEADERS={'accept': 'application/json', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'} # pylint: disable=line-too-long
class Solscan(DataLoader):
"""This class is a wrapper around the Solscan API
"""
def __init__(self):
DataLoader.__init__(self, api_dict=None, taxonomy_dict=None)
#################
# Block endpoints
def get_last_blocks(self, num_blocks=1) -> pd.DataFrame:
"""returns info for last blocks (default is 1, limit is 20)
Parameters
----------
num_blocks: int (default is 1)
number of blocks to return, max is 20
Returns
-------
DataFrame
DataFrame with block information
"""
# Max value is 20 or API bricks
limit=num_blocks if num_blocks < 21 else 20
params = {'limit': limit}
last_blocks = self.get_response(BLOCK_LAST_URL,
params=params,
headers=HEADERS)
last_blocks_df = pd.DataFrame(last_blocks)
last_blocks_df.set_index('currentSlot', inplace=True)
last_blocks_df = unpack_dataframe_of_dicts(last_blocks_df)
# TODO, extract data from 'result'
return last_blocks_df
def get_block_last_transactions(self, blocks_in: Union[str, List],
offset=0, num_transactions=10) -> pd.DataFrame:
"""get last num_transactions of given block numbers
Parameters
----------
blocks_in: str, List
single block in or list of blocks in
num_transactions: int (default is 10)
number of transactions to return
Returns
-------
DataFrame
dataframe with transaction details
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
params = {'block': block,
'offset': offset,
'limit': num_transactions}
txns = self.get_response(BLOCK_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
txns_df = pd.DataFrame(txns)
df_list.append(txns_df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_block(self, blocks_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given block(s)
Parameters
----------
blocks_in: str, List
single block in or list of blocks in
Returns
-------
DataFrame
DataFrame with block information
"""
blocks = validate_input(blocks_in)
df_list = []
for block in blocks:
endpoint_url = BLOCK_BLOCK_URL.substitute(block=block)
response = self.get_response(endpoint_url,
headers=HEADERS)
df = pd.DataFrame(response)
df.drop('currentSlot', axis=1)
df_list.append(df)
fin_df = pd.concat(df_list, keys=blocks, axis=1)
fin_df = fin_df.xs('result', axis=1, level=1)
return fin_df
#######################
# Transaction endpoints
def get_last_transactions(self, num_transactions=10) -> pd.DataFrame:
"""Return last num_transactions transactions
Parameters
----------
num_transactions: int (default is 10)
number of transactions to return, limit is 20
Returns
-------
DataFrame
dataframe with transaction details
"""
# 20
limit=num_transactions if num_transactions < 21 else 20
params = {'limit': limit}
response = self.get_response(TRANSACTION_LAST_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
fin_df = unpack_dataframe_of_dicts(df)
return fin_df
def get_transaction(self, signatures_in: Union[str, List]) -> pd.DataFrame:
"""Return information of given transaction signature(s)
Parameters
----------
signatures_in: str, List
single signature in or list of signatures in
Returns
-------
DataFrame
DataFrame with transaction details
"""
signatures = validate_input(signatures_in)
series_list = []
for signature in signatures:
endpoint_url = TRANSACTION_SIGNATURE_URL.substitute(signature=signature)
response = self.get_response(endpoint_url,
headers=HEADERS)
#print(response)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=signatures, axis=1)
return fin_df
###################
# Account endpoints
def get_account_tokens(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Return token balances of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with token balances of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_TOKENS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_transactions(self, accounts_in: Union[str,List]) -> pd.DataFrame:
"""Return DataFrame of transactions of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with transactions of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_TRANSACTIONS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_stake(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Get staking accounts of the given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with staking accounts of given accounts
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account}
response = self.get_response(ACCOUNT_STAKE_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
return fin_df
def get_account_spl_transactions(self, accounts_in: Union[str, List],
from_time: int=None,
to_time: int=None,
offset: int=0,
limit: int=10) -> pd.DataFrame:
"""Return SPL transfers for given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with SPL transfers for given account(s)
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account,
'toTime': to_time,
'fromTime': from_time,
'offset': offset,
'limit': limit}
response = self.get_response(ACCOUNT_SPL_TXNS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df.drop('total', axis=1)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_account_sol_transactions(self, accounts_in: Union[str, List],
from_time: int=None,
to_time: int=None,
offset: int=0,
limit: int=10) -> pd.DataFrame:
"""Return SOL transfers for given account(s)
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with SOL transfers for given account(s)
"""
accounts = validate_input(accounts_in)
df_list=[]
for account in accounts:
params={'account':account,
'toTime': to_time,
'fromTime': from_time,
'offset': offset,
'limit': limit}
response = self.get_response(ACCOUNT_SOL_TXNS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df_list.append(df)
fin_df = pd.concat(df_list, keys=accounts, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_account_export_transactions(self, accounts_in: Union[str, List],
type_in: str, from_time: int, to_time: int) -> List[str]:
"""Export transactions to CSV style string
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
type_in: str
what type of transactions to export:
- tokenchange
- soltransfer
- all
from_time: int
unix time to start transaction history
to_time: int
unix time to end transaction history
Returns
-------
List[str]
list of strings to make csv document
"""
accounts = validate_input(accounts_in)
csv_list=[]
for account in accounts:
params={'account': account,
'type': type_in,
'fromTime': from_time,
'toTime': to_time}
# NOTE: need to do this to not return json
response = self.session.get(ACCOUNT_EXPORT_TXNS_URL, params=params, headers=HEADERS)
csv = response.content.decode('utf-8')
csv_list.append(csv)
return csv_list
def get_account(self, accounts_in: Union[str, List]) -> pd.DataFrame:
"""Return overall account(s) information, including program account,
NFT metadata information
Parameters
----------
accounts_in: str, List
single account in or list of accounts in
Returns
-------
DataFrame
DataFrame with account info
"""
accounts = validate_input(accounts_in)
series_list = []
for account in accounts:
endpoint_url = ACCOUNT_ACCOUNT_URL.substitute(account=account)
response = self.get_response(endpoint_url,
headers=HEADERS)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=accounts, axis=1)
return fin_df
#################
# Token endpoints
def get_token_holders(self, tokens_in: Union[str, List],
limit: int=10, offset: int=0) -> pd.DataFrame:
"""Return top token holders for given token(s)
Parameters
----------
tokens_in: str, List
single token address in or list of token addresses, used to filter results
offset: int
Offset starting at 0. Increment value to offset paginated results
limit: int
Limit of assets to return. Default is 10
Returns
-------
DataFrame
DataFrame with top token holders
"""
tokens = validate_input(tokens_in)
df_list = []
for token in tokens:
params={'tokenAddress': token,
'limit': limit,
'offset': offset}
response = self.get_response(TOKEN_HOLDERS_URL,
params=params,
headers=HEADERS)
df = pd.DataFrame(response)
df.drop('total', axis=1)
df_list.append(df)
fin_df = pd.concat(df_list, keys=tokens, axis=1)
fin_df = unpack_dataframe_of_dicts(fin_df)
return fin_df
def get_token_meta(self, tokens_in: Union[str, List]) -> pd.DataFrame:
"""Return metadata of given token(s)
Parameters
----------
tokens_in: str, List
single token address in or list of token addresses, used to filter results
Returns
-------
DataFrame
DataFrame with token metadata
"""
tokens = validate_input(tokens_in)
series_list = []
for token in tokens:
params={'tokenAddress': token}
response = self.get_response(TOKEN_META_URL,
params=params,
headers=HEADERS)
series = pd.Series(response)
series_list.append(series)
fin_df = pd.concat(series_list, keys=tokens, axis=1)
return fin_df
def get_token_list(self, sort_by: str='market_cap', ascending: bool=True,
limit: int=10, offset: int=0) -> pd.DataFrame:
"""Returns DataFrame of tokens
Parameters
----------
sort_by: str (default 'market_cap')
how to | |
import sys, os
import numpy as np
import nibabel as nib
from scipy import ndimage as ndi
from skimage.measure import label
from scipy.signal import convolve
from numpy.linalg import norm
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import networkx as nx
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
from scipy import interpolate
import scipy.spatial as sp
import logging
import traceback
import timeit
import time
import math
from ast import literal_eval as make_tuple
import platform
import matplotlib as mpl
import matplotlib.pyplot as plt
import glob
import pickle
from scipy.stats import ttest_ind
import copy
from operator import itemgetter
from os.path import join
import inspect
import myFunctions as mf
class Vessel(QtGui.QWidget):
    """
    A custom GUI framework based on QWidget.

    Subclasses override the three hook methods below; the constructor calls
    them in a fixed order: UI first, then state, then signal wiring.
    """
    def __init__(self, app=None):
        super(Vessel, self).__init__()
        self.app = app
        # Hook order matters: widgets must exist before state/handlers use them.
        self.init_ui()
        self.init_variable()
        self.qt_connections()

    def init_ui(self):
        """Hook: build widgets and layouts. Default does nothing."""

    def init_variable(self):
        """Hook: initialize runtime state. Default does nothing."""

    def qt_connections(self):
        """Hook: connect Qt signals to slots. Default does nothing."""
class PlotObject(gl.GLViewWidget):
    """
    Override GLViewWidget with enhanced behavior

    This widget is based on the framework I found [here](https://groups.google.com/d/msg/pyqtgraph/mZiiLO8hS70/740KYx-vAAAJ),
    which enables a user to select a point in 3D using pyqtgraph.

    Right-clicking casts a ray from the camera through the clicked pixel and
    tests it against the skeleton-node scatter item (see `mPosition`).
    """
    # Shared QApplication handle. NOTE(review): `self.App = app` in __init__
    # creates an INSTANCE attribute and leaves this class attribute as None,
    # so each instance may create its own QApplication — confirm intended.
    App = None
    def __init__(self, app=None):
        if self.App is None:
            if app is not None:
                self.App = app
            else:
                # No application supplied: create one so the widget can run standalone.
                self.App = QtGui.QApplication([])
        super(PlotObject,self).__init__()
        # Index into self.items where the skeleton-node scatter item lives.
        self.skeletonNodesStartIndex = 0
        self.segmentStartIndex = 1
        # Translation applied to plotted points; used when ray-testing clicks.
        self.offset = np.array([0, 0, 0])
        self.customInit()
    def customInit(self):
        """Hook for subclasses; called at the end of __init__."""
        pass
    def mousePressEvent(self, ev):
        """
        Store the position of the mouse press for later use.
        """
        super(PlotObject, self).mousePressEvent(ev)
        self._downpos = self.mousePos
    def mouseReleaseEvent(self, ev):
        """
        Allow for single click to move and right click for context menu.
        Also emits a sigUpdate to refresh listeners.
        """
        super(PlotObject, self).mouseReleaseEvent(ev)
        # Only treat as a click if the mouse did not move between press and release.
        if self._downpos == ev.pos():
            x = ev.pos().x()
            y = ev.pos().y()
            if ev.button() == 2 :
                # Right click: run the point-picking ray test.
                try:
                    self.mPosition()
                except Exception:
                    print(traceback.format_exc())
            elif ev.button() == 1:
                # Left click: recenter coordinates (panning is currently disabled).
                x = x - self.width() / 2
                y = y - self.height() / 2
                #self.pan(-x, -y, 0, relative=True)
        # Reset drag state regardless of which button was used.
        self._prev_zoom_pos = None
        self._prev_pan_pos = None
    def mPosition(self):
        #This function is called by a mouse event
        ## Get mouse coordinates saved when the mouse is clicked( incase dragging)
        mx = self._downpos.x()
        my = self._downpos.y()
        self.Candidates = [] #Initiate a list for storing indices of picked points
        #Get height and width of 2D Viewport space
        view_w = self.width()
        view_h = self.height()
        #Convert pixel values to normalized coordinates
        x = 2.0 * mx / view_w - 1.0
        y = 1.0 - (2.0 * my / view_h)
        # Convert projection and view matrix to np types and inverse them
        PMi = self.projectionMatrix().inverted()[0]
        VMi = self.viewMatrix().inverted()[0]
        # Clip-space ray pointing into the screen (z = -1).
        ray_clip = QtGui.QVector4D(x, y, -1.0, 1.0) # get transpose for matrix multiplication
        ray_eye = PMi * ray_clip
        ray_eye.setZ(-1)
        ray_eye.setW(0)
        #Convert to world coordinates
        ray_world = VMi * ray_eye
        ray_world = QtGui.QVector3D(ray_world.x(), ray_world.y(), ray_world.z()) # get transpose for matrix multiplication
        ray_world.normalize()
        O = np.matrix(self.cameraPosition()) # camera position should be starting point of the ray
        ray_world = np.matrix([ray_world.x(), ray_world.y(), ray_world.z()])
        # print('O={}, ray_world={}'.format(O, ray_world))
        skeletonNodesStartIndex = self.skeletonNodesStartIndex
        skeletonNodesPlotItem = self.items[skeletonNodesStartIndex]
        skeletonColor = skeletonNodesPlotItem.color
        # Ray/sphere intersection test per point: a point is hit when the
        # quadratic discriminant b^2 - c is non-negative (sphere radius 0.4).
        for i, C in enumerate(skeletonNodesPlotItem.pos): # Iterate over all points
            offset = self.offset
            CView = C + offset
            OC = O - CView
            b = np.inner(ray_world, OC)
            b = b.item(0)
            c = np.inner(OC, OC)
            c = c.item(0) - (0.4)**2 #np.square((self.Sizes[i]))
            bsqr = np.square(b)
            if (bsqr - c) >= 0: # means intersection
                self.currentVoxelIndex = i
                self.currentVoxel = tuple(C)
                # Subclass decides what to do with the pick; True stops the scan.
                stop = self.pointSelectionLogic()
                if stop:
                    break
    def pointSelectionLogic(self):
        """Hook for subclasses: handle a picked point; return True to stop scanning."""
        pass
    def addExtraInfo(self, **kwds):
        # Attach arbitrary keyword data as attributes on this widget.
        for arg in kwds.keys():
            setattr(self, arg, kwds[arg])
class myVessel(Vessel):
"""
Implements the `Vessel` base class.
To create one compartment partition (say LMCA), do the following:
1. Click `LMCA` button.
2. Click `Label Initial Voxels` button, then right click on any voxel(s) that you would like to use and those
selected will become blue and larger. If you mistakenly choose one voxel, unselect it by right clicking on it
again, or click the `Clear Chosen Voxels` button to clear all of the chosen voxels in this step.
3. (Optional) Click `Label Boundary Voxels` button, then right click on any voxel(s) that serve as the boundary and
those selected will become red and larger.
4. Click `Random Walk BFS` button, and the traversed voxels and segments will become yellow and assigned to that
compartment.
5. (Optional) Repeat Step 1-4 for other compartments if necessary.
6. Click `Save Chosen Voxels` button to save the partition information files.
"""
    def init_ui(self):
        """Build the main window: a 3D plot widget on the left (4/5 width)
        and a vertical column of control buttons on the right (1/5 width).

        NOTE(review): several attribute names spell "Voxels" as "Vosels"
        (e.g. loadChosenVoselsButton); kept as-is since other code may
        reference them.
        """
        self.setWindowTitle('Vessel')
        hbox = QtGui.QHBoxLayout()
        self.setLayout(hbox)
        app = self.app
        # The 3D view gets stretch factor 4 vs. 1 for the button column.
        self.plotwidget = myPlotObject(app=app)
        hbox.addWidget(self.plotwidget, 4)
        vbox = QtGui.QVBoxLayout()
        # Exclusive group: pick whether clicks label initial or boundary voxels.
        self.chosenVoxelsButtonGroup = QtGui.QButtonGroup()
        self.labelInitialVoxelsButton = QtGui.QPushButton("Label Initial Voxels")
        self.labelInitialVoxelsButton.setCheckable(True)
        self.labelBoundaryVoxelsButton = QtGui.QPushButton("Label Boundary Voxels")
        self.labelBoundaryVoxelsButton.setCheckable(True)
        self.chosenVoxelsButtonGroup.addButton(self.labelInitialVoxelsButton, 1)
        self.chosenVoxelsButtonGroup.addButton(self.labelBoundaryVoxelsButton, 2)
        # Exclusive group: the five artery-compartment names; group IDs 11-15
        # map to names via self.buttonIDMap (see init_variable).
        self.partitionNamesButtonGroup = QtGui.QButtonGroup()
        self.LMCAButton = QtGui.QPushButton("LMCA")
        self.LMCAButton.setCheckable(True)
        self.RMCAButton = QtGui.QPushButton("RMCA")
        self.RMCAButton.setCheckable(True)
        self.ACAButton = QtGui.QPushButton("ACA")
        self.ACAButton.setCheckable(True)
        self.LPCAButton = QtGui.QPushButton("LPCA")
        self.LPCAButton.setCheckable(True)
        self.RPCAButton = QtGui.QPushButton("RPCA")
        self.RPCAButton.setCheckable(True)
        self.partitionNamesButtonGroup.addButton(self.LMCAButton, 11)
        self.partitionNamesButtonGroup.addButton(self.RMCAButton, 12)
        self.partitionNamesButtonGroup.addButton(self.ACAButton, 13)
        self.partitionNamesButtonGroup.addButton(self.LPCAButton, 14)
        self.partitionNamesButtonGroup.addButton(self.RPCAButton, 15)
        # Action buttons (not checkable): load/save/clear state, run
        # partitioning, and inspect fluid-simulation results.
        self.loadChosenVoselsButton = QtGui.QPushButton("Load Chosen Voxels")
        self.saveChosenVoselsButton = QtGui.QPushButton("Save Chosen Voxels")
        self.clearChosenVoselsButton = QtGui.QPushButton("Clear Chosen Voxels")
        self.showPartitionsButton = QtGui.QPushButton("Show Partitions")
        self.randomWalkBFSButton = QtGui.QPushButton("Random Walk BFS")
        self.loadSegmentNodeInfoDictButton = QtGui.QPushButton("Load segment/node InfoDict")
        self.showNodeButton = QtGui.QPushButton("Show Node")
        self.performFluidSimulationButton = QtGui.QPushButton("Fluid Simulation")
        self.loadFluidResultButton = QtGui.QPushButton("Load Fluid Result")
        self.showPressureResultButton = QtGui.QPushButton("Show Pressure Result")
        self.showVelocityResultButton = QtGui.QPushButton("Show Velocity Result")
        self.showSegmentButton = QtGui.QPushButton("Show Segment")
        # Free-text segment index input used by the Show Segment action.
        self.segmentIndexBox = QtGui.QLineEdit()
        # Add order below defines the top-to-bottom order of the button column.
        vbox.addWidget(self.labelInitialVoxelsButton, 1)
        vbox.addWidget(self.labelBoundaryVoxelsButton, 1)
        vbox.addWidget(self.LMCAButton, 1)
        vbox.addWidget(self.RMCAButton, 1)
        vbox.addWidget(self.ACAButton, 1)
        vbox.addWidget(self.LPCAButton, 1)
        vbox.addWidget(self.RPCAButton, 1)
        vbox.addWidget(self.loadChosenVoselsButton, 1)
        vbox.addWidget(self.saveChosenVoselsButton, 1)
        vbox.addWidget(self.clearChosenVoselsButton, 1)
        vbox.addWidget(self.randomWalkBFSButton, 1)
        vbox.addWidget(self.showPartitionsButton, 1)
        vbox.addWidget(self.loadSegmentNodeInfoDictButton, 1)
        vbox.addWidget(self.showNodeButton, 1)
        vbox.addWidget(self.performFluidSimulationButton, 1)
        vbox.addWidget(self.loadFluidResultButton, 1)
        vbox.addWidget(self.showPressureResultButton, 1)
        vbox.addWidget(self.showVelocityResultButton, 1)
        vbox.addWidget(self.showSegmentButton, 1)
        vbox.addWidget(self.segmentIndexBox, 1)
        vbox.addStretch(1)
        hbox.addLayout(vbox, 1)
        self.setGeometry(30, 30, 1500, 900)
        self.show()
def init_variable(self):
self.labelInitialVoxelsButtonClicked = False
self.labelBoundaryVoxelsButtonClicked = False
self.LMCAButtonClicked = False
self.RMCAButtonClicked = False
self.ACAButtonClicked = False
self.LPCAButtonClicked = False
self.RPCAButtonClicked = False
self.chosenVoxels = {}
self.directory = ''
self.buttonIDMap = {-1: 'unused', 1: 'initialVoxels', 2: 'boundaryVoxels', 11: 'LMCA', 12: 'RMCA', 13: 'ACA', 14: 'LPCA', 15: 'RPCA'}
def qt_connections(self):
self.loadChosenVoselsButton.clicked.connect(self.onLoadChosenVoxelsButtonClicked)
self.saveChosenVoselsButton.clicked.connect(self.onSaveChosenVoxelsButtonClicked)
self.clearChosenVoselsButton.clicked.connect(self.onClearChosenVoxelsButtonClicked)
self.showPartitionsButton.clicked.connect(self.onShowPartitionsButtonClicked)
self.randomWalkBFSButton.clicked.connect(self.onRandomWalkBFSButtonClicked)
self.loadSegmentNodeInfoDictButton.clicked.connect(self.onLoadSegmentNodeInfoDictButtonClicked)
self.showNodeButton.clicked.connect(self.onShowNodeButtonClicked)
self.performFluidSimulationButton.clicked.connect(self.onPerformFluidSimulationButtonClicked)
self.loadFluidResultButton.clicked.connect(self.onLoadFluidResultButtonClicked)
self.showPressureResultButton.clicked.connect(self.onShowPressureResultButtonClicked)
self.showVelocityResultButton.clicked.connect(self.onShowVelocityResultButtonClicked)
self.showSegmentButton.clicked.connect(self.onShowSegmentButtonClicked)
def addExtraInfo(self, **kwds):
for arg in kwds.keys():
setattr(self, arg, kwds[arg])
def onLoadChosenVoxelsButtonClicked(self):
directory = self.directory
chosenVoxelsForPartitionFileName = 'chosenVoxelsForPartition.pkl'
chosenVoxelsForPartitionFilePath = os.path.join(directory, chosenVoxelsForPartitionFileName)
if os.path.exists(chosenVoxelsForPartitionFilePath):
with open(chosenVoxelsForPartitionFilePath, 'rb') as f:
self.plotwidget.chosenVoxels = pickle.load(f)
print('{} loaded from {}.'.format(chosenVoxelsForPartitionFileName, chosenVoxelsForPartitionFilePath))
else:
print('{} does not exist at {}.'.format(chosenVoxelsForPartitionFileName, chosenVoxelsForPartitionFilePath))
def onSaveChosenVoxelsButtonClicked(self):
directory = self.directory
chosenVoxels = self.plotwidget.chosenVoxels
partitionInfo = self.plotwidget.partitionInfo
G = self.plotwidget.G
chosenVoxelsForPartitionFileName = 'chosenVoxelsForPartition.pkl'
chosenVoxelsForPartitionFilePath = os.path.join(directory, chosenVoxelsForPartitionFileName)
with open(chosenVoxelsForPartitionFilePath, 'wb') as f:
pickle.dump(chosenVoxels, f, 2)
print('{} saved to {}.'.format(chosenVoxelsForPartitionFileName, chosenVoxelsForPartitionFilePath))
partitionInfoFileName = 'partitionInfo.pkl'
partitionInfoFilePath = os.path.join(directory, partitionInfoFileName)
with open(partitionInfoFilePath, 'wb') as f:
pickle.dump(partitionInfo, f, 2)
print('{} saved to {}.'.format(partitionInfoFileName, partitionInfoFilePath))
graphCleanedWithAdvancedInfoFileName = 'graphRepresentationCleanedWithAdvancedInfo.graphml'
graphCleanedWithAdvancedInfoFilePath = os.path.join(directory, graphCleanedWithAdvancedInfoFileName)
nx.write_graphml(G, graphCleanedWithAdvancedInfoFilePath)
print('{} saved to {}.'.format(graphCleanedWithAdvancedInfoFileName, graphCleanedWithAdvancedInfoFilePath))
    def onClearChosenVoxelsButtonClicked(self):
        """Clear all currently chosen voxels in the plot widget."""
        self.plotwidget.clearChosenList()
    def onRandomWalkBFSButtonClicked(self, chosenPartitionName=None):
        """Run the random-walk BFS from the chosen initial voxels, stopping at
        boundary voxels, and record the visited voxels/segments under the
        selected partition name.

        If chosenPartitionName is not given (the normal button-click path),
        the currently checked partition button determines the name.
        """
        # print(chosenPartitionName)
        if chosenPartitionName is None or chosenPartitionName not in self.plotwidget.partitionNames:
            chosenPartitionName = self.buttonIDMap[self.partitionNamesButtonGroup.checkedId()]
        initialVoxels = self.plotwidget.chosenVoxels[chosenPartitionName]['initialVoxels']
        boundaryVoxels = self.plotwidget.chosenVoxels[chosenPartitionName]['boundaryVoxels']
        # voxelSegmentIndexArray = self.plotwidget.voxelSegmentIndexArray
        G = self.plotwidget.G
        G, visitedVoxels, segmentIndexList = mf.randomWalkBFS(G, initialVoxels, boundaryVoxels)
        # Tag every visited voxel node with the partition it now belongs to.
        tempDict = {voxel: chosenPartitionName for voxel in visitedVoxels}
        nx.set_node_attributes(G, tempDict, 'partitionName')
        self.plotwidget.G = G
        if len(visitedVoxels) != 0:
            self.plotwidget.partitionInfo[chosenPartitionName] = {}
            self.plotwidget.partitionInfo[chosenPartitionName]['visitedVoxels'] = visitedVoxels
            segmentIndexList = list(np.unique(segmentIndexList)) # in case there is duplicate...
            self.plotwidget.partitionInfo[chosenPartitionName]['segmentIndexList'] = segmentIndexList
            # add segment level (based on depthLevel of voxels) to each segment
            # NOTE(review): G.node and G.add_path are networkx 1.x/2.x-era APIs
            # removed in newer releases — this file pins an older networkx.
            for segmentIndex in segmentIndexList:
                segment = self.plotwidget.segmentList[segmentIndex]
                depthLevelList = [self.plotwidget.G.node[segment[ii]]['depthLevel'] for ii in range(len(segment)) if 'depthLevel' in self.plotwidget.G.node[segment[ii]]]
                self.plotwidget.G.add_path(segment, segmentLevel=int(np.min(depthLevelList)), partitionName=chosenPartitionName)
            self.plotwidget.showVoxelsVisited(visitedVoxels)
        else:
            print('No voxels visited')
def onShowPartitionsButtonClicked(self):
colorPools = [pg.glColor('r'), pg.glColor('g'), pg.glColor('b'), pg.glColor('y'), pg.glColor('c')]
colorPools = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [1, 1, 0, 1], [1, 0, 1, 1], [0, 1, 1, 1]]
ii = 0
for chosenPartitionName in self.plotwidget.partitionInfo.keys():
visitedVoxels = self.plotwidget.partitionInfo[chosenPartitionName]['visitedVoxels']
color = colorPools[ii]
self.plotwidget.showVoxelsVisited(visitedVoxels, color=color)
ii += 1
def onLoadSegmentNodeInfoDictButtonClicked(self):
directory = self.directory
nodeInfoDictFileName = 'nodeInfoDict.pkl'
nodeInfoDictFilePath = os.path.join(directory, nodeInfoDictFileName)
with open(nodeInfoDictFilePath, 'rb') as f:
self.plotwidget.nodeInfoDict = pickle.load(f)
print('{} loaded from {}.'.format(nodeInfoDictFileName, directory))
segmentInfoDictFileName = 'segmentInfoDict.pkl'
segmentInfoDictFilePath = os.path.join(directory, segmentInfoDictFileName)
with open(segmentInfoDictFilePath, 'rb') as f:
self.plotwidget.segmentInfoDict = pickle.load(f)
print('{} loaded from {}.'.format(segmentInfoDictFileName, directory))
    def onShowNodeButtonClicked(self):
        """Delegate to the plot widget to highlight/show the selected node."""
        self.plotwidget.showNode()
    def onPerformFluidSimulationButtonClicked(self):
        """Delegate to the plot widget to run the fluid simulation."""
        self.plotwidget.performFluidSimulation()
def onLoadFluidResultButtonClicked(self):
directory = self.directory
filename = 'fluidResult.pkl'
if os.path.exists(directory + filename):
with open(directory + filename, 'rb') as f:
self.plotwidget.fluidResult | |
from pymatgen.core.structure import Molecule
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.analyzer import PointGroupAnalyzer
from pymatgen.symmetry.analyzer import generate_full_symmops
import numpy as np
from numpy.linalg import eigh
from numpy.linalg import det
from copy import deepcopy
from math import fabs
from random import random
from random import choice as choose
from crystallography.operations import *
from crystallography.crystal import get_wyckoff_symmetry
try:
    from ase.build import molecule as ase_molecule

    def get_ase_mol(molname):
        """Convert an ASE molecule (looked up by name) to a pymatgen Molecule."""
        ase_mol = ase_molecule(molname)
        pos = ase_mol.get_positions()
        symbols = ase_mol.get_chemical_symbols()
        return Molecule(symbols, pos)
# FIX: the original bare `except:` swallowed every exception (including
# KeyboardInterrupt and unrelated errors raised while importing ase).
# Only a missing ASE installation should trigger the fallback message.
except ImportError:
    print("Could not import ASE. Install ASE for additional molecular support:")
    print("https://wiki.fysik.dtu.dk/ase/install.html")
# 3x3 rotation matrices for the two trivial point operations:
# the identity and the inversion through the origin.
identity = np.array([[1,0,0],[0,1,0],[0,0,1]])
inversion = np.array([[-1,0,0],[0,-1,0],[0,0,-1]])
def get_inertia_tensor(mol):
    '''
    Calculate the symmetric inertia tensor for a Molecule object.

    The molecule is first centered; each site contributes with its atomic
    number used as the mass. Returns a 3x3 numpy array.
    '''
    centered = mol.get_centered_molecule()
    tensor = np.zeros((3, 3))
    for idx in range(len(centered)):
        x, y, z = centered.cart_coords[idx]
        mass = centered[idx].specie.number
        # Diagonal: moments about the x, y and z axes.
        tensor[0, 0] += mass * (y * y + z * z)
        tensor[1, 1] += mass * (x * x + z * z)
        tensor[2, 2] += mass * (x * x + y * y)
        # Off-diagonal products of inertia (negative by convention).
        tensor[0, 1] -= mass * x * y
        tensor[0, 2] -= mass * x * z
        tensor[1, 2] -= mass * y * z
    # Mirror the upper triangle to keep the tensor symmetric.
    tensor[1, 0] = tensor[0, 1]
    tensor[2, 0] = tensor[0, 2]
    tensor[2, 1] = tensor[1, 2]
    return tensor
def get_moment_of_inertia(mol, axis, scale=1.0):
    '''
    Calculate the moment of inertia of a molecule about an axis.

    scale: changes the length scale of the molecule. Used to compare symmetry
    axes for equivalence. Defaults to 1
    '''
    # Normalize the axis so only its direction matters.
    unit_axis = np.array(axis) / np.linalg.norm(axis)
    # Each atom contributes the squared (scaled) perpendicular distance to
    # the axis, i.e. |axis x r|^2. Atomic masses are deliberately ignored,
    # matching the original implementation.
    return sum((scale * np.linalg.norm(np.cross(unit_axis, atom.coords))) ** 2
               for atom in mol)
def reoriented_molecule(mol, nested=False):
    '''
    Return a molecule reoriented so that its principle axes
    are aligned with the identity matrix, and the matrix P
    used to rotate the molecule into this orientation

    Returns (new_mol, P) on success, or False if the orientation does not
    converge within 100 iterations.
    '''
    def reorient(mol):
        # One reorientation step: diagonalize the inertia tensor and rotate
        # the molecule into its eigenbasis.
        new_mol = mol.get_centered_molecule()
        A = get_inertia_tensor(new_mol)
        #Store the eigenvectors of the inertia tensor
        P = np.transpose(eigh(A)[1])
        if det(P) < 0:
            # Flip one eigenvector so P is a proper rotation (det +1).
            P[0] *= -1
        #reorient the molecule
        P = SymmOp.from_rotation_and_translation(P,[0,0,0])
        new_mol.apply_operation(P)
        #Our molecule should never be inverted during reorientation.
        if det(P.rotation_matrix) < 0:
            print("Error: inverted reorientation applied.")
        return new_mol, P
    #If needed, recursively apply reorientation (due to numerical errors)
    iterations = 1
    max_iterations = 100
    new_mol, P = reorient(mol)
    while iterations < max_iterations:
        is_okay = True
        # Check that the inertia tensor's eigenvector matrix is (numerically)
        # the identity; if not, reorient again and accumulate the rotation.
        # NOTE(review): eigh(...)[1][i][j] indexes rows of the eigenvector
        # matrix; eigenvectors are its columns — confirm intended indexing.
        for i in range(3):
            for j in range(3):
                x = eigh(get_inertia_tensor(new_mol))[1][i][j]
                okay = True
                if i == j:
                    #Check that diagonal elements are 0 or 1
                    if (not np.isclose(x, 0)) and (not np.isclose(x, 1)):
                        okay = False
                else:
                    #Check that off-diagonal elements are 0
                    if (not np.isclose(x, 0)):
                        okay = False
        if okay is False:
            #If matrix is not diagonal with 1's and/or 0's, reorient
            new_mol, Q = reorient(new_mol)
            # Compose the new step with the accumulated rotation.
            P = Q*P
            iterations += 1
        elif okay is True:
            break
    if iterations == max_iterations:
        print("Error: Could not reorient molecule after "+str(max_iterations)+" attempts")
        print(new_mol)
        print(get_inertia_tensor(new_mol))
        return False
    return new_mol, P
def get_symmetry(mol, already_oriented=False):
    '''
    Return a list of SymmOps for a molecule's point symmetry

    already_oriented: whether or not the principle axes of mol are already reoriented

    Linear molecules ('*' in the Schoenflies symbol) get special handling:
    their infinite rotation axis is approximated by a finite 12-fold rotation
    plus perpendicular mirrors, then expanded to a full group.
    '''
    pga = PointGroupAnalyzer(mol)
    #Handle linear molecules
    if '*' in pga.sch_symbol:
        if already_oriented == False:
            #Reorient the molecule
            oriented_mol, P = reoriented_molecule(mol)
            pga = PointGroupAnalyzer(oriented_mol)
        pg = pga.get_pointgroup()
        symm_m = []
        for op in pg:
            symm_m.append(op)
        #Add 12-fold and reflections in place of ininitesimal rotation
        # Try each Cartesian axis; only the molecule's actual rotation axis
        # will validate, and the loop breaks after handling it.
        for axis in [[1,0,0],[0,1,0],[0,0,1]]:
            # pi and aa2matrix come from the star import of
            # crystallography.operations at the top of the file.
            op = SymmOp.from_rotation_and_translation(aa2matrix(axis, pi/6), [0,0,0])
            if pga.is_valid_op(op):
                symm_m.append(op)
                #Any molecule with infinitesimal symmetry is linear;
                #Thus, it possess mirror symmetry for any axis perpendicular
                #To the rotational axis. pymatgen does not add this symmetry
                #for all linear molecules - for example, hydrogen
                if axis == [1,0,0]:
                    symm_m.append(SymmOp.from_xyz_string('x,-y,z'))
                    symm_m.append(SymmOp.from_xyz_string('x,y,-z'))
                    r = SymmOp.from_xyz_string('-x,y,-z')
                    '''if pga.is_valid_op(r):
                        symm_m.append(r)'''
                elif axis == [0,1,0]:
                    symm_m.append(SymmOp.from_xyz_string('-x,y,z'))
                    symm_m.append(SymmOp.from_xyz_string('x,y,-z'))
                    r = SymmOp.from_xyz_string('-x,-y,z')
                    '''if pga.is_valid_op(r):
                        symm_m.append(r)'''
                elif axis == [0,0,1]:
                    symm_m.append(SymmOp.from_xyz_string('-x,y,z'))
                    symm_m.append(SymmOp.from_xyz_string('x,-y,z'))
                    r = SymmOp.from_xyz_string('x,-y,-z')
                    '''if pga.is_valid_op(r):
                        symm_m.append(r)'''
                #Generate a full list of SymmOps for the molecule's pointgroup
                symm_m = generate_full_symmops(symm_m, 1e-3)
                break
        #Reorient the SymmOps into mol's original frame
        if not already_oriented:
            new = []
            for op in symm_m:
                # Conjugate each op by P to express it in the original frame.
                new.append(P.inverse*op*P)
            return new
        elif already_oriented:
            return symm_m
    #Handle nonlinear molecules
    else:
        pg = pga.get_pointgroup()
        symm_m = []
        for op in pg:
            symm_m.append(op)
        return symm_m
def orientation_in_wyckoff_position(mol, sg, index, randomize=True,
exact_orientation=False, already_oriented=False, allow_inversion=False):
'''
Tests if a molecule meets the symmetry requirements of a Wyckoff position.
If it does, return the rotation matrix needed. Otherwise, returns False.
args:
mol: pymatgen.core.structure.Molecule object. Orientation is arbitrary
sg: the spacegroup to check
index: the index of the Wyckoff position within the sg to check
randomize: whether or not to apply a random rotation consistent with
the symmetry requirements.
exact_orientation: whether to only check compatibility for the provided
orientation of the molecule. Used within general case for checking.
If True, this function only returns True or False
already_oriented: whether or not to reorient the principle axes
when calling get_symmetry. Setting to True can remove redundancy,
but is not necessary.
'''
#Obtain the Wyckoff symmetry
symm_w = get_wyckoff_symmetry(sg, molecular=True)[index][0]
pga = PointGroupAnalyzer(mol)
#Check exact orientation
if exact_orientation is True:
mo = deepcopy(mol)
valid = True
for op in symm_w:
if not pga.is_valid_op(op):
valid = False
if valid is True:
return True
elif valid is False:
return False
#Obtain molecular symmetry, exact_orientation==False
symm_m = get_symmetry(mol, already_oriented=already_oriented)
#Store OperationAnalyzer objects for each molecular SymmOp
chiral = True
opa_m = []
for op_m in symm_m:
opa = OperationAnalyzer(op_m)
opa_m.append(opa)
if opa.type == "rotoinversion":
chiral = False
elif opa.type == "inversion":
chiral = False
#If molecule is chiral and allow_inversion is False,
#check if WP breaks symmetry
if chiral is True:
if allow_inversion is False:
gen_pos = get_wyckoffs(sg)[0]
for op in gen_pos:
if np.linalg.det(op.rotation_matrix) < 0:
print("Warning: cannot place chiral molecule in spagegroup #"+str(sg))
return False
#Store OperationAnalyzer objects for each Wyckoff symmetry SymmOp
opa_w = []
for op_w in symm_w:
opa_w.append(OperationAnalyzer(op_w))
#Check for constraints from the Wyckoff symmetry...
#If we find ANY two constraints (SymmOps with unique axes), the molecule's
#point group MUST contain SymmOps which can be aligned to these particular
#constraints. However, there may be multiple compatible orientations of the
#molecule consistent with these constraints
constraint1 = None
constraint2 = None
for i, op_w in enumerate(symm_w):
if opa_w[i].axis is not None:
constraint1 = opa_w[i]
for j, op_w in enumerate(symm_w):
if opa_w[j].axis is not None:
dot = np.dot(opa_w[i].axis, opa_w[j].axis)
if (not np.isclose(dot, 1, rtol=.01)) and (not np.isclose(dot, -1, rtol=.01)):
constraint2 = opa_w[j]
break
break
#Indirectly store the angle between the constraint axes
if (constraint1 is not None
and constraint2 is not None):
dot_w = np.dot(constraint1.axis, constraint2.axis)
#Generate 1st consistent molecular constraints
constraints_m = []
if constraint1 is not None:
for i, opa1 in enumerate(opa_m):
if opa1.is_conjugate(constraint1):
constraints_m.append([opa1, []])
#Generate 2nd constraint in opposite direction
extra = deepcopy(opa1)
extra.axis = [opa1.axis[0]*-1, opa1.axis[1]*-1, opa1.axis[2]*-1]
constraints_m.append([extra, []])
#Remove redundancy for the first constraints
list_i = list(range(len(constraints_m)))
list_j = list(range(len(constraints_m)))
copy = deepcopy(constraints_m)
for i , c1 in enumerate(copy):
if i in list_i:
for j , c2 in enumerate(copy):
if i > j and j in list_j and j in list_i:
#Check if axes are colinear
if np.isclose(np.dot(c1[0].axis, c2[0].axis), 1, rtol=.01):
list_i.remove(j)
list_j.remove(j)
else:# np.isclose(np.dot(c1[0].axis, c2[0].axis), -1, rtol=.01):
cond1 = False
cond2 = False
for opa in opa_m:
if opa.type == "rotation":
op = opa.op
if np.isclose(np.dot(op.operate(c1[0].axis), c2[0].axis), 1, rtol=.05):
cond1 = True
break
if cond1 is True: # or cond2 is True:
list_i.remove(j)
list_j.remove(j)
c_m = deepcopy(constraints_m)
constraints_m = []
for i in list_i:
constraints_m.append(c_m[i])
#Generate 2nd consistent molecular constraints
valid = list(range(len(constraints_m)))
if constraint2 is not None:
for i, c in enumerate(constraints_m):
opa1 = c[0]
for j, opa2 in enumerate(opa_m):
if opa2.is_conjugate(constraint2):
dot_m = np.dot(opa1.axis, opa2.axis)
#Ensure that the angles are equal
if abs(dot_m - dot_w) < .02 or abs(dot_m + dot_w) < .02:
constraints_m[i][1].append(opa2)
#Generate 2nd constraint in opposite direction
extra = deepcopy(opa2)
extra.axis = [opa2.axis[0]*-1, opa2.axis[1]*-1, opa2.axis[2]*-1]
constraints_m[i][1].append(extra)
#If no consistent constraints are found, remove first constraint
| |
is False."
.format(self.circuit_sample_rate))
return self.circuit_sample_rate
def set_nickname(self, nickname):
nickname = nickname.strip()
# Do some basic validation of the nickname
if len(nickname) > 19:
logging.warning("Bad nickname length %d: %s", len(nickname), nickname)
return False
if not all(c in (string.ascii_letters + string.digits) for c in nickname):
logging.warning("Bad nickname characters: %s", nickname)
return False
# Are we replacing an existing nickname?
if self.nickname is not None:
if self.nickname != nickname:
logging.warning("Replacing nickname %s with %s", self.nickname, nickname)
else:
logging.debug("Duplicate nickname received %s", nickname)
self.nickname = nickname
return True
    def get_nickname(self):
        '''
        Return the stored nickname, or None if none has been set.
        '''
        return self.nickname
@staticmethod
def validate_tor_port(tor_port, description):
'''
Validate a single Tor ORPort or DirPort entry, using description as
the port type in any log messages.
tor_port is an ORPort or DirPort config line.
Some can be IPv6 *Ports, which have an IPv6 address and a port.
Others include options, such as NoListen.
'''
# Do some basic validation of the port
# There isn't much we can do here, because port lines vary so much
if len(tor_port) < 1 or len(tor_port) > 200:
logging.warning("Bad %s length %d: %s",
description, len(tor_port), tor_port)
return False
if not all(c in string.printable for c in tor_port):
logging.warning("Bad %s characters: %s", description, tor_port)
return False
return True
@staticmethod
def add_tor_port(tor_port, tor_port_list, description):
'''
Add a single Tor ORPort or DirPort entry to tor_port_list, using
description as the port type in any log messages.
'''
if tor_port in tor_port_list:
logging.info("Ignoring duplicate %s: %s", description, tor_port)
else:
tor_port_list.append(tor_port)
tor_port_list.sort()
@staticmethod
def get_tor_port(tor_port_list, description):
'''
Create a list of all known *Ports on the relay from tor_port_list,
using description as the port type in any log messages.
'''
if len(tor_port_list) == 0:
return None
else:
return ", ".join(tor_port_list)
def set_orport(self, orport):
'''
Add an ORPort to the set of ORPorts on the relay.
A relay can have multiple ORPorts.
See validate_tor_port for how ORPorts are validated.
'''
orport = orport.strip()
if not Aggregator.validate_tor_port(orport, 'ORPort'):
return False
Aggregator.add_tor_port(orport, self.orport_list, 'ORPort')
return True
def get_orport(self):
    '''
    Get a comma-separated list of ORPorts on the relay.
    Returns None if no ORPorts are known.
    '''
    return Aggregator.get_tor_port(self.orport_list, 'ORPort')
def set_dirport(self, dirport):
'''
Like set_orport, but for DirPorts.
'''
dirport = dirport.strip()
if not Aggregator.validate_tor_port(dirport, 'DirPort'):
return False
Aggregator.add_tor_port(dirport, self.dirport_list, 'DirPort')
return True
def get_dirport(self):
    '''
    Like get_orport, but for DirPorts.
    Returns None if no DirPorts are known.
    '''
    return Aggregator.get_tor_port(self.dirport_list, 'DirPort')
@staticmethod
def validate_version(version, old_version, description):
'''
Perform basic validation and processing on version.
Uses description for logging changes to old_version.
Returns a whitespace-stripped version string, or None if the version
is invalid.
'''
if "version" in version:
_, _, version = version.partition("version")
version = version.strip()
# Do some basic validation of the version
# This is hard, because versions can be almost anything
if not len(version) > 0:
logging.warning("Bad %s length %d: %s",
description, len(version), version)
return None
# This means unicode printables, there's no ASCII equivalent
if not all(c in string.printable for c in version):
logging.warning("Bad %s characters: %s",
description, version)
return None
# Are we replacing an existing version?
if old_version is not None:
if old_version != version:
if old_version.lower() in version.lower():
# we just added a git tag to the version
# this happens because GETINFO version has the tag, but
# PROTOCOLINFO does not
logging_level = logging.debug
elif version.lower() in old_version.lower():
# did someone just restart tor?
# this should fix itself during the protocol exchange
logging_level = logging.info
else:
# did someone just restart tor with a new version?
logging_level = logging.warning
logging_level("Replacing %s %s with %s",
description, old_version, version)
else:
logging.debug("Duplicate %s received %s",
description, version)
return version
def set_tor_version(self, version):
validated_version = Aggregator.validate_version(version, self.tor_version,
'Tor version')
if validated_version is not None:
self.tor_version = validated_version
logging.info("Tor version is {}".format(self.tor_version))
return True
else:
return False
def get_tor_version(self):
    # Validated Tor version string, or None if never successfully set.
    return self.tor_version
def set_tor_privcount_version(self, tor_privcount_version):
validated_version = Aggregator.validate_version(
tor_privcount_version,
self.tor_privcount_version,
'Tor PrivCount version')
if validated_version is not None:
self.tor_privcount_version = validated_version
logging.info("Tor PrivCount version is {}"
.format(self.tor_privcount_version))
return True
else:
return False
def get_tor_privcount_version(self):
    # Validated Tor PrivCount version string, or None if never set.
    return self.tor_privcount_version
def set_address(self, address):
address = address.strip()
# Do some basic validation of the address
# Relays must all have IPv4 addresses, so just checking for IPv4 is ok
if len(address) < 7 or len(address) > 15:
logging.warning("Bad address length %d: %s", len(address), address)
return False
if not all(c in (string.digits + '.') for c in address):
logging.warning("Bad address characters: %s", address)
return False
# We could check each component is between 0 and 255, but that's overkill
# Are we replacing an existing address?
if self.address is not None:
if self.address != address:
logging.warning("Replacing address %s with %s", self.address, address)
else:
logging.debug("Duplicate address received %s", address)
self.address = address
return True
def get_address(self):
    # Validated IPv4 address string, or None if never successfully set.
    return self.address
def set_fingerprint(self, fingerprint):
'''
If fingerprint is valid, set our stored fingerprint to fingerprint, and
return True.
Otherwise, return False.
Called by TorControlClientProtocol.
'''
fingerprint = fingerprint.strip()
# Do some basic validation of the fingerprint
if not len(fingerprint) == 40:
logging.warning("Bad fingerprint length %d: %s", len(fingerprint), fingerprint)
return False
if not all(c in string.hexdigits for c in fingerprint):
logging.warning("Bad fingerprint characters: %s", fingerprint)
return False
# Is this the first time we've been told a fingerprint?
if self.fingerprint is None:
self.fingerprint = fingerprint
self.generate_noise()
else:
if self.fingerprint != fingerprint:
logging.warning("Received different fingerprint %s, keeping original fingerprint %s",
self.fingerprint, fingerprint)
else:
logging.debug("Duplicate fingerprint received %s", fingerprint)
return True
def get_fingerprint(self):
    '''
    Return the stored fingerprint for this relay.
    Returns None until set_fingerprint() first succeeds.
    '''
    return self.fingerprint
def set_flag_list(self, flag_string):
'''
Set our stored flag list to the list of space-separated flags in
flag_string. Ignores flag_string if it is None.
Always returns True.
Called by TorControlClientProtocol.
'''
if flag_string is None:
logging.warning("flag_string was None in set_flag_list()")
return True
self.flag_list = flag_string.split()
logging.info("Updated relay flags to {}".format(self.flag_list))
return True
def get_flag_list(self):
    '''
    Return the stored flag list for this relay.
    May be None if set_flag_list() has never stored a value.
    '''
    return self.flag_list
def set_geoip_file(self, geoip_file):
'''
Set our stored GeoIPFile to geoip_file.
Ignores geoip_file if it is None.
Always returns True.
Called by TorControlClientProtocol.
'''
if geoip_file is None:
logging.warning("geoip_file was None in set_geoip_file()")
return True
self.geoip_file = geoip_file
logging.info("Updated GeoIPFile to '{}'".format(self.geoip_file))
return True
def get_geoip_file(self):
    '''
    Return the stored GeoIPFile for this relay.
    May be None if set_geoip_file() has never stored a value.
    '''
    return self.geoip_file
def set_geoipv6_file(self, geoipv6_file):
'''
Set our stored GeoIPv6File to geoipv6_file.
Ignores geoipv6_file if it is None.
Always returns True.
Called by TorControlClientProtocol.
'''
if geoipv6_file is None:
logging.warning("geoipv6_file was None in set_geoipv6_file()")
return True
self.geoipv6_file = geoipv6_file
logging.info("Updated GeoIPv6File to '{}'".format(self.geoipv6_file))
return True
def get_geoipv6_file(self):
    '''
    Return the stored GeoIPv6File for this relay.
    May be None if set_geoipv6_file() has never stored a value.
    '''
    return self.geoipv6_file
def get_context(self):
'''
return a dictionary containing each available context item
'''
context = {}
if self.get_nickname() is not None:
context['nickname'] = self.get_nickname()
if self.get_orport() is not None:
context['orport'] = self.get_orport()
if self.get_dirport() is not None:
context['dirport'] = self.get_dirport()
if self.get_tor_version() is not None:
context['tor_version'] = self.get_tor_version()
if self.get_tor_privcount_version() is not None:
context['tor_privcount_version'] = self.get_tor_privcount_version()
if self.get_address() is not None:
context['address'] = self.get_address()
if self.get_fingerprint() is not None:
context['fingerprint'] = self.get_fingerprint()
if self.get_flag_list() is not None:
context['flag_list'] = self.get_flag_list()
if self.last_event_time is not None:
context['last_event_time'] = self.last_event_time
if self.noise_weight_value is not None:
context['noise_weight_value'] = self.noise_weight_value
if self.geoip_file is not None:
context['geoip_file'] = self.geoip_file
if self.geoipv6_file is not None:
context['geoipv6_file'] = self.geoipv6_file
return context
def handle_event(self, event):
if not self.secure_counters:
return False
# fail on events with no code
if len(event) < 1:
return False
event_code, items = event[0], event[1:]
self.last_event_time = time()
# hand valid events off to the aggregator
# keep events in order of frequency, particularly the cell and bytes
# events (cell happens every 514 bytes, bytes happens every ~16kB)
# This event has tagged fields: fields may be optional.
if event_code == 'PRIVCOUNT_CIRCUIT_CELL':
return self._handle_tagged_event(event_code, items)
# these events have positional fields: order matters
elif event_code == 'PRIVCOUNT_STREAM_BYTES_TRANSFERRED':
if len(items) == Aggregator.STREAM_BYTES_ITEMS:
return self._handle_bytes_event(items[:Aggregator.STREAM_BYTES_ITEMS])
else:
logging.warning("Rejected malformed {} event"
.format(event_code))
return False
elif event_code == 'PRIVCOUNT_STREAM_ENDED':
if len(items) == Aggregator.STREAM_ENDED_ITEMS:
return self._handle_stream_event(items[:Aggregator.STREAM_ENDED_ITEMS])
else:
logging.warning("Rejected malformed | |
<gh_stars>0
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class ResourceAttributes:
CLOUD_PROVIDER = "cloud.provider"
"""
Name of the cloud provider.
"""
CLOUD_ACCOUNT_ID = "cloud.account.id"
"""
The cloud account ID the resource is assigned to.
"""
CLOUD_REGION = "cloud.region"
"""
The geographical region the resource is running.
Note: Refer to your provider's docs to see the available regions, for example [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), [Google Cloud regions](https://cloud.google.com/about/locations), or [Tencent Cloud regions](https://intl.cloud.tencent.com/document/product/213/6091).
"""
CLOUD_AVAILABILITY_ZONE = "cloud.availability_zone"
"""
Cloud regions often have multiple, isolated locations known as zones to increase availability. Availability zone represents the zone where the resource is running.
Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
"""
CLOUD_PLATFORM = "cloud.platform"
"""
The cloud platform in use.
Note: The prefix of the service SHOULD match the one specified in `cloud.provider`.
"""
AWS_ECS_CONTAINER_ARN = "aws.ecs.container.arn"
"""
The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
"""
AWS_ECS_CLUSTER_ARN = "aws.ecs.cluster.arn"
"""
The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
"""
AWS_ECS_LAUNCHTYPE = "aws.ecs.launchtype"
"""
The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) for an ECS task.
"""
AWS_ECS_TASK_ARN = "aws.ecs.task.arn"
"""
The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
"""
AWS_ECS_TASK_FAMILY = "aws.ecs.task.family"
"""
The task definition family this task definition is a member of.
"""
AWS_ECS_TASK_REVISION = "aws.ecs.task.revision"
"""
The revision for this task definition.
"""
AWS_EKS_CLUSTER_ARN = "aws.eks.cluster.arn"
"""
The ARN of an EKS cluster.
"""
AWS_LOG_GROUP_NAMES = "aws.log.group.names"
"""
The name(s) of the AWS log group(s) an application is writing to.
Note: Multiple log groups must be supported for cases like multi-container applications, where a single application has sidecar containers, and each write to their own log group.
"""
AWS_LOG_GROUP_ARNS = "aws.log.group.arns"
"""
The Amazon Resource Name(s) (ARN) of the AWS log group(s).
Note: See the [log group ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
"""
AWS_LOG_STREAM_NAMES = "aws.log.stream.names"
"""
The name(s) of the AWS log stream(s) an application is writing to.
"""
AWS_LOG_STREAM_ARNS = "aws.log.stream.arns"
"""
The ARN(s) of the AWS log stream(s).
Note: See the [log stream ARN format documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain several log streams, so these ARNs necessarily identify both a log group and a log stream.
"""
CONTAINER_NAME = "container.name"
"""
Container name used by container runtime.
"""
CONTAINER_ID = "container.id"
"""
Container ID. Usually a UUID, as for example used to [identify Docker containers](https://docs.docker.com/engine/reference/run/#container-identification). The UUID might be abbreviated.
"""
CONTAINER_RUNTIME = "container.runtime"
"""
The container runtime managing this container.
"""
CONTAINER_IMAGE_NAME = "container.image.name"
"""
Name of the image the container was built on.
"""
CONTAINER_IMAGE_TAG = "container.image.tag"
"""
Container image tag.
"""
DEPLOYMENT_ENVIRONMENT = "deployment.environment"
"""
Name of the [deployment environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka deployment tier).
"""
DEVICE_ID = "device.id"
"""
A unique identifier representing the device.
Note: The device identifier MUST only be defined using the values outlined below. This value is not an advertising identifier and MUST NOT be used as such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the Firebase Installation ID or a globally unique UUID which is persisted across sessions in your application. More information can be found [here](https://developer.android.com/training/articles/user-data-ids) on best practices and exact implementation details. Caution should be taken when storing personal data or anything which can identify a user. GDPR and data protection laws may apply, ensure you do your own due diligence.
"""
DEVICE_MODEL_IDENTIFIER = "device.model.identifier"
"""
The model identifier for the device.
Note: It's recommended this value represents a machine readable version of the model identifier rather than the market or consumer-friendly name of the device.
"""
DEVICE_MODEL_NAME = "device.model.name"
"""
The marketing name for the device model.
Note: It's recommended this value represents a human readable version of the device model rather than a machine readable alternative.
"""
DEVICE_MANUFACTURER = "device.manufacturer"
"""
The name of the device manufacturer.
Note: The Android OS provides this field via [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). iOS apps SHOULD hardcode the value `Apple`.
"""
FAAS_NAME = "faas.name"
"""
The name of the single function that this runtime instance executes.
Note: This is the name of the function as configured/deployed on the FaaS platform and is usually different from the name of the callback function (which may be stored in the [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) span attributes).
"""
FAAS_ID = "faas.id"
"""
The unique ID of the single function that this runtime instance executes.
Note: Depending on the cloud provider, use:
* **AWS Lambda:** The function [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
Take care not to use the "invoked ARN" directly but replace any
[alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) with the resolved function version, as the same runtime instance may be invokable with multiple
different aliases.
* **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full-resource-names)
* **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id).
On some providers, it may not be possible to determine the full ID at startup,
which is why this field cannot be made required. For example, on AWS the account ID
part of the ARN is not available without calling another AWS API
which may be deemed too slow for a short-running lambda function.
As an alternative, consider setting `faas.id` as a span attribute instead.
"""
FAAS_VERSION = "faas.version"
"""
The immutable version of the function being executed.
Note: Depending on the cloud provider and platform, use:
* **AWS Lambda:** The [function version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
(an integer represented as a decimal string).
* **Google Cloud Run:** The [revision](https://cloud.google.com/run/docs/managing/revisions)
(i.e., the function name plus the revision suffix).
* **Google Cloud Functions:** The value of the
[`K_REVISION` environment variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
* **Azure Functions:** Not applicable. Do not set this attribute.
"""
FAAS_INSTANCE = "faas.instance"
"""
The execution environment ID as a string, that will be potentially reused for other invocations to the same function/function version.
Note: * **AWS Lambda:** Use the (full) log stream name.
"""
FAAS_MAX_MEMORY = "faas.max_memory"
"""
The amount of memory available to the serverless function in MiB.
Note: It's recommended to set this attribute since e.g. too little memory can easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
"""
HOST_ID = "host.id"
"""
Unique host ID. For Cloud, this must be the instance_id assigned by the cloud provider.
"""
HOST_NAME = "host.name"
"""
Name of the host. On Unix systems, it may contain what the hostname command returns, or the fully qualified hostname, or another name specified by the user.
"""
HOST_TYPE = "host.type"
"""
Type of host. For Cloud, this must be the machine type.
"""
HOST_ARCH = "host.arch"
"""
The CPU architecture the host system is running on.
"""
HOST_IMAGE_NAME = "host.image.name"
"""
Name of the VM image or OS install the host was instantiated from.
"""
HOST_IMAGE_ID = "host.image.id"
"""
VM image ID. For Cloud, this value is from the provider.
"""
HOST_IMAGE_VERSION = "host.image.version"
"""
The version string of the VM image as defined in [Version Attributes](README.md#version-attributes).
"""
K8S_CLUSTER_NAME = "k8s.cluster.name"
"""
The name of the cluster.
"""
K8S_NODE_NAME = "k8s.node.name"
"""
The name of the Node.
"""
K8S_NODE_UID = "k8s.node.uid"
"""
The UID of the Node.
"""
K8S_NAMESPACE_NAME = "k8s.namespace.name"
"""
The name of the namespace that the pod is running in.
"""
K8S_POD_UID = "k8s.pod.uid"
"""
The UID of the Pod.
"""
K8S_POD_NAME = "k8s.pod.name"
"""
The name of the Pod.
"""
K8S_CONTAINER_NAME = "k8s.container.name"
"""
The name of the Container from Pod specification, must be unique within a Pod. Container runtime usually uses different globally unique name (`container.name`).
"""
K8S_CONTAINER_RESTART_COUNT = | |
<reponame>YongxinLIAO/pymodbus
#!/usr/bin/env python
'''
Libmodbus Protocol Wrapper
------------------------------------------------------------
What follows is an example wrapper of the libmodbus library
(http://libmodbus.org/documentation/) for use with pymodbus.
There are two utilities involved here:
* LibmodbusLevel1Client
This is simply a python wrapper around the c library. It is
mostly a clone of the pylibmodbus implementation, but I plan
on extending it to implement all the available protocol using
the raw execute methods.
* LibmodbusClient
This is just another modbus client that can be used just like
any other client in pymodbus.
For these to work, you must have `cffi` and `libmodbus-dev` installed:
sudo apt-get install libmodbus-dev
pip install cffi
'''
#--------------------------------------------------------------------------#
# import system libraries
#--------------------------------------------------------------------------#
from cffi import FFI
#--------------------------------------------------------------------------#
# import pymodbus libraries
#--------------------------------------------------------------------------#
from pymodbus.constants import Defaults
from pymodbus.exceptions import ModbusException
from pymodbus.client.common import ModbusClientMixin
from pymodbus.bit_read_message import ReadCoilsResponse, ReadDiscreteInputsResponse
from pymodbus.register_read_message import ReadHoldingRegistersResponse, ReadInputRegistersResponse
from pymodbus.register_read_message import ReadWriteMultipleRegistersResponse
from pymodbus.bit_write_message import WriteSingleCoilResponse, WriteMultipleCoilsResponse
from pymodbus.register_write_message import WriteSingleRegisterResponse, WriteMultipleRegistersResponse
#--------------------------------------------------------------------------------
# create the C interface
#--------------------------------------------------------------------------------
# * TODO add the protocol needed for the servers
#--------------------------------------------------------------------------------
# Declare the subset of the libmodbus C API that this wrapper uses.
# The signatures below mirror modbus.h; cffi parses them at runtime so
# no compilation step is needed.
compiler = FFI()
compiler.cdef("""
typedef struct _modbus modbus_t;
int modbus_connect(modbus_t *ctx);
int modbus_flush(modbus_t *ctx);
void modbus_close(modbus_t *ctx);
const char *modbus_strerror(int errnum);
int modbus_set_slave(modbus_t *ctx, int slave);
void modbus_get_response_timeout(modbus_t *ctx, uint32_t *to_sec, uint32_t *to_usec);
void modbus_set_response_timeout(modbus_t *ctx, uint32_t to_sec, uint32_t to_usec);
int modbus_read_bits(modbus_t *ctx, int addr, int nb, uint8_t *dest);
int modbus_read_input_bits(modbus_t *ctx, int addr, int nb, uint8_t *dest);
int modbus_read_registers(modbus_t *ctx, int addr, int nb, uint16_t *dest);
int modbus_read_input_registers(modbus_t *ctx, int addr, int nb, uint16_t *dest);
int modbus_write_bit(modbus_t *ctx, int coil_addr, int status);
int modbus_write_bits(modbus_t *ctx, int addr, int nb, const uint8_t *data);
int modbus_write_register(modbus_t *ctx, int reg_addr, int value);
int modbus_write_registers(modbus_t *ctx, int addr, int nb, const uint16_t *data);
int modbus_write_and_read_registers(modbus_t *ctx, int write_addr, int write_nb, const uint16_t *src, int read_addr, int read_nb, uint16_t *dest);
int modbus_mask_write_register(modbus_t *ctx, int addr, uint16_t and_mask, uint16_t or_mask);
int modbus_send_raw_request(modbus_t *ctx, uint8_t *raw_req, int raw_req_length);
float modbus_get_float(const uint16_t *src);
void modbus_set_float(float f, uint16_t *dest);
modbus_t* modbus_new_tcp(const char *ip_address, int port);
modbus_t* modbus_new_rtu(const char *device, int baud, char parity, int data_bit, int stop_bit);
void modbus_free(modbus_t *ctx);
int modbus_receive(modbus_t *ctx, uint8_t *req);
int modbus_receive_from(modbus_t *ctx, int sockfd, uint8_t *req);
int modbus_receive_confirmation(modbus_t *ctx, uint8_t *rsp);
""")
# Load the shared libmodbus library; requires libmodbus-dev installed.
LIB = compiler.dlopen('modbus') # create our bindings
#--------------------------------------------------------------------------------
# helper utilites
#--------------------------------------------------------------------------------
def get_float(data):
    # Decode a modbus float from a cffi uint16_t* register buffer.
    return LIB.modbus_get_float(data)
def set_float(value, data):
    # Encode `value` as a modbus float into the cffi uint16_t* buffer `data`.
    LIB.modbus_set_float(value, data)
def cast_to_int16(data):
    # Reinterpret a raw register value as a signed 16-bit integer.
    return int(compiler.cast('int16_t', data))
def cast_to_int32(data):
    # Reinterpret a raw register value as a signed 32-bit integer.
    return int(compiler.cast('int32_t', data))
#--------------------------------------------------------------------------------
# level1 client
#--------------------------------------------------------------------------------
class LibmodbusLevel1Client(object):
    ''' A raw wrapper around the libmodbus c library. Feel free
    to use it if you want increased performance and don't mind the
    entire protocol not being implemented.
    '''

    @classmethod
    def create_tcp_client(klass, host='127.0.0.1', port=Defaults.Port):
        ''' Create a TCP modbus client for the supplied parameters.

        :param host: The host to connect to
        :param port: The port to connect to on that host
        :returns: A new level1 client
        '''
        client = LIB.modbus_new_tcp(host.encode(), port)
        return klass(client)

    @classmethod
    def create_rtu_client(klass, **kwargs):
        ''' Create an RTU modbus client for the supplied parameters.

        :param port: The serial port to attach to
        :param stopbits: The number of stop bits to use
        :param bytesize: The bytesize of the serial messages
        :param parity: Which kind of parity to use
        :param baudrate: The baud rate to use for the serial device
        :returns: A new level1 client
        '''
        port = kwargs.get('port', '/dev/ttyS0')
        baudrate = kwargs.get('baud', Defaults.Baudrate)
        parity = kwargs.get('parity', Defaults.Parity)
        bytesize = kwargs.get('bytesize', Defaults.Bytesize)
        stopbits = kwargs.get('stopbits', Defaults.Stopbits)
        # bug fix: cffi requires bytes for `const char *` and `char`
        # arguments on python 3 (the TCP factory already encodes its
        # host, but the device path and parity were passed as str here).
        if isinstance(port, str):
            port = port.encode()
        if isinstance(parity, str):
            parity = parity.encode()
        client = LIB.modbus_new_rtu(port, baudrate, parity, bytesize,
                                    stopbits)
        return klass(client)

    def __init__(self, client):
        ''' Initialize a new instance of the LibmodbusLevel1Client. This
        method should not be used, instead new instances should be created
        using the two supplied factory methods:

        * LibmodbusLevel1Client.create_rtu_client(...)
        * LibmodbusLevel1Client.create_tcp_client(...)

        :param client: The underlying client instance to operate with.
        '''
        self.client = client
        self.slave = Defaults.UnitId

    def set_slave(self, slave):
        ''' Set the current slave to operate against.

        :param slave: The new slave to operate against
        :returns: The resulting slave to operate against
        '''
        # bug fix: this previously called self._execute, which does not
        # exist (the private helper is name-mangled as __execute), so
        # every call raised AttributeError.
        self.slave = self.__execute(LIB.modbus_set_slave, slave)
        return self.slave

    def connect(self):
        ''' Attempt to connect to the client target.

        :returns: True if successful, throws otherwise
        '''
        return (self.__execute(LIB.modbus_connect) == 0)

    def flush(self):
        ''' Discards the existing bytes on the wire.

        :returns: The number of flushed bytes, or throws
        '''
        return self.__execute(LIB.modbus_flush)

    def close(self):
        ''' Closes and frees the underlying connection
        and context structure. The instance must not be used after
        calling this (the context memory has been released).

        :returns: Always True
        '''
        LIB.modbus_close(self.client)
        LIB.modbus_free(self.client)
        return True

    def __execute(self, command, *args):
        ''' Run the supplied command against the currently
        instantiated client with the supplied arguments. This
        will make sure to correctly handle resulting errors.

        :param command: The command to execute against the context
        :param *args: The arguments for the given command
        :returns: The result of the operation unless -1 which throws
        '''
        result = command(self.client, *args)
        if result == -1:
            # translate the libmodbus errno into a pymodbus exception
            message = LIB.modbus_strerror(compiler.errno)
            raise ModbusException(compiler.string(message))
        return result

    def read_bits(self, address, count=1):
        ''' Read `count` coils starting at `address`.

        :param address: The starting address to read from
        :param count: The number of coils to read
        :returns: The resulting bits (cffi uint8_t array)
        '''
        result = compiler.new("uint8_t[]", count)
        self.__execute(LIB.modbus_read_bits, address, count, result)
        return result

    def read_input_bits(self, address, count=1):
        ''' Read `count` discrete inputs starting at `address`.

        :param address: The starting address to read from
        :param count: The number of discretes to read
        :returns: The resulting bits (cffi uint8_t array)
        '''
        result = compiler.new("uint8_t[]", count)
        self.__execute(LIB.modbus_read_input_bits, address, count, result)
        return result

    def write_bit(self, address, value):
        ''' Write a single coil.

        :param address: The starting address to write to
        :param value: The value to write to the specified address
        :returns: The number of written bits
        '''
        return self.__execute(LIB.modbus_write_bit, address, value)

    def write_bits(self, address, values):
        ''' Write multiple coils.

        :param address: The starting address to write to
        :param values: The values to write to the specified address
        :returns: The number of written bits
        '''
        count = len(values)
        return self.__execute(LIB.modbus_write_bits, address, count, values)

    def write_register(self, address, value):
        ''' Write a single holding register.

        :param address: The starting address to write to
        :param value: The value to write to the specified address
        :returns: The number of written registers
        '''
        return self.__execute(LIB.modbus_write_register, address, value)

    def write_registers(self, address, values):
        ''' Write multiple holding registers.

        :param address: The starting address to write to
        :param values: The values to write to the specified address
        :returns: The number of written registers
        '''
        count = len(values)
        return self.__execute(LIB.modbus_write_registers, address, count, values)

    def read_registers(self, address, count=1):
        ''' Read `count` holding registers starting at `address`.

        :param address: The starting address to read from
        :param count: The number of registers to read
        :returns: The resulting read registers (cffi uint16_t array)
        '''
        result = compiler.new("uint16_t[]", count)
        self.__execute(LIB.modbus_read_registers, address, count, result)
        return result

    def read_input_registers(self, address, count=1):
        ''' Read `count` input registers starting at `address`.

        :param address: The starting address to read from
        :param count: The number of registers to read
        :returns: The resulting read registers (cffi uint16_t array)
        '''
        result = compiler.new("uint16_t[]", count)
        self.__execute(LIB.modbus_read_input_registers, address, count, result)
        return result

    def read_and_write_registers(self, read_address, read_count,
                                 write_address, write_registers):
        ''' Write registers, then read registers, in one transaction.

        :param read_address: The address to start reading from
        :param read_count: The number of registers to read from address
        :param write_address: The address to start writing to
        :param write_registers: The registers to write to the specified address
        :returns: The resulting read registers (cffi uint16_t array)
        '''
        write_count = len(write_registers)
        read_result = compiler.new("uint16_t[]", read_count)
        self.__execute(LIB.modbus_write_and_read_registers,
                       write_address, write_count, write_registers,
                       read_address, read_count, read_result)
        return read_result
#--------------------------------------------------------------------------------
# level2 client
#--------------------------------------------------------------------------------
class LibmodbusClient(ModbusClientMixin):
''' A facade around the raw level 1 libmodbus client
that implements the pymodbus protocol on top of the lower level
client.
'''
#-----------------------------------------------------------------------#
# these are used to convert from the pymodbus request types to the
# libmodbus operations (overloaded operator).
#-----------------------------------------------------------------------#
__methods = {
'ReadCoilsRequest' : lambda c, r: c.read_bits(r.address, r.count),
'ReadDiscreteInputsRequest' : lambda c, r: c.read_input_bits(r.address, r.count),
'WriteSingleCoilRequest' : lambda c, r: c.write_bit(r.address, r.value),
'WriteMultipleCoilsRequest' : lambda c, r: c.write_bits(r.address, r.values),
'WriteSingleRegisterRequest' : lambda c, r: c.write_register(r.address, r.value),
'WriteMultipleRegistersRequest' : lambda c, r: c.write_registers(r.address, r.values),
'ReadHoldingRegistersRequest' : lambda c, r: c.read_registers(r.address, r.count),
'ReadInputRegistersRequest' : lambda c, r: c.read_input_registers(r.address, r.count),
'ReadWriteMultipleRegistersRequest' : lambda c, r: c.read_and_write_registers(r.read_address, r.read_count, r.write_address, r.write_registers),
}
#-----------------------------------------------------------------------#
# these are used to convert from the libmodbus result to the
# pymodbus response type
#-----------------------------------------------------------------------#
__adapters = {
'ReadCoilsRequest' : lambda tx, rx: ReadCoilsResponse(list(rx)),
'ReadDiscreteInputsRequest' : lambda tx, rx: ReadDiscreteInputsResponse(list(rx)),
'WriteSingleCoilRequest' : lambda tx, rx: WriteSingleCoilResponse(tx.address, rx),
'WriteMultipleCoilsRequest' : lambda tx, rx: WriteMultipleCoilsResponse(tx.address, rx),
'WriteSingleRegisterRequest' : lambda tx, rx: WriteSingleRegisterResponse(tx.address, rx),
'WriteMultipleRegistersRequest' : lambda tx, rx: WriteMultipleRegistersResponse(tx.address, rx),
'ReadHoldingRegistersRequest' : lambda tx, rx: ReadHoldingRegistersResponse(list(rx)),
'ReadInputRegistersRequest' : lambda tx, rx: ReadInputRegistersResponse(list(rx)),
| |
self.seen_title:
return
if self.previous_section:
self.add_menu(self.previous_section)
else:
self.add_menu(self.document, master=True)
node_name = node['node_name']
pointers = tuple([node_name] + self.rellinks[node_name])
self.add_text('\n@node %s,%s,%s,%s\n' % pointers)
if node_name != node_name.lower():
self.add_text('@anchor{%s}' % node_name.lower())
for id in self.next_section_targets:
self.add_anchor(id, node)
self.next_section_targets = []
self.previous_section = node
self.section_level += 1
    def depart_section(self, node):
        """Leave a section: pop back to the parent nesting level."""
        self.section_level -= 1
    # Texinfo heading commands indexed by section nesting depth;
    # visit_title falls back to the last entry when nested deeper.
    headings = (
        '@unnumbered',
        '@chapter',
        '@section',
        '@subsection',
        '@subsubsection',
    )
    # Heading-like commands (no TOC entry) used for sidebars and
    # out-of-section titles, indexed the same way as `headings`.
    rubrics = (
        '@heading',
        '@subheading',
        '@subsubheading',
    )
    def visit_title(self, node):
        """Emit the heading command for a title node.

        The document's first title is skipped (it is rendered by the
        top-level template); table/admonition/topic titles are handled by
        their own visitors; sidebar and orphan titles become rubrics.
        """
        if not self.seen_title:
            self.seen_title = 1
            raise nodes.SkipNode
        parent = node.parent
        if isinstance(parent, nodes.table):
            return
        if isinstance(parent, nodes.Admonition):
            raise nodes.SkipNode
        elif isinstance(parent, nodes.sidebar):
            self.visit_rubric(node)
        elif isinstance(parent, nodes.topic):
            raise nodes.SkipNode
        elif not isinstance(parent, nodes.section):
            self.document.reporter.warning(
                'encountered title node not in section, topic, table, '
                'admonition or sidebar', base_node=node)
            self.visit_rubric(node)
        else:
            try:
                heading = self.headings[self.section_level]
            except IndexError:
                # Nested deeper than we have heading levels for.
                heading = self.headings[-1]
            self.add_text('%s ' % heading, fresh=1)
    def depart_title(self, node):
        self.add_text('', fresh=1)
    def visit_rubric(self, node):
        """Emit a heading command that does not create a TOC entry."""
        try:
            rubric = self.rubrics[self.section_level]
        except IndexError:
            rubric = self.rubrics[-1]
        self.add_text('%s ' % rubric, fresh=1)
    def depart_rubric(self, node):
        self.add_text('', fresh=1)
    def visit_subtitle(self, node):
        """Subtitles become an unindented paragraph."""
        self.add_text('\n\n@noindent\n')
    def depart_subtitle(self, node):
        self.add_text('\n\n')
## References
def visit_target(self, node):
if node.get('ids'):
self.add_anchor(node['ids'][0], node)
elif node.get('refid'):
# Section targets need to go after the start of the section.
next = node.next_node(ascend=1, siblings=1)
while isinstance(next, nodes.target):
next = next.next_node(ascend=1, siblings=1)
if isinstance(next, nodes.section):
self.next_section_targets.append(node['refid'])
return
self.add_anchor(node['refid'], node)
elif node.get('refuri'):
pass
else:
self.document.reporter.error("Unknown target type: %r" % node)
    def visit_reference(self, node):
        """Translate a reference node into the matching Texinfo command
        (@xref/@email/@pxref/@uref/@indicateurl) and skip its children,
        since the link text is emitted here.
        """
        if isinstance(node.parent, nodes.title):
            return
        if isinstance(node[0], nodes.image):
            return
        if isinstance(node.parent, addnodes.desc_type):
            return
        name = node.get('name', node.astext()).strip()
        if node.get('refid'):
            # Internal reference to an anchor in this document.
            self.add_xref(escape_id(node['refid']),
                          escape_id(name), node)
            raise nodes.SkipNode
        if not node.get('refuri'):
            self.document.reporter.error("Unknown reference type: %s" % node)
            return
        uri = node['refuri']
        if uri.startswith('#'):
            # Fragment within this document.
            self.add_xref(escape_id(uri[1:]), escape_id(name), node)
        elif uri.startswith('%'):
            # Inter-document reference of the form '%<source>#<id>'.
            id = uri[1:]
            if '#' in id:
                src, id = uri[1:].split('#', 1)
            assert '#' not in id
            self.add_xref(escape_id(id), escape_id(name), node)
        elif uri.startswith('mailto:'):
            uri = escape_arg(uri[7:])
            name = escape_arg(name)
            if not name or name == uri:
                self.add_text('@email{%s}' % uri)
            else:
                self.add_text('@email{%s,%s}' % (uri, name))
        elif uri.startswith('info:'):
            # Reference into another info manual: 'info:<file>#<node>';
            # node defaults to 'Top'.
            uri = uri[5:].replace('_', ' ')
            uri = escape_arg(uri)
            id = 'Top'
            if '#' in uri:
                uri, id = uri.split('#', 1)
            id = escape_id(id)
            name = escape_id(name)
            if name == id:
                self.add_text('@pxref{%s,,,%s}' % (id, uri))
            else:
                self.add_text('@pxref{%s,,%s,%s}' % (id, name, uri))
        else:
            # Plain external URL.
            uri = escape_arg(uri)
            name = escape_arg(name)
            if not name or uri == name:
                self.add_text('@indicateurl{%s}' % uri)
            else:
                self.add_text('@uref{%s,%s}' % (uri, name))
        raise nodes.SkipNode
    def depart_reference(self, node):
        pass
    def visit_title_reference(self, node):
        """Render `title references` as @cite{...}; children skipped."""
        text = node.astext()
        self.add_text('@cite{%s}' % escape_arg(text))
        raise nodes.SkipNode
    def depart_title_reference(self, node):
        pass
    ## Blocks
    def visit_paragraph(self, node):
        """Suppress the paragraph indent when continuing a previous flow."""
        if 'continued' in node or isinstance(node.parent, nodes.compound):
            self.add_text('@noindent\n', fresh=1)
    def depart_paragraph(self, node):
        self.add_text('\n\n')
    def visit_block_quote(self, node):
        self.rstrip()
        self.add_text('\n\n@quotation\n')
    def depart_block_quote(self, node):
        self.rstrip()
        self.add_text('\n@end quotation\n\n')
    def visit_literal_block(self, node):
        self.rstrip()
        self.add_text('\n\n@example\n')
    def depart_literal_block(self, node):
        # @noindent keeps the following paragraph flush after the example.
        self.rstrip()
        self.add_text('\n@end example\n\n'
                      '@noindent\n')
    # Doctest blocks render identically to literal blocks.
    visit_doctest_block = visit_literal_block
    depart_doctest_block = depart_literal_block
    def visit_line_block(self, node):
        self.add_text('@display\n', fresh=1)
    def depart_line_block(self, node):
        self.add_text('@end display\n', fresh=1)
    def visit_line(self, node):
        """Individual line in a line block; newlines inside it must be
        escaped so the hard line break is preserved."""
        self.rstrip()
        self.add_text('\n')
        self.escape_newlines += 1
    def depart_line(self, node):
        # @w{ } forces the line break to survive Texinfo's filling.
        self.add_text('@w{ }\n')
        self.escape_newlines -= 1
    ## Inline
    # Simple inline markup: open a Texinfo brace command on visit and
    # close it on depart.
    def visit_strong(self, node):
        self.add_text('@strong{')
    def depart_strong(self, node):
        self.add_text('}')
    def visit_emphasis(self, node):
        self.add_text('@emph{')
    def depart_emphasis(self, node):
        self.add_text('}')
    def visit_literal(self, node):
        self.add_text('@code{')
    def depart_literal(self, node):
        self.add_text('}')
    def visit_superscript(self, node):
        # @w{} keeps the marker and text on one line.
        self.add_text('@w{^')
    def depart_superscript(self, node):
        self.add_text('}')
    def visit_subscript(self, node):
        self.add_text('@w{[')
    def depart_subscript(self, node):
        self.add_text(']}')
    ## Footnotes
    # Footnotes/citations are rendered inline as quotations; their
    # references become bracketed text.
    def visit_footnote(self, node):
        self.visit_block_quote(node)
    def depart_footnote(self, node):
        self.depart_block_quote(node)
    def visit_footnote_reference(self, node):
        self.add_text('@w{(')
    def depart_footnote_reference(self, node):
        self.add_text(')}')
    visit_citation = visit_footnote
    depart_citation = depart_footnote
    def visit_citation_reference(self, node):
        self.add_text('@w{[')
    def depart_citation_reference(self, node):
        self.add_text(']}')
    ## Lists
    def visit_bullet_list(self, node):
        """Open an @itemize using the list's bullet character."""
        bullet = node.get('bullet', '*')
        self.rstrip()
        self.add_text('\n\n@itemize %s\n' % bullet)
    def depart_bullet_list(self, node):
        self.rstrip()
        self.add_text('\n@end itemize\n\n')
    def visit_enumerated_list(self, node):
        """Open an @enumerate, mapping the docutils enumeration type to a
        Texinfo start character (empty string means arabic numbering)."""
        # Doesn't support Roman numerals
        enum = node.get('enumtype', 'arabic')
        starters = {'arabic': '',
                    'loweralpha': 'a',
                    'upperalpha': 'A',}
        start = node.get('start', starters.get(enum, ''))
        self.rstrip()
        self.add_text('\n\n@enumerate %s\n' % start)
    def depart_enumerated_list(self, node):
        self.rstrip()
        self.add_text('\n@end enumerate\n\n')
    def visit_list_item(self, node):
        self.rstrip()
        self.add_text('\n@item\n')
    def depart_list_item(self, node):
        pass
    ## Option List
    def visit_option_list(self, node):
        self.add_text('\n@table @option\n')
    def depart_option_list(self, node):
        self.rstrip()
        self.add_text('\n@end table\n\n')
    def visit_option_list_item(self, node):
        pass
    def depart_option_list_item(self, node):
        pass
    def visit_option_group(self, node):
        # First option of a group uses @item, the rest use @itemx so they
        # share one table entry.
        self.at_item_x = '@item'
    def depart_option_group(self, node):
        pass
    def visit_option(self, node):
        self.add_text(self.at_item_x + ' ', fresh=1)
        self.at_item_x = '@itemx'
    def depart_option(self, node):
        pass
    def visit_option_string(self, node):
        pass
    def depart_option_string(self, node):
        pass
    def visit_option_argument(self, node):
        self.add_text(node.get('delimiter', ' '))
    def depart_option_argument(self, node):
        pass
    def visit_description(self, node):
        self.add_text('', fresh=1)
    def depart_description(self, node):
        pass
    ## Definitions
    def visit_definition_list(self, node):
        self.add_text('\n@table @asis\n')
    def depart_definition_list(self, node):
        self.rstrip()
        self.add_text('\n@end table\n\n')
    def visit_definition_list_item(self, node):
        # Same @item/@itemx dance as option groups: multiple terms share
        # one definition entry.
        self.at_item_x = '@item'
    def depart_definition_list_item(self, node):
        pass
    def visit_term(self, node):
        if node.get('ids') and node['ids'][0]:
            self.add_anchor(node['ids'][0], node)
        self.add_text(self.at_item_x + ' ', fresh=1)
        self.at_item_x = '@itemx'
    def depart_term(self, node):
        pass
    def visit_termsep(self, node):
        self.add_text(self.at_item_x + ' ', fresh=1)
    def visit_classifier(self, node):
        self.add_text(' : ')
    def depart_classifier(self, node):
        pass
    def visit_definition(self, node):
        self.add_text('', fresh=1)
    def depart_definition(self, node):
        pass
    ## Tables
    def visit_table(self, node):
        # Rows are separated with @item; header rows override this with
        # @headitem (see visit_thead).
        self.entry_sep = '@item'
    def depart_table(self, node):
        self.rstrip()
        self.add_text('\n@end multitable\n\n')
    def visit_tabular_col_spec(self, node):
        pass
    def depart_tabular_col_spec(self, node):
        pass
    def visit_colspec(self, node):
        """Collect column widths; once all columns of the tgroup are seen,
        emit the @multitable header with a width prototype per column."""
        self.colwidths.append(node['colwidth'])
        if len(self.colwidths) != self.n_cols:
            return
        self.add_text('@multitable ', fresh=1)
        for i, n in enumerate(self.colwidths):
            # Prototype is n+2 'x' characters to approximate the width.
            self.add_text('{%s} ' %('x' * (n+2)))
    def depart_colspec(self, node):
        pass
    def visit_tgroup(self, node):
        self.colwidths = []
        self.n_cols = node['cols']
    def depart_tgroup(self, node):
        pass
    def visit_thead(self, node):
        self.entry_sep = '@headitem'
    def depart_thead(self, node):
        pass
    def visit_tbody(self, node):
        pass
    def depart_tbody(self, node):
        pass
    def visit_row(self, node):
        pass
    def depart_row(self, node):
        self.entry_sep = '@item'
    def visit_entry(self, node):
        """First entry of a row emits the row separator; the rest @tab."""
        self.rstrip()
        self.add_text('\n%s ' % self.entry_sep)
        self.entry_sep = '@tab'
    def depart_entry(self, node):
        # Pad empty @tab cells for column-spanning entries. (xrange:
        # this file targets Python 2.)
        for i in xrange(node.get('morecols', 0)):
            self.add_text('@tab\n', fresh=1)
        self.add_text('', fresh=1)
    ## Field Lists
    def visit_field_list(self, node):
        # @itemize @w gives an unmarked item list.
        self.add_text('\n@itemize @w\n')
    def depart_field_list(self, node):
        self.rstrip()
        self.add_text('\n@end itemize\n\n')
    def visit_field(self, node):
        # A lone field outside a field_list still needs the list wrapper.
        if not isinstance(node.parent, nodes.field_list):
            self.visit_field_list(None)
    def depart_field(self, node):
        if not isinstance(node.parent, nodes.field_list):
            self.depart_field_list(None)
    def visit_field_name(self, node):
        self.add_text('@item ', fresh=1)
    def depart_field_name(self, node):
        self.add_text(':')
    def visit_field_body(self, node):
        self.add_text('', fresh=1)
    def depart_field_body(self, node):
        pass
    ## Admonitions
    # Admonitions render as a titled quotation inside a cartouche box.
    def visit_admonition(self, node):
        title = escape(node[0].astext())
        self.add_text('\n@cartouche\n'
                      '@quotation %s\n' % title)
    def depart_admonition(self, node):
        self.rstrip()
        self.add_text('\n@end quotation\n'
                      '@end cartouche\n\n')
    def _make_visit_admonition(typ):
        """Factory: build a visit_* method for a fixed admonition title."""
        def visit(self, node):
            title = escape(typ)
            self.add_text('\n@cartouche\n'
                          '@quotation %s\n' % title)
        return visit
    visit_attention = _make_visit_admonition('Attention')
    visit_caution = _make_visit_admonition('Caution')
    visit_danger = _make_visit_admonition('Danger')
    visit_error = _make_visit_admonition('Error')
    visit_important = _make_visit_admonition('Important')
    visit_note = _make_visit_admonition('Note')
    visit_tip = _make_visit_admonition('Tip')
    visit_hint = _make_visit_admonition('Hint')
    visit_warning = _make_visit_admonition('Warning')
    # All specific admonitions close the same way.
    depart_attention = depart_admonition
    depart_caution = depart_admonition
    depart_danger = depart_admonition
    depart_error = depart_admonition
    depart_important = depart_admonition
    depart_note = depart_admonition
    depart_tip = depart_admonition
    depart_hint = depart_admonition
    depart_warning = depart_admonition
    ## Misc
    def visit_docinfo(self, node):
        # No 'docinfo_xform'
        raise nodes.SkipNode
    def visit_topic(self, node):
        """Render topics as a rubric plus quotation; TOC topics skipped."""
        # Ignore TOC's since we have to have a "menu" anyway
        if 'contents' in node.get('classes', []):
            raise nodes.SkipNode
        title = node[0]
        self.visit_rubric(title)
        self.add_text('%s\n' % escape(title.astext()))
        self.visit_block_quote(node)
    def depart_topic(self, node):
        self.depart_block_quote(node)
    def visit_generated(self, node):
        raise nodes.SkipNode
    def depart_generated(self, node):
        pass
    def visit_transition(self, node):
        # Horizontal rule: a 70-character unbreakable underscore line.
        self.add_text('\n\n@noindent\n'
                      '@exdent @w{%s}\n\n'
                      '@noindent\n' % ('_' * 70))
    def depart_transition(self, node):
        pass
    def visit_attribution(self, node):
        self.add_text('@flushright\n', fresh=1)
    def depart_attribution(self, node):
        self.add_text('@end flushright\n', fresh=1)
    def visit_raw(self, node):
        """Pass through raw nodes only when marked for texinfo output."""
        format = node.get('format', '').split()
        if 'texinfo' in format or 'texi' in format:
            self.add_text(node.astext())
        raise nodes.SkipNode
    def depart_raw(self, node):
        pass
    def visit_figure(self, node):
        self.add_text('\n@float Figure\n')
    def depart_figure(self, node):
        self.rstrip()
        self.add_text('\n@end float\n\n')
    def visit_caption(self, node):
        if not isinstance(node.parent, nodes.figure):
            self.document.reporter.warning('Caption not inside a figure.',
                                           base_node=node)
            return
        self.add_text('@caption{', fresh=1)
    def depart_caption(self, node):
        if isinstance(node.parent, nodes.figure):
            self.rstrip()
            self.add_text('}\n')
    def visit_image(self, node):
        """Emit @image for local images; remote/missing images may be
        silently skipped."""
        if node['uri'] in self.builder.images:
            uri = self.builder.images[node['uri']]
        else:
            # missing image!
            if self.ignore_missing_images:
                return
            uri = node['uri']
        if uri.find('://') != -1:
            # ignore remote images
            return
        name, ext = path.splitext(uri)
        attrs = node.attributes
        # ignored in non-tex output
        width = self.tex_image_length(attrs.get('width', ''))
        height = self.tex_image_length(attrs.get('height', ''))
        alt = escape_arg(attrs.get('alt', ''))
        self.add_text('\n\n@image{%s,%s,%s,%s,%s}\n\n' %
                      (name, width, height, alt, ext[1:]))
    def depart_image(self, node):
        pass
    def visit_compound(self, node):
        pass
    def depart_compound(self, node):
        pass
    def visit_sidebar(self, node):
        pass
    def depart_sidebar(self, node):
        pass
    def visit_label(self, node):
        # Footnote/citation labels render as "(label) ".
        self.add_text('@w{(')
    def depart_label(self, node):
        self.add_text(')} ')
    def visit_legend(self, node):
        pass
    def depart_legend(self, node):
        pass
    def visit_substitution_reference(self, node):
        pass
    def depart_substitution_reference(self, node):
        pass
    def visit_substitution_definition(self, node):
        raise nodes.SkipNode
    def depart_substitution_definition(self, node):
        pass
    def visit_system_message(self, node):
        self.add_text('\n@format\n'
                      '---------- SYSTEM MESSAGE -----------\n')
    def depart_system_message(self, node):
        self.rstrip()
        self.add_text('\n------------------------------------\n'
                      '@end format\n')
    def visit_comment(self, node):
        # Preserve reST comments as Texinfo @c comments.
        for line in node.astext().splitlines():
            self.add_text('@c %s\n' % line, fresh=1)
        raise nodes.SkipNode
def visit_problematic(self, node):
| |
<filename>gpnet_exact.py
from __future__ import division
import numpy as np
import theano.tensor as T
import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import lasagne
from lasagne.nonlinearities import softmax, rectify, tanh
from lasagne.layers import InputLayer, DenseLayer, Conv1DLayer, MaxPool1DLayer
from lasagne.layers import DimshuffleLayer, DropoutLayer
from lasagne.layers import LSTMLayer, SliceLayer, ConcatLayer
from lasagne.layers import get_all_param_values, set_all_param_values
from lasagne.objectives import categorical_crossentropy
from lasagne.regularization import regularize_network_params, l2
from lasagne.updates import adagrad, adadelta, nesterov_momentum
from itertools import izip
import time
import argparse
import sys
from pickle_io import pickle_load, pickle_save
from exact_posterior_gp import PosteriorGP
#from posterior_gp import PosteriorGP
#from gp_kernel import kernel, symbolic_kernel
from fast_gp import sparse_w
from logger import Logger
def build_logistic_regression(input_layer):
    """Logistic-regression 'architecture': no hidden layers.

    The softmax output layer added by the caller on top of this makes the
    whole model a plain multinomial logistic regression, so the input
    layer is returned untouched.
    """
    return input_layer
def build_cnn(input_layer):
    """1-D conv net: two conv+maxpool stages, dropout, then a dense layer.

    A channel axis is inserted first because Conv1DLayer expects input of
    the form (batch, channels, time).
    """
    net = DimshuffleLayer(input_layer, (0, 'x', 1))
    # Two identical convolution/pooling stages.
    for _ in range(2):
        net = Conv1DLayer(net, num_filters=4, filter_size=5,
                          nonlinearity=rectify)
        net = MaxPool1DLayer(net, pool_size=2)
    net = DropoutLayer(net, p=.5)
    return DenseLayer(net, num_units=256, nonlinearity=rectify)
def build_mlp(input_layer):
    """Two-layer perceptron: 256 rectified units per hidden layer."""
    hidden = DenseLayer(input_layer, num_units=256, nonlinearity=rectify)
    return DenseLayer(hidden, num_units=256, nonlinearity=rectify)
def sliding_window_input(input_layer):
    """Stack 5 shifted copies of the last axis into a sliding window.

    Each of the window_size slices is offset by one step, then all are
    concatenated along a new trailing axis, so every output position sees
    the current value plus the previous window_size-1 values.
    Assumes input is (batch, time) -- TODO confirm against callers.
    """
    window_size = 5
    sub_input = []
    for i in xrange(window_size):
        # Element i of the window is the value i steps back; for i == 0
        # the slice must run to the end (a stop of -0 would be empty).
        indices = slice(window_size - i - 1, -i if i > 0 else None)
        network = DimshuffleLayer(SliceLayer(input_layer, indices, axis=-1),
                                  (0, 1, 'x'))
        sub_input.append(network)
    network = ConcatLayer(sub_input, -1)
    return network
def build_lstm(input_layer):
    """Two stacked LSTMs; only the last time step's output is kept."""
    #network = sliding_window_input(input_layer)
    # Add a trailing feature axis: LSTMLayer expects (batch, time, features).
    network = DimshuffleLayer(input_layer, (0, 1, 'x'))
    n_hidden = 50
    grad_clipping = 20
    network = LSTMLayer(network, num_units=n_hidden,
                        grad_clipping=grad_clipping, nonlinearity=tanh)
    network = LSTMLayer(network, num_units=n_hidden,
                        grad_clipping=grad_clipping, nonlinearity=tanh)
    # Keep only the final time step as the sequence representation.
    network = SliceLayer(network, indices=-1, axis=1)
    #network = DenseLayer(network, num_units=256, nonlinearity=rectify)
    return network
# Maps an architecture name (the -n command-line option) to the function
# that builds the classifier network on top of the GP output.
cls_network = {
    'logreg': build_logistic_regression,
    'cnn': build_cnn,
    'mlp': build_mlp,
    #'lstm': build_lstm,  # disabled; build_lstm is kept for experiments
}
class GPNet(object):
    """Gaussian-process + neural-network classifier (exact GP inference).

    A posterior GP fitted to each irregularly sampled input series is
    evaluated at fixed points t_test; its mean (optionally plus Monte
    Carlo samples of the covariance) feeds a Lasagne classification
    network. GP hyperparameters can be trained jointly with the network.
    """
    def __init__(self,
                 n_classes,
                 inducing_pts,
                 t_test,
                 update_gp=True,
                 init_gp_params=None, # kernel parameters & noise parameter
                 #n_inducing_pts=50,
                 #t_min=0,
                 #t_max=1,
                 net_arch='logreg',
                 stochastic_train=True,
                 stochastic_predict=False,
                 n_samples=10,
                 n_epochs=100,
                 regularize_weight=0,
                 optimizer=adadelta,
                 optimizer_kwargs={},  # NOTE(review): mutable default; never mutated here, but fragile
                 load_params=None,
                 random_seed=123):
        '''
        n_samples: number of Monte Carlo samples to estimate the expectation
        n_inducing_pts: number of inducing points

        If load_params is given, the model architecture, network weights
        and GP parameters are restored from that pickle file and the
        corresponding constructor arguments are ignored.
        '''
        lasagne.random.set_rng(np.random.RandomState(seed=random_seed))
        self.rng = RandomStreams(seed=random_seed)
        self.t_test = t_test
        if load_params:
            # Restore architecture + weights + GP params from disk.
            (model_params,
             network_params,
             init_gp_params) = pickle_load(load_params)
            (self.net_arch,
             self.n_classes,
             self.inducing_pts,
             self.idx_test, self.w_test,
             self.gp_output_len) = model_params
        else:
            self.net_arch = net_arch
            self.n_classes = n_classes
            self.inducing_pts = inducing_pts
            self.idx_test, self.w_test = sparse_w(inducing_pts, t_test)
            self.gp_output_len = len(t_test)
        self.n_epochs = n_epochs
        self.n_samples = n_samples
        self.post_gp = PosteriorGP(t_test, init_params=init_gp_params)
        self.update_gp = update_gp
        self.regularize_weight = regularize_weight
        self.optimizer = optimizer
        self.optimizer_kwargs = optimizer_kwargs
        # Save stochastic train/predict flags for storing parameters
        self.stochastic_train = stochastic_train
        self.stochastic_predict = stochastic_predict
        self.compile_train_predict(stochastic_train, stochastic_predict)
        if load_params:
            # Must happen after compile_train_predict builds the networks.
            self.load_params(network_params)
    def compile_train_predict(self, stochastic_train, stochastic_predict):
        """Build the Theano graphs and compile train_fn / predict_fn.

        When stochastic, the network input is mu plus n_samples draws from
        the posterior covariance, and the loss is averaged over samples.
        """
        # symbolic functions to compute marginal posterior GP
        input_vars = self.post_gp.data_variables
        gp_hyperparams = self.post_gp.params
        self.gp_hyperparams = gp_hyperparams
        mu = self.post_gp.mean()
        mu = mu.dimshuffle('x', 0) # make a row out of 1d vector (N to 1xN)
        self.train_network = self.extend_network(mu, stochastic_train)
        train_predict = lasagne.layers.get_output(self.train_network)
        # Compute the exepcted prediction
        #if stochastic_train and self.n_samples > 1:
        #    train_predict = train_predict.mean(axis=0, keepdims=True)
        label = T.ivector('label')
        # For expected loss
        if stochastic_train:
            # One label copy per Monte Carlo sample.
            label_rep = label.repeat(self.n_samples)
        else:
            label_rep = label
        loss = categorical_crossentropy(train_predict, label_rep).mean()
        # For expected prediction
        #loss = categorical_crossentropy(train_predict, label).mean()
        if self.regularize_weight > 0:
            penalty = (self.regularize_weight *
                       regularize_network_params(self.train_network, l2))
            loss += penalty
        params = lasagne.layers.get_all_params(self.train_network,
                                               trainable=True)
        update_params = params
        if self.update_gp:
            # NOTE: += extends the same list object bound to `params`.
            update_params += gp_hyperparams
        # GP data inputs are constants w.r.t. the gradient.
        grad_loss = theano.grad(loss, update_params,
                                consider_constant=input_vars)
        updates = self.optimizer(grad_loss, update_params,
                                 **self.optimizer_kwargs)
        self.train_fn = theano.function(input_vars + [label],
                                        loss, updates=updates)
        if stochastic_train == stochastic_predict:
            # Train and test can share one network (and its weights).
            self.test_network = self.train_network
            self.copy_params = False
        else:
            self.test_network = self.extend_network(mu, stochastic_predict)
            self.copy_params = True
        # Set deterministic=True for dropout training if used.
        test_predict = lasagne.layers.get_output(self.test_network,
                                                 deterministic=True)
        if stochastic_predict and self.n_samples > 1:
            # Average class probabilities over Monte Carlo samples.
            test_predict = test_predict.mean(axis=0, keepdims=True)
        self.predict_fn = theano.function(input_vars, test_predict)
    def cov_mat(self, x1, x2, exp_a, exp_b):
        """Squared-exponential kernel matrix between vectors x1 and x2."""
        x1_col = x1.dimshuffle(0, 'x')
        x2_row = x2.dimshuffle('x', 0)
        K = exp_a * T.exp(-exp_b * T.sqr(x1_col - x2_row))
        return K
    def extend_network(self, mu, draw_sample):
        """Attach the classifier network on top of the GP output.

        draw_sample=False feeds the posterior mean only (batch of 1);
        draw_sample=True feeds n_samples posterior draws.
        """
        if not draw_sample:
            batch_size = 1
            input_data = mu
        else:
            batch_size = self.n_samples
            cov_zs = self.post_gp.cov_rand_proj(n_sample=batch_size)
            input_data = mu + cov_zs
        input_layer = InputLayer(shape=(batch_size, self.gp_output_len),
                                 input_var=input_data)
        network_builder = cls_network[self.net_arch]
        network = network_builder(input_layer)
        #network = DropoutLayer(network, p=.5)
        l_output = DenseLayer(network, num_units=self.n_classes,
                              nonlinearity=softmax)
        return l_output
    def predict(self, x, y):
        """Return the most probable class index for each (x, y) series."""
        test_prediction = self.predict_proba(x, y)
        return np.argmax(test_prediction, axis=1)
    def predict_proba(self, x, y):
        """Return class-probability rows, one series at a time."""
        test_prediction = []
        for each_x, each_y in izip(x, y):
            test_prediction.append(
                self.predict_fn(each_x, self.t_test, each_y))
        test_prediction = np.vstack(test_prediction)
        return test_prediction
    def evaluate_prediction(self, x_test, y_test, l_test):
        """Return (accuracy, crossentropy) on a labeled set; (0, 0) when
        no evaluation data is given."""
        if x_test is None:
            return 0, 0
        predict_test_proba = self.predict_proba(x_test, y_test)
        predict_test = np.argmax(predict_test_proba, axis=1)
        # accuracy
        accuracy = np.mean(l_test == predict_test)
        # categorical crossentropy
        prob_hit = predict_test_proba[np.arange(len(l_test)), l_test]
        crossentropy = -np.log(prob_hit).mean()
        return accuracy, crossentropy
    def inspect_train(self, x_train, y_train, l_train,
                      x_valid, y_valid, l_valid, x_test, y_test, l_test,
                      save_params=None):
        """Train for n_epochs, printing loss/accuracy on all three splits
        each epoch; optionally snapshot parameters per epoch."""
        for epoch in xrange(self.n_epochs):
            history = []
            count = 1
            t1 = time.time()
            # One gradient step per training series (batch size 1).
            for each_x, each_y, each_label in izip(x_train, y_train, l_train):
                history.append(self.train_fn(each_x, self.t_test, each_y,
                                             [each_label]))
                count += 1
                if count % 20 == 0:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                    #for v in self.gp_hyperparams:
                    #    print v.get_value()
                    #print '-' * 30
            t2 = time.time()
            print ' ', t2 - t1
            #print
            mean_loss = np.mean(history)
            if self.copy_params:
                # Mirror trained weights into the separate test network.
                all_params = get_all_param_values(self.train_network)
                set_all_param_values(self.test_network, all_params)
            accuracy_train, loss_train = self.evaluate_prediction(
                x_train, y_train, l_train)
            accuracy_valid, loss_valid = self.evaluate_prediction(
                x_valid, y_valid, l_valid)
            accuracy_test, loss_test = self.evaluate_prediction(
                x_test, y_test, l_test)
            print '%4d - %.5f %.5f %.5f %.5f %.5f %.5f %.5f' % (
                epoch, mean_loss, accuracy_train, loss_train,
                accuracy_valid, loss_valid, accuracy_test, loss_test)
            for v in self.gp_hyperparams:
                print v.get_value()
            if save_params:
                self.save_params(save_params + '-%03d' % epoch)
    def save_params(self, params_file):
        """Pickle (architecture metadata, network weights, GP params)."""
        model_params = (self.net_arch, self.n_classes, self.inducing_pts,
                        self.idx_test, self.w_test, self.gp_output_len)
        network_params = get_all_param_values(self.train_network)
        gp_params = [p.get_value() for p in self.post_gp.params]
        pickle_save(params_file, model_params, network_params, gp_params)
    def load_params(self, network_params):
        """Install saved weights into both train and test networks."""
        set_all_param_values(self.train_network, network_params)
        set_all_param_values(self.test_network, network_params)
def run_gpnet(data,
task_name, # for logging
n_inducing_pts=256,
n_network_inputs=1000,
update_gp=True,
init_gp_params=None, # kernel parameters & noise parameter
net_arch='logreg',
regularize_weight=0,
stochastic_train=True,
stochastic_predict=False,
n_samples=5,
n_epochs=500,
optimizer=adadelta,
optimizer_kwargs={},
subset_train=None,
validation_set=.3,
swap=False,
save_params_epochs=None,
save_params=None,
load_params=None):
np.random.seed(1)
x_train, y_train, x_test, y_test, l_train, l_test = pickle_load(data)
if swap:
x_train, x_test = x_test, x_train
y_train, y_test = y_test, y_train
l_train, l_test = l_test, l_train
if subset_train:
if 0 < subset_train <= 1:
n_train = int(len(l_train) * validation_set)
elif subset_train > 1:
n_train = int(subset_train)
x_train = x_train[:n_train]
y_train = y_train[:n_train]
l_train = l_train[:n_train]
x_valid, y_valid, l_valid = None, None, None
if validation_set:
total_train = len(l_train)
if 0 < validation_set <= 1:
n_valid = int(total_train * validation_set)
elif validation_set > 1:
n_valid = int(validation_set)
n_train = total_train - n_valid
x_train, x_valid = x_train[:n_train], x_train[n_train:]
y_train, y_valid = y_train[:n_train], y_train[n_train:]
l_train, l_valid = l_train[:n_train], l_train[n_train:]
n_classes = len(set(l_train) | set(l_test))
t_min, t_max = 0, 1
extra_u = 2
margin = (t_max - t_min) / (n_inducing_pts - extra_u * 2) * 2
inducing_pts = np.linspace(t_min - margin, t_max + margin, n_inducing_pts)
if n_network_inputs <= 0:
t_test = inducing_pts[1:-1]
else:
t_test = np.linspace(t_min, t_max, n_network_inputs)
gpnet = GPNet(n_classes,
inducing_pts=inducing_pts,
t_test=t_test,
update_gp=update_gp,
init_gp_params=init_gp_params,
net_arch=net_arch,
regularize_weight=regularize_weight,
stochastic_train=stochastic_train,
stochastic_predict=stochastic_predict,
n_samples=n_samples,
n_epochs=n_epochs,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
load_params=load_params)
def print_parameters():
print '[exact GP inference]'
print 'data:', data
print 'n_train:', len(x_train)
print 'n_valid:', len(x_valid) if x_valid else 0
print 'n_test:', len(x_test)
print 'n_classes:', n_classes
print 'n_net_inputs:', len(t_test)
print 'stochastic_train:', stochastic_train
print 'stochastic_predict:', stochastic_predict
print 'n_samples:', n_samples
print 'n_epochs:', n_epochs
print 'network:', gpnet.net_arch
print 'optimizer:', optimizer.__name__
print 'optimizer_kwargs:', optimizer_kwargs
print 'regularize_weight:', regularize_weight
print 'init_gp_params:', init_gp_params
print 'update_gp:', update_gp
print 'load:', load_params
print 'save:', save_params
print 'save_epochs:', save_params_epochs
print_parameters()
for v in gpnet.post_gp.params:
print v.get_value()
t1 = time.time()
gpnet.inspect_train(x_train, y_train, l_train,
x_valid, y_valid, l_valid,
x_test, y_test, l_test,
save_params_epochs)
t2 = time.time()
if save_params:
gpnet.save_params(save_params)
print_parameters()
print 'time:', t2 - t1
print task_name
return
gpnet.train(x_train, y_train, l_train)
predict_train = gpnet.predict(x_train, y_train)
print np.mean(l_train == predict_train)
predict_test = gpnet.predict(x_test, y_test)
print np.mean(l_test == predict_test)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', dest='data',
#default='data/UWaveGestureLibraryAll-10.pkl',
default='data/B-UWaveGestureLibraryAll-10.pkl',
help='data file')
parser.add_argument('-e', dest='epochs', type=int, default=50,
help='number of epochs')
parser.add_argument('-d', dest='det_train', action='store_true',
default=False, help='deterministic train')
parser.add_argument('-o', dest='optimizer', default='nesterov_momentum',
help='optimizer: nesterov_momentum, adagrad, adadelta')
parser.add_argument('-n', dest='net_arch', default='logreg',
help='network architecture: ' + ', '.join(cls_network))
parser.add_argument('-r', dest='lrate', type=float, default=0.0008,
help='learning rate')
parser.add_argument('-s', dest='subset', type=float, default=None,
help='portion or size of subset of data')
parser.add_argument('-v', dest='validate', type=float, default=.3,
help='portion or size of validation set (override -s)')
parser.add_argument('-p', dest='gp_params', default=None,
help='file of GP parameters')
parser.add_argument('-k', dest='ind_pts', type=int, default=256,
help='number of inducing points')
parser.add_argument('-i', dest='net_ins', type=int, default=0,
help='number of network inputs. '
'0: use inducing points')
parser.add_argument('-m', dest='samples', type=int, default=10,
help='number of Monte Carlo samples')
parser.add_argument('-g', dest='reg', type=float, default=0,
help='regularization weight')
parser.add_argument('-u', dest='fix_gp', action='store_true',
default=False, help='fix GP parameters')
parser.add_argument('-l', | |
* self.sr
t = pg.TextItem(utt['text'], anchor=(0.5, 0.5), color=text_color)
top_point = point_min - speaker_tier_range * (speaker_ind - 1)
y_mid_point = top_point - (speaker_tier_range / 2)
t.setPos(mid_point, y_mid_point)
font = t.textItem.font()
font.setPointSize(plot_text_font)
t.setFont(font)
if t.textItem.boundingRect().width() > plot_text_width:
t.setTextWidth(plot_text_width)
begin_line = pg.PlotCurveItem([b_s, b_s], [top_point, top_point - speaker_tier_range],
pen=pg.mkPen(pen))
end_line = pg.PlotCurveItem([e_s, e_s], [top_point, top_point - speaker_tier_range],
pen=pg.mkPen(pen))
begin_line.setClickable(False)
end_line.setClickable(False)
fill_brush = pg.mkBrush(interval_background_color)
fill_between = pg.FillBetweenItem(begin_line, end_line, brush=fill_brush)
return t, begin_line, end_line, fill_between
class UtteranceDetailWidget(QtWidgets.QWidget): # pragma: no cover
    """Widget showing one utterance's waveform, transcript and playback
    controls. Communicates with the application via the signals below."""
    lookUpWord = QtCore.pyqtSignal(object)            # word to look up in the dictionary
    createWord = QtCore.pyqtSignal(object)            # word to add to the dictionary
    saveUtterance = QtCore.pyqtSignal(object, object)
    selectUtterance = QtCore.pyqtSignal(object)
    createUtterance = QtCore.pyqtSignal(object, object, object, object)
    refreshCorpus = QtCore.pyqtSignal(object)
    updateSpeaker = QtCore.pyqtSignal(object, object)  # (utterance, new speaker name)
    utteranceUpdated = QtCore.pyqtSignal(object)       # utterance whose text changed
    def __init__(self, parent):
        """Build the plot, scroll bar, playback and transcript sub-widgets
        and wire their signals. Corpus/audio state starts empty until
        update_corpus()/related setters are called."""
        super(UtteranceDetailWidget, self).__init__(parent=parent)
        # Data/state (populated later by update_* methods).
        self.corpus = None
        self.dictionary = None
        self.utterance = None
        self.audio = None
        self.sr = None
        self.current_time = 0
        self.min_time = 0
        self.max_time = None
        self.selected_min = None
        self.selected_max = None
        self.background_color = '#000000'
        # Audio playback backend; position updates drive the play cursor.
        self.m_audioOutput = MediaPlayer()
        # self.m_audioOutput.error.connect(self.showError)
        self.m_audioOutput.positionChanged.connect(self.notified)
        self.m_audioOutput.stateChanged.connect(self.handleAudioState)
        # self.setFocusPolicy(QtCore.Qt.StrongFocus)
        # Waveform plot with a movable-by-code playback cursor line.
        self.ax = pg.PlotWidget()
        # self.ax.setFocusPolicy(QtCore.Qt.NoFocus)
        self.line = pg.InfiniteLine(
            pos=-20,
            pen=pg.mkPen('r', width=1),
            movable=False  # We have our own code to handle dragless moving.
        )
        self.ax.getPlotItem().hideAxis('left')
        self.ax.getPlotItem().setMouseEnabled(False, False)
        # self.ax.getPlotItem().setFocusPolicy(QtCore.Qt.NoFocus)
        self.ax.addItem(self.line)
        self.ax.getPlotItem().setMenuEnabled(False)
        self.ax.scene().sigMouseClicked.connect(self.update_current_time)
        layout = QtWidgets.QVBoxLayout()
        # Horizontal scrolling through the audio file.
        self.scroll_bar = QtWidgets.QScrollBar(QtCore.Qt.Horizontal)
        self.scroll_bar.valueChanged.connect(self.update_from_slider)
        button_layout = QtWidgets.QVBoxLayout()
        self.play_button = QtWidgets.QPushButton('Play')
        self.play_button.clicked.connect(self.play_audio)
        # self.play_button.setFocusPolicy(QtCore.Qt.NoFocus)
        # Speaker selector; hidden until a corpus provides speakers.
        self.speaker_dropdown = QtWidgets.QComboBox()
        self.speaker_dropdown.currentIndexChanged.connect(self.update_speaker)
        self.speaker_dropdown.hide()
        # self.speaker_dropdown.setFocusPolicy(QtCore.Qt.NoFocus)
        # Volume control row (icon + slider).
        volume_layout = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel()
        label.setPixmap(self.style().standardPixmap(QtWidgets.QStyle.SP_MediaVolume))
        label.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        volume_layout.addWidget(label)
        self.volume_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.volume_slider.setTickInterval(1)
        self.volume_slider.setMaximum(100)
        self.volume_slider.setMinimum(0)
        self.volume_slider.setMaximumWidth(100)
        self.volume_slider.setSliderPosition(self.m_audioOutput.volume())
        self.volume_slider.sliderMoved.connect(self.m_audioOutput.setVolume)
        volume_layout.addWidget(self.volume_slider)
        button_layout.addWidget(self.play_button)
        button_layout.addLayout(volume_layout)
        button_layout.addWidget(self.speaker_dropdown)
        # Editable transcript for the selected utterance.
        self.text_widget = TranscriptionWidget()
        self.text_widget.setMaximumHeight(100)
        self.text_widget.playAudio.connect(self.play_audio)
        self.text_widget.textChanged.connect(self.update_utterance_text)
        self.text_widget.setFontPointSize(20)
        self.text_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # self.text_widget.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.text_widget.customContextMenuRequested.connect(self.generate_context_menu)
        text_layout = QtWidgets.QHBoxLayout()
        text_layout.addWidget(self.text_widget)
        text_layout.addLayout(button_layout)
        layout.addWidget(self.ax)
        layout.addWidget(self.scroll_bar)
        layout.addLayout(text_layout)
        self.setLayout(layout)
        # Per-file audio state.
        self.wav_path = None
        self.channels = 1
        self.wave_data = None
        self.long_file = None
        self.sr = None
        self.file_utts = []
        self.selected_utterance = None
def update_config(self, config):
    """Copy display settings out of *config* and redraw the current window."""
    for key in (
            'background_color', 'play_line_color', 'selected_range_color',
            'selected_line_color', 'break_line_color', 'text_color',
            'wave_line_color', 'interval_background_color',
            'plot_text_font', 'plot_text_width'):
        setattr(self, key, config[key])
    self.update_plot(self.min_time, self.max_time)
def update_from_slider(self, value):
    """Shift the visible window so it starts at *value*, keeping its width."""
    if self.max_time is not None:
        span = self.max_time - self.min_time
        self.update_plot(value, value + span)
def update_speaker(self):
    """Emit updateSpeaker with the current utterance and dropdown selection."""
    chosen = self.speaker_dropdown.currentText()
    self.updateSpeaker.emit(self.utterance, chosen)
def update_utterance_text(self):
    """Write the edited transcript back to the corpus and refresh the view.

    The text is normalized (stripped, lower-cased) before being stored.
    """
    if self.utterance is None:
        return
    text = self.text_widget.toPlainText().strip().lower()
    self.corpus.text_mapping[self.utterance] = text
    # Update the first matching cached utterance entry, if any.
    entry = next((u for u in self.file_utts if u['utt'] == self.utterance), None)
    if entry is not None:
        entry['text'] = text
    self.update_plot(self.min_time, self.max_time)
    self.utteranceUpdated.emit(self.utterance)
def update_plot_scale(self):
    # Keep the secondary viewbox (p2) aligned with the main plot's
    # scene geometry after a resize/zoom.
    self.p2.setGeometry(self.p1.vb.sceneBoundingRect())
def refresh_speaker_dropdown(self):
    """Repopulate the speaker dropdown from the corpus, in sorted order.

    Clears the dropdown first; does nothing further when no corpus is
    loaded.  Falsy speaker keys (e.g. '') are skipped.
    """
    self.speaker_dropdown.clear()
    if not self.corpus:
        return
    # Bug fix: the original computed sorted(...) into an unused variable
    # and then iterated the raw (unsorted) dict keys; iterate the sorted
    # keys as clearly intended.
    for speaker in sorted(self.corpus.speak_utt_mapping.keys()):
        if not speaker:
            continue
        self.speaker_dropdown.addItem(speaker)
def reset(self):
    """Clear all file/utterance state, detach the scroll bar, wipe the plot."""
    self.utterance = None
    self.file_name = None
    self.wave_data = None
    self.wav_path = None
    # Disconnecting a signal that is not connected raises TypeError;
    # guard it the same way update_file_name/update_plot do elsewhere in
    # this file, so reset() is safe to call repeatedly.
    try:
        self.scroll_bar.valueChanged.disconnect(self.update_from_slider)
    except TypeError:
        pass
    self.ax.getPlotItem().clear()
    self.reset_text()
def update_corpus(self, corpus):
    """Switch to *corpus* (possibly None) and refresh dependent widgets."""
    self.wave_data = None
    self.corpus = corpus
    if corpus is None:
        # Full teardown; note we still fall through so the speaker
        # dropdown below is cleared (refresh_speaker_dropdown clears it
        # before its no-corpus early return).
        self.reset()
    self.refresh_speaker_dropdown()
    if self.utterance:
        self.reset_text()
def update_dictionary(self, dictionary):
    # Store the pronunciation dictionary used by the context-menu
    # lookup/add-word actions.
    self.dictionary = dictionary
def generate_context_menu(self, location):
    """Show the transcript context menu, extended with dictionary actions.

    The word under the click position gets "look up" and "add
    pronunciation" actions that emit lookUpWord / createWord.
    """
    menu = self.text_widget.createStandardContextMenu()
    # Grab the word under the click so the actions can reference it.
    cursor = self.text_widget.cursorForPosition(location)
    cursor.select(QtGui.QTextCursor.WordUnderCursor)
    word = cursor.selectedText()
    look_up_action = QtWidgets.QAction("Look up '{}' in dictionary".format(word), self)
    create_action = QtWidgets.QAction("Add pronunciation for '{}'".format(word), self)
    look_up_action.triggered.connect(lambda: self.lookUpWord.emit(word))
    create_action.triggered.connect(lambda: self.createWord.emit(word))
    for action in (look_up_action, create_action):
        menu.addAction(action)
    # Show the menu at the click position (modal until dismissed).
    menu.exec_(self.text_widget.mapToGlobal(location))
def update_current_time(self, ev):
    """Handle a mouse click on the waveform plot.

    Depending on where the click lands (speaker tier below the waveform
    vs. the waveform itself) and on modifiers / double-click, this
    selects an utterance, creates a new one, or moves the playback line.
    """
    modifiers = QtWidgets.QApplication.keyboardModifiers()
    point = self.ax.getPlotItem().vb.mapSceneToView(ev.scenePos())
    x = point.x()
    y = point.y()
    y_range = self.max_point - self.min_point
    # Each speaker tier occupies half of the waveform's vertical range.
    speaker_tier_range = (y_range / 2)
    move_line = False
    # x is in samples; convert to seconds.
    time = x / self.sr
    if y < self.min_point:
        # Click below the waveform: inside the speaker tiers. Find which
        # speaker's band the y coordinate falls into.
        speaker = None
        for k, s_id in self.speaker_mapping.items():
            top_pos = self.min_point - speaker_tier_range * (s_id - 1)
            bottom_pos = top_pos - speaker_tier_range
            if bottom_pos < y < top_pos:
                speaker = k
                break
        # file_utts is sorted by 'begin', so we can stop scanning at the
        # first utterance that starts after the click time.
        utt = None
        for u in self.file_utts:
            if u['end'] < time:
                continue
            if u['begin'] > time:
                break
            if u['speaker'] != speaker:
                continue
            utt = u
        if utt is not None:
            # NOTE(review): utt is already known non-None here, so the
            # repeated checks below are redundant, and with Ctrl held the
            # utterance is emitted twice (once in each branch section) —
            # confirm whether the double emit is intentional.
            if modifiers == QtCore.Qt.ControlModifier and utt is not None:
                self.selectUtterance.emit(utt['utt'])
            else:
                self.selectUtterance.emit(None)
            if utt is not None:
                self.selectUtterance.emit(utt['utt'])
                self.m_audioOutput.setMinTime(self.selected_min)
                self.m_audioOutput.setMaxTime(self.selected_max)
                self.m_audioOutput.setPosition(self.m_audioOutput.min_time)
        elif ev.double():
            # Double-click in an empty tier: create a one-second
            # utterance centred on the click for that tier's speaker.
            beg = time - 0.5
            end = time + 0.5
            channel = 0
            if self.channels > 1:
                # Speakers in the second half of the ordering belong to
                # channel 1 of a stereo file.
                ind = self.corpus.speaker_ordering[self.file_name].index(speaker)
                if ind >= len(self.corpus.speaker_ordering[self.file_name]) / 2:
                    channel = 1
            self.createUtterance.emit(speaker, beg, end, channel)
            return
        else:
            move_line = True
    elif ev.double():
        # Double-click on the waveform itself: create a one-second
        # utterance, choosing the speaker from the clicked channel.
        beg = time - 0.5
        end = time + 0.5
        channel = 0
        if self.channels > 1:
            # NOTE(review): y < 2 appears to select the lower (second)
            # channel given each channel spans two vertical units in
            # update_plot — confirm.
            if y < 2:
                channel = 1
        if self.file_name not in self.corpus.speaker_ordering:
            self.corpus.speaker_ordering[self.file_name] = ['speech']
        if channel == 0:
            ind = 0
        else:
            ind = int(round(len(self.corpus.speaker_ordering[self.file_name]) / 2))
        speaker = self.corpus.speaker_ordering[self.file_name][ind]
        self.createUtterance.emit(speaker, beg, end, channel)
        return
    else:
        move_line = True
    if move_line:
        # Plain click: move the playback line, clamped to the visible
        # window, and cue playback from the new position.
        self.current_time = x / self.sr
        if self.current_time < self.min_time:
            self.current_time = self.min_time
            x = self.current_time * self.sr
        if self.current_time > self.max_time:
            self.current_time = self.max_time
            x = self.current_time * self.sr
        self.line.setPos(x)
        self.m_audioOutput.setMinTime(self.current_time)
        self.m_audioOutput.setMaxTime(self.max_time)
        self.m_audioOutput.setPosition(self.m_audioOutput.min_time)
def refresh_view(self):
    """Re-read the current file's utterances and redraw the visible window."""
    self.refresh_utterances()
    self.update_plot(self.min_time, self.max_time)
def refresh_utterances(self):
    """Rebuild self.file_utts (sorted by begin time) for the current file.

    Also refreshes self.wav_path / self.wav_info and the scroll bar's
    range.  Segmented files get one entry per segment; otherwise a
    single whole-file entry is created.
    """
    self.file_utts = []
    if not self.file_name:
        return
    self.wav_path = self.corpus.utt_wav_mapping[self.file_name]
    self.wav_info = get_wav_info(self.wav_path)
    self.scroll_bar.setMaximum(self.wav_info['duration'])
    if self.file_name in self.corpus.file_utt_mapping:
        for utt_id in self.corpus.file_utt_mapping[self.file_name]:
            seg = self.corpus.segments[utt_id]
            self.file_utts.append({
                'utt': utt_id,
                'begin': seg['begin'],
                'end': seg['end'],
                'text': self.corpus.text_mapping[utt_id],
                'speaker': self.corpus.utt_speak_mapping[utt_id],
            })
    else:
        # No segmentation: the file itself is a single utterance.
        utt_id = self.file_name
        self.file_utts.append({
            'utt': utt_id,
            'begin': 0,
            'end': self.wav_info['duration'],
            'text': self.corpus.text_mapping[utt_id],
            'speaker': self.corpus.utt_speak_mapping[utt_id],
        })
    self.file_utts.sort(key=lambda item: item['begin'])
def update_file_name(self, file_name):
    """Load *file_name*'s waveform/metadata and show its first 10 seconds."""
    if not self.corpus:
        self.file_name = None
        self.wave_data = None
        return
    self.file_name = file_name
    # Files keyed directly in utt_speak_mapping are single whole-file
    # utterances; segmented (long) files are not, and get the speaker
    # dropdown shown.
    if file_name in self.corpus.utt_speak_mapping:
        self.long_file = False
        self.speaker_dropdown.hide()
    else:
        self.long_file = True
        self.speaker_dropdown.show()
    # Only re-scan utterances when the underlying wav actually changed.
    if self.wav_path != self.corpus.utt_wav_mapping[file_name]:
        self.refresh_utterances()
    self.wav_path = self.corpus.utt_wav_mapping[file_name]
    # Reconnect the scroll bar; disconnect raises TypeError when the
    # signal was not connected, which is fine to ignore.
    try:
        self.scroll_bar.valueChanged.disconnect(self.update_from_slider)
    except TypeError:
        pass
    self.scroll_bar.valueChanged.connect(self.update_from_slider)
    self.channels = self.wav_info['num_channels']
    # Initial window: first 10 s, or the whole file if shorter.
    end = min(10, self.wav_info['duration'])
    self.update_plot(0, end)
    # Point the media player at the wav and cue playback at the start.
    p = QtCore.QUrl.fromLocalFile(self.wav_path)
    self.m_audioOutput.setMedia(QtMultimedia.QMediaContent(p))
    self.updatePlayTime(0)
    self.m_audioOutput.setMinTime(0)
    self.m_audioOutput.setMaxTime(end)
    self.m_audioOutput.setPosition(self.m_audioOutput.min_time)
def update_utterance(self, utterance):
    """Focus the view on *utterance*, loading its file first if needed."""
    if utterance is None:
        return
    self.utterance = utterance
    self.reset_text()
    if self.utterance in self.corpus.segments:
        # Segmented (long-file) utterance: zoom to its time span with a
        # one-second margin on each side.
        segment = self.corpus.segments[self.utterance]
        file_name = segment['file_name']
        begin = segment['begin']
        end = segment['end']
        begin = float(begin)
        end = float(end)
        self.update_file_name(file_name)
        self.selected_min = begin
        self.selected_max = end
        self.long_file = True
        begin -= 1
        end += 1
    else:
        # Whole-file utterance: show the entire wav.
        self.update_file_name(self.utterance)
        self.long_file = False
        self.wave_data = None
        begin = 0
        end = self.wav_info['duration']
    self.update_plot(begin, end)
    if self.long_file:
        # Restrict playback to the selected segment.
        self.m_audioOutput.setMinTime(self.selected_min)
        self.m_audioOutput.setMaxTime(self.selected_max)
        self.updatePlayTime(self.selected_min)
        self.m_audioOutput.setPosition(self.m_audioOutput.min_time)
def update_selected_times(self, region):
    """Convert the dragged region from samples to seconds and cue playback."""
    begin, end = region.getRegion()
    self.selected_min = begin / self.sr
    self.selected_max = end / self.sr
    self.updatePlayTime(self.selected_min)
    self.m_audioOutput.setMinTime(self.selected_min)
    self.m_audioOutput.setMaxTime(self.selected_max)
    self.m_audioOutput.setPosition(self.m_audioOutput.min_time)
def update_plot(self, begin, end):
self.ax.getPlotItem().clear()
self.ax.setBackground(self.background_color)
if self.corpus is None:
return
if self.wav_path is None:
return
from functools import partial
if end is None:
return
if end <= 0:
end = self.max_time
if begin < 0:
begin = 0
if self.long_file:
duration = end - begin
self.wave_data, self.sr = librosa.load(self.wav_path, offset=begin, duration=duration, sr=None, mono=False)
elif self.wave_data is None:
self.wave_data, self.sr = librosa.load(self.wav_path, sr=None)
# Normalize y1 between 0 and 2
self.wave_data /= np.max(np.abs(self.wave_data), axis=0) # between -1 and 1
self.wave_data += 1 # shift to 0 and 2
begin_samp = int(begin * self.sr)
end_samp = int(end * self.sr)
window_size = end - begin
try:
self.scroll_bar.valueChanged.disconnect(self.update_from_slider)
except TypeError:
pass
self.scroll_bar.setValue(begin)
self.scroll_bar.setPageStep(window_size)
self.scroll_bar.setMaximum(self.wav_info['duration'] - window_size)
self.scroll_bar.valueChanged.connect(self.update_from_slider)
self.min_time = begin
self.max_time = end
self.ax.addItem(self.line)
self.updatePlayTime(self.min_time)
wave_pen = pg.mkPen(self.wave_line_color, width=1)
if len(self.wave_data.shape) > 1 and self.wave_data.shape[0] == 2:
if not self.long_file:
y0 = self.wave_data[0, begin_samp:end_samp]
y1 = self.wave_data[1, begin_samp:end_samp]
x = np.arange(start=begin_samp, stop=end_samp)
else:
y0 = self.wave_data[0, :]
y1 = self.wave_data[1, :]
x = np.arange(start=begin_samp, stop=begin_samp + y0.shape[0])
# Normalize y0 between 2 and 4
y0 /= np.max(np.abs(y0), axis=0) # between -1 and 1
y0[np.isnan(y0)] = 0
y0 += 3 # shift to 2 and 4
# Normalize y1 between 0 and 2
y1 /= np.max(np.abs(y1), axis=0) # between -1 and 1
y1[np.isnan(y1)] = 0
y1 += 1 # shift to 0 and 2
pen = pg.mkPen(self.break_line_color, width=1)
pen.setStyle(QtCore.Qt.DotLine)
sub_break_line = pg.InfiniteLine(
pos=2,
angle=0,
pen=pen,
movable=False # We have our own code to handle dragless moving.
)
self.ax.addItem(sub_break_line)
self.ax.plot(x, y0, pen=wave_pen)
self.ax.plot(x, y1, pen=wave_pen)
self.min_point = 0
self.max_point = 4
else:
if not self.long_file:
y = self.wave_data[begin_samp:end_samp]
x = np.arange(start=begin_samp, stop=begin_samp + y.shape[0])
else:
y = self.wave_data
y /= np.max(np.abs(y), axis=0) # between -1 and 1
y += 1 # shift to 0 and 2
x = np.arange(start=begin_samp, stop=begin_samp + y.shape[0])
self.min_point = 0
self.max_point = 2
self.ax.plot(x, y, pen=wave_pen)
if self.file_name in self.corpus.speaker_ordering:
break_line = pg.InfiniteLine(
pos=self.min_point,
angle=0,
pen=pg.mkPen(self.break_line_color, width=2),
| |
"checksum": "0x3603",
"lsa-id": "0.0.0.19",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x8000034d"
},
{
"advertising-router": "10.169.14.240",
"age": "2138",
"checksum": "0xab95",
"lsa-id": "0.0.0.22",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x800002b9"
},
{
"advertising-router": "10.169.14.240",
"age": "501",
"checksum": "0x7049",
"lsa-id": "0.0.0.23",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x80000247"
},
{
"advertising-router": "10.169.14.240",
"age": "2410",
"checksum": "0x4e6c",
"lsa-id": "0.0.0.24",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x80000246"
},
{
"advertising-router": "10.169.14.241",
"age": "2593",
"checksum": "0xd341",
"lsa-id": "0.0.0.9",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x800002f0"
},
{
"advertising-router": "10.169.14.241",
"age": "593",
"checksum": "0xd4f2",
"lsa-id": "0.0.0.10",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x80000246"
},
{
"advertising-router": "10.169.14.241",
"age": "2926",
"checksum": "0xe6df",
"lsa-id": "0.0.0.11",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x80000245"
},
{
"advertising-router": "10.189.5.252",
"age": "1913",
"checksum": "0x3ff4",
"lsa-id": "0.0.0.1",
"lsa-length": "44",
"lsa-type": "Extern",
"our-entry": True,
"sequence-number": "0x8000063f"
},
{
"advertising-router": "10.189.5.253",
"age": "1915",
"checksum": "0x7dcd",
"lsa-id": "0.0.0.1",
"lsa-length": "44",
"lsa-type": "Extern",
"sequence-number": "0x80000e1e"
},
{
"advertising-router": "10.189.5.252",
"age": "413",
"checksum": "0xae5c",
"lsa-id": "0.0.0.2",
"lsa-length": "56",
"lsa-type": "Link",
"our-entry": True,
"sequence-number": "0x8000178a"
},
{
"advertising-router": "10.189.5.253",
"age": "2415",
"checksum": "0x13d7",
"lsa-id": "0.0.0.2",
"lsa-length": "56",
"lsa-type": "Link",
"sequence-number": "0x80001787"
},
{
"advertising-router": "10.169.14.240",
"age": "1047",
"checksum": "0xbe92",
"lsa-id": "0.0.0.3",
"lsa-length": "56",
"lsa-type": "Link",
"sequence-number": "0x8000179e"
},
{
"advertising-router": "10.189.5.252",
"age": "2913",
"checksum": "0x607c",
"lsa-id": "0.0.0.3",
"lsa-length": "56",
"lsa-type": "Link",
"our-entry": True,
"sequence-number": "0x80001789"
},
{
"advertising-router": "10.189.5.252",
"age": "2413",
"checksum": "0xa440",
"lsa-id": "0.0.0.1",
"lsa-length": "44",
"lsa-type": "Link",
"our-entry": True,
"sequence-number": "0x8000178b"
}
],
"ospf3-intf-header": [
{
"ospf-area": "0.0.0.8",
"ospf-intf": "ge-0/0/0.0"
},
{
"ospf-area": "0.0.0.8",
"ospf-intf": "ge-0/0/1.0"
},
{
"ospf-area": "0.0.0.8",
"ospf-intf": "lo0.0"
}
]
}
}
def test_empty(self):
    """Empty device output must raise SchemaEmptyParserError."""
    self.device = Mock(**self.empty_output)
    obj = ShowOspf3Database(device=self.device)
    with self.assertRaises(SchemaEmptyParserError):
        obj.parse()
def test_golden(self):
    """Golden CLI output must parse into the expected dictionary."""
    self.device = Mock(**self.golden_output)
    obj = ShowOspf3Database(device=self.device)
    parsed_output = obj.parse()
    self.assertEqual(parsed_output, self.golden_parsed_output)
class TestShowOspf3InterfaceExtensive(unittest.TestCase):
    """Unit tests for the ShowOspf3Interface Extensive parser.

    Feeds canned 'show ospf3 interface extensive' CLI output through the
    parser and compares against the expected structured dictionary.
    """
    maxDiff = None
    device = Device(name='aDevice')
    # Simulated device responses: empty output and a golden sample.
    empty_output = {'execute.return_value': ''}
    golden_output = {'execute.return_value': """
    show ospf3 interface extensive | no-more
Interface           State   Area            DR ID           BDR ID          Nbrs
ge-0/0/0.0          PtToPt  0.0.0.8         0.0.0.0         0.0.0.0            1
  Address fe80::250:56ff:fe8d:c829, Prefix-length 64
  OSPF3-Intf-index 2, Type P2P, MTU 1500, Cost 5
  Adj count: 1, Router LSA ID: 0
  Hello 10, Dead 40, ReXmit 5, Not Stub
  Protection type: None
ge-0/0/1.0          PtToPt  0.0.0.8         0.0.0.0         0.0.0.0            1
  Address fe80::250:56ff:fe8d:a96c, Prefix-length 64
  OSPF3-Intf-index 3, Type P2P, MTU 1500, Cost 100
  Adj count: 1, Router LSA ID: 0
  Hello 10, Dead 40, ReXmit 5, Not Stub
  Protection type: None
lo0.0               DR      0.0.0.8         10.189.5.252    0.0.0.0            0
  Address fe80::250:560f:fc8d:7c08, Prefix-length 128
  OSPF3-Intf-index 1, Type LAN, MTU 65535, Cost 0, Priority 128
  Adj count: 0, Router LSA ID: -
  DR addr fe80::250:560f:fc8d:7c08
  Hello 10, Dead 40, ReXmit 5, Not Stub
  Protection type: None
    """
    }
    # Expected parser output for golden_output.
    golden_parsed_output = {
        "ospf3-interface-information": {
            "ospf3-interface": [
                {
                    "adj-count": "1",
                    "bdr-id": "0.0.0.0",
                    "dead-interval": "40",
                    "dr-id": "0.0.0.0",
                    "hello-interval": "10",
                    "interface-address": "fe80::250:56ff:fe8d:c829",
                    "interface-cost": "5",
                    "interface-name": "ge-0/0/0.0",
                    "interface-type": "P2P",
                    "mtu": "1500",
                    "neighbor-count": "1",
                    "ospf-area": "0.0.0.8",
                    "ospf-interface-protection-type": "None",
                    "ospf-interface-state": "PtToPt",
                    "ospf-stub-type": "Not Stub",
                    "ospf3-interface-index": "2",
                    "ospf3-router-lsa-id": "0",
                    "prefix-length": "64",
                    "retransmit-interval": "5"
                },
                {
                    "adj-count": "1",
                    "bdr-id": "0.0.0.0",
                    "dead-interval": "40",
                    "dr-id": "0.0.0.0",
                    "hello-interval": "10",
                    "interface-address": "fe80::250:56ff:fe8d:a96c",
                    "interface-cost": "100",
                    "interface-name": "ge-0/0/1.0",
                    "interface-type": "P2P",
                    "mtu": "1500",
                    "neighbor-count": "1",
                    "ospf-area": "0.0.0.8",
                    "ospf-interface-protection-type": "None",
                    "ospf-interface-state": "PtToPt",
                    "ospf-stub-type": "Not Stub",
                    "ospf3-interface-index": "3",
                    "ospf3-router-lsa-id": "0",
                    "prefix-length": "64",
                    "retransmit-interval": "5"
                },
                {
                    "adj-count": "0",
                    "bdr-id": "0.0.0.0",
                    "dead-interval": "40",
                    "dr-address": "fe80::250:560f:fc8d:7c08",
                    "dr-id": "10.189.5.252",
                    "hello-interval": "10",
                    "interface-address": "fe80::250:560f:fc8d:7c08",
                    "interface-cost": "0",
                    "interface-name": "lo0.0",
                    "interface-type": "LAN",
                    "mtu": "65535",
                    "neighbor-count": "0",
                    "ospf-area": "0.0.0.8",
                    "ospf-interface-protection-type": "None",
                    "ospf-interface-state": "DR",
                    "ospf-stub-type": "Not Stub",
                    "ospf3-interface-index": "1",
                    "prefix-length": "128",
                    "retransmit-interval": "5",
                    "router-priority": "128"
                }
            ]
        }
    }

    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowOspf3InterfaceExtensive(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            obj.parse()

    def test_golden(self):
        """Golden CLI output must parse into the expected dictionary."""
        self.device = Mock(**self.golden_output)
        obj = ShowOspf3InterfaceExtensive(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)
class TestShowOspf3DatabaseExternalExtensive(unittest.TestCase):
maxDiff = None
device = Device(name='test-device')
empty_output = {'execute.return_value': ''}
golden_output = {'execute.return_value': '''
show ospf3 database external extensive | no-more
OSPF3 AS SCOPE link state database
Type ID Adv Rtr Seq Age Cksum Len
Extern 0.0.0.1 10.34.2.250 0x8000178e 1412 0x3c81 28
Prefix ::/0
Prefix-options 0x0, Metric 1, Type 1,
Aging timer 00:36:27
Installed 00:23:26 ago, expires in 00:36:28, sent 00:23:24 ago
Last changed 29w5d 21:04:29 ago, Change count: 1
Extern 0.0.0.3 10.34.2.250 0x8000178e 1037 0x21bf 44
Prefix 2001:db8:eb18:ca45::2/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:42:42
Installed 00:17:11 ago, expires in 00:42:43, sent 00:17:09 ago
Last changed 29w5d 21:04:29 ago, Change count: 1
Extern 0.0.0.4 10.34.2.250 0x80000246 2913 0xcc71 44
Prefix 2001:db8:eb18:ca45::1/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:11:26
Installed 00:48:26 ago, expires in 00:11:27, sent 00:48:24 ago
Last changed 2w6d 04:51:00 ago, Change count: 1
Extern 0.0.0.1 10.34.2.251 0x80001789 1412 0x4081 28
Prefix ::/0
Prefix-options 0x0, Metric 1, Type 1,
Aging timer 00:36:27
Installed 00:23:23 ago, expires in 00:36:28, sent 00:23:21 ago
Last changed 29w5d 21:04:28 ago, Change count: 1
Extern 0.0.0.2 10.34.2.251 0x80001788 2912 0x17d0 44
Prefix 2001:db8:eb18:ca45::1/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:11:27
Installed 00:48:23 ago, expires in 00:11:28, sent 00:48:21 ago
Last changed 29w5d 21:04:28 ago, Change count: 1
Extern 0.0.0.3 10.34.2.251 0x80000246 287 0xea52 44
Prefix 2001:db8:eb18:ca45::2/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:55:12
Installed 00:04:38 ago, expires in 00:55:13, sent 00:04:36 ago
Last changed 2w6d 04:10:55 ago, Change count: 1
Extern 0.0.0.18 10.169.14.240 0x80000349 1722 0xbddb 28
Prefix ::/0
Prefix-options 0x0, Metric 1, Type 1,
Aging timer 00:31:17
Installed 00:28:39 ago, expires in 00:31:18, sent 00:28:37 ago
Last changed 4w1d 01:48:00 ago, Change count: 1
Extern 0.0.0.19 10.169.14.240 0x8000034d 904 0x3603 44
Prefix 2001:db8:6aa8:6a53::1001/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:44:55
Installed 00:15:01 ago, expires in 00:44:56, sent 00:14:59 ago
Last changed 3w3d 02:05:47 ago, Change count: 3
Extern 0.0.0.22 10.169.14.240 0x800002b9 2268 0xab95 44
Prefix 2001:db8:223c:ca45::b/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:22:11
Installed 00:37:45 ago, expires in 00:22:12, sent 00:37:43 ago
Last changed 3w0d 17:02:47 ago, Change count: 1
Extern 0.0.0.23 10.169.14.240 0x80000247 631 0x7049 44
Prefix 2001:db8:b0f8:ca45::14/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:49:28
Installed 00:10:28 ago, expires in 00:49:29, sent 00:10:26 ago
Last changed 2w6d 04:51:04 ago, Change count: 1
Extern 0.0.0.24 10.169.14.240 0x80000246 2540 0x4e6c 44
Prefix 2001:db8:b0f8:ca45::13/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:17:39
Installed 00:42:17 ago, expires in 00:17:40, sent 00:42:15 ago
Last changed 2w6d 04:50:58 ago, Change count: 1
Extern 0.0.0.9 10.169.14.241 0x800002f0 2723 0xd341 44
Prefix 2001:db8:223c:ca45::c/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:14:36
Installed 00:45:17 ago, expires in 00:14:37, sent 00:45:15 ago
Last changed 3w2d 03:24:20 ago, Change count: 11
Extern 0.0.0.10 10.169.14.241 0x80000246 723 0xd4f2 44
Prefix 2001:db8:b0f8:ca45::13/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:47:56
Installed 00:11:57 ago, expires in 00:47:57, sent 00:11:55 ago
Last changed 2w6d 04:10:59 ago, Change count: 1
Extern 0.0.0.11 10.169.14.241 0x80000246 56 0xe4e0 44
Prefix 2001:db8:b0f8:ca45::14/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:59:03
Installed 00:00:50 ago, expires in 00:59:04, sent 00:00:48 ago
Last changed 2w6d 04:10:53 ago, Change count: 1
Extern *0.0.0.1 10.189.5.252 0x8000063f 2043 0x3ff4 44
Prefix 2001:db8:eb18:ca45::1/128
Prefix-options 0x0, Metric 50, Type 1,
Gen timer 00:15:56
Aging timer 00:25:56
Installed 00:34:03 ago, expires in 00:25:57, sent 00:34:01 ago
Last changed 3w0d 17:02:47 ago, Change count: 2, Ours
Extern 0.0.0.1 10.189.5.253 0x80000e1e 2045 0x7dcd 44
Prefix 2001:db8:eb18:ca45::2/128
Prefix-options 0x0, Metric 50, Type 1,
Aging timer 00:25:54
Installed 00:34:02 ago, expires in 00:25:55, sent 00:34:01 ago
Last changed 3w3d 00:31:46 ago, Change count: 15
'''}
golden_parsed_output = {'ospf3-database-information':
{'ospf3-database': [
{'advertising-router': '10.34.2.250',
'age': '1412',
'checksum': '0x3c81',
'lsa-id': '0.0.0.1',
'lsa-length': '28',
'lsa-type': 'Extern',
'ospf-database-extensive': {
'aging-timer': {
'#text': '00:36:27'
},
'expiration-time': {'#text': '00:36:28'},
'installation-time': {'#text': '00:23:26'},
'lsa-change-count': '1',
'lsa-changed-time': {'#text': '29w5d 21:04:29'},
'send-time': {'#text': '00:23:24'}
},
'ospf3-external-lsa': {
'metric': '1',
'ospf3-prefix': '::/0',
'ospf3-prefix-options': '0x0',
'type-value': '1'},
'sequence-number': '0x8000178e'
},
{'advertising-router': '10.34.2.250',
'age': '1037',
'checksum': '0x21bf',
'lsa-id': '0.0.0.3',
'lsa-length': '44',
'lsa-type': 'Extern',
'ospf-database-extensive': {'aging-timer': {'#text': '00:42:42'},
'expiration-time': {'#text': '00:42:43'},
'installation-time': {'#text': '00:17:11'},
'lsa-change-count': '1',
'lsa-changed-time': {'#text': '29w5d '
'21:04:29'},
'send-time': {'#text': '00:17:09'}},
'ospf3-external-lsa': {'metric': '50',
'ospf3-prefix': '2001:db8:eb18:ca45::2/128',
'ospf3-prefix-options': '0x0',
'type-value': '1'},
'sequence-number': '0x8000178e'},
{'advertising-router': '10.34.2.250',
'age': '2913',
'checksum': | |
= (0.4117647058823529, 0.3764705882352941, 0.023529411764705882)
# Named color constants as (r, g, b) tuples, each channel in [0.0, 1.0]
# (values correspond to 8-bit components divided by 255).
# NOTE(review): the names look like survey-derived color names (e.g. the
# xkcd color list) — confirm the source before documenting further.
BUFF = (0.996078431372549, 0.9647058823529412, 0.6196078431372549)
YELLOWISH = (0.9803921568627451, 0.9333333333333333, 0.4)
GREEN_BROWN = (0.32941176470588235, 0.3058823529411765, 0.011764705882352941)
UGLY_YELLOW = (0.8156862745098039, 0.7568627450980392, 0.00392156862745098)
OLIVE_YELLOW = (0.7607843137254902, 0.7176470588235294, 0.03529411764705882)
KHAKI = (0.6666666666666666, 0.6509803921568628, 0.3843137254901961)
EGG_SHELL = (1.0, 0.9882352941176471, 0.7686274509803922)
STRAW = (0.9882352941176471, 0.9647058823529412, 0.4745098039215686)
BROWN_GREEN = (0.4392156862745098, 0.4235294117647059, 0.06666666666666667)
MANILLA = (1.0, 0.9803921568627451, 0.5254901960784314)
DIRTY_YELLOW = (0.803921568627451, 0.7725490196078432, 0.0392156862745098)
PISS_YELLOW = (0.8666666666666667, 0.8392156862745098, 0.09411764705882353)
VOMIT_YELLOW = (0.7803921568627451, 0.7568627450980392, 0.047058823529411764)
BROWNY_GREEN = (0.43529411764705883, 0.4235294117647059, 0.0392156862745098)
SUNNY_YELLOW = (1.0, 0.9764705882352941, 0.09019607843137255)
PARCHMENT = (0.996078431372549, 0.9882352941176471, 0.6862745098039216)
PUKE_YELLOW = (0.7607843137254902, 0.7450980392156863, 0.054901960784313725)
CUSTARD = (1.0, 0.9921568627450981, 0.47058823529411764)
BUTTER_YELLOW = (1.0, 0.9921568627450981, 0.4549019607843137)
LIGHT_BEIGE = (1.0, 0.996078431372549, 0.7137254901960784)
SUNSHINE_YELLOW = (1.0, 0.9921568627450981, 0.21568627450980393)
BRIGHT_YELLOW = (1.0, 0.9921568627450981, 0.00392156862745098)
LIGHT_YELLOW = (1.0, 0.996078431372549, 0.47843137254901963)
PASTEL_YELLOW = (1.0, 0.996078431372549, 0.44313725490196076)
CANARY_YELLOW = (1.0, 0.996078431372549, 0.25098039215686274)
OFF_WHITE = (1.0, 1.0, 0.8941176470588236)
EGGSHELL = (1.0, 1.0, 0.8313725490196079)
IVORY = (1.0, 1.0, 0.796078431372549)
CREAM = (1.0, 1.0, 0.7607843137254902)
CREME = (1.0, 1.0, 0.7137254901960784)
PALE_YELLOW = (1.0, 1.0, 0.5176470588235295)
YELLOWISH_TAN = (0.9882352941176471, 0.9882352941176471, 0.5058823529411764)
BUTTER = (1.0, 1.0, 0.5058823529411764)
BANANA = (1.0, 1.0, 0.49411764705882355)
YELLOW = (1.0, 1.0, 0.0784313725490196)
PUKE = (0.6470588235294118, 0.6470588235294118, 0.00784313725490196)
FADED_YELLOW = (0.996078431372549, 1.0, 0.4980392156862745)
LEMON_YELLOW = (0.9921568627450981, 1.0, 0.2196078431372549)
OFF_YELLOW = (0.9450980392156862, 0.9529411764705882, 0.24705882352941178)
LEMON = (0.9921568627450981, 1.0, 0.3215686274509804)
CANARY = (0.9921568627450981, 1.0, 0.38823529411764707)
VOMIT = (0.6352941176470588, 0.6431372549019608, 0.08235294117647059)
DRAB = (0.5098039215686274, 0.5137254901960784, 0.26666666666666666)
ECRU = (0.996078431372549, 1.0, 0.792156862745098)
BANANA_YELLOW = (0.9803921568627451, 0.996078431372549, 0.29411764705882354)
BROWNISH_GREEN = (0.41568627450980394, 0.43137254901960786, 0.03529411764705882)
PEA_SOUP = (0.5725490196078431, 0.6, 0.00392156862745098)
MUD_GREEN = (0.3764705882352941, 0.4, 0.00784313725490196)
BABY_POOP_GREEN = (0.5607843137254902, 0.596078431372549, 0.0196078431372549)
OLIVE = (0.43137254901960786, 0.4588235294117647, 0.054901960784313725)
MUSTARD_GREEN = (0.6588235294117647, 0.7098039215686275, 0.01568627450980392)
BABY_PUKE_GREEN = (0.7137254901960784, 0.7686274509803922, 0.023529411764705882)
BILE = (0.7098039215686275, 0.7647058823529411, 0.023529411764705882)
SHIT_GREEN = (0.4588235294117647, 0.5019607843137255, 0.0)
SNOT = (0.6745098039215687, 0.7333333333333333, 0.050980392156862744)
GREENISH_BEIGE = (0.788235294117647, 0.8196078431372549, 0.4745098039215686)
OLIVE_DRAB = (0.43529411764705883, 0.4627450980392157, 0.19607843137254902)
POOP_GREEN = (0.43529411764705883, 0.48627450980392156, 0.0)
SICKLY_YELLOW = (0.8156862745098039, 0.8941176470588236, 0.1607843137254902)
DARK_OLIVE = (0.21568627450980393, 0.24313725490196078, 0.00784313725490196)
BABY_SHIT_GREEN = (0.5333333333333333, 0.592156862745098, 0.09019607843137255)
PUKE_GREEN = (0.6039215686274509, 0.6823529411764706, 0.027450980392156862)
PEA_SOUP_GREEN = (0.5803921568627451, 0.6509803921568628, 0.09019607843137255)
GREEN_SLASH_YELLOW = (0.7098039215686275, 0.807843137254902, 0.03137254901960784)
SWAMP_GREEN = (0.4549019607843137, 0.5215686274509804, 0.0)
MURKY_GREEN = (0.4235294117647059, 0.47843137254901963, 0.054901960784313725)
BARF_GREEN = (0.5803921568627451, 0.6745098039215687, 0.00784313725490196)
LIGHT_KHAKI = (0.9019607843137255, 0.9490196078431372, 0.6352941176470588)
VOMIT_GREEN = (0.5372549019607843, 0.6352941176470588, 0.011764705882352941)
OLIVE_GREEN = (0.403921568627451, 0.47843137254901963, 0.01568627450980392)
BRIGHT_OLIVE = (0.611764705882353, 0.7333333333333333, 0.01568627450980392)
BOOGER_GREEN = (0.5882352941176471, 0.7058823529411765, 0.011764705882352941)
PEA = (0.6431372549019608, 0.7490196078431373, 0.12549019607843137)
GROSS_GREEN = (0.6274509803921569, 0.7490196078431373, 0.08627450980392157)
GREENISH_TAN = (0.7372549019607844, 0.796078431372549, 0.47843137254901963)
SNOT_GREEN = (0.615686274509804, 0.7568627450980392, 0.0)
PEA_GREEN = (0.5568627450980392, 0.6705882352941176, 0.07058823529411765)
NEON_YELLOW = (0.8117647058823529, 1.0, 0.01568627450980392)
GREENISH_YELLOW = (0.803921568627451, 0.9921568627450981, 0.00784313725490196)
UGLY_GREEN = (0.47843137254901963, 0.592156862745098, 0.011764705882352941)
SICK_GREEN = (0.615686274509804, 0.7254901960784313, 0.17254901960784313)
SICKLY_GREEN = (0.5803921568627451, 0.6980392156862745, 0.10980392156862745)
LIME_YELLOW = (0.8156862745098039, 0.996078431372549, 0.11372549019607843)
DARK_YELLOW_GREEN = (0.4470588235294118, 0.5607843137254902, 0.00784313725490196)
GREENY_YELLOW = (0.7764705882352941, 0.9725490196078431, 0.03137254901960784)
BOOGER = (0.6078431372549019, 0.7098039215686275, 0.23529411764705882)
LIGHT_OLIVE = (0.6745098039215687, 0.7490196078431373, 0.4117647058823529)
ICKY_GREEN = (0.5607843137254902, 0.6823529411764706, 0.13333333333333333)
YELLOWISH_GREEN = (0.6901960784313725, 0.8666666666666667, 0.08627450980392157)
MUDDY_GREEN = (0.396078431372549, 0.4549019607843137, 0.19607843137254902)
DARK_OLIVE_GREEN = (0.23529411764705882, 0.30196078431372547, 0.011764705882352941)
CHARTREUSE = (0.7568627450980392, 0.9725490196078431, 0.0392156862745098)
CAMO = (0.4980392156862745, 0.5607843137254902, 0.3058823529411765)
YELLOWY_GREEN = (0.7490196078431373, 0.9450980392156862, 0.1568627450980392)
GREEN_YELLOW = (0.788235294117647, 1.0, 0.15294117647058825)
AVOCADO_GREEN = (0.5294117647058824, 0.6627450980392157, 0.13333333333333333)
PALE_OLIVE = (0.7254901960784313, 0.8, 0.5058823529411764)
ARMY_GREEN = (0.29411764705882354, 0.36470588235294116, 0.08627450980392157)
SLIME_GREEN = (0.6, 0.8, 0.01568627450980392)
KHAKI_GREEN = (0.4470588235294118, 0.5254901960784314, 0.2235294117647059)
AVOCADO = (0.5647058823529412, 0.6941176470588235, 0.20392156862745098)
YELLOWGREEN = (0.7333333333333333, 0.9764705882352941, 0.058823529411764705)
LIGHT_OLIVE_GREEN = (0.6431372549019608, 0.7450980392156863, 0.3607843137254902)
TAN_GREEN = (0.6627450980392157, 0.7450980392156863, 0.4392156862745098)
YELLOW_SLASH_GREEN = (0.7843137254901961, 0.9921568627450981, 0.23921568627450981)
DARK_LIME = (0.5176470588235295, 0.7176470588235294, 0.00392156862745098)
CAMOUFLAGE_GREEN = (0.29411764705882354, 0.3803921568627451, 0.07450980392156863)
YELLOW_GREEN = (0.7529411764705882, 0.984313725490196, 0.17647058823529413)
DIRTY_GREEN = (0.4, 0.49411764705882355, 0.17254901960784313)
PEAR = (0.796078431372549, 0.9725490196078431, 0.37254901960784315)
LEMON_LIME = (0.7490196078431373, 0.996078431372549, 0.1568627450980392)
CAMO_GREEN = (0.3215686274509804, 0.396078431372549, 0.1450980392156863)
LEMON_GREEN = (0.6784313725490196, 0.9725490196078431, 0.00784313725490196)
DARK_LIME_GREEN = (0.49411764705882355, 0.7411764705882353, 0.00392156862745098)
ELECTRIC_LIME = (0.6588235294117647, 1.0, 0.01568627450980392)
SWAMP = (0.4117647058823529, 0.5137254901960784, 0.2235294117647059)
MILITARY_GREEN = (0.4, 0.48627450980392156, 0.24313725490196078)
PALE_OLIVE_GREEN = (0.6941176470588235, 0.8235294117647058, 0.4823529411764706)
BRIGHT_YELLOW_GREEN = (0.615686274509804, 1.0, 0.0)
LIGHT_YELLOW_GREEN = (0.8, 0.9921568627450981, 0.4980392156862745)
SAP_GREEN = (0.3607843137254902, 0.5450980392156862, 0.08235294117647059)
MOSSY_GREEN = (0.38823529411764707, 0.5450980392156862, 0.15294117647058825)
LIGHT_MOSS_GREEN = (0.6509803921568628, 0.7843137254901961, 0.4588235294117647)
NAVY_GREEN = (0.20784313725490197, 0.3254901960784314, 0.0392156862745098)
LIME = (0.6666666666666666, 1.0, 0.19607843137254902)
ACID_GREEN = (0.5607843137254902, 0.996078431372549, 0.03529411764705882)
PALE_LIME = (0.7450980392156863, 0.9921568627450981, 0.45098039215686275)
LIGHT_LIME_GREEN = (0.7254901960784313, 1.0, 0.4)
MOSS_GREEN = (0.396078431372549, 0.5450980392156862, 0.2196078431372549)
LEAF_GREEN = (0.3607843137254902, 0.6627450980392157, 0.01568627450980392)
LIGHT_PEA_GREEN = (0.7686274509803922, 0.996078431372549, 0.5098039215686274)
LIME_GREEN = (0.5372549019607843, 0.996078431372549, 0.0196078431372549)
BRIGHT_LIME = (0.5294117647058824, 0.9921568627450981, 0.0196078431372549)
KIWI = (0.611764705882353, 0.9372549019607843, 0.2627450980392157)
LEAF = (0.44313725490196076, 0.6666666666666666, 0.20392156862745098)
KERMIT_GREEN = (0.3607843137254902, 0.6980392156862745, 0.0)
DRAB_GREEN = (0.4549019607843137, 0.5843137254901961, 0.3176470588235294)
PALE_LIME_GREEN = (0.6941176470588235, 1.0, 0.396078431372549)
LIGHT_YELLOWISH_GREEN = (0.7607843137254902, 1.0, 0.5372549019607843)
APPLE_GREEN = (0.4627450980392157, 0.803921568627451, 0.14901960784313725)
PISTACHIO = (0.7529411764705882, 0.9803921568627451, 0.5450980392156862)
KIWI_GREEN = (0.5568627450980392, 0.8980392156862745, 0.24705882352941178)
MOSS = (0.4627450980392157, 0.6, 0.34509803921568627)
LIGHT_LIME = (0.6823529411764706, 0.9921568627450981, 0.4235294117647059)
FROG_GREEN = (0.34509803921568627, 0.7372549019607844, 0.03137254901960784)
KEY_LIME = (0.6823529411764706, 1.0, 0.43137254901960786)
LAWN_GREEN = (0.30196078431372547, 0.6431372549019608, 0.03529411764705882)
NASTY_GREEN = (0.4392156862745098, 0.6980392156862745, 0.24705882352941178)
CELERY = (0.7568627450980392, 0.9921568627450981, 0.5843137254901961)
DARK_GRASS_GREEN = (0.2196078431372549, 0.5019607843137255, 0.01568627450980392)
SPRING_GREEN = (0.6627450980392157, 0.9764705882352941, 0.44313725490196076)
GRASSY_GREEN = (0.2549019607843137, 0.611764705882353, 0.011764705882352941)
ASPARAGUS = (0.4666666666666667, 0.6705882352941176, 0.33725490196078434)
BRIGHT_LIME_GREEN = (0.396078431372549, 0.996078431372549, 0.03137254901960784)
GRASS = (0.3607843137254902, 0.6745098039215687, 0.17647058823529413)
LIGHT_GRASS_GREEN = (0.6039215686274509, 0.9686274509803922, 0.39215686274509803)
TURTLE_GREEN = (0.4588235294117647, 0.7215686274509804, 0.30980392156862746)
GRASS_GREEN = (0.24705882352941178, 0.6078431372549019, 0.043137254901960784)
FLAT_GREEN = (0.4117647058823529, 0.615686274509804, 0.2980392156862745)
APPLE = (0.43137254901960786, 0.796078431372549, 0.23529411764705882)
LIGHT_GRAY_GREEN = (0.7176470588235294, 0.8823529411764706, 0.6313725490196078)
LIGHT_GREY_GREEN = (0.7176470588235294, 0.8823529411764706, 0.6313725490196078)
LICHEN = (0.5607843137254902, 0.7137254901960784, 0.4823529411764706)
SAGE = (0.5294117647058824, 0.6823529411764706, 0.45098039215686275)
GREEN_APPLE = (0.3686274509803922, 0.8627450980392157, 0.12156862745098039)
MEDIUM_GRAY = (0.49019607843137253, 0.4980392156862745, 0.48627450980392156)
MEDIUM_GREY = (0.49019607843137253, 0.4980392156862745, 0.48627450980392156)
LIGHT_GRAY = (0.8470588235294118, 0.8627450980392157, 0.8392156862745098)
LIGHT_GREY = (0.8470588235294118, 0.8627450980392157, 0.8392156862745098)
TEA_GREEN = (0.7411764705882353, 0.9725490196078431, 0.6392156862745098)
TOXIC_GREEN = (0.3803921568627451, 0.8705882352941177, 0.16470588235294117)
LIGHT_LIGHT_GREEN = (0.7843137254901961, 1.0, 0.6901960784313725)
VERY_LIGHT_GREEN = (0.8196078431372549, 1.0, 0.7411764705882353)
OFF_GREEN = (0.4196078431372549, 0.6392156862745098, 0.3254901960784314)
VERY_PALE_GREEN = (0.8117647058823529, 0.9921568627450981, 0.7372549019607844)
WASHED_OUT_GREEN = (0.7372549019607844, 0.9607843137254902, 0.6509803921568628)
GREENISH_GRAY = (0.5882352941176471, 0.6823529411764706, 0.5529411764705883)
GREENISH_GREY = (0.5882352941176471, 0.6823529411764706, 0.5529411764705883)
SAGE_GREEN = (0.5333333333333333, 0.7019607843137254, 0.47058823529411764)
DULL_GREEN = (0.4549019607843137, 0.6509803921568628, 0.3843137254901961)
GRAY_SLASH_GREEN = (0.5254901960784314, 0.6313725490196078, 0.49019607843137253)
GREY_SLASH_GREEN = (0.5254901960784314, 0.6313725490196078, 0.49019607843137253)
LIGHT_SAGE = (0.7372549019607844, 0.9254901960784314, 0.6745098039215687)
PALE_GREEN = (0.7803921568627451, 0.9921568627450981, 0.7098039215686275)
GRAY = (0.5725490196078431, 0.5843137254901961, 0.5686274509803921)
GREY = (0.5725490196078431, 0.5843137254901961, 0.5686274509803921)
PALE_LIGHT_GREEN = (0.6941176470588235, 0.9882352941176471, 0.6)
FORREST_GREEN = (0.08235294117647059, 0.26666666666666666, 0.023529411764705882)
GREEN_GRAY = (0.4666666666666667, 0.5725490196078431, 0.43529411764705883)
GREEN_GREY = (0.4666666666666667, 0.5725490196078431, 0.43529411764705883)
FERN_GREEN = (0.32941176470588235, 0.5529411764705883, 0.26666666666666666)
LIGHT_GREEN = (0.5882352941176471, 0.9764705882352941, 0.4823529411764706)
FERN = (0.38823529411764707, 0.6627450980392157, 0.3137254901960784)
PASTEL_GREEN = (0.6901960784313725, 1.0, 0.615686274509804)
FRESH_GREEN = (0.4117647058823529, 0.8470588235294118, 0.30980392156862746)
POISON_GREEN = (0.25098039215686274, 0.9921568627450981, 0.0784313725490196)
LEAFY_GREEN = (0.3176470588235294, 0.7176470588235294, 0.23137254901960785)
TREE_GREEN = (0.16470588235294117, 0.49411764705882355, 0.09803921568627451)
MUTED_GREEN = (0.37254901960784315, 0.6274509803921569, 0.3215686274509804)
LIGHT_PASTEL_GREEN = (0.6980392156862745, 0.984313725490196, 0.6470588235294118)
VIVID_GREEN = (0.1843137254901961, 0.9372549019607843, 0.06274509803921569)
GRAY_GREEN = (0.47058823529411764, 0.6078431372549019, 0.45098039215686275)
GREY_GREEN = (0.47058823529411764, 0.6078431372549019, 0.45098039215686275)
GRAYISH_GREEN = (0.5098039215686274, 0.6509803921568628, 0.49019607843137253)
GREYISH_GREEN = (0.5098039215686274, 0.6509803921568628, 0.49019607843137253)
LIGHTER_GREEN = (0.4588235294117647, 0.9921568627450981, 0.38823529411764707)
FADED_GREEN = (0.4823529411764706, 0.6980392156862745, 0.4549019607843137)
EASTER_GREEN = (0.5490196078431373, 0.9921568627450981, 0.49411764705882355)
GREENY_GRAY = (0.49411764705882355, 0.6274509803921569, 0.47843137254901963)
GREENY_GREY = (0.49411764705882355, 0.6274509803921569, 0.47843137254901963)
CELADON = (0.7450980392156863, 0.9921568627450981, 0.7176470588235294)
MID_GREEN = (0.3137254901960784, 0.6549019607843137, 0.2784313725490196)
HIGHLIGHTER_GREEN = (0.10588235294117647, 0.9882352941176471, 0.023529411764705882)
ELECTRIC_GREEN = (0.12941176470588237, 0.9882352941176471, 0.050980392156862744)
VERY_DARK_GREEN = (0.023529411764705882, 0.1803921568627451, 0.011764705882352941)
DARK_SAGE = (0.34901960784313724, 0.5215686274509804, 0.33725490196078434)
RADIOACTIVE_GREEN = (0.17254901960784313, 0.9803921568627451, 0.12156862745098039)
DARK_GREEN = (0.011764705882352941, 0.20784313725490197, 0.0)
DUSTY_GREEN = (0.4627450980392157, 0.6627450980392157, 0.45098039215686275)
HUNTER_GREEN = (0.043137254901960784, 0.25098039215686274, 0.03137254901960784)
FLURO_GREEN = (0.0392156862745098, 1.0, 0.00784313725490196)
TRUE_GREEN = (0.03137254901960784, 0.5803921568627451, 0.01568627450980392)
FOREST = (0.043137254901960784, 0.3333333333333333, 0.03529411764705882)
RACING_GREEN = (0.00392156862745098, 0.27450980392156865, 0.0)
VIBRANT_GREEN = (0.0392156862745098, 0.8666666666666667, 0.03137254901960784)
LIGHTISH_GREEN = (0.3803921568627451, 0.8823529411764706, 0.3764705882352941)
NEON_GREEN = (0.047058823529411764, 1.0, 0.047058823529411764)
FLUORESCENT_GREEN = (0.03137254901960784, 1.0, 0.03137254901960784)
DARK_PASTEL_GREEN = (0.33725490196078434, 0.6823529411764706, 0.3411764705882353)
BOTTLE_GREEN = (0.01568627450980392, 0.2901960784313726, 0.0196078431372549)
HOT_GREEN = (0.1450980392156863, 1.0, 0.1607843137254902)
BRIGHT_GREEN = (0.00392156862745098, 1.0, 0.027450980392156862)
BORING_GREEN = (0.38823529411764707, 0.7019607843137254, 0.396078431372549)
DARKGREEN = (0.0196078431372549, 0.28627450980392155, 0.027450980392156862)
GREEN = (0.08235294117647059, 0.6901960784313725, 0.10196078431372549)
LIGHT_NEON_GREEN = (0.3058823529411765, 0.9921568627450981, 0.32941176470588235)
LIGHTGREEN = (0.4627450980392157, 1.0, 0.4823529411764706)
LIGHT_BRIGHT_GREEN = (0.3254901960784314, 0.996078431372549, 0.3607843137254902)
LIGHT_FOREST_GREEN = (0.30980392156862746, 0.5686274509803921, 0.3254901960784314)
LIGHT_MINT = (0.7137254901960784, 1.0, 0.7333333333333333)
SOFT_GREEN = (0.43529411764705883, 0.7607843137254902, 0.4627450980392157)
DARK_FOREST_GREEN = (0.0, 0.17647058823529413, 0.01568627450980392)
FOREST_GREEN = (0.023529411764705882, 0.2784313725490196, 0.047058823529411764)
BRITISH_RACING_GREEN = (
0.0196078431372549,
0.2823529411764706,
0.050980392156862744,
)
MEDIUM_GREEN = (0.2235294117647059, 0.6784313725490196, 0.2823529411764706)
LIGHT_MINT_GREEN = (0.6509803921568628, 0.984313725490196, 0.6980392156862745)
MINT_GREEN = (0.5607843137254902, 1.0, 0.6235294117647059)
DEEP_GREEN = (0.00784313725490196, 0.34901960784313724, 0.058823529411764705)
BABY_GREEN = (0.5490196078431373, 1.0, 0.6196078431372549)
LIGHT_SEAFOAM_GREEN = (0.6549019607843137, 1.0, 0.7098039215686275)
DARKISH_GREEN = (0.1568627450980392, 0.48627450980392156, 0.21568627450980393)
MINT = (0.6235294117647059, 0.996078431372549, 0.6901960784313725)
PINE = (0.16862745098039217, 0.36470588235294116, 0.20392156862745098)
BRIGHT_LIGHT_GREEN = (0.17647058823529413, 0.996078431372549, 0.32941176470588235)
EMERALD_GREEN = (0.00784313725490196, 0.5607843137254902, | |
# coding: utf-8
"""Reimplementation of HMMER binaries with the pyHMMER API.
"""
import abc
import contextlib
import collections
import ctypes
import itertools
import io
import queue
import time
import threading
import typing
import os
import multiprocessing
import psutil
from .easel import Alphabet, DigitalSequence, DigitalMSA, MSA, MSAFile, TextSequence, SequenceFile, SSIWriter
from .plan7 import Builder, Background, Pipeline, TopHits, HMM, HMMFile, Profile, TraceAligner
from .utils import peekable
# type variable for the query handled by a pipeline worker
# (an HMM, a digital sequence, or a digital MSA)
_Q = typing.TypeVar("_Q")
# --- Pipeline threads -------------------------------------------------------
class _PipelineThread(typing.Generic[_Q], threading.Thread):
    """A generic worker thread to parallelize a pipelined search.

    Attributes:
        sequences (iterable of `DigitalSequence`): The target sequences to
            search for hits. **Must be able to be iterated upon more than
            once.**
        query_queue (`queue.Queue`): The queue used to pass queries between
            threads. It contains both the query and its index, so that the
            results can be returned in the same order.
        query_count (`multiprocessing.Value`): An atomic counter storing
            the total number of queries that have currently been loaded.
            Passed to the ``callback`` so that an UI can show the total
            for a progress bar.
        hits_queue (`queue.PriorityQueue`): The queue used to pass back
            the `TopHits` to the main thread. The results are inserted
            using the index of the query, so that the main thread can
            pull results in order.
        kill_switch (`threading.Event`): An event flag shared between
            all worker threads, used to notify emergency exit.
        hits_found (`list` of `threading.Event`): A list of event flags,
            such that ``hits_found[i]`` is set when results have been
            obtained for the query of index ``i``. This allows the main
            thread to keep waiting for the right `TopHits` to yield even
            if subsequent queries have already been treated, and to make
            sure the next result returned by ``hits_queue.get`` will also
            be of index ``i``.
        callback (`callable`, optional): An optional callback to be called
            after each query has been processed. It should accept two
            arguments: the query object that was processed, and the total
            number of queries read until now.
        options (`dict`): A dictionary of options to be passed to the
            `pyhmmer.plan7.Pipeline` object wrapped by the worker thread.
    """

    @staticmethod
    def _none_callback(hmm: _Q, total: int) -> None:
        # default no-op callback, installed when the caller supplies none
        pass

    def __init__(
        self,
        sequences: typing.Iterable[DigitalSequence],
        query_queue: "queue.Queue[typing.Optional[typing.Tuple[int, _Q]]]",
        query_count: multiprocessing.Value,  # type: ignore
        hits_queue: "queue.PriorityQueue[typing.Tuple[int, TopHits]]",
        kill_switch: threading.Event,
        hits_found: typing.List[threading.Event],
        callback: typing.Optional[typing.Callable[[_Q, int], None]],
        options: typing.Dict[str, typing.Any],
    ) -> None:
        """Store the shared synchronization objects and build the pipeline."""
        super().__init__()
        self.options = options
        # each worker owns its own Pipeline so searches never share state
        self.pipeline = Pipeline(alphabet=Alphabet.amino(), **options)
        self.sequences = sequences
        self.query_queue = query_queue
        self.query_count = query_count
        self.hits_queue = hits_queue
        self.callback = callback or self._none_callback
        self.kill_switch = kill_switch
        self.hits_found = hits_found
        # exception raised inside `run`, recorded for the main thread to chain
        self.error: typing.Optional[BaseException] = None

    def run(self) -> None:
        """Consume queries from `query_queue` until poison-pilled or killed."""
        while not self.kill_switch.is_set():
            # attempt to get the next argument, with a timeout
            # so that the thread can periodically check if it has
            # been killed, even when the query queue is empty
            try:
                args = self.query_queue.get(timeout=1)
            except queue.Empty:
                continue
            # check if arguments from the queue are a poison-pill (`None`),
            # in which case the thread will stop running
            if args is None:
                self.query_queue.task_done()
                return
            else:
                index, query = args
            # process the arguments, making sure to capture any exception
            # raised while processing the query, and then mark the hits
            # as "found" using a `threading.Event` for each.
            try:
                self.process(index, query)
                self.query_queue.task_done()
            except BaseException as exc:
                self.error = exc
                self.kill()
                return
            finally:
                self.hits_found[index].set()

    def kill(self) -> None:
        """Set the shared kill switch so that every worker thread stops."""
        self.kill_switch.set()

    def process(self, index: int, query: _Q) -> None:
        """Search one query and enqueue its `TopHits` keyed by its index."""
        hits = self.search(query)
        self.hits_queue.put((index, hits))
        self.callback(query, self.query_count.value)  # type: ignore
        # reset the pipeline so it can be reused for the next query
        self.pipeline.clear()

    @abc.abstractmethod
    def search(self, query: _Q) -> TopHits:
        """Run the actual pipeline search; implemented by subclasses."""
        return NotImplemented
class _HMMPipelineThread(_PipelineThread[HMM]):
    """Worker thread that searches the target sequences with an HMM query."""

    def search(self, query: HMM) -> TopHits:
        # the query is already an HMM, so it can be passed straight to the
        # pipeline without going through a Builder first
        hits = self.pipeline.search_hmm(query, self.sequences)
        return hits
class _SequencePipelineThread(_PipelineThread[DigitalSequence]):
    """Worker thread that searches the targets with a single-sequence query.

    The extra `Builder` is used by the pipeline to turn the query sequence
    into a profile before searching.
    """

    def __init__(
        self,
        sequences: typing.Iterable[DigitalSequence],
        query_queue: "queue.Queue[typing.Optional[typing.Tuple[int, DigitalSequence]]]",
        query_count: multiprocessing.Value,  # type: ignore
        hits_queue: "queue.PriorityQueue[typing.Tuple[int, TopHits]]",
        kill_switch: threading.Event,
        hits_found: typing.List[threading.Event],
        callback: typing.Optional[typing.Callable[[DigitalSequence, int], None]],
        options: typing.Dict[str, typing.Any],
        builder: Builder,
    ) -> None:
        # forward all the shared synchronization state to the base class,
        # and keep the builder for `search`
        base_args = (sequences, query_queue, query_count, hits_queue,
                     kill_switch, hits_found, callback, options)
        super().__init__(*base_args)
        self.builder = builder

    def search(self, query: DigitalSequence) -> TopHits:
        return self.pipeline.search_seq(query, self.sequences, self.builder)
class _MSAPipelineThread(_PipelineThread[DigitalMSA]):
    """Worker thread that searches the targets with an alignment query.

    The extra `Builder` is used by the pipeline to turn the query MSA
    into a profile before searching.
    """

    def __init__(
        self,
        sequences: typing.Iterable[DigitalSequence],
        query_queue: "queue.Queue[typing.Optional[typing.Tuple[int, DigitalMSA]]]",
        query_count: multiprocessing.Value,  # type: ignore
        hits_queue: "queue.PriorityQueue[typing.Tuple[int, TopHits]]",
        kill_switch: threading.Event,
        hits_found: typing.List[threading.Event],
        callback: typing.Optional[typing.Callable[[DigitalMSA, int], None]],
        options: typing.Dict[str, typing.Any],
        builder: Builder,
    ) -> None:
        # forward all the shared synchronization state to the base class,
        # and keep the builder for `search`
        base_args = (sequences, query_queue, query_count, hits_queue,
                     kill_switch, hits_found, callback, options)
        super().__init__(*base_args)
        self.builder = builder

    def search(self, query: DigitalMSA) -> TopHits:
        return self.pipeline.search_msa(query, self.sequences, self.builder)
# --- Search runners ---------------------------------------------------------
# NOTE: `abc.abstractmethod` is meant for callables *inside* an ABC, not for
# decorating a class; inheriting `abc.ABC` with the abstract `_new_thread`
# below is what actually prevents direct instantiation, so the class-level
# decorator that used to sit here was removed.
class _Search(typing.Generic[_Q], abc.ABC):
    """Abstract runner searching many queries against target sequences.

    Subclasses implement `_new_thread` to create the `_PipelineThread`
    matching their query type (HMM, sequence or MSA).  `run` yields one
    `TopHits` per query, in the order the queries were given.
    """

    def __init__(
        self,
        queries: typing.Iterable[_Q],
        sequences: typing.Collection[DigitalSequence],
        cpus: int = 0,
        callback: typing.Optional[typing.Callable[[_Q, int], None]] = None,
        **options,
    ) -> None:
        """Store the queries, targets, CPU count, callback and pipeline options."""
        self.queries = queries
        self.sequences = sequences
        self.cpus = cpus
        self.callback = callback
        self.options = options

    @abc.abstractmethod
    def _new_thread(
        self,
        query_queue: "queue.Queue[typing.Optional[typing.Tuple[int, _Q]]]",
        query_count: "multiprocessing.Value[int]",
        hits_queue: "queue.PriorityQueue[typing.Tuple[int, TopHits]]",
        kill_switch: threading.Event,
        hits_found: typing.List[threading.Event],
    ) -> _PipelineThread[_Q]:
        """Create the pipeline worker thread appropriate for the query type."""
        return NotImplemented

    def _single_threaded(self) -> typing.Iterator[TopHits]:
        """Process every query inline, yielding each `TopHits` immediately."""
        # create the queues to pass the query objects around, as well as
        # atomic values that we use to synchronize the threads
        hits_found: typing.List[threading.Event] = []
        query_queue = queue.Queue()  # type: ignore
        query_count = multiprocessing.Value(ctypes.c_ulong)
        hits_queue = queue.PriorityQueue()  # type: ignore
        kill_switch = threading.Event()
        # create the thread (to recycle code); it is never started, its
        # `process` method is simply invoked from the current thread
        thread = self._new_thread(query_queue, query_count, hits_queue, kill_switch, hits_found)
        # process each query iteratively and yield the result
        # immediately so that the user can iterate over the
        # TopHits one at a time
        for index, query in enumerate(self.queries):
            query_count.value += 1
            thread.process(index, query)
            yield hits_queue.get_nowait()[1]

    def _multi_threaded(self) -> typing.Iterator[TopHits]:
        """Process the queries on `self.cpus` workers, yielding in query order."""
        # create the queues to pass the query objects around, as well as
        # atomic values that we use to synchronize the threads
        hits_found: typing.List[threading.Event] = []
        hits_queue = queue.PriorityQueue()  # type: ignore
        query_count = multiprocessing.Value(ctypes.c_ulong)
        kill_switch = threading.Event()
        # the query queue is bounded so that we only feed more queries
        # if the worker threads are waiting for some
        query_queue = queue.Queue(maxsize=self.cpus)  # type: ignore
        # create and launch one pipeline thread per CPU
        threads = []
        for _ in range(self.cpus):
            thread = self._new_thread(query_queue, query_count, hits_queue, kill_switch, hits_found)
            thread.start()
            threads.append(thread)
        # catch exceptions to kill threads in the background before exiting
        try:
            # enumerate queries, so that we know the index of each query
            # and we can yield the results in the same order
            queries = enumerate(self.queries)
            # initially feed one query per thread so that they can start
            # working before we enter the main loop
            for (index, query) in itertools.islice(queries, self.cpus):
                query_count.value += 1
                hits_found.append(threading.Event())
                query_queue.put((index, query))
            # alternate between feeding queries to the threads and
            # yielding back results, if available
            hits_yielded = 0
            while hits_yielded < query_count.value:
                # get the next query, or break the loop if there is no query
                # left to process in the input iterator.
                index, query = next(queries, (-1, None))
                if query is None:
                    break
                else:
                    query_count.value += 1
                    hits_found.append(threading.Event())
                    query_queue.put((index, query))
                # yield the top hits for the next query, if available
                if hits_found[hits_yielded].is_set():
                    yield hits_queue.get_nowait()[1]
                    hits_yielded += 1
            # now that we exhausted all queries, poison pill the
            # threads so they stop on their own
            for _ in threads:
                query_queue.put(None)
            # yield remaining results
            while hits_yielded < query_count.value:
                hits_found[hits_yielded].wait()
                yield hits_queue.get_nowait()[1]
                hits_yielded += 1
        except queue.Empty:
            # the only way we can get queue.Empty is if a thread has set
            # the flag for `hits_found[i]` without actually putting it in
            # the queue: this only happens when a background thread raised
            # an exception, so we must chain it
            for thread in threads:
                if thread.error is not None:
                    raise thread.error from None
            # if this exception is otherwise a bug, make sure to reraise it
            raise
        except BaseException:
            # make sure threads are killed to avoid being stuck,
            # e.g. after a KeyboardInterrupt
            kill_switch.set()
            raise

    def run(self) -> typing.Iterator[TopHits]:
        """Run the search, returning an iterator of per-query `TopHits`.

        ``cpus <= 1`` (including the default of 0) runs everything in the
        current thread; previously a value of 0 created zero worker threads
        and silently yielded no results at all.
        """
        if self.cpus <= 1:
            return self._single_threaded()
        else:
            return self._multi_threaded()
class _HMMSearch(_Search[HMM]):
    """Concrete `_Search` runner for HMM queries."""

    def _new_thread(
        self,
        query_queue: "queue.Queue[typing.Optional[typing.Tuple[int, HMM]]]",
        query_count: "multiprocessing.Value[int]",
        hits_queue: "queue.PriorityQueue[typing.Tuple[int, TopHits]]",
        kill_switch: threading.Event,
        hits_found: typing.List[threading.Event],
    ) -> _HMMPipelineThread:
        # spawn a worker bound to this runner's targets, callback and options
        worker = _HMMPipelineThread(
            self.sequences,
            query_queue,
            query_count,
            hits_queue,
            kill_switch,
            hits_found,
            self.callback,
            self.options,
        )
        return worker
class _SequenceSearch(_Search[DigitalSequence]):
def __init__(
self,
builder: Builder,
queries: typing.Iterable[DigitalSequence],
sequences: typing.Collection[DigitalSequence],
cpus: int = 0,
callback: typing.Optional[typing.Callable[[DigitalSequence, int], None]] = None,
**options,
) -> None:
super().__init__(queries, sequences, cpus, callback, **options)
self.builder = builder
| |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from skcurve import Point, Segment, Curve, GeometryAlgorithmsWarning, IntersectionType
@pytest.mark.parametrize('p1, p2, ndim', [
    (Point([1, 1]), Point([2, 2]), 2),
    (Point([1, 1, 1]), Point([2, 2, 2]), 3),
])
def test_create_segment(p1, p2, ndim):
    """A segment takes its dimensionality and endpoints from its two points."""
    segment = Segment(p1, p2)
    assert segment.ndim == ndim
    assert segment.p1 == p1
    assert segment.p2 == p2
def test_equal_segments():
    """Segments compare equal only when the endpoints match in order."""
    forward_a = Segment(Point([1, 1]), Point([2, 2]))
    forward_b = Segment(Point([1, 1]), Point([2, 2]))
    backward = Segment(Point([2, 2]), Point([1, 1]))
    assert forward_a == forward_b
    assert forward_a != backward
def test_singular():
    """A zero-length segment (coincident endpoints) is flagged as singular."""
    degenerate = Segment(Point([1, 1]), Point([1, 1]))
    assert degenerate.singular
@pytest.mark.parametrize('t, expected_point', [
    (0., Point([1, 1])),
    (1., Point([2, 2])),
    (0.5, Point([1.5, 1.5])),
])
def test_segment_point(t, expected_point):
    """`Segment.point` maps the parameter t in [0, 1] onto the segment."""
    segment = Segment(Point([1, 1]), Point([2, 2]))
    assert segment.point(t) == expected_point
@pytest.mark.parametrize('point, expected_t', [
    (Point([1, 1]), 0.),
    (Point([2, 2]), 1.),
    (Point([1.5, 1.5]), 0.5),
])
def test_segment_t_2d(point, expected_t):
    """`Segment.t` recovers the parameter of a 2D point lying on the segment."""
    segment = Segment(Point([1, 1]), Point([2, 2]))
    assert segment.t(point) == pytest.approx(expected_t)
@pytest.mark.parametrize('point, expected_t', [
    (Point([1, 1, 1]), 0.),
    (Point([2, 2, 2]), 1.),
    (Point([1.5, 1.5, 1.5]), 0.5),
])
def test_segment_t_3d(point, expected_t):
    """`Segment.t` recovers the parameter of a 3D point lying on the segment."""
    segment = Segment(Point([1, 1, 1]), Point([2, 2, 2]))
    assert segment.t(point) == pytest.approx(expected_t)
@pytest.mark.parametrize('points1, points2, expected_angle', [
    # 2d
    ((Point([1, 1]), Point([2, 2])), (Point([0, 0]), Point([3, 3])), 0.),
    ((Point([1, 1]), Point([2, 2])), (Point([3, 3]), Point([2, 2])), np.pi),
    ((Point([1, 1]), Point([2, 2])), (Point([3, 3]), Point([0, 0])), np.pi),
    ((Point([1, 1]), Point([1, 2])), (Point([3, 3]), Point([5, 3])), np.pi / 2),
    ((Point([1, 1]), Point([1, 2])), (Point([0, 0]), Point([2, 2])), np.pi / 4),
    ((Point([1, 1]), Point([1, 2])), (Point([2, 2]), Point([0, 0])), 3 * np.pi / 4),
    # 3d
    ((Point([1, 1, 1]), Point([2, 2, 2])), (Point([0, 0, 0]), Point([3, 3, 3])), 0.),
    ((Point([1, 1, 1]), Point([2, 2, 2])), (Point([3, 3, 3]), Point([2, 2, 2])), np.pi),
    ((Point([1, 1, 1]), Point([2, 2, 2])), (Point([3, 3, 3]), Point([0, 0, 0])), np.pi),
    ((Point([1, 2, 1]), Point([1, 2, 2])), (Point([1, 2, 2]), Point([1, 2, 1])), np.pi),
    ((Point([1, 1, 1]), Point([1, 2, 1])), (Point([3, 3, 1]), Point([5, 3, 1])), np.pi / 2),
    ((Point([1, 1, 1]), Point([1, 2, 1])), (Point([0, 0, 1]), Point([2, 2, 1])), np.pi / 4),
    ((Point([1, 1, 1]), Point([1, 2, 1])), (Point([2, 2, 1]), Point([0, 0, 1])), 3 * np.pi / 4),
])
def test_angle(points1, points2, expected_angle):
    """The angle between directed segments, in radians, in 2D and 3D."""
    segment1 = Segment(*points1)
    segment2 = Segment(*points2)
    assert segment1.angle(segment2) == pytest.approx(expected_angle)
def test_angle_nan():
    """The angle with a singular segment is NaN and emits a geometry warning."""
    seg = Segment(Point([1, 1]), Point([1, 2]))
    degenerate = Segment(Point([0, 0]), Point([0, 0]))
    with pytest.warns(GeometryAlgorithmsWarning):
        angle = seg.angle(degenerate)
    assert np.isnan(angle)
@pytest.mark.parametrize('points1, points2, expected_flag', [
    # 2d
    ((Point([1, 1]), Point([2, 2])), (Point([0, 0]), Point([3, 3])), True),
    ((Point([1, 1]), Point([2, 2])), (Point([3, 3]), Point([1, 1])), True),
    ((Point([0, 0]), Point([2, 2])), (Point([-1, -1]), Point([1, 1])), True),
    ((Point([0, 1]), Point([0, 2])), (Point([0, 2]), Point([0, 3])), True),
    ((Point([0, 1]), Point([0, 2])), (Point([1, 1]), Point([1, 2])), False),
    ((Point([0, 0]), Point([1, 2])), (Point([0, 0]), Point([1, 2.01])), False),
    # 3d
    ((Point([1, 1, 0]), Point([1, 2, 0])), (Point([1, 2, 0]), Point([1, 1, 0])), True),
    ((Point([1, 2, 1]), Point([1, 2, 2])), (Point([1, 2, 2]), Point([1, 2, 1])), True),
    ((Point([0, 0, 0]), Point([1, 1, 1])), (Point([1.5, 1.5, 1.5]), Point([3, 3, 3])), True),
    ((Point([0, 0, 1]), Point([1, 1, 2])), (Point([0, 0, 0]), Point([1, 1, 3])), False),
])
def test_collinear(points1, points2, expected_flag):
    """Collinearity: both segments lie on the same line (direction ignored)."""
    segment1 = Segment(*points1)
    segment2 = Segment(*points2)
    assert segment1.collinear(segment2) == expected_flag
@pytest.mark.parametrize('points1, points2, expected_flag', [
    # 2d
    ((Point([1, 1]), Point([2, 2])), (Point([0, 0]), Point([3, 3])), True),
    ((Point([1, 0]), Point([2, 0])), (Point([2, 1]), Point([1, 1])), True),
    ((Point([0, 1]), Point([1, 1])), (Point([1, 2]), Point([2, 1])), False),
    ((Point([0, 0]), Point([2, 0])), (Point([1, 2]), Point([2, 2])), True),
    ((Point([0, 0]), Point([2, 0])), (Point([3, 3]), Point([3, 3])), True),  # singular
    ((Point([0, 0]), Point([0, 0])), (Point([3, 3]), Point([3, 3])), True),  # singular
    # 3d
    ((Point([1, 1, 0]), Point([1, 2, 0])), (Point([1, 2, 0]), Point([1, 1, 0])), True),
    ((Point([0, 0, 1]), Point([1, 1, 2])), (Point([0, 0, 0]), Point([1, 1, 3])), False),
])
def test_parallel(points1, points2, expected_flag):
    """Parallelism check; singular (zero-length) segments count as parallel."""
    segment1 = Segment(*points1)
    segment2 = Segment(*points2)
    assert segment1.parallel(segment2) == expected_flag
@pytest.mark.parametrize('points1, points2, expected_flag', [
    # 2d
    ((Point([1, 1]), Point([2, 2])), (Point([0, 0]), Point([3, 3])), True),
    ((Point([1, 0]), Point([2, 0])), (Point([2, 1]), Point([1, 1])), True),
    ((Point([0, 1]), Point([1, 1])), (Point([1, 2]), Point([2, 1])), True),
    # 3d
    ((Point([1, 1, 1]), Point([2, 2, 2])), (Point([1, 1, 2]), Point([2, 2, 1])), True),
    ((Point([0, 0, 1]), Point([1, 1, 2])), (Point([0, 0, 0]), Point([1, 1, 3])), True),
    ((Point([1, 1, 2]), Point([1, 2, 3])), (Point([0, 0, 0]), Point([2, 3, 1])), False),
])
def test_coplanar(points1, points2, expected_flag):
    """Coplanarity: any two 2D segments qualify; 3D segments may be skew."""
    segment1 = Segment(*points1)
    segment2 = Segment(*points2)
    assert segment1.coplanar(segment2) == expected_flag
@pytest.mark.parametrize('points1, points2, expected_points', [
    ((Point([1, 1]), Point([2, 2])), (Point([0, 0]), Point([1.5, 1.5])), (Point([1, 1]), Point([1.5, 1.5]))),
    ((Point([2, 2]), Point([1, 1])), (Point([0, 0]), Point([1.5, 1.5])), (Point([1, 1]), Point([1.5, 1.5]))),
    ((Point([1, 1]), Point([2, 2])), (Point([1.5, 1.5]), Point([0, 0])), (Point([1, 1]), Point([1.5, 1.5]))),
    ((Point([2, 2]), Point([1, 1])), (Point([1.5, 1.5]), Point([0, 0])), (Point([1, 1]), Point([1.5, 1.5]))),
    ((Point([1, 1]), Point([2, 2])), (Point([0, 0]), Point([3, 3])), (Point([1, 1]), Point([2, 2]))),
    ((Point([2, 2]), Point([1, 1])), (Point([0, 0]), Point([3, 3])), (Point([1, 1]), Point([2, 2]))),
    ((Point([1, 1]), Point([2, 2])), (Point([3, 3]), Point([0, 0])), (Point([1, 1]), Point([2, 2]))),
    ((Point([2, 2]), Point([1, 1])), (Point([3, 3]), Point([0, 0])), (Point([1, 1]), Point([2, 2]))),
    ((Point([0, 0]), Point([1, 1])), (Point([2, 2]), Point([3, 3])), None),
    ((Point([1, 1]), Point([0, 0])), (Point([2, 2]), Point([3, 3])), None),
    ((Point([0, 0]), Point([1, 1])), (Point([3, 3]), Point([2, 2])), None),
    ((Point([1, 1]), Point([0, 0])), (Point([3, 3]), Point([2, 2])), None),
    ((Point([3, 3]), Point([2, 2])), (Point([1, 1]), Point([0, 0])), None),
    ((Point([0, 0]), Point([1, 1])), (Point([1, 1]), Point([2, 2])), (Point([1, 1]), Point([1, 1]))),
    ((Point([0, 0]), Point([1, 1])), (Point([2, 2]), Point([1, 1])), (Point([1, 1]), Point([1, 1]))),
    ((Point([1, 1]), Point([0, 0])), (Point([1, 1]), Point([2, 2])), (Point([1, 1]), Point([1, 1]))),
    ((Point([1, 1]), Point([2, 2])), (Point([1, 1]), Point([0, 0])), (Point([1, 1]), Point([1, 1]))),
])
def test_overlap(points1, points2, expected_points):
    """Overlap of collinear segments: a sub-segment, a single shared point
    (degenerate segment), or None when they are disjoint; orientation of
    either operand must not matter."""
    segment1 = Segment(*points1)
    segment2 = Segment(*points2)
    if expected_points is None:
        assert segment1.overlap(segment2) == expected_points
    else:
        assert segment1.overlap(segment2) == Segment(*expected_points)
@pytest.mark.parametrize('points1, points2, expected_intersect_point, intersect_type', [
    # 2d
    ((Point([1, 1]), Point([2, 2])),
     (Point([1, 2]), Point([2, 1])), Point([1.5, 1.5]), IntersectionType.EXACT),
    ((Point([1, 1]), Point([2, 2])),
     (Point([3, 3]), Point([0, 0])), Point([1.5, 1.5]), IntersectionType.OVERLAP),
    ((Point([1, 1]), Point([2, 2])), (Point([-5, 2]), Point([2, 10])), None, IntersectionType.NONE),
    # 3d
    ((Point([1, 1, 1]), Point([2, 2, 2])),
     (Point([1, 2, 1]), Point([2, 1, 2])), Point([1.5, 1.5, 1.5]), IntersectionType.EXACT),
    ((Point([1, 1, 1]), Point([2, 2, 2])),
     (Point([0, 0, 0]), Point([3, 3, 3])), Point([1.5, 1.5, 1.5]), IntersectionType.OVERLAP),
    ((Point([1, 1, 2]), Point([1, 2, 3])), (Point([0, 0, 0]), Point([2, 3, 1])), None, IntersectionType.NONE),
])
def test_intersect(points1, points2, expected_intersect_point, intersect_type):
    """Intersection yields a point and a type: EXACT crossing, OVERLAP of
    collinear segments (midpoint of the shared part), or NONE."""
    segment1 = Segment(*points1)
    segment2 = Segment(*points2)
    intersection = segment1.intersect(segment2)
    assert intersection.intersect_point == expected_intersect_point
    assert intersection.intersect_type == intersect_type
@pytest.mark.parametrize('segment_points, point, expected_distance', [
    # 2d
    ((Point([0, 0]), Point([2, 0])), Point([1, 1]), 1.0),
    ((Point([0, 0]), Point([2, 0])), Point([-1, -1]), np.sqrt(2)),
    ((Point([0, 0]), Point([2, 0])), Point([3, 1]), np.sqrt(2)),
    ((Point([0, 0]), Point([2, 2])), Point([0, 2]), np.sqrt(2)),
    ((Point([0, 0]), Point([2, 2])), Point([-2, 0]), 2.0),
    ((Point([0, 0]), Point([2, 2])), Point([-1, -1]), np.sqrt(2)),
    ((Point([0, 0]), Point([2, 2])), Point([1, 0]), np.sqrt(2)/2),
    ((Point([0, 0]), Point([2, 2])), Point([3, 3]), np.sqrt(2)),
    ((Point([0, 0]), Point([2, 2])), Point([3, 2]), 1.0),
    ((Point([0, 0]), Point([2, 2])), Point([3, 1]), np.sqrt(2)),
    ((Point([0, 0]), Point([2, 2])), Point([1, 1]), 0.0),
    # 3d
    ((Point([0, 0, 0]), Point([2, 0, 0])), Point([1, 1, 0]), 1.0),
    ((Point([0, 0, 0]), Point([2, 2, 2])), Point([1, 1, 1]), 0.0),
    ((Point([0, 0, 0]), Point([2, 2, 2])), Point([3, 3, 1]), np.sqrt(3)),
    ((Point([0, 0, 0]), Point([2, 2, 2])), Point([3, 3, 3]), np.sqrt(3)),
    ((Point([0, 0, 0]), Point([2, 2, 2])), Point([-1, -1, -1]), np.sqrt(3)),
])
def test_distance_point(segment_points, point, expected_distance):
    """Distance from a point to a segment: perpendicular distance when the
    projection falls inside, otherwise distance to the nearest endpoint."""
    segment = Segment(*segment_points)
    assert segment.distance(point) == pytest.approx(expected_distance)
@pytest.mark.parametrize('points1, points2, expected_distance', [
    # 2d
    ((Point([0, 0]), Point([2, 0])), (Point([0, 2]), Point([2, 2])), 2.0),
    ((Point([0, 0]), Point([2, 0])), (Point([1, 1]), Point([3, 1])), 1.0),
    ((Point([0, 0]), Point([2, 0])), (Point([-1, 0]), Point([3, 0])), 0.0),
    ((Point([1, 1]), Point([2, 2])), (Point([1, 2]), Point([2, 1])), 0.0),
    ((Point([2, 0]), Point([2, 2])), (Point([1, 1]), Point([3, 1])), 0.0),
    ((Point([0, 0]), Point([1, 0])), (Point([2, 1]), Point([3, 1])), np.sqrt(2)),
    ((Point([0, 0]), Point([1, 0])), (Point([-2, -1]), Point([-1, -1])), np.sqrt(2)),
    # 3d
    ((Point([2, 2, 0]), Point([2, 2, 2])), (Point([1, 3, 1]), Point([3, 3, 1])), 1.0),
    ((Point([2, 2, 0]), Point([2, 2, 2])), (Point([1, 2, 1]), Point([3, 2, 1])), 0.0),
    ((Point([0, 0, 0]), Point([1, 1, 1])), (Point([2, 2, 1]), Point([3, 3, 2])), np.sqrt(2)),
])
def test_distance_segment(points1, points2, expected_distance):
    """The shortest connecting segment between two segments has the expected
    length (zero when the segments touch or cross)."""
    segment1 = Segment(*points1)
    segment2 = Segment(*points2)
    seg = segment1.shortest_segment(segment2)
    assert seg.seglen == pytest.approx(expected_distance)
def test_to_curve():
    """Converting a segment yields the two-point curve with its endpoints."""
    seg = Segment(Point([1, 1]), Point([2, 2]))
    expected = Curve([(1, 2), (1, 2)])
    assert seg.to_curve() == expected
def test_reverse():
segment | |
S3Importer.define_upload_table()
S3ImportJob.define_job_table()
S3ImportJob.define_item_table()
    # -------------------------------------------------------------------------
    # Resource components
    # -------------------------------------------------------------------------
    def add_component(self, table, **links):
        """
        Defines a component.
        @param table: the component table or table name
        @param links: maps the name of a primary table to one or a list of
                      link definitions; each definition is either a str/None
                      (foreign key shorthand) or a dict with keys like
                      "name", "joinby", "link", "pkey", "key", "fkey",
                      "actuate", "autodelete", "autocomplete", "values",
                      "multiple"
        """
        db = current.db
        if not links:
            return
        if hasattr(table, "_tablename"):
            tablename = table._tablename
        else:
            tablename = table
        prefix, name = tablename.split("_", 1)
        for primary in links:
            # Hooks already registered for this primary table, if any
            hooks = self.components.get(primary, Storage())
            l = links[primary]
            if not isinstance(l, (list, tuple)):
                l = [l]
            for link in l:
                if link is None or isinstance(link, str):
                    # Shorthand: link is just the foreign key name
                    # (or None for defaults) => no link table
                    alias = name
                    pkey = None
                    fkey = link
                    linktable = None
                    lkey = None
                    rkey = None
                    actuate = None
                    autodelete = False
                    autocomplete = None
                    values = None
                    multiple = True
                else:
                    # Full dict-style definition; "joinby" is mandatory
                    alias = link.get("name", name)
                    joinby = link.get("joinby", None)
                    if joinby is None:
                        continue
                    linktable = link.get("link", None)
                    if hasattr(linktable, "_tablename"):
                        linktable = linktable._tablename
                    pkey = link.get("pkey", None)
                    if linktable is None:
                        # Direct join: joinby is the component's foreign key
                        lkey = None
                        rkey = None
                        fkey = joinby
                    else:
                        # Link-table join: joinby references the primary
                        # table, "key" references the component table
                        lkey = joinby
                        rkey = link.get("key", None)
                        if not rkey:
                            continue
                        fkey = link.get("fkey", None)
                    actuate = link.get("actuate", None)
                    autodelete = link.get("autodelete", False)
                    autocomplete = link.get("autocomplete", None)
                    values = link.get("values", None)
                    multiple = link.get("multiple", True)
                component = Storage(tablename=tablename,
                                    pkey=pkey,
                                    fkey=fkey,
                                    linktable=linktable,
                                    lkey=lkey,
                                    rkey=rkey,
                                    actuate=actuate,
                                    autodelete=autodelete,
                                    autocomplete=autocomplete,
                                    values=values,
                                    multiple=multiple)
                hooks[alias] = component
            self.components[primary] = hooks
        return
# -------------------------------------------------------------------------
def get_component(self, table, name):
"""
Finds a component definition.
@param table: the primary table or table name
@param name: the component name (without prefix)
"""
components = self.get_components(table, names=name)
if name in components:
return components[name]
else:
return None
# -------------------------------------------------------------------------
    def get_components(self, table, names=None):
        """
        Finds components of a table
        @param table: the table or table name
        @param names: a list of components names to limit the search to,
                      None or empty list for all available components
        @return: a Storage mapping alias -> component definition, or None
                 if the primary table is not defined
        """
        db = current.db
        load = S3Model.table
        hooks = Storage()
        single = False
        if hasattr(table, "_tablename"):
            tablename = table._tablename
        else:
            tablename = table
            table = load(tablename)
        if table is None:
            # Primary table not defined
            return None
        if isinstance(names, str):
            # Single component requested by name
            single = True
            names = [names]
        # Hooks registered directly for this table
        h = self.components.get(tablename, None)
        if h:
            self.__get_hooks(hooks, h, names=names)
        if not single or single and not len(hooks):
            # Also search hooks registered for the table's super-entities
            # (unless a single component was requested and already found)
            supertables = self.get_config(tablename, "super_entity")
            if supertables:
                if not isinstance(supertables, (list, tuple)):
                    supertables = [supertables]
                for s in supertables:
                    if isinstance(s, str):
                        s = load(s)
                    if s is None:
                        continue
                    h = self.components.get(s._tablename, None)
                    if h:
                        self.__get_hooks(hooks, h, names=names, supertable=s)
        # Resolve the selected hooks into full component definitions
        components = Storage()
        for alias in hooks:
            hook = hooks[alias]
            tn = hook.tablename
            lt = hook.linktable
            ctable = load(tn)
            if ctable is None:
                # Component table not defined => skip
                continue
            if lt:
                ltable = load(lt)
                if ltable is None:
                    # Link table not defined => skip
                    continue
            else:
                ltable = None
            prefix, name = tn.split("_", 1)
            component = Storage(values=hook.values,
                                multiple=hook.multiple,
                                tablename=tn,
                                table=ctable,
                                prefix=prefix,
                                name=name,
                                alias=alias)
            # Join field on the primary side: super-entity key or fkey
            if hook.supertable is not None:
                joinby = hook.supertable._id.name
            else:
                joinby = hook.fkey
            if hook.pkey is None:
                if hook.supertable is not None:
                    component.pkey = joinby
                else:
                    component.pkey = table._id.name
            else:
                component.pkey = hook.pkey
            if ltable is not None:
                # Link-table component
                if hook.actuate:
                    component.actuate = hook.actuate
                else:
                    component.actuate = "link"
                component.linktable = ltable
                if hook.fkey is None:
                    component.fkey = ctable._id.name
                else:
                    component.fkey = hook.fkey
                component.lkey = hook.lkey
                component.rkey = hook.rkey
                component.autocomplete = hook.autocomplete
                component.autodelete = hook.autodelete
            else:
                # Direct-join component
                component.linktable = None
                component.fkey = hook.fkey
                component.lkey = component.rkey = None
                component.actuate = None
                component.autocomplete = None
                component.autodelete = None
            components[alias] = component
        return components
# -------------------------------------------------------------------------
def has_components(self, table):
"""
Checks whether there are components defined for a table
@param table: the table or table name
"""
db = current.db
load = S3Model.table
hooks = Storage()
if hasattr(table, "_tablename"):
tablename = table._tablename
else:
tablename = table
table = load(tablename)
if table is None:
# Primary table not defined
return False
h = self.components.get(tablename, None)
if h:
self.__get_hooks(hooks, h)
if len(hooks):
return True
supertables = self.get_config(tablename, "super_entity")
if supertables:
if not isinstance(supertables, (list, tuple)):
supertables = [supertables]
for s in supertables:
if isinstance(s, str):
s = S3Model.table(s)
if s is None:
continue
h = self.components.get(s._tablename, None)
if h:
self.__get_hooks(hooks, h, supertable=s)
if len(hooks):
return True
return False
# -------------------------------------------------------------------------
def __get_hooks(self, components, hooks, names=None, supertable=None):
"""
DRY Helper method to filter component hooks
"""
db = current.db
for alias in hooks:
if alias in components:
continue
if names is not None and alias not in names:
continue
hook = hooks[alias]
hook["supertable"] = supertable
components[alias] = hook
# -------------------------------------------------------------------------
# Resource Methods
# -------------------------------------------------------------------------
def set_method(self, prefix, name,
component_name=None,
method=None,
action=None):
"""
Adds a custom method for a resource or component
@param prefix: prefix of the resource name (=module name)
@param name: name of the resource (=without prefix)
@param component_name: name of the component
@param method: name of the method
@param action: function to invoke for this method
"""
if not method:
raise SyntaxError("No method specified")
tablename = "%s_%s" % (prefix, name)
if not component_name:
if method not in self.methods:
self.methods[method] = {}
self.methods[method][tablename] = action
else:
if method not in self.cmethods:
self.cmethods[method] = {}
if component_name not in self.cmethods[method]:
self.cmethods[method][component_name] = {}
self.cmethods[method][component_name][tablename] = action
# -------------------------------------------------------------------------
def get_method(self, prefix, name, component_name=None, method=None):
"""
Retrieves a custom method for a resource or component
@param prefix: prefix of the resource name (=module name)
@param name: name of the resource (=without prefix)
@param component_name: name of the component
@param method: name of the method
"""
if not method:
return None
tablename = "%s_%s" % (prefix, name)
if not component_name:
if method in self.methods and tablename in self.methods[method]:
return self.methods[method][tablename]
else:
return None
else:
if method in self.cmethods and \
component_name in self.cmethods[method] and \
tablename in self.cmethods[method][component_name]:
return self.cmethods[method][component_name][tablename]
else:
return None
# -------------------------------------------------------------------------
# Resource configuration
# -------------------------------------------------------------------------
def configure(self, tablename, **attr):
"""
Update the extra configuration of a table
@param tablename: the name of the table
@param attr: dict of attributes to update
"""
try:
cfg = self.config.get(tablename, Storage())
except:
if hasattr(tablename, "_tablename"):
tablename = tablename._tablename
cfg = self.config.get(tablename, Storage())
else:
return
cfg.update(attr)
self.config[tablename] = cfg
# -------------------------------------------------------------------------
def get_config(self, tablename, key, default=None):
"""
Reads a configuration attribute of a resource
@param tablename: the name of the resource DB table
@param key: the key (name) of the attribute
"""
if tablename in self.config:
return self.config[tablename].get(key, default)
else:
return default
# -------------------------------------------------------------------------
def clear_config(self, tablename, *keys):
"""
Removes configuration attributes of a resource
@param table: the resource DB table
@param keys: keys of attributes to remove (maybe multiple)
"""
if not keys:
if tablename in self.config:
del self.config[tablename]
else:
if tablename in self.config:
for k in keys:
if k in self.config[tablename]:
del self.config[tablename][k]
# -------------------------------------------------------------------------
# Super-Entity API
# -------------------------------------------------------------------------
    def super_entity(self, tablename, key, types, *fields, **args):
        """
        Define a super-entity table
        @param tablename: the tablename
        @param key: name of the primary key
        @param types: a dictionary of instance types (tablename ->
                      human-readable label), used for representation
        @param fields: any shared fields
        @param args: table arguments (e.g. migrate)
        @return: the newly defined Table
        """
        # postgres workaround: give the id sequence an explicit name
        if current.db._dbname == "postgres":
            sequence_name = "%s_%s_Seq" % (tablename, key)
        else:
            sequence_name = None
        table = current.db.define_table(tablename,
                                        Field(key, "id",
                                              readable=False,
                                              writable=False),
                                        Field("deleted", "boolean",
                                              readable=False,
                                              writable=False,
                                              default=False),
                                        Field("instance_type",
                                              readable=False,
                                              writable=False),
                                        Field("uuid", length=128,
                                              readable=False,
                                              writable=False),
                                        sequence_name=sequence_name,
                                        *fields, **args)
        # Show the human-readable type label instead of the raw tablename
        table.instance_type.represent = lambda opt: types.get(opt, opt)
        return table
# -------------------------------------------------------------------------
@staticmethod
def super_key(supertable, default=None):
"""
Get the name of the key for a super-entity
@param supertable: the super-entity table
"""
if supertable is None and default:
return default
try:
return supertable._id.name
except AttributeError:
pass
raise SyntaxError("No id-type key found in %s" % supertable._tablename)
# -------------------------------------------------------------------------
def super_link(self, name, supertable,
label=None,
comment=None,
represent=None,
orderby=None,
sort=True,
filterby=None,
filter_opts=None,
groupby=None,
widget=None,
empty=True,
default=DEFAULT,
ondelete="CASCADE",
readable=False,
writable=False):
"""
Get a foreign key field for a super-entity
@param supertable: the super-entity table
@param label: label for the field
@param comment: comment for the field
@param readable: set the field readable
@param represent: set a representation function for the field
"""
if isinstance(supertable, str):
supertable = S3Model.table(supertable)
if supertable is None:
if name is not None:
return | |
# Repository: jskinn/arvet
import unittest
import typing
import numpy as np
import transforms3d as tf3d
import arvet.util.transform as tf
from arvet.util.test_helpers import ExtendedTestCase
import arvet.util.trajectory_helpers as th
class TestZeroTrajectory(ExtendedTestCase):
    """Tests for th.zero_trajectory (re-anchor a trajectory at the origin)."""
    def test_zero_trajectory_starts_result_at_zero(self):
        """The earliest pose of the zeroed trajectory is the identity transform."""
        traj = create_trajectory(start_location=np.array([100, 200, 300]))
        result = th.zero_trajectory(traj)
        first_pose = result[min(result.keys())]
        self.assertEqual(tf.Transform(), first_pose)
    def test_zero_trajectory_keeps_same_timestamps(self):
        """Zeroing must not add, drop, or shift timestamps."""
        traj = create_trajectory(start_location=np.array([100, 200, 300]))
        result = th.zero_trajectory(traj)
        self.assertEqual(set(traj.keys()), set(result.keys()))
    def test_zero_trajectory_preserves_relative_motion(self):
        """Frame-to-frame motions are unchanged by the rigid re-anchoring."""
        traj = create_trajectory(start_location=np.array([100, 200, 300]))
        original_motions = th.trajectory_to_motion_sequence(traj)
        result = th.zero_trajectory(traj)
        result_motions = th.trajectory_to_motion_sequence(result)
        self.assertEqual(set(original_motions.keys()), set(result_motions.keys()))
        for time in original_motions.keys():
            self.assertNPClose(original_motions[time].location, result_motions[time].location)
            self.assertNPClose(original_motions[time].rotation_quat(True), result_motions[time].rotation_quat(True))
class TestFindTrajectoryScale(unittest.TestCase):
    """Tests for th.find_trajectory_scale (mean speed over a trajectory)."""
    def test_returns_zero_for_empty_trajectory(self):
        """An empty trajectory has zero scale."""
        self.assertEqual(0, th.find_trajectory_scale({}))
    def test_returns_zero_for_single_pose_trajectory(self):
        """A single pose contains no motion, so the scale is zero."""
        self.assertEqual(0, th.find_trajectory_scale({0: tf.Transform()}))
    def test_returns_speed_for_constant_speed(self):
        """For uniform motion the scale equals the constant speed."""
        random = np.random.RandomState(16492)
        speed = random.uniform(10, 100)
        traj = {
            float(time): tf.Transform(location=(speed * time, 0, 0))
            for time in range(100)
        }
        result = th.find_trajectory_scale(traj)
        self.assertAlmostEqual(speed, result, places=13)
    def test_returns_mean_speed(self):
        """For varying speeds the scale is the arithmetic mean speed."""
        random = np.random.RandomState(56634)
        speeds = random.uniform(10, 100, 100)
        traj = {0: tf.Transform()}
        prev_location = np.zeros(3)
        for time in range(speeds.shape[0]):
            new_location = prev_location + np.array([speeds[time], 0, 0])
            traj[float(time) + 1] = tf.Transform(location=new_location)
            prev_location = new_location
        result = th.find_trajectory_scale(traj)
        self.assertAlmostEqual(float(np.mean(speeds)), result, places=13)
    def test_is_independent_of_direction_or_orientation(self):
        """The scale depends only on speeds, not heading or orientation."""
        random = np.random.RandomState(15646)
        speeds = random.uniform(10, 100, 100)
        traj1 = {0: tf.Transform()}
        traj2 = {0: tf.Transform()}
        prev_location = np.zeros(3)
        for time in range(speeds.shape[0]):
            # Random unit direction per step; traj2 also gets random rotations
            direction = random.uniform(-1, 1, 3)
            direction = direction / np.linalg.norm(direction)
            new_location = prev_location + speeds[time] * direction
            traj1[float(time) + 1] = tf.Transform(location=new_location)
            traj2[float(time) + 1] = tf.Transform(location=new_location, rotation=random.uniform(-1, 1, 4))
            prev_location = new_location
        no_motion_ref = th.find_trajectory_scale(traj1)
        result = th.find_trajectory_scale(traj2)
        self.assertAlmostEqual(float(np.mean(speeds)), result, places=13)
        self.assertEqual(no_motion_ref, result)
    def test_scales_with_time(self):
        """Speed is distance over elapsed time, not distance per sample."""
        random = np.random.RandomState(21273)
        speeds = random.uniform(10, 100, 100)
        times = np.abs(random.uniform(0.1, 3, 100))
        traj = {0: tf.Transform()}
        prev_location = np.zeros(3)
        prev_time = 0
        for idx in range(speeds.shape[0]):
            direction = random.uniform(-1, 1, 3)
            direction = direction / np.linalg.norm(direction)
            # distance = speed * elapsed time for this step
            new_location = prev_location + speeds[idx] * times[idx] * direction
            traj[prev_time + times[idx]] = tf.Transform(location=new_location, rotation=random.uniform(-1, 1, 4))
            prev_location = new_location
            prev_time = prev_time + times[idx]
        result = th.find_trajectory_scale(traj)
        self.assertAlmostEqual(float(np.mean(speeds)), result, places=13)
    def test_handles_none(self):
        """None poses (tracking lost) are skipped when averaging speeds."""
        traj = {
            0: tf.Transform(),
            1: tf.Transform(location=(10, 0, 0)),
            2: None,
            3: tf.Transform(location=(30, 0, 0)),
            4: tf.Transform(location=(40, 0, 0)),
            5: None,
            6: tf.Transform(location=(60, 0, 0)),
            7: None
        }
        self.assertEqual(10, th.find_trajectory_scale(traj))
    def test_returns_zero_for_trajectory_entirely_none(self):
        """A trajectory with no valid poses at all has zero scale."""
        result = th.find_trajectory_scale({0.334 * idx: None for idx in range(100)})
        self.assertEqual(0, result)
class TestRescaleTrajectory(ExtendedTestCase):
    """Tests for th.rescale_trajectory (scale a trajectory to a target speed)."""
    def test_does_nothing_to_empty_trajectory(self):
        """An empty trajectory is returned unchanged."""
        self.assertEqual({}, th.rescale_trajectory({}, 3))
    def test_does_nothing_to_single_pose_trajectory(self):
        """A single pose has no motion to rescale."""
        self.assertEqual({0: tf.Transform()}, th.rescale_trajectory({0: tf.Transform()}, 3))
    def test_does_nothing_to_a_trajectory_entirely_none(self):
        """A trajectory with no valid poses is returned unchanged."""
        traj = {0.2233 * idx: None for idx in range(10)}
        result = th.rescale_trajectory(traj, 10)
        self.assertEqual(traj, result)
    def test_changes_trajectory_scale(self):
        """The rescaled trajectory has exactly the requested scale."""
        traj = create_trajectory(seed=64075)
        result = th.rescale_trajectory(traj, 10)
        self.assertAlmostEqual(10, th.find_trajectory_scale(result), places=14)
    def test_preserves_timestamps(self):
        """Rescaling must not add, drop, or shift timestamps."""
        traj = create_trajectory(seed=55607)
        result = th.rescale_trajectory(traj, 10)
        self.assertEqual(set(traj.keys()), set(result.keys()))
    def test_preserves_motion_direction(self):
        """Each frame-to-frame motion keeps its direction, only its length changes."""
        traj = create_trajectory(seed=23377)
        base_motions = th.trajectory_to_motion_sequence(traj)
        result = th.rescale_trajectory(traj, 10)
        result_motions = th.trajectory_to_motion_sequence(result)
        for time in base_motions.keys():
            if not np.array_equal(base_motions[time].location, np.zeros(3)):
                base_direction = base_motions[time].location / np.linalg.norm(base_motions[time].location)
                result_direction = result_motions[time].location / np.linalg.norm(result_motions[time].location)
                self.assertNPClose(base_direction, result_direction)
    def test_preserves_orientations(self):
        """Rescaling changes locations only; orientations are untouched."""
        traj = create_trajectory(seed=56604)
        result = th.rescale_trajectory(traj, 10)
        for time in traj.keys():
            self.assertNPEqual(traj[time].rotation_quat(True), result[time].rotation_quat(True))
    def test_scales_uniformly(self):
        """All motions are scaled by the same factor."""
        traj = create_trajectory(seed=47243)
        base_motions = th.trajectory_to_motion_sequence(traj)
        base_distances = {
            time: np.linalg.norm(motion.location)
            for time, motion in base_motions.items()
        }
        result = th.rescale_trajectory(traj, 10)
        result_motions = th.trajectory_to_motion_sequence(result)
        result_distances = {
            time: np.linalg.norm(motion.location)
            for time, motion in result_motions.items()
        }
        # For each pair of times, assert that their relative scale remains the same
        for time1 in base_motions.keys():
            for time2 in base_motions.keys():
                if time2 > time1:
                    self.assertAlmostEqual(
                        base_distances[time1] / base_distances[time2],
                        result_distances[time1] / result_distances[time2],
                        places=10
                    )
    def test_handles_trajectory_containing_none(self):
        """None poses are carried through unchanged; valid poses are rescaled."""
        traj = {
            0: tf.Transform(),
            1: tf.Transform(location=(10, 0, 0)),
            2: None,
            3: tf.Transform(location=(30, 0, 0)),
            4: tf.Transform(location=(40, 0, 0)),
            5: None,
            6: tf.Transform(location=(60, 0, 0)),
            7: None
        }
        result = th.rescale_trajectory(traj, 1)
        self.assertEqual({
            0: tf.Transform(),
            1: tf.Transform(location=(1, 0, 0)),
            2: None,
            3: tf.Transform(location=(3, 0, 0)),
            4: tf.Transform(location=(4, 0, 0)),
            5: None,
            6: tf.Transform(location=(6, 0, 0)),
            7: None
        }, result)
class TestTrajectoryToMotionSequence(unittest.TestCase):
    """Tests for th.trajectory_to_motion_sequence (poses -> relative motions)."""
    def test_works_on_empty_trajectory(self):
        """An empty trajectory yields an empty motion sequence."""
        self.assertEqual({}, th.trajectory_to_motion_sequence({}))
    def test_works_on_single_pose_trajectory(self):
        """A single pose has no relative motions."""
        self.assertEqual({}, th.trajectory_to_motion_sequence({0: tf.Transform()}))
    def test_skips_first_timestamp_due_to_fencepost_error(self):
        """N poses produce N-1 motions; the earliest timestamp is dropped."""
        trajectory = create_trajectory(seed=64577)
        motion_sequence = th.trajectory_to_motion_sequence(trajectory)
        self.assertEqual(len(trajectory) - 1, len(motion_sequence))
        pose_times = set(trajectory.keys())
        first_time = min(pose_times)
        self.assertNotIn(first_time, set(motion_sequence.keys()))
        self.assertEqual(pose_times - {first_time}, set(motion_sequence.keys()))
    def test_contains_sequence_of_relative_motions(self):
        """Each motion is the relative transform from the previous pose."""
        trajectory = create_trajectory(seed=58690)
        motion_sequence = th.trajectory_to_motion_sequence(trajectory)
        previous_time = 0
        for current_time in sorted(motion_sequence.keys()):
            expected_motion = trajectory[previous_time].find_relative(trajectory[current_time])
            self.assertEqual(expected_motion, motion_sequence[current_time])
            previous_time = current_time
class TestComputeAverageTrajectory(ExtendedTestCase):
    def test_produces_average_location(self):
        """The mean trajectory's locations are the per-time mean locations."""
        # this is 5 sequences of 10 3-vector locations
        locations = np.random.normal(0, 4, (10, 5, 3))
        locations += np.array([[[idx, 25 - idx * idx, 3]] for idx in range(10)])
        # Flatten across the 5 sequences, giving 10 3-vectors
        mean_locations = np.mean(locations, axis=1)
        # Find the mean trajectory
        trajectories = [
            {
                time: tf.Transform(location=locations[time, traj_idx, :])
                for time in range(10)
            }
            for traj_idx in range(5)
        ]
        mean_trajectory = th.compute_average_trajectory(trajectories)
        # Check the locations are averaged
        for time in range(10):
            self.assertIn(time, mean_trajectory)
            self.assertNPEqual(mean_locations[time], mean_trajectory[time].location)
    def test_produces_average_orientation(self):
        """The mean trajectory's orientations are the quaternion means."""
        # this is 5 sequences of 10 quaternion orientations that are close together
        orientations = [
            [
                tf3d.quaternions.axangle2quat(
                    (1, time_idx, 3),
                    (time_idx + np.random.uniform(-0.1, 0.1)) * np.pi / 17
                )
                for _ in range(5)
            ]
            for time_idx in range(10)
        ]
        # Flatten across the 5 sequences, giving 10 quaternions
        # We use our custom quat_mean, because average orientations are hard
        mean_orientations = [
            tf.quat_mean(orientations_at_time)
            for orientations_at_time in orientations
        ]
        # Find the mean trajectory
        trajectories = [
            {
                time_idx: tf.Transform(location=(time_idx, 0, 0),
                                       rotation=orientations[time_idx][traj_idx], w_first=True)
                for time_idx in range(10)
            }
            for traj_idx in range(5)
        ]
        mean_trajectory = th.compute_average_trajectory(trajectories)
        # Check the orientations are averaged
        for time_idx in range(10):
            self.assertIn(time_idx, mean_trajectory)
            self.assertNPEqual(mean_orientations[time_idx], mean_trajectory[time_idx].rotation_quat(w_first=True))
    def test_associates_on_median_times(self):
        """Jittered per-trajectory timestamps are merged onto their median."""
        # 5 sequences of times, each length 10, randomly varied by 0.05, which is as much variance as this will handle
        times = np.random.uniform(-0.05, 0.05, (10, 5))
        times += np.arange(10).reshape(10, 1)  # Make time increase linearly
        # Find the median times
        median_times = np.median(times, axis=1)
        # this is 5 sequences of 10 3-vector locations
        locations = np.random.normal(0, 4, (10, 5, 3))
        locations += np.array([[[idx, 25 - idx * idx, 3]] for idx in range(10)])
        # Flatten across the 5 sequences, giving 10 3-vectors
        mean_locations = np.mean(locations, axis=1)
        # Find the mean trajectory
        trajectories = [
            {
                times[time_idx, traj_idx]: tf.Transform(location=locations[time_idx, traj_idx, :])
                for time_idx in range(10)
            }
            for traj_idx in range(5)
        ]
        mean_trajectory = th.compute_average_trajectory(trajectories)
        # Check the poses appear at the median times, with averaged locations
        for time_idx in range(10):
            self.assertIn(median_times[time_idx], mean_trajectory)
            self.assertNPEqual(mean_locations[time_idx], mean_trajectory[median_times[time_idx]].location)
    def test_handles_missing_poses(self):
        """Timestamps absent from some trajectories are averaged over the rest."""
        # Choose different times in different trajectories where the pose will be missing
        missing = [[(time_idx + 1) * (traj_idx + 1) % 12 == 0 for traj_idx in range(5)] for time_idx in range(10)]
        # this is 5 sequences of 10 3-vector locations
        locations = np.random.normal(0, 4, (10, 5, 3))
        locations += np.array([[[idx, 25 - idx * idx, 3]] for idx in range(10)])
        # Flatten across the 5 sequences, giving 10 3-vectors
        # We have to leave the missing poses out of the mean
        mean_locations = [
            np.mean([
                locations[time_idx, traj_idx, :]
                for traj_idx in range(5)
                if not missing[time_idx][traj_idx]
            ], axis=0)
            for time_idx in range(10)
        ]
        # Find the mean trajectory
        trajectories = [
            {
                time_idx: tf.Transform(location=locations[time_idx, traj_idx, :])
                for time_idx in range(10)
                if not missing[time_idx][traj_idx]
            }
            for traj_idx in range(5)
        ]
        mean_trajectory = th.compute_average_trajectory(trajectories)
        # Check the locations are averaged over only the present poses
        for time in range(10):
            self.assertIn(time, mean_trajectory)
            self.assertNPEqual(mean_locations[time], mean_trajectory[time].location)
def test_handles_poses_being_none(self):
# Choose different times in different trajectories where the pose will be missing
lost_start = 3
lost_end = 7
missing = [[(time_idx + 1) * (traj_idx + 1) % 12 == 0 for traj_idx in range(5)] for time_idx in range(10)]
# this is 5 sequences of 10 3-vector locations
locations = np.random.normal(0, 4, (10, 5, 3))
locations += np.array([[[idx, 25 - idx * idx, 3]] for idx in range(10)])
# Flatten across the 5 sequences, giving 10 3-vectors
# We have to leave the missing poses out of the mean
mean_locations = [
np.mean([
locations[time_idx, traj_idx, :]
for traj_idx in range(5)
if not lost_start + traj_idx <= time_idx < lost_end
], axis=0)
for time_idx in range(10)
]
# Find the mean trajectory
trajectories = [
{
time_idx: tf.Transform(location=locations[time_idx, traj_idx, :])
if not lost_start + traj_idx <= time_idx < lost_end else None
for time_idx in range(10)
}
for traj_idx in range(5)
]
mean_trajectory = th.compute_average_trajectory(trajectories)
# Check the locations are averaged
for time in range(10):
self.assertIn(time, | |
# File: ansible/roles/test/files/ptftests/sad_path.py
import datetime
import ipaddress
import re
import time
from arista import Arista
from device_connection import DeviceConnection
class SadTest(object):
    """
    Facade for a single sad-path (fault injection) operation during an
    advanced reboot test. Delegates setup, verification, and revert of
    the operation to a SadOper instance.
    """
    def __init__(self, oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, vlan_ports, ports_per_vlan):
        self.oper_type = oper_type
        self.vm_list = vm_list
        self.portchannel_ports = portchannel_ports
        self.vm_dut_map = vm_dut_map
        self.test_args = test_args
        self.vlan_ports = vlan_ports
        self.ports_per_vlan = ports_per_vlan
        self.fails_vm = set()
        self.fails_dut = set()
        self.log = []
        # The SadOper does the actual state changes and checks
        self.shandle = SadOper(self.oper_type, self.vm_list, self.portchannel_ports, self.vm_dut_map, self.test_args, self.vlan_ports, self.ports_per_vlan)
    def setup(self):
        """Apply the sad-path operation (bring entities down) before reboot."""
        self.shandle.sad_setup(is_up=False)
        return self.shandle.retreive_test_info(), self.shandle.retreive_logs()
    def route_setup(self):
        """Apply route changes for inboot routing operations."""
        self.shandle.modify_routes()
        return self.shandle.retreive_logs()
    def verify(self, pre_check=True, inboot=False):
        """
        Verify the operation's expected state.
        @param pre_check: True when verifying before the reboot
        @param inboot: True when verifying an in-boot (routing) operation
        """
        if 'vlan' in self.oper_type:
            self.shandle.verify_vlan_port_state(pre_check=pre_check)
        elif 'routing' in self.oper_type:
            self.shandle.verify_route_add(pre_check=pre_check, inboot=inboot)
        else:
            self.shandle.sad_bgp_verify()
            if 'lag' in self.oper_type:
                self.shandle.sad_lag_verify(pre_check=pre_check)
        return self.shandle.retreive_logs()
    def revert(self):
        """Undo the sad-path operation (bring entities back up)."""
        self.shandle.sad_setup()
        return self.shandle.retreive_logs()
class SadPath(object):
    """
    Selects the entities (neighbor VMs, LAG members, vlan ports) to fault
    for a sad-path operation and collects baseline BGP information from
    the selected neighbor VMs. SadOper subclasses this to perform the
    actual state changes.
    """
    def __init__(self, oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, vlan_ports, ports_per_vlan):
        self.oper_type = ''
        self.memb_cnt = 0
        # routing opers involve every VM; other opers default to a single VM
        self.cnt = 1 if 'routing' not in oper_type else len(vm_list)
        self.ip_cnt = 1
        self.vm_list = vm_list
        self.portchannel_ports = portchannel_ports
        self.vm_dut_map = vm_dut_map
        self.test_args = test_args
        self.dut_connection = DeviceConnection(test_args['dut_hostname'], test_args['dut_username'], password=test_args['dut_password'])
        self.vlan_ports = vlan_ports
        self.ports_per_vlan = ports_per_vlan
        self.vlan_if_port = self.test_args['vlan_if_port']
        self.neigh_vms = []
        self.neigh_names = dict()   # VM mgmt address -> VM name
        self.vm_handles = dict()    # VM mgmt address -> Arista handle
        self.neigh_bgps = dict()
        self.dut_bgps = dict()
        self.log = []
        self.fails = dict()
        self.fails['dut'] = set()
        self.tot_memb_cnt = 0
        self.memb_index = 0
        self.if_port = []
        self.down_vlan_info = []
        self.bp_ip = None
        self.bp_ip6 = None
        self.extract_oper_info(oper_type)
        self.extract_nexthops()
    def extract_nexthops(self):
        """Cache the v4/v6 backplane nexthop IPs used for route operations."""
        if self.test_args['nexthop_ips']:
            self.bp_ip = str(self.test_args['nexthop_ips'][0])
            self.bp_ip6 = str(self.test_args['nexthop_ips'][1])
    def extract_oper_info(self, oper_type):
        """
        Parse an oper_type of the form "<name>[:<cnt>[:<memb_cnt>]]" into
        the operation name, VM/port count and LAG member count.
        """
        if oper_type and ':' in oper_type:
            temp = oper_type.split(':')
            self.oper_type = temp[0]
            # get number of VMs where the preboot sad pass oper needs to be done. For vlan_member case,
            # this will be the number of down vlan ports
            if 'routing' not in oper_type:
                self.cnt = int(temp[1])
                if len(temp) > 2:
                    # get the number of lag members in a portchannel that should be brought down
                    self.memb_cnt = int(temp[-1])
            else:
                # for sad operation during reboot, all VMs should be included in the cnt
                self.cnt = len(self.vm_list)
                self.ip_cnt = int(temp[-1])
        else:
            self.oper_type = oper_type
    def select_vm(self):
        """Pick self.cnt VMs (rotating by day of month) to operate on."""
        self.vm_list.sort()
        vm_len = len(self.vm_list)
        # use the day of the month to select start VM from the list for the sad pass operation
        # neigh_vms list will contain cnt number of VMs starting from the start VM. vm_list will have the rest of the VMs
        vm_index = datetime.datetime.now().day % vm_len if vm_len > 0 else 0
        exceed_len = vm_index + self.cnt - vm_len
        if exceed_len <= 0:
            self.neigh_vms.extend(self.vm_list[vm_index:vm_index+self.cnt])
            self.vm_list = self.vm_list[0:vm_index] + self.vm_list[vm_index+self.cnt:]
        else:
            # selection wraps around the end of the list
            self.neigh_vms.extend(self.vm_list[vm_index:])
            self.neigh_vms.extend(self.vm_list[0:exceed_len])
            self.vm_list = self.vm_list[exceed_len:exceed_len + vm_len - self.cnt]
    def get_neigh_name(self):
        """Resolve the selected VM mgmt addresses to their VM names."""
        for key in self.vm_dut_map:
            for neigh_vm in self.neigh_vms:
                if self.vm_dut_map[key]['mgmt_addr'] == neigh_vm:
                    self.neigh_names[neigh_vm] = key  # VM address to name mapping
                    break
    def down_neigh_port(self):
        # extract ptf ports for the selected VMs and mark them down
        for neigh_name in self.neigh_names.values():
            for port in self.vm_dut_map[neigh_name]['ptf_ports']:
                self.portchannel_ports.remove(port)
    def vm_connect(self):
        """Open an Arista handle to each selected neighbor VM."""
        for neigh_vm in self.neigh_vms:
            self.vm_handles[neigh_vm] = Arista(neigh_vm, None, self.test_args)
            self.vm_handles[neigh_vm].connect()
    def __del__(self):
        # Be defensive: __init__ may have failed before vm_handles existed,
        # and __del__ must not raise during interpreter teardown
        if getattr(self, 'vm_handles', None):
            self.vm_disconnect()
    def vm_disconnect(self):
        """Close all open neighbor VM connections."""
        for vm in self.vm_handles:
            self.vm_handles[vm].disconnect()
    def select_member(self):
        # select index of lag member to put down
        if self.tot_memb_cnt != 0:
            self.memb_index = datetime.datetime.now().day % self.tot_memb_cnt
    def select_vlan_ports(self):
        """Pick self.cnt vlan (interface, port) pairs, rotating by day of month."""
        self.if_port = sorted(self.vlan_if_port, key=lambda tup: tup[0])
        vlan_len = len(self.if_port)
        vlan_index = datetime.datetime.now().day % vlan_len if vlan_len > 0 else 0
        exceed_len = vlan_index + self.cnt - vlan_len
        if exceed_len <= 0:
            self.down_vlan_info.extend(self.if_port[vlan_index:vlan_index+self.cnt])
            self.if_port = self.if_port[0:vlan_index] + self.if_port[vlan_index+self.cnt:]
        else:
            # selection wraps around the end of the list
            self.down_vlan_info.extend(self.if_port[vlan_index:])
            self.down_vlan_info.extend(self.if_port[0:exceed_len])
            self.if_port = self.if_port[exceed_len:exceed_len + vlan_len - self.cnt]
    def down_vlan_ports(self):
        # extract the selected vlan ports and mark them down
        for item in self.down_vlan_info:
            self.vlan_ports = [port for port in self.vlan_ports if port != item[1]]
            for vlan in self.ports_per_vlan:
                self.ports_per_vlan[vlan].remove(item[1])
    def setup(self):
        """Select the faulted VMs/ports and collect baseline BGP info."""
        self.select_vm()
        self.get_neigh_name()
        self.vm_connect()
        # bring down the VM PTF ports only for preboot sad oper
        if 'routing' not in self.oper_type:
            self.down_neigh_port()
        # decide if its all member down or few members down for lag member oper type
        if 'member' in self.oper_type:
            # wrap in list(): dict.values() is a non-indexable view on Python 3
            self.tot_memb_cnt = len(self.vm_dut_map[list(self.neigh_names.values())[0]]['dut_ports'])
            if self.memb_cnt == 0:
                self.memb_cnt = self.tot_memb_cnt
            if self.tot_memb_cnt != self.memb_cnt:
                self.select_member()
        for vm in self.vm_handles:
            self.neigh_bgps[vm], self.dut_bgps[vm] = self.vm_handles[vm].get_bgp_info()
            self.fails[vm] = set()
            self.log.append('Neighbor AS: %s' % self.neigh_bgps[vm]['asn'])
            self.log.append('BGP v4 neighbor: %s' % self.neigh_bgps[vm]['v4'])
            self.log.append('BGP v6 neighbor: %s' % self.neigh_bgps[vm]['v6'])
            self.log.append('DUT BGP v4: %s' % self.dut_bgps[vm]['v4'])
            self.log.append('DUT BGP v6: %s' % self.dut_bgps[vm]['v6'])
    def retreive_test_info(self):
        """Return the (possibly reduced) port/VM sets after setup."""
        return self.vm_list, self.portchannel_ports, self.neigh_vms, self.vlan_ports, self.ports_per_vlan
    def retreive_logs(self):
        """Return the accumulated log lines and failure sets."""
        return self.log, self.fails
class SadOper(SadPath):
    def __init__(self, oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, vlan_ports, ports_per_vlan):
        """
        Extends SadPath with the state needed to perform (and later verify)
        the actual sad-path operation.
        """
        super(SadOper, self).__init__(oper_type, vm_list, portchannel_ports, vm_dut_map, test_args, vlan_ports, ports_per_vlan)
        self.dut_needed = dict()               # VM -> expected DUT-side BGP info (None if session gone)
        self.lag_members_down = dict()         # DUT-side LAG members taken down, per portchannel
        self.neigh_lag_members_down = dict()   # neighbor-side LAG members taken down, per VM name
        self.neigh_lag_state = None
        self.po_neigh_map = dict()
        # index 0/1 selects the log prefix for post/pre reboot messages
        self.msg_prefix = ['Postboot', 'Preboot']
        self.memb_str = 'member' if 'member' in self.oper_type else ''
def populate_bgp_state(self):
[self.dut_needed.setdefault(vm, self.dut_bgps[vm]) for vm in self.neigh_vms]
if self.oper_type == 'neigh_bgp_down':
self.neigh_bgps['changed_state'] = 'down'
self.dut_bgps['changed_state'] = 'Active'
[self.dut_needed.update({vm:None}) for vm in self.neigh_vms]
elif self.oper_type == 'dut_bgp_down':
self.neigh_bgps['changed_state'] = 'Active,OpenSent'
self.dut_bgps['changed_state'] = 'Idle'
elif 'neigh_lag' in self.oper_type:
# on the DUT side, bgp states are different pre and post boot. hence passing multiple values
self.neigh_bgps['changed_state'] = 'Idle'
self.dut_bgps['changed_state'] = 'Connect,Active,Idle'
elif 'dut_lag' in self.oper_type:
self.neigh_bgps['changed_state'] = 'Idle'
self.dut_bgps['changed_state'] = 'Active,Connect,Idle'
    def sad_setup(self, is_up=True):
        """Apply (is_up=False) or revert (is_up=True) this sad-path operation.

        On the pre-boot call (is_up=False) the test info is gathered and the
        expected BGP/LAG/route state is populated before the shut commands
        are issued; the post-boot call replays the same commands with
        is_up=True to restore the original state.
        """
        self.log = []
        if not is_up:
            if 'vlan' in self.oper_type:
                self.select_vlan_ports()
                self.down_vlan_ports()
            else:
                self.setup()
                self.populate_bgp_state()
                if 'lag' in self.oper_type:
                    self.populate_lag_state()
                elif 'routing' in self.oper_type:
                    if self.bp_ip and self.bp_ip6:
                        self.generate_ips()
                        self.build_route_config()
                        neigh_rt_v4_info, ret = self.get_bgp_route_cnt(is_up=is_up)
                        neigh_rt_v6_info, ret1 = self.get_bgp_route_cnt(is_up=is_up, v4=False)
                        if not ret and not ret1:
                            self.build_neigh_rt_map(neigh_rt_v4_info + neigh_rt_v6_info)
        if 'routing' in self.oper_type:
            if self.bp_ip:
                for vm in self.neigh_vms:
                    if not is_up:
                        # Need to add the routes which will be removed during the boot
                        if 'routing_del' in self.oper_type:
                            self.log.append('Adding %d routes from VM %s' % (2 * self.ip_cnt, vm))
                            self.vm_handles[vm].change_bgp_route(self.route_cfg)
                        else:
                            self.log.append('Removing %d routes from VM %s' % (2 * self.ip_cnt, vm))
                            self.vm_handles[vm].change_bgp_route(self.no_route_cfg)
        elif 'bgp' in self.oper_type:
            self.log.append('BGP state change will be for %s' % ", ".join(self.neigh_vms))
            if self.oper_type == 'neigh_bgp_down':
                for vm in self.neigh_vms:
                    self.log.append('Changing state of AS %s to shut' % self.neigh_bgps[vm]['asn'])
                    self.vm_handles[vm].change_bgp_neigh_state(self.neigh_bgps[vm]['asn'], is_up=is_up)
            elif self.oper_type == 'dut_bgp_down':
                self.change_bgp_dut_state(is_up=is_up)
                # give the session time to settle after the DUT-side change
                time.sleep(30)
        elif 'lag' in self.oper_type:
            self.log.append('LAG %s state change will be for %s' % (self.memb_str, ", ".join(self.neigh_vms)))
            if 'neigh_lag' in self.oper_type:
                for vm in self.neigh_vms:
                    # populate entity to be brought down on neigh end (portchannel/portchannel members)
                    if 'member' in self.oper_type:
                        down_intfs = self.neigh_lag_members_down[self.neigh_names[vm]]
                    else:
                        down_intfs = [self.vm_dut_map[self.neigh_names[vm]]['neigh_portchannel']]
                    self.log.append('Changing state of LAG %s %s to shut' % (self.memb_str, ", ".join(down_intfs)))
                    self.vm_handles[vm].change_neigh_intfs_state(down_intfs, is_up=is_up)
            elif 'dut_lag' in self.oper_type:
                self.change_dut_lag_state(is_up=is_up)
                # wait for sometime for lag members state to sync
                time.sleep(120)
        elif 'vlan' in self.oper_type:
            self.change_vlan_port_state(is_up=is_up)
def generate_ips(self):
'''
Generates the prefixes that will be added to the neighbor
'''
self.start_ip_pfx = '192.168.3.11/25'
self.start_ip6_pfx = '20d0:a808:0:80::/120'
self.ip_pfx_list = list(ipaddress.ip_network(u'%s' % self.start_ip_pfx).hosts())[0:self.ip_cnt]
self.ip_pfx_list = [str(ip) for ip in self.ip_pfx_list]
self.ip6_pfx_list = list(ipaddress.IPv6Network(u'%s' % self.start_ip6_pfx).hosts())[0:self.ip_cnt]
self.ip6_pfx_list = [str(ip) for ip in self.ip6_pfx_list]
def build_route_config(self):
# cmds for adding routes
self.route_cfg = []
# cmds for deleting routes
self.no_route_cfg = []
for cnt, ip in enumerate(zip(self.ip_pfx_list, self.ip6_pfx_list)):
# add route cfg
self.route_cfg.append('ip route %s/32 %s' % (ip[0], self.bp_ip))
self.route_cfg.append('ipv6 route %s/128 %s' % (ip[1], self.bp_ip6))
# remove route cfg
self.no_route_cfg.append('no ip route %s/32 %s' % (ip[0], self.bp_ip))
self.no_route_cfg.append('no ipv6 route %s/128 %s' % (ip[1], self.bp_ip6))
self.route_cfg.append('router bgp %s' % self.neigh_bgps[self.neigh_vms[-1]]['asn'])
self.route_cfg.append('redistribute static')
self.route_cfg.append('exit')
self.no_route_cfg.append('router bgp %s' % self.neigh_bgps[self.neigh_vms[-1]]['asn'])
self.no_route_cfg.append('redistribute static route-map PREPENDAS')
self.no_route_cfg.append('exit')
    def get_bgp_route_cnt(self, is_up=True, v4=True):
        """Return (stdout_lines, return_code) with per-neighbor route counts.

        Runs 'show ip[v6] bgp summary' on the DUT; the sed/cut pipeline
        strips the header/footer, collapses whitespace, and keeps fields
        1 and 10, so each output line is "<neighbor_ip> <prefix_count>".
        Failures are recorded in self.fails['dut'].
        """
        # extract the neigh ip and current number of routes
        if v4:
            cmd = 'show ip bgp summary | sed \'1,/Neighbor/d;/^$/,$d;/^-/d\' | sed \'s/\s\s*/ /g\' | cut -d\' \' -f 1,10'
        else:
            cmd = 'show ipv6 bgp summary | sed \'1,/Neighbor/d;/^$/,$d;/^-/d\' | sed \'s/\s\s*/ /g\' | cut -d\' \' -f 1,10'
        stdout, stderr, return_code = self.dut_connection.execCommand(cmd)
        if return_code != 0:
            # msg_prefix[1 - is_up]: bool arithmetic picks 'Postboot' when
            # is_up is True and 'Preboot' when it is False.
            self.fails['dut'].add('%s: Failed to retreive BGP route info from DUT' % self.msg_prefix[1 - is_up])
            self.fails['dut'].add('%s: Return code: %d' % (self.msg_prefix[1 - is_up], return_code))
            self.fails['dut'].add('%s: Stderr: %s' % (self.msg_prefix[1 - is_up], stderr))
        return stdout, return_code
def build_neigh_rt_map(self, neigh_rt_info):
# construct neigh to route cnt map
self.neigh_rt_map = dict()
for line in neigh_rt_info:
key, value = line.strip().split(' ')
self.neigh_rt_map.update({key:value})
def verify_route_cnt(self, rt_incr, is_up=True, v4=True):
neigh_rt_info, ret = self.get_bgp_route_cnt(is_up=is_up, v4=v4)
if not ret:
for line in neigh_rt_info:
neigh_ip, rt_cnt | |
<filename>pysigview/widgets/signal_display.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 10:09:46 2017
Ing.,Mgr. (MSc.) <NAME>
Biomedical engineering
International Clinical Research Center
St. Anne's University Hospital in Brno
Czech Republic
&
Mayo systems electrophysiology lab
Mayo Clinic
200 1st St SW
Rochester, MN
United States
"""
# Std lib imports
from time import time, sleep
import pickle
# Third party imports
from PyQt5.QtWidgets import (QWidget, QHBoxLayout, QVBoxLayout, QPushButton,
QDialog, QFileDialog, QComboBox)
from PyQt5.QtCore import Qt, pyqtSignal, QSize, QThread
from vispy import scene, color
from vispy.scene import (LinearRegion, Image, Mesh, GridLines, Markers, Axis,
Line, Text)
from vispy.util.event import Event
import numpy as np
from scipy.io import savemat
from PIL import Image as pil_Image
# Local imports
from pysigview.cameras.signal_camera import SignalCamera
from pysigview.core.visual_container import SignalContainer
from pysigview.visuals.multiline_visual import Multiline
from pysigview.visuals.crosshair_visual import Crosshair
from pysigview.config.main import CONF
from pysigview.config.utils import get_home_dir
from pysigview.core import source_manager as sm
from pysigview.core.thread_workers import TimerWorker
from pysigview.core.source_manager import DataMap
from pysigview.utils.qthelpers import (hex2rgba, create_toolbutton,
create_plugin_layout)
class SignalDisplay(QWidget):
    # Technically this is not a plugin, but it exposes the same class
    # attributes so the rest of the application can treat it like one.
    CONF_SECTION = 'signal_display'       # section key in the CONF store
    CONFIGWIDGET_CLASS = None
    IMG_PATH = 'images'
    DISABLE_ACTIONS_WHEN_HIDDEN = True
    shortcut = None
    # Signals. NOTE: the 'input_recieved' spelling is part of the public
    # API (signal name and attribute) and must not be corrected without
    # updating every connected slot.
    data_map_changed = pyqtSignal(DataMap, name='data_map_changed')
    plots_changed = pyqtSignal(name='plots_changed')
    # TODO: This will send a signal to the event evaluator in the future
    input_recieved = pyqtSignal(Event, name='input_recieved')
    canvas_resized = pyqtSignal(name='canvas_resized')
    subview_changed = pyqtSignal(name='subview_changed')
    # Control signals for the auto-slide worker thread
    stop_slide_worker = pyqtSignal()
    start_slide_worker = pyqtSignal()
    def __init__(self, parent):
        """Build the signal display: vispy canvas, visuals, toolbar, workers.

        `parent` is the main window; its signals are connected at the end.
        """
        super(SignalDisplay, self).__init__(parent)
        # Convenience transcripts
        self.main = self.parent()
        # Widget behavior
        self.setAcceptDrops(True)
        # Plot variables
        self.sample_map = []
        self.plot_containers = []
        # TODO: Selected signal plot used for data shifting, colors, etc
        self.master_pc = None
        self.master_plot = None  # TODO - to be deleted
        self.curr_pc = None
        # Mouse position relative to the camera rect (set in on_mouse_move)
        self.rect_rel_w_pos = None
        self.rect_rel_h_pos = None
        self.resize_flag = False
        self.highlight_mode = False
        self.measurement_mode = False
        self.autoscale = False
        self.disconts_processed = False
        self.data_map = DataMap()
        self.data_source = sm.ODS
        self.data_array = None
        # Widget layout
        layout = QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        # These variables are assigned in channels plugin
        self.hidden_channels = None
        self.visible_channels = None
        # Setup camera
        self.camera = SignalCamera()
        # Autoslide worker lives in its own thread; controlled through the
        # start/stop_slide_worker signals declared on the class.
        self.slide_worker_stopped = True
        # TODO: this should be in config
        self.slide_worker = TimerWorker(1)
        self.slide_worker_thread = QThread()
        self.slide_worker.moveToThread(self.slide_worker_thread)
        self.start_slide_worker.connect(self.slide_worker.run)
        self.stop_slide_worker.connect(self.slide_worker.interupt)
        self.slide_worker.time_passed.connect(self.autoslide)
        self.slide_worker_thread.start()
        # Vispy canvas
        self.canvas = scene.SceneCanvas(show=True, keys='interactive',
                                        parent=self,
                                        bgcolor=CONF.get(self.CONF_SECTION,
                                                         'bgcolor'))
        self.canvas.connect(self.on_key_press)
        self.canvas.connect(self.on_key_release)
        self.canvas.connect(self.on_mouse_move)
        self.canvas.connect(self.on_mouse_press)
        self.canvas.connect(self.on_mouse_release)
        self.canvas.connect(self.on_mouse_wheel)
        self.canvas.connect(self.on_resize)
        # Timer to let the scene redraw if key is hit
        self.event_time = time()
        self.plot_update_done = False
        # ??? Create two viewboxes - for labels and signals
        self.signal_view = self.canvas.central_widget.add_view(
            camera=self.camera)
        self.cong_discontinuities = None
        self.color_coding_mode = 0
        self.color_palette = CONF.get(self.CONF_SECTION, 'color_palette')
        self.update_cam_state()
        # ----- Initial visuals operations-----
        # TODO - Add crosshair color to CONF
        # Measurements
        ch_color = CONF.get(self.CONF_SECTION, 'init_crosshair_color')
        self.crosshair = Crosshair(parent=self.signal_view.scene,
                                   color=hex2rgba(ch_color))
        m_color = CONF.get(self.CONF_SECTION, 'init_marker_color')
        # TODO marker color
        self.marker = Markers(parent=self.signal_view.scene)
        self.xaxis = Axis(parent=self.signal_view.scene,
                          tick_direction=(0., 1.),
                          axis_width=1, tick_width=1,
                          anchors=('center', 'top'),
                          axis_color=m_color,
                          tick_color=m_color,
                          text_color=m_color)
        self.x_tick_spacing = 1000
        self.yaxis = Axis(parent=self.signal_view.scene,
                          tick_direction=(1., 0.),
                          axis_width=1, tick_width=1,
                          anchors=('left', 'center'),
                          axis_color=m_color,
                          tick_color=m_color,
                          text_color=m_color)
        self.y_tick_spacing = 100
        self.measure_line = Line(parent=self.signal_view.scene,
                                 width=3, color=m_color)
        # TODO - textbox
        self.describe_text = Text(anchor_x='left',
                                  anchor_y='bottom',
                                  parent=self.signal_view.scene,
                                  color=m_color)
        # Signal highlighting (transparent until highlight mode is active)
        self.highlight_rec = Mesh(parent=self.signal_view.scene,
                                  color=np.array([0., 0., 0., 0.]),
                                  mode='triangle_fan')
        # Grid
        self.grid = None
        # Discontinuity marker (initially fully transparent)
        self.disc_marker = LinearRegion(np.array([0, 0]),
                                        np.array([[0., 0., 0., 0.],
                                                  [0., 0., 0., 0.]]),
                                        parent=self.signal_view.scene)
        self.signal_label_dict = {}
        # Main signal visual with labels
        w = CONF.get(self.CONF_SECTION, 'init_line_width')
        self.signal_visual = Multiline(width=w,
                                       parent=self.signal_view.scene)
        self.label_visual = Text(anchor_x='left',
                                 anchor_y='top',
                                 parent=self.signal_view.scene)
        # TODO - one set of x and y axes for measurements
        # ----- Tool bar -----
        btn_layout = QHBoxLayout()
        for btn in self.setup_buttons():
            if btn is None:
                continue
            btn.setAutoRaise(True)
            btn.setIconSize(QSize(20, 20))
            btn_layout.addWidget(btn)
        # if options_button:
        #     btn_layout.addStretch()
        #     btn_layout.addWidget(options_button, Qt.AlignRight)
        # TODO - this is temporary - solve the rendering in different thread
        select_mode = QComboBox(self)
        select_mode.insertItems(0, ['Browse', 'Research'])
        antialias = CONF.get(self.CONF_SECTION, 'antialiasing')
        if antialias == 'filter':
            select_mode.setCurrentIndex(0)
        elif antialias == 'min_max':
            select_mode.setCurrentIndex(1)
        select_mode.currentIndexChanged.connect(self.switch_display_mode)
        btn_layout.addWidget(select_mode)
        # Color coding
        color_code = QComboBox(self)
        color_code.insertItems(0, ['None', 'Channel', 'Group', 'Amplitude'])
        color_code.currentIndexChanged.connect(self.switch_cc_mode)
        btn_layout.addWidget(color_code)
        # Metadata reload button
        btn_layout.setAlignment(Qt.AlignLeft)
        layout = create_plugin_layout(btn_layout)
        # ----- Set layout -----
        layout.addWidget(self.canvas.native)
        # Set the whole layout
        self.setLayout(layout)
        # Connect signals
        self.main.sig_file_opened.connect(self.initialize_data_map)
        self.main.metadata_reloaded.connect(self.create_conglomerate_disconts)
        self.plots_changed.connect(self.set_plot_update)
        self.plots_changed.connect(self.subsample)
        self.plots_changed.connect(self.rescale_grid)
        self.input_recieved.connect(self.set_highlight_mode)
        self.input_recieved.connect(self.show_measure_line)
        self.canvas_resized.connect(self.update_subsample)
# ----- Setup functions -----
    def setup_buttons(self):
        """Create and return the toolbar buttons for the display widget.

        Returns a tuple; None entries are skipped by the caller (__init__).
        """
        take_screenshot = create_toolbutton(self, icon='camera.svg',
                                            tip='Take screenshot',
                                            triggered=self.take_screenshot)
        show_grid = create_toolbutton(self, icon='grid.svg',
                                      tip='Show grid',
                                      triggered=self.show_grid)
        autoscale = create_toolbutton(self, icon='autoscale.svg',
                                      tip='Autoscale',
                                      triggered=self.set_autoscale)
        save_data = create_toolbutton(self, icon='floppy-disk.svg',
                                      tip='Save displayed data',
                                      triggered=self.save_displayed_data)
        # ---- TEMP !!! ------- -> move to the right side (add stretch)
        # NOTE(review): these two buttons use toggled= while the others use
        # triggered= - confirm the asymmetry is intentional.
        reload_metadata = create_toolbutton(self, icon='reload.svg',
                                            tip='Reload metadata',
                                            toggled=self.main.
                                            ss_reaload_worker)
        autoslide = create_toolbutton(self, icon='play.svg',
                                      tip='Autoslide',
                                      toggled=self.ss_autoslide_worker)
        # --------------
        return (take_screenshot, show_grid, autoscale, save_data,
                reload_metadata, autoslide)
# ----- Key bindings -----
    def set_plot_update(self):
        # Mark the pending plot redraw as finished so the key handlers in
        # on_key_press may process data-moving operations again.
        self.plot_update_done = True
def check_event_timer(self, time):
if time - self.event_time < .1:
return False
else:
return True
    def set_event_timer(self):
        # Record the wall-clock time of the last processed input event
        # (compared against in check_event_timer to debounce key handling).
        self.event_time = time()
    def on_key_press(self, event):
        """Handle vispy key presses.

        Data-moving keys (arrows, q, a) scale/shift/zoom the plots, guarded
        by plot_update_done and a 100 ms debounce; anything else is
        forwarded through the input_recieved signal.
        """
        if event.handled:
            return
        modifiers = event.modifiers
        # NOTE(review): the list is lower-case while the checks below use
        # 'Up'/'Down'/... - this relies on vispy Key comparison being
        # case-insensitive; confirm.
        plot_data_operators = ['up', 'down', 'left', 'right', 'q', 'a']
        # TODO: there should be a key mapper in the future! - python dictionary
        if event.type == 'key_press':
            if (event.key in plot_data_operators
                    and self.plot_update_done
                    and self.check_event_timer(time())):
                if event.key == 'Up':
                    self.scale_plot_data(True)
                if event.key == 'Down':
                    self.scale_plot_data(False)
                # These operations require data pull, introduced a timer
                # NOTE: we now know that rendering of big data takes time
                if event.key == 'Left':
                    self.plot_update_done = False
                    if 'shift' in modifiers:  # Partial shift
                        self.shift_plot_data(False, 0.5)
                    else:
                        self.shift_plot_data(False)
                if event.key == 'Right':
                    self.plot_update_done = False
                    if 'shift' in modifiers:  # Partial shift
                        self.shift_plot_data(True, 0.5)
                    else:
                        self.shift_plot_data(True)
                if event.key == 'q':
                    self.plot_update_done = False
                    self.change_time_span(True)
                if event.key == 'a':
                    self.plot_update_done = False
                    self.change_time_span(False)
                self.set_event_timer()
                event.handled = True
            else:
                self.input_recieved.emit(event)
        else:
            event.handled = False
def on_key_release(self, event):
if event.handled:
return
if event.type == 'key_release':
self.input_recieved.emit(event)
event.handled = True
else:
event.handled = False
# ----- Highlight / measurement mode , canvas behavior -----
def set_highlight_mode(self, event):
if event.type not in ('key_press', 'key_release'):
return
if event.key not in ('shift', 'control'):
return
if event.type == 'key_press' and event.key == 'shift':
self.highlight_mode = True
self.highlight_rec.visible = True
elif event.type == 'key_press' and event.key == 'control':
self.measurement_mode = True
self.crosshair.visible = True
self.marker.visible = True
self.xaxis.visible = True
self.yaxis.visible = True
elif event.type == 'key_release' and event.key == 'shift':
self.highlight_mode = False
self.highlight_rec.visible = False
elif event.type == 'key_release' and event.key == 'control':
self.measurement_mode = False
self.crosshair.visible = False
self.marker.visible = False
self.xaxis.visible = False
self.yaxis.visible = False
self.measure_line.visible = False
self.describe_text.visible = False
def on_mouse_move(self, event):
if 1 in event.buttons or 2 in event.buttons and not event.modifiers:
self.subview_changed.emit()
# Get position relative to zoom
pos = event.pos[:2]
w = self.signal_view.width
h = self.signal_view.height
rel_w_pos = pos[0] / w
# TODO: flip Vispy axis
rel_h_pos = (h-pos[1]) / h
rect = self.camera.rect
self.rect_rel_w_pos = rect.left + (rel_w_pos * rect.width)
self.rect_rel_h_pos = rect.bottom + (rel_h_pos * rect.height)
# Determine the signal plot
rows = self.visible_channels.get_row_count()
cols = self.visible_channels.get_col_count()
sig_w_pos = self.rect_rel_w_pos * cols
sig_h_pos = self.rect_rel_h_pos * rows
for pc in self.get_plot_containers():
if ((pc.plot_position[0]
< sig_w_pos
< pc.plot_position[0]+1)
and (pc.plot_position[1]
< sig_h_pos
< pc.plot_position[1]+1)):
self.curr_pc = pc
break
# ??? Instead of modes use event.modifiers???
if self.highlight_mode:
self.highlight_signal(self.curr_pc)
if self.measurement_mode:
self.crosshair.set_data([self.rect_rel_w_pos,
self.rect_rel_h_pos])
n_channels = self.visible_channels.get_row_count()
# Get the location of data point
s_y = self.curr_pc.ufact*self.curr_pc.scale_factor
t_y = ((-np.nanmean(self.curr_pc.data)
* self.curr_pc.ufact
* self.curr_pc.scale_factor)
+ ((0.5+self.curr_pc.plot_position[1]) / n_channels))
data_pos = self.curr_pc.data[int(self.rect_rel_w_pos
* len(self.curr_pc.data))]
data_pos *= s_y
data_pos += t_y
self.marker.set_data(np.array([[self.rect_rel_w_pos, data_pos]]))
# TODO: determine margins
# Axes
t_y = (self.curr_pc.plot_position[1] / n_channels)
y_margin = 0
self.xaxis.pos = [[rect.left,
t_y + y_margin],
[rect.left+(rect.width*self.x_tick_spacing),
t_y + y_margin]]
rel_diff = (rect.right - rect.left) * np.diff(pc.uutc_ss)
self.xaxis.domain = tuple([0, rel_diff/1000000])
s = [1/self.x_tick_spacing, 1]
t = [rect.left-rect.left*s[0], 0]
self.xaxis.transform = scene.transforms.STTransform(s, t)
x_margin = 0
self.yaxis.pos = [[rect.left + x_margin,
t_y],
[rect.left + x_margin,
t_y + ((1/n_channels)*self.y_tick_spacing)]]
s = [1, 1/self.y_tick_spacing]
t = [0, t_y-t_y*s[1]]
self.yaxis.transform = scene.transforms.STTransform(s, t)
lpos = self.measure_line.pos
if lpos is not None:
fixed = lpos[0]
right_angle = np.array([fixed[0], data_pos])
moving = np.array([self.rect_rel_w_pos, data_pos])
whole_line = np.vstack([fixed, right_angle, moving])
self.measure_line.set_data(pos=whole_line)
# Time
max_step = 1/self.curr_pc.fsamp
time_dist = moving[0]-fixed[0]
time_dist *= np.diff(self.curr_pc.uutc_ss)[0] / 1e6
time_dist -= time_dist % max_step
oround = int(np.ceil((np.log10(self.curr_pc.fsamp))))
time_str = format(time_dist, '.'+str(oround)+'f')+' s'
time_str_pos = moving.copy()
# Amplitude
max_step = self.curr_pc.ufact
| |
"Methods for scraping comic book covers and metadata from htpps://www.comics.org"
import datetime
import random
import re
import urllib.request
from contextlib import closing
from functools import reduce
from os import path
from re import search
from time import sleep
from typing import List, Optional, Union
import jsonlines
import pandas as pd
from bs4 import BeautifulSoup
from pandas import DataFrame
from requests import get
from requests.exceptions import RequestException
# gloabl vals
URL = "https://www.comics.org"
def read_jsonl(path: str) -> List[dict]:
    """
    Read a jsonlines file and return its records as a list of dicts.
    """
    with jsonlines.open(path, mode="r") as reader:
        return [record for record in reader]
def log_error(e):
    """
    Report an error by printing it.

    Kept as a single seam so error reporting can later be routed to a real
    logger without touching the call sites.
    """
    print(e)
def simple_get(url: str) -> Union[bytes, None]:
    """
    Attempt to get the content at `url` by making an HTTP GET request.

    Returns the raw body bytes when the response is a 200 HTML/XML page,
    otherwise None. Request errors are logged via log_error and also
    yield None.

    Fixes: resp.headers["Content-Type"] raised KeyError when the server
    omitted the header (now .get with a default); the dead
    `content_type is not None` check and the `.find(...) > -1` idiom are
    replaced with a plain `in` test; the docstring no longer claims text
    is returned (resp.content is bytes).
    """

    def is_good_response(resp):
        """
        Return True if the response looks like an HTML/XML page.
        """
        content_type = resp.headers.get("Content-Type", "").lower()
        return resp.status_code == 200 and "html" in content_type

    try:
        with closing(get(url, stream=True)) as resp:
            if is_good_response(resp):
                return resp.content
            return None
    except RequestException as e:
        log_error("Error during requests to {0} : {1}".format(url, str(e)))
        return None
# TODO: deprecate this...
def transform_simple_get_html(raw_html: Optional[bytes]) -> BeautifulSoup:
    """
    Parse the raw HTML bytes of a GET response into a BeautifulSoup tree.

    NOTE(review): a None argument (failed simple_get) will error inside
    BeautifulSoup - callers should check for None first.
    """
    soup = BeautifulSoup(raw_html, "html.parser")
    return soup
def get_soup(url: str) -> BeautifulSoup:
    """
    Fetch `url` and return a BeautifulSoup tree for parsing its HTML.

    NOTE(review): simple_get may return None on failure, which would error
    inside BeautifulSoup - confirm callers only use reachable pages.
    """
    raw_html = simple_get(url)
    soup = BeautifulSoup(raw_html, "html.parser")
    return soup
# TODO: rename this method
def cover_gallery_pages(cover_gallery_soup: BeautifulSoup) -> int:
    """
    Return the number of pages in the cover gallery.

    The page count is the largest numeric label among the gallery's
    pagination buttons; a gallery with no pagination has one page.
    Fix: the filter/empty-list/else chain is collapsed into a single
    comprehension with max(..., default=1).
    """
    page_numbers = [
        int(label.contents[0])
        for label in cover_gallery_soup.find_all(
            "a", {"class": "btn btn-default btn-sm"}
        )
        if label.contents[0].isdigit()
    ]
    return max(page_numbers, default=1)
def get_issue_title(issue_soup: BeautifulSoup) -> str:
    """
    Return the issue title from the page's <title> tag.

    The raw title looks like "Site :: Series :: Issue"; the last segment
    is kept, newlines stripped, and '/' replaced with '|' so the result
    is usable as a file name.
    """
    raw_title = issue_soup.find("title").contents[0]
    cleaned = raw_title.replace("\n", "").strip()
    issue_part = cleaned.split(" :: ")[-1]
    return issue_part.replace("/", "|")
# TODO: rename name -> key
def get_issue_metadata(issue_soup: BeautifulSoup, name: str) -> str:
    """
    Return the text value of the <dd> element with id `name`, or "".

    The issue_indicia_publisher and issue_brand fields wrap their value in
    a link and are unwrapped via find("a").
    Fixes: the bitwise `&` on boolean comparisons is replaced with a
    membership test, and the bare `except:` is narrowed to the exceptions
    the link lookup can actually raise.
    """
    entries = issue_soup.find_all("dd", id=name)
    if not entries:
        return ""
    if name not in ("issue_indicia_publisher", "issue_brand"):
        return entries[0].contents[0].strip()
    try:
        return entries[0].find("a").contents[0]
    except (AttributeError, IndexError):
        # find("a") returned None, or the link had no contents.
        return ""
def get_all_issue_metadata(issue_soup) -> dict:
    """
    Collect all issue-level metadata fields from an issue page.

    Returns a dict with the plain <dd> fields (key == element id), the
    joined indexer notes, and the synopsis from the credits section.
    Fixes: twelve copy-pasted assignments collapsed into a loop; the bare
    `except:` narrowed to the exceptions the lookups can raise.
    """
    d = dict()
    # Plain fields: the dict key equals the <dd> element id.
    simple_fields = (
        "on_sale_date",
        "indicia_frequency",
        "issue_indicia_publisher",
        "issue_brand",
        "issue_price",
        "issue_pages",
        "format_color",
        "format_dimensions",
        "format_paper_stock",
        "format_binding",
        "format_publishing_format",
        "rating",
    )
    for field in simple_fields:
        d[field] = get_issue_metadata(issue_soup, field)
    d["indexer_notes"] = " | ".join(
        [x.contents[0].replace("\n", "").strip() for x in issue_soup.find_all("p")]
    )
    all_issue_credits = list(
        zip(
            issue_soup.find_all("span", {"class": "credit_label"}),
            issue_soup.find_all("span", {"class": "credit_value"}),
        )
    )
    try:
        d["synopsis"] = " | ".join(
            list(
                filter(
                    lambda x: x != "",
                    [
                        x[1].contents[0] if x[0].contents[0] == "Synopsis" else ""
                        for x in all_issue_credits
                    ],
                )
            )
        )
    except (AttributeError, IndexError, TypeError):
        # Missing/odd tag contents, or non-string values in the join.
        d["synopsis"] = ""
    return d
def get_issue_cover_metadata(issue_soup):
    """
    Extract the credits from the cover section of an issue page.

    Returns a dict keyed "cover_<credit label>" (editing, script, pencils,
    inks, colors, letters, characters, ...); the reprints and awards
    entries are dropped.
    """
    d = dict()
    cover_soup = issue_soup.find("div", {"class": "cover"})
    labels = cover_soup.find_all("span", {"class": "credit_label"})
    values = cover_soup.find_all("span", {"class": "credit_value"})
    for label, value in zip(labels, values):
        d["cover_{}".format(label.contents[0].lower())] = value.contents[0]
    d.pop("cover_reprints", None)
    d.pop("cover_awards", None)
    return d
# get image divs from cover page
def get_cover_credits_from_cover_page(cover_img_soup, metadata) -> dict:
    """
    Collect per-variant cover credits from a cover-gallery page.

    For every non-reprint, non-newsstand variant listed on the page, the
    variant's own issue page is fetched and its cover credits, image URL
    and target save path are recorded under
    result["covers"][<variant name>].

    `metadata` must provide "series_name", "title" and "on_sale_date"
    (used to build the save-to file name).
    """
    cover_divs = cover_img_soup.find_all("div", {"class": "issue_covers"})[0].find_all(
        "div"
    )
    # go into variant url and pull metadata
    cover_images = [x.find_all("a")[0].contents[0]["src"] for x in cover_divs]
    cover_names = [
        get_variant_cover_name(x.find_all("a")[1].contents[0]) for x in cover_divs
    ]
    cover_urls = [URL + x.find_all("a")[0]["href"] for x in cover_divs]
    covers = list((zip(cover_names, cover_urls, cover_images)))
    # Index the (url, image) pairs by variant name.
    covers_dict: dict = dict()
    for cover in covers:
        name = cover[0]
        url = cover[1]
        image = cover[2]
        covers_dict[name] = {}
        covers_dict[name]["cover_url"] = url
        covers_dict[name]["image_url"] = image
    issue_cover_credits: dict = dict()
    issue_cover_credits["covers"] = {}
    for variant_name in covers_dict:
        # Skip reprints and newsstand/Canadian duplicates.
        if is_reprinting(variant_name) | is_newsstand_or_canadian(variant_name):
            pass
        else:
            issue_url = covers_dict[variant_name]["cover_url"]
            image_url = covers_dict[variant_name]["image_url"]
            # get issue page
            issue_html = simple_get(issue_url)
            issue_soup = transform_simple_get_html(issue_html)
            cover = issue_soup.find("div", {"class": "cover"})
            cover_credits_list = list(
                zip(
                    [
                        x.contents[0]
                        for x in cover.find_all("span", {"class": "credit_label"})
                    ],
                    [
                        x.contents[0]
                        for x in cover.find_all("span", {"class": "credit_value"})
                    ],
                )
            )
            issue_title = get_issue_title(issue_soup)
            issue_title_variant = get_variant_cover_name(issue_title)
            cover_credits: dict = {
                "cover_{}".format(x[0].lower()): x[1] for x in cover_credits_list
            }
            cover_credits.pop("cover_reprints", None)
            cover_credits.pop("cover_awards", None)
            save_as = "{}: {} {} ({})".format(
                metadata["series_name"],
                strip_brackets(metadata["title"]),
                variant_name,
                metadata["on_sale_date"],
            ).replace("/", "|")
            # Example of save_as...
            # Aquaman: Aquaman #2 Direct (1985-11-19)
            save_to = "./covers/" + save_as + ".jpg"
            # cover_credits["cover_image_file_name"] = save_as
            cover_credits["save_to"] = save_to
            cover_credits["image_url"] = image_url
            issue_cover_credits["covers"][issue_title_variant] = cover_credits
    return issue_cover_credits
def get_non_redundant_hrefs_from_cover_gallery(cover_gallery_soup):
    """
    Return (bracketed-label, href) pairs for the gallery's issue links,
    excluding cover links and redundant editions (reprints, newsstand,
    variants, ...).
    """
    candidates = []
    for anchor in cover_gallery_soup.find_all("a", href=True):
        href = anchor["href"]
        if "/issue/" in href and "/cover/" not in href:
            candidates.append((get_brackets(anchor.get_text()), href))
    return [pair for pair in candidates if not is_redundant(pair[0])]
def get_brackets(title: str) -> Union[str, None]:
    """
    Return the first '[...]' bracketed substring of `title`, or None
    when the title contains no bracketed text.
    """
    match = re.search(r"\[(.*?)\]", title)
    return match.group() if match is not None else None
def strip_brackets(title: str) -> str:
    """
    Return `title` without its first bracketed chunk and without any
    '--' suffix.

    Fix: the '--' truncation logic was duplicated in both branches; it is
    now applied once after the optional bracket removal (same results).
    """
    brackets = get_brackets(title)
    if brackets is not None:
        # Keep everything before the first bracketed chunk.
        title = title.split(brackets)[0].strip()
    if "--" in title:
        title = title.split("--")[0].strip()
    return title
def is_reprinting(title: str) -> bool:
    """
    Return True when `title` marks a 2nd through 10th printing.

    Fix: nine intermediate boolean flags combined with bitwise `|` are
    replaced by a single any() over the marker substrings; matching stays
    case-sensitive, exactly as before.
    """
    markers = (
        "2nd Printing", "Second Printing",
        "3rd Printing", "Third Printing",
        "4th Printing", "Fourth Printing",
        "5th Printing", "Fifth Printing",
        "6th Printing",
        "7th Printing",
        "8th Printing",
        "9th Printing",
        "10th Printing",
    )
    return any(marker in title for marker in markers)
def is_newsstand_or_canadian(title) -> bool:
    """
    Return True when the issue is a duplicate Newsstand, Canadian,
    Whitman, or British edition.

    Fix: lowercases the title once instead of four times and replaces the
    bitwise `|` chain with any(). The trailing space in "british " is
    intentional and preserved from the original.
    """
    lowered = title.lower()
    markers = ("newsstand", "canadian", "whitman", "british ")
    return any(marker in lowered for marker in markers)
def is_variant(title) -> bool:
    """
    Return True when the title advertises a variant cover
    (case-insensitive).
    """
    lowered = title.lower()
    return "variant" in lowered
def is_redundant(title: Optional[str]) -> bool:
    """
    Return True when the issue duplicates a direct-sale issue: a reprint,
    a newsstand/Canadian edition, a variant, or a non-direct cover entry.

    Fixes: the annotation now reflects that None is an accepted value
    (returns False); bitwise `&`/`|` on booleans replaced with and/or.
    """
    if title is None:
        return False
    lowered = title.lower()
    return (
        is_reprinting(title)
        or is_newsstand_or_canadian(title)
        or is_variant(title)
        or ("cover" in lowered and "direct" not in lowered)
    )
def is_duplicate(title: str, on_sale_date: str, metadata_path: str) -> bool:
    """
    Return True when an issue with the same stripped title AND the same
    on-sale date already exists in the jsonlines metadata file.

    Fixes: reduce() over an empty titles list raised TypeError (no
    initializer) - any() handles the empty case and short-circuits;
    bitwise `&` on booleans replaced with a plain early return.
    """
    metadata = read_jsonl(metadata_path)
    df = pd.DataFrame(metadata)
    titles = list(df["title"].unique())
    stripped_title = strip_brackets(title)
    # check if title is duplicate
    title_is_duplicate = any(stripped_title in strip_brackets(t) for t in titles)
    if not title_is_duplicate:
        return False
    # check if the sale date matches as well
    on_sale_dates = df[df["title"].apply(strip_brackets) == stripped_title][
        "on_sale_date"
    ].values
    return on_sale_date in on_sale_dates
def get_variant_cover_name(cover_name: str) -> str:
    """
    Return the variant label from a cover title, or "Original" when the
    title carries no bracketed variant name.
    """
    bracketed = get_brackets(cover_name)
    if bracketed is None:
        return "Original"
    # Remove every square bracket, matching the original behavior.
    return bracketed.replace("[", "").replace("]", "")
def parse_series_from_publisher_page(publisher_soup: BeautifulSoup, series: str=None) -> DataFrame:
"""
Parse series table from publisher page.
"""
name = [
x.find("a").contents[0]
for x in publisher_soup.find_all("td", {"class": "name"})
]
href = [
x.find("a")["href"] for x in publisher_soup.find_all("td", {"class": "name"})
]
year = [x.contents[0] for x in publisher_soup.find_all("td", {"class": "year"})]
issue_count = [
x.contents[0] for x in publisher_soup.find_all("td", {"class": "issue_count"})
]
published = [
| |
import json
import logging
import re
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from . import DOCSURL, DS_URL_PREFIX, readers
# configure logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
# Map of file extension -> reader function used to load that dataset
# format into an AnnData object.
DEFAULT_READERS = {
    "loom": readers.read_loom_to_anndata,
    "rds": readers.read_seurat_to_anndata,
    "h5ad": readers.read_anndata_to_anndata,
    "hdf5": readers.read_10xhdf5_to_anndata,
    "h5": readers.read_10xhdf5_to_anndata,
    "tsv": readers.read_densetsv_to_anndata,
    "csv": readers.read_densecsv_to_anndata,
}
# Root directory where datasets are mounted for the analysis.
DATA_DIR = Path("/fastgenomics/data")
# Preferred column order for the datasets overview DataFrame; columns not
# listed here are appended after these in their original order.
DF_SORT_ORDER = [
    "title",
    "id",
    "organism",
    "tissue",
    "numberOfCells",
    "numberOfGenes",
    "path",
    "numberOfExpressionDataFiles",
    "expressionDataFileNames",
    "numberOfMetaDataFiles",
    "metaDataFileNames",
    "expressionDataFileInfos",
    "metaDataFileInfos",
]
def get_datasets_df(data_dir: Path = DATA_DIR) -> pd.DataFrame:
    """Constructs a :py:func:`pandas.DataFrame` from all available datasets.

    Parameters
    ----------
    data_dir : Path, optional
        Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR

    Returns
    -------
    pd.DataFrame
        A pandas DataFrame containing all available datasets

    Fixes: ``DataFrame.append`` (deprecated in pandas 1.4, removed in 2.0)
    replaced by collecting the per-dataset records and building the frame
    once; the side-effect list comprehension used to reorder columns is
    now a comprehension over the remaining names.
    """
    ds_paths = get_ds_paths(data_dir=data_dir)
    records = []
    for ds_path in ds_paths:
        with open(ds_path / "dataset_info.json") as f:
            info = json.load(f)
        info["path"] = str(ds_path)
        info["numberOfExpressionDataFiles"] = len(info["expressionDataFileInfos"])
        info["numberOfMetaDataFiles"] = len(info["metaDataFileInfos"])
        # The schema version is an implementation detail; drop it.
        info.pop("schemaVersion", None)
        records.append(info)
    ds_df = pd.DataFrame(records)
    # Order the known columns first, keep any extras in their original order.
    col_names = ds_df.columns.values.tolist()
    col_names_sorted = [name for name in DF_SORT_ORDER if name in col_names]
    col_names_sorted.extend(
        name for name in col_names if name not in DF_SORT_ORDER
    )
    ds_df = ds_df[col_names_sorted]
    # Format types
    ds_df = ds_df.astype(
        {
            "numberOfCells": "int32",
            "numberOfGenes": "int32",
            "numberOfExpressionDataFiles": "int32",
            "numberOfMetaDataFiles": "int32",
        }
    )
    return ds_df
def ds_info(
    ds: Optional[str] = None,
    pretty: bool = None,
    output: bool = None,
    data_dir: Path = DATA_DIR,
) -> pd.DataFrame:
    """Get information on all available datasets in this analysis.

    Parameters
    ----------
    ds : Optional[str], optional
        A single dataset ID or dataset title. If set, only this dataset will be displayed. Recommended to use with ``pretty``, by default None
    pretty : bool, optional
        Whether to display some nicely formatted output, by default True
    output : bool, optional
        Whether to return a DataFrame or not, by default True
    data_dir : Path, optional
        Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR

    Returns
    -------
    pd.DataFrame
        A pandas DataFrame containing all, or a single dataset (depends on ``ds``)
    """
    # The defaults are coupled: selecting a single dataset implies pretty
    # display; listing all datasets implies returning the frame.
    if pretty is None:
        pretty = ds is not None
    if output is None:
        output = ds is None
    if not pretty and not output:
        logger.warning(
            'You have set "pretty" and "output" to false. Hence, this function will do/return nothing.'
        )
        return
    try:
        ds_df = get_datasets_df(data_dir=data_dir)
    except NoDatasetsError as err:
        # No datasets attached: warn and return an empty frame instead of raising.
        logger.warning(err)
        return pd.DataFrame()

    def add_url(title, id):
        # Render the title as an HTML link to the dataset's detail page.
        return f'<a href="{DS_URL_PREFIX}{id}" target="_blank">{title}</a>'

    def disp_pretty_df(df, index=True, header=True):
        # Display the frame as HTML via IPython; outside a notebook this
        # degrades to a warning.
        try:
            from IPython.display import display, Markdown

            df_html = df.to_html(
                render_links=True,
                escape=False,
                header=header,
                index=index,
                justify="center",
            )
            display(Markdown(df_html))
        except:
            # NOTE(review): bare except also swallows errors unrelated to a
            # missing IPython (e.g. to_html failures) — consider ImportError.
            logger.warning(
                "IPython not available. Pretty printing only works in Jupyter Notebooks."
            )

    if ds:
        # Single-dataset view: flatten the file-info lists into display names.
        single_ds_df = select_ds_id(ds, df=ds_df)
        single_ds_df["expressionDataFileNames"] = ", ".join(
            [
                expr["name"]
                for expr in single_ds_df.loc[0, "expressionDataFileInfos"]
            ]
        )
        single_ds_df["metaDataFileNames"] = ", ".join(
            [expr["name"] for expr in single_ds_df.loc[0, "metaDataFileInfos"]]
        )
        # Sort columns
        single_col_names = single_ds_df.columns.values.tolist()
        single_col_names_sorted = [
            name for name in DF_SORT_ORDER if name in single_col_names
        ]
        [
            single_col_names.remove(name)
            for name in DF_SORT_ORDER
            if name in single_col_names
        ]
        single_col_names_sorted.extend(single_col_names)
        single_ds_df = single_ds_df[single_col_names_sorted]
        if pretty:
            # NOTE(review): pretty_df aliases single_ds_df, so the "<br>"
            # join below also shows up in the frame returned when
            # ``output`` is set — confirm this is intended.
            pretty_df = single_ds_df
            pretty_df["expressionDataFileNames"] = "<br>".join(
                [
                    expr["name"]
                    for expr in pretty_df.loc[0, "expressionDataFileInfos"]
                ]
            )
            pretty_df["metaDataFileNames"] = ", ".join(
                [expr["name"] for expr in pretty_df.loc[0, "metaDataFileInfos"]]
            )
            # Hide empty cells and the raw file-info columns in the display.
            empty_cols = [
                col for col in pretty_df.columns if pretty_df.loc[0, col] == ""
            ]
            pretty_df = pretty_df.drop(
                labels=["expressionDataFileInfos", "metaDataFileInfos"]
                + empty_cols,
                axis=1,
                errors="ignore",
            )
            pretty_df.loc[0, "title"] = pretty_df.apply(
                lambda x: add_url(x.title, x.id), axis=1
            ).squeeze()
            # Transposed so the single dataset reads as a key/value table.
            disp_pretty_df(pretty_df.T, header=False)
        if output:
            return single_ds_df
    else:
        if pretty:
            # Overview table: drop verbose columns before display.
            pretty_df = ds_df.drop(
                labels=[
                    "description",
                    "license",
                    "preprocessing",
                    "citation",
                    "webLink",
                    "file",
                    "expressionDataFileInfos",
                    "metaDataFileInfos",
                ],
                axis=1,
                errors="ignore",
            )
            pretty_df["title"] = pretty_df.apply(
                lambda x: add_url(x.title, x.id), axis=1
            )
            disp_pretty_df(pretty_df)
        if output:
            return ds_df
def load_data(
    ds: Optional[str] = None,
    data_dir: Path = DATA_DIR,
    additional_readers: Optional[dict] = None,
    expression_file: Optional[str] = None,
    as_format: Optional[str] = None,
):
    """This function loads a single dataset into an AnnData object.

    If there are multiple datasets available you need to specify one by setting
    ``ds`` to a dataset `id` or dataset `title`.
    To get an overview of available datasets use :py:func:`ds_info`

    Parameters
    ----------
    ds : str, optional
        A single dataset ID or dataset title to select a dataset to be loaded.
        If only one dataset is available you do not need to set this parameter, by default None
    data_dir : Path, optional
        Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
    additional_readers : dict, optional
        Used to specify your own readers for the specific data set format.
        Dict key needs to be file extension (e.g., h5ad), dict value a function.
        Still experimental, by default None
    expression_file: str, Optional
        The name of the expression file to load.
        Only needed when there are multiple expression files in a dataset.
    as_format: str, optional
        Specifies which reader should be used for this dataset. Overwrites the auto-detection
        of the format. Possible parameters are the file extensions of our supported data
        formats: ``h5ad``, ``h5``, ``hdf5``, ``loom``, ``rds``, ``csv``, ``tsv``.

    Returns
    -------
    AnnData Object
        A single AnnData object with dataset id in `obs` and all dataset metadata in `uns`

    Examples
    --------
    To use a custom reader for files with the extension ".fg", you have to define a function first:

    >>> def my_loader(file):
    ...     anndata = magic_file_loading(file)
    ...     return anndata

    You can then use this reader like this:

    >>> fgread.load_data("my_dataset", additional_readers={"fg": my_loader})
    """
    # A mutable default ({}) would be shared across calls; normalize here.
    if additional_readers is None:
        additional_readers = {}
    # Custom readers take precedence over the built-in ones.
    reader_map = {**DEFAULT_READERS, **additional_readers}
    if ds:
        single_df = select_ds_id(ds, df=get_datasets_df(data_dir=data_dir))
    else:
        single_df = get_datasets_df(data_dir=data_dir)
        if len(single_df) > 1:
            raise RuntimeError(
                "There is more than one dataset available in this analysis. "
                "Please select one by its ID or title. "
                'You can list available datasets by using "fgread.ds_info()".'
            )
    exp_count = single_df.loc[0, "numberOfExpressionDataFiles"]
    meta_count = single_df.loc[0, "numberOfMetaDataFiles"]
    if exp_count == 0:
        raise TypeError(
            f"There is no expression data available in this data set.\n"
            f"Metadata files: {meta_count}."
        )
    exp_files = [
        exp["name"] for exp in single_df.loc[0, "expressionDataFileInfos"]
    ]
    # Resolve which expression file to load.
    if expression_file:
        if expression_file not in exp_files:
            raise KeyError(
                f'Expression file "{expression_file}" not found in dataset. '
                f"Available expression files are: {exp_files}."
            )
        fname = expression_file
    elif exp_count == 1:
        fname = single_df.loc[0, "expressionDataFileInfos"][0]["name"]
    else:
        raise TypeError(
            f"There are {exp_count} expression data files in this dataset. "
            'Please specify which one you want to load using the parameter "expression_file". '
            f"Available expression files are: {exp_files}."
        )
    title = single_df.loc[0, "title"]
    ds_id = single_df.loc[0, "id"]
    path = single_df.loc[0, "path"]
    metadata_dict = single_df.loc[0].to_dict()
    # Determine the file format (explicit override or file suffix).
    if as_format:
        fmt = as_format.lower()
    else:
        # str.rsplit never raises ValueError; a missing "." yields a single
        # element, so the old ``except ValueError`` could never trigger and
        # a suffix-less name crashed with IndexError instead.  Check the
        # split result explicitly and raise the intended ValueError.
        name_parts = fname.rsplit(".", 1)
        if len(name_parts) < 2:
            raise ValueError(
                f'The expression file "{fname}" has no valid file suffix.'
            )
        fmt = name_parts[1].lower()
        logger.info(f'Expression file "{fname}" with format "{fmt}".')
    if fmt not in reader_map:
        raise KeyError(
            f'Unsupported file format "{fmt}", use one of {list(reader_map)}. '
            f'You can force the usage of a specific reader by setting "as_format" to a supported format. '
            f"In addition, you can also implement your own reading function. See {DOCSURL} for more information."
        )
    if meta_count != 0:
        logger.info(
            f"There are {meta_count} metadata files in this dataset. "
            "This data will not be integrated into the anndata object."
        )
    logger.info(
        f'Loading file "{fname}" from dataset "{title}" in format "{fmt}" from directory "{path}"...\n'
    )
    adata = reader_map[fmt](Path(path) / fname)
    # Attach provenance so downstream analyses can trace cells to a dataset.
    adata.uns["ds_metadata"] = {ds_id: {"title": title}}
    adata.uns["ds_metadata_raw"] = {ds_id: str(metadata_dict)}
    adata.obs["fg_id"] = ds_id
    n_genes = adata.shape[1]
    n_cells = adata.shape[0]
    logger.info(
        f'Loaded dataset "{title}" with {n_cells} cells and {n_genes} genes.\n'
        f"==================================================================\n"
    )
    return adata
def select_ds_id(ds: str, df: pd.DataFrame = None) -> pd.DataFrame:
"""Select a single dataset from a pandas DataFrame by its ID or title
Parameters
----------
ds : str
A single dataset ID or dataset title for selection
df : pd.DataFrame, optional
A pandas DataFrame from which a single entry is selected, by default None
Returns
-------
pd.DataFrame
A pandas DataFrame with only the selected dataset.
"""
single_df = df.loc[(df["id"] == ds) | (df["title"] == ds)].reset_index(
drop=True
)
len_df = len(single_df)
if len_df == 1:
return single_df.copy()
elif len_df == 0:
add_err = ""
if not ds.startswith("dataset-"):
add_err = " Please note that dataset titles can be changed by the owner. To be safe, you might want to consider dataset IDs instead."
raise KeyError("Your selection matches no datasets." + add_err)
else:
display(single_df)
raise KeyError(
f"Your | |
inject=False):
def __init__(self, **kwargs) -> None:
    """Initialise every declared field of the struct.

    Each field listed in the struct type's ``fields`` mapping is taken from
    ``kwargs`` if present, otherwise filled with its type's default value.

    Raises
    ------
    TypeError
        If ``kwargs`` contains names that are not declared fields.
    """
    super().__init__()
    st = to_type(self)
    # Enter the struct's own scope so field defaults resolve in context.
    with st.enter():
        for k, t in st.fields.items():
            if k not in kwargs:
                v = default(t)
            else:
                # pop() so leftovers below are exactly the unknown names.
                v = kwargs.pop(k)
            setattr(self, k, v)
    if kwargs:
        raise TypeError('unrecognized fields: {}'.format(', '.join(kwargs)))
def __iter__(self) -> Sequence[Any]:
    """Yield the names of this struct's slots (i.e. its field names)."""
    return (slot_name for slot_name in self.__slots__)
def __hash__(self) -> int:
    """Hash over (field name, field value) pairs, in field order."""
    pairs = [(name, getattr(self, name)) for name in self]
    return hash(tuple(pairs))
def __eq__(self, other) -> bool:
    """Structs compare equal only with the exact same type, the same slot
    layout, and equal values for every field."""
    if type(self) != type(other):
        return False
    if self.__slots__ != other.__slots__:
        return False
    # Short-circuits on the first differing field value.
    return all(
        getattr(self, name) == getattr(other, name) for name in self
    )
def __fmt__(self, fieldfunc: Callable[[Any], str]) -> str:
    """Render the struct as '<ClassName> { field: value, ... }'.

    ``fieldfunc`` (str or repr) controls how each field value is shown;
    fields whose names start with '_' are skipped.
    """
    rendered = [
        ' {}: {}'.format(name, format_value(getattr(self, name), fieldfunc, 2))
        for name in self
        if not name.startswith('_')
    ]
    body = ',\n'.join(rendered)
    # Empty structs collapse to '<ClassName> {}'.
    if not body:
        return '{} {{}}'.format(class_name(self))
    return '{} {{\n{}\n}}'.format(class_name(self), body)
def __str__(self) -> str:
    # Human-readable form: field values rendered with str().
    return self.__fmt__(str)

def __repr__(self) -> str:
    # Debug form: field values rendered with repr().
    return self.__fmt__(repr)
class Union(Struct, metaclass=MetaStruct, union=True, inject=False):
    """A C-style union: all fields are views over the same underlying bytes.

    Assigning to one field serializes its new value and re-parses every
    other field from those bytes, keeping all views consistent.
    """

    def __setattr__(self, name, value) -> None:
        super().__setattr__(name, value)
        io = BytesIO()
        t = to_type(self)
        try:
            emit(t.fields[name], value, io=io)
        except:
            # Best effort: if the new value cannot be serialized, keep the
            # assignment but leave the sibling fields untouched.
            return
        for fname, ftype in t.fields.items():
            if fname == name:
                continue
            # Re-read each sibling field from the start of the same bytes.
            io.seek(0)
            try:
                fvalue = parse(ftype, io)
                super().__setattr__(fname, fvalue)
            except:
                # Fields that cannot be re-parsed from the new bytes simply
                # keep their previous value (deliberate best-effort).
                pass
class Tuple(Type):
    """A heterogeneous fixed-length sequence: each element has its own type.

    Elements are parsed/emitted in declaration order; parsed values are
    returned as plain Python tuples.
    """
    __slots__ = ('types',)

    def __init__(self, types: Sequence[Type]) -> None:
        # One element type (or spec convertible via to_type) per position.
        self.types = types

    def parse(self, io: IO, context: Context) -> Sequence[Any]:
        """Parse each element type in order; return the values as a tuple."""
        value = []
        for i, type in enumerate(self.types):
            type = to_type(type, i)
            with context.enter(i, type):
                value.append(parse(type, io, context))
        return tuple(value)

    def emit(self, value: Sequence[Any], io: IO, context: Context) -> None:
        """Emit each element of ``value`` with its corresponding type."""
        for i, (type, val) in enumerate(zip(self.types, value)):
            type = to_type(type, i)
            with context.enter(i, type):
                emit(type, val, io, context)

    def sizeof(self, value: O[Sequence[Any]], context: Context) -> O[int]:
        """Total size: the sum of all element sizes.

        A None ``value`` sizes each element without a concrete value.
        """
        l = []
        if value is None:
            value = [None] * len(self.types)
        for i, (type, val) in enumerate(zip(self.types, value)):
            type = to_type(type, i)
            with context.enter(i, type):
                size = _sizeof(type, val, context)
                l.append(size)
        return ceil_sizes(add_sizes(*l))

    def offsetof(self, path: Sequence[U[int, str]], value: O[T], context: Context) -> O[int]:
        """Offset of element ``path[0]``: sum of the sizes of all elements
        before it, plus the offset of the remaining path inside it."""
        idx, fpath = path[0], path[1:]
        if not isinstance(idx, int):
            raise ValueError('not a sequence index: {}'.format(idx))
        if idx >= len(self.types):
            raise ValueError('sequence index out of bounds: {}'.format(idx))
        l = []
        if value is None:
            value = [None] * len(self.types)
        for i, (type, val) in enumerate(zip(self.types, value)):
            type = to_type(type, i)
            with context.enter(i, type):
                if i == idx:
                    # Descend into the target element for the rest of the path.
                    offset = _offsetof(type, fpath, val, context)
                    l.append(offset)
                    break
                else:
                    size = _sizeof(type, val, context)
                    l.append(size)
        return ceil_sizes(add_sizes(*l))

    def default(self, context: Context) -> Sequence[Any]:
        """Tuple of each element type's default value."""
        value = []
        for i, type in enumerate(self.types):
            type = to_type(type, i)
            with context.enter(i, type):
                value.append(default(type, context))
        return tuple(value)

    def __repr__(self) -> str:
        return '<(' + ', '.join(repr(to_type(t)) for t in self.types) + ')>'
class Any(Type):
    """First-match union: tries each candidate type in declaration order.

    ``parse`` returns the value of the first candidate that parses
    successfully; ``emit`` writes with the first candidate that can
    serialize the value.  Because the matching candidate is unknown without
    data, ``sizeof``/``offsetof``/``default`` all return None.
    """
    # Proper 1-tuple: the previous bare string ('types') only worked by
    # accident because __slots__ accepts a single string.
    __slots__ = ('types',)

    def __init__(self, types: Sequence[Type]) -> None:
        self.types = types

    def parse(self, io: IO, context: Context) -> Any:
        """Return the first successful parse; on total failure raise an
        Error listing every candidate's exception."""
        errors = []
        types = []
        start = io.tell()
        for i, type in enumerate(self.types):
            # Every candidate parses from the same starting offset.
            io.seek(start, os.SEEK_SET)
            type = to_type(type, i)
            with context.enter(i, type):
                try:
                    return parse(type, io, context)
                except Exception as e:
                    # Unwrap framework errors to the underlying exception.
                    if isinstance(e, Error):
                        e = e.exception
                    types.append(type)
                    errors.append(e)
        raise Error(context, 'Failed to parse using any of the following:\n' + '\n'.join(
            ' - {!r} => {}: {}'.format(t, class_name(e), indent(str(e), 2))
            for (t, e) in zip(types, errors)
        ))

    def emit(self, value: Any, io: IO, context: Context) -> None:
        """Write ``value`` with the first candidate type that accepts it."""
        errors = []
        types = []
        start = io.tell()
        for i, type in enumerate(self.types):
            io.seek(start, os.SEEK_SET)
            type = to_type(type, i)
            with context.enter(i, type):
                try:
                    # BUG FIX: this previously passed the undefined name
                    # `val`, so every candidate failed with NameError and
                    # emit() always raised the combined Error below.
                    return emit(type, value, io, context)
                except Exception as e:
                    types.append(type)
                    errors.append(e)
        raise Error(context, 'Failed to emit using any of the following:\n' + '\n'.join(
            ' - {!r} => {}: {}'.format(t, class_name(e), indent(str(e), 2))
            for (t, e) in zip(types, errors)
        ))

    def sizeof(self, value: O[Any], context: Context) -> O[int]:
        # Unknown until a candidate matches.
        return None

    def offsetof(self, path: Sequence[U[int, str]], value: O[Any], context: Context) -> O[int]:
        # Unknown until a candidate matches.
        return None

    def default(self, context: Context) -> O[Any]:
        # No meaningful default across heterogeneous candidates.
        return None

    def __str__(self) -> str:
        return 'Any[' + ', '.join(format_value(to_type(t, i), str) for i, t in enumerate(self.types)) + ']'

    def __repr__(self) -> str:
        return '<Any[' + ', '.join(format_value(to_type(t, i), repr) for i, t in enumerate(self.types)) + ']>'
class Arr(Type, G[T]):
    """A homogeneous (or positionally-typed) array.

    Termination is governed by any combination of:
      * ``count``      -- fixed number of elements (may be a bound value),
      * ``stop_value`` -- sentinel element that ends the array (not returned),
      * ``separator``  -- byte string between consecutive elements,
    or, failing all of those, end-of-stream.
    ``type`` may also be a list, giving one element type per index.
    """
    __slots__ = ('type', 'count', 'stop_value', 'separator')

    def __init__(self, type: T, count: O[int] = None, stop_value: O[Any] = None, separator: O[bytes] = None) -> None:
        self.type = type
        self.count = count
        self.stop_value = stop_value
        self.separator = separator

    def parse(self, io: IO, context: Context) -> Sequence[T]:
        """Parse elements until count is reached, the stop value is seen,
        or the stream is exhausted."""
        value = []
        count = get_value(self.count, context)
        stop_value = get_value(self.stop_value, context)
        separator = get_value(self.separator, context)
        i = 0
        while count is None or i < count:
            # A list of types means per-index element types.
            if isinstance(self.type, list):
                type = to_type(self.type[i], i)
            else:
                type = to_type(self.type, i)
            with context.enter(i, type):
                eof = False
                pos = io.tell()
                if separator:
                    # Read bytes up to (and excluding) the next separator;
                    # the element is then parsed from that slice.
                    data = b''
                    while True:
                        b = io.read(1)
                        if not b:
                            eof = True
                            break
                        data += b
                        if data[-len(separator):] == separator:
                            data = data[:-len(separator)]
                            break
                    eio = data
                else:
                    eio = io
                try:
                    elem = parse(type, eio, context)
                except Exception:
                    # Check EOF.
                    # A parse failure exactly at end-of-stream ends the
                    # array; anything else is a genuine error (undo the
                    # probing read first).
                    if not eio or (io.tell() == pos and not io.read(1)):
                        break
                    io.seek(-1, os.SEEK_CUR)
                    raise
                if eof or elem == stop_value:
                    # The stop sentinel itself is not part of the result.
                    break
                value.append(elem)
            i += 1
        return value

    def emit(self, value: Sequence[T], io: IO, context: Context) -> None:
        """Emit all elements (appending the stop value, if any), writing the
        separator between consecutive elements."""
        # Propagate the actual length back into a bound count, if present.
        set_value(self.count, len(value), io, context)
        stop_value = get_value(self.stop_value, context)
        separator = get_value(self.separator, context)
        if stop_value is not None:
            value = value + [stop_value]
        start = io.tell()
        for i, elem in enumerate(value):
            if isinstance(self.type, list):
                type = to_type(self.type[i], i)
            else:
                type = to_type(self.type, i)
            with context.enter(i, type):
                emit(type, elem, io, context)
                if separator and i < len(value) - 1:
                    io.write(separator)

    def sizeof(self, value: O[Sequence[T]], context: Context) -> int:
        """Total size of all elements (plus stop value and separators);
        None when the element count cannot be determined."""
        if value is None:
            count = peek_value(self.count, context)
        else:
            count = len(value)
        stop_value = peek_value(self.stop_value, context)
        separator = peek_value(self.separator, context)
        if count is None:
            return None
        l = []
        for i in range(count):
            if isinstance(self.type, list):
                type = to_type(self.type[i], i)
            else:
                type = to_type(self.type, i)
            if value is not None:
                val = value[i]
            else:
                val = None
            size = _sizeof(type, val, context)
            l.append(size)
        if stop_value is not None:
            if isinstance(self.type, list):
                type = to_type(self.type[count], count)
            else:
                type = to_type(self.type, count)
            size = _sizeof(type, stop_value, context)
            l.append(size)
        if separator:
            # NOTE(review): with a stop value appended there are count
            # elements plus the sentinel, i.e. count separators — verify
            # (count - 1) is correct in that case.
            l.append(to_size((count - 1) * len(separator), context))
        return ceil_sizes(add_sizes(*l))

    def offsetof(self, path: Sequence[U[int, str]], value: O[T], context: Context) -> O[int]:
        """Offset of element ``path[0]``: sizes of all preceding elements,
        plus the offset of the remaining path within the target element."""
        idx, fpath = path[0], path[1:]
        if not isinstance(idx, int):
            raise ValueError('not a sequence index: {}'.format(idx))
        if value is None:
            count = peek_value(self.count, context)
        else:
            count = len(value)
        stop_value = peek_value(self.stop_value, context)
        separator = peek_value(self.separator, context)
        if count is not None and idx >= count:
            raise ValueError('sequence index out of bounds: {}'.format(idx))
        l = []
        for i in range(idx + 1):
            if isinstance(self.type, list):
                type = to_type(self.type[i], i)
            else:
                type = to_type(self.type, i)
            if value is not None:
                val = value[i]
            else:
                val = None
            if idx == i:
                offset = _offsetof(type, fpath, val, context)
                l.append(offset)
                break
            else:
                size = _sizeof(type, val, context)
                l.append(size)
        if separator:
            # NOTE(review): elements 0..idx-1 are each followed by a
            # separator, i.e. idx separators precede element idx — verify
            # (idx - 1) does not undercount by one.
            l.append(to_size((idx - 1) * len(separator), context))
        return ceil_sizes(add_sizes(*l))

    def default(self, context: Context) -> Sequence[T]:
        # An empty array is the default regardless of count/stop/separator.
        return []

    def __str__(self) -> str:
        return str(to_type(self.type)) + (('[' + str(self.count) + ']') if self.count is not None else '[]')

    def __repr__(self) -> str:
        return '<{}({!r}{}{})>'.format(
            class_name(self), to_type(self.type),
            ('[' + str(self.count) + ']') if self.count is not None else '',
            (', stop: ' + repr(self.stop_value)) if self.stop_value is not None else '',
        )
class Switch(Type):
__slots__ = ('options', 'selector', 'default_key', 'fallback')
def __init__(self, default: O[Any] = None, fallback: O[T] = None, options: Mapping[Any, T] = None) -> None:
    """Dispatch type: a selector value chooses among ``options``, with an
    optional ``fallback`` type and a ``default`` selector key."""
    self.options = options if options else {}
    self.default_key = default
    self.fallback = fallback
    # Bound later; None until a selector is attached.
    self.selector = None
def _get(self, sel) -> T:
    """Resolve ``sel`` to a concrete type.

    Known selectors map through ``options``; an unknown (or None) selector
    falls back to ``fallback``.  Raises ValueError when the selector is
    unknown and no fallback is configured.
    """
    known = sel in self.options
    if not known and not self.fallback:
        raise ValueError('Selector {} is invalid! [options: {}]'.format(
            sel, ', '.join(repr(x) for x in self.options.keys())
        ))
    # Equivalent (De Morgan) to: return options[sel] iff sel is not None
    # and known, else fallback.
    if sel is None or not known:
        return self.fallback
    return self.options[sel]
def peek_value(self, context: Context) -> O[T]:
selector = self.selector
if selector is not None:
selector = | |
comf.close()
def setup_mgr_common(commandslist, dirmap, filesmap, machines, node, targetdir, storagedir, serverdir):
    """Queue the directory, file-copy and unpack steps shared by the manager
    components: prometheus plus the storage and server tarballs.

    Mutates ``dirmap``, ``filesmap`` and ``commandslist`` in place; the
    relative order of the queued commands is significant downstream.
    """
    ip = node['ip']
    base = machines.get(ip)['basedir']
    # Directories the bundles are unpacked into.
    addToDirMap(dirmap, ip, "%s/%s" % (base, targetdir))
    addToDirMap(dirmap, ip, "%s/%s/util" % (base, targetdir))
    addToDirMap(dirmap, ip, "%s/instance_binaries" % base)
    # Prometheus bundle.
    addNodeToFilesListMap(filesmap, node, "prometheus.tgz", targetdir)
    addToCommandsList(commandslist, ip, targetdir, "tar -xzf prometheus.tgz")
    packages = (storagedir, serverdir)
    # Copy both tarballs first, then extract both, then drop the archives.
    for pkg in packages:
        addNodeToFilesListMap(filesmap, node, "%s.tgz" % pkg, targetdir)
    for pkg in packages:
        addToCommandsList(commandslist, ip, targetdir, "tar -xzf %s.tgz" % pkg)
    for pkg in packages:
        addToCommandsList(commandslist, ip, targetdir, "rm -f %s.tgz" % pkg)
    # Resolve shared-library dependencies inside each unpacked tree.
    for pkg in packages:
        addToCommandsList(commandslist, ip, "%s/%s/lib" % (targetdir, pkg), "bash %s/process_deps.sh" % base)
def install_with_config(jscfg, comf, machines, args):
    """Emit the complete installation sequence for a Kunlun cluster.

    Builds directory-creation, file-copy and shell-command lists for the
    meta MySQL shard, the node managers and the cluster managers described
    in ``jscfg``, then serializes everything into the command file ``comf``.

    NOTE: Python 2 code (print statement, dict.has_key) — keep consistent
    with the rest of this script.

    Parameters:
      jscfg    -- parsed cluster topology with 'meta', 'cluster_manager'
                  and 'node_manager' sections
      comf     -- open output file the generated commands are written into
      machines -- ip -> machine descriptor (basedir, user, ...)
      args     -- CLI options (product_version, sudo, small, autostart, ...)
    """
    meta = jscfg['meta']
    clustermgr = jscfg['cluster_manager']
    nodemgr = jscfg['node_manager']
    ha_mode = meta.get('ha_mode', '')
    # Versioned component directory names.
    storagedir = "kunlun-storage-%s" % args.product_version
    serverdir = "kunlun-server-%s" % args.product_version
    clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
    nodemgrdir = "kunlun-node-manager-%s" % args.product_version
    sudopfx=""
    if args.sudo:
        sudopfx="sudo "
    # Accumulators: files to copy, commands to run, directories to create.
    filesmap = {}
    commandslist = []
    dirmap = {}
    cluster_name = 'meta'
    extraopt = " --ha_mode=%s" % ha_mode
    # Metadata group seeds default to the full list of meta node endpoints.
    meta_addrs = []
    for node in meta['nodes']:
        meta_addrs.append("%s:%s" % (node['ip'], str(node['port'])))
    metaseeds = meta.get('group_seeds', '')
    if metaseeds == '':
        metaseeds=",".join(meta_addrs)
    print 'metaseeds:%s' % metaseeds
    nodemgrmaps = {}
    for node in nodemgr['nodes']:
        nodemgrmaps[node['ip']] = node
    # Cluster-manager raft member list, one "ip:raft_port:0" entry per node.
    clustermgrips = set()
    members=[]
    for node in clustermgr['nodes']:
        clustermgrips.add(node['ip'])
        members.append("%s:%d:0" % (node['ip'], node['brpc_raft_port']))
    initmember = clustermgr.get('raft_group_member_init_config', '')
    if initmember == '':
        initmember = "%s," % ",".join(members)
    # used for install storage nodes
    my_metaname = 'mysql_meta.json'
    reg_metaname = 'reg_meta.json'
    if not meta.has_key('group_uuid'):
        meta['group_uuid'] = getuuid()
    if len(meta['nodes']) > 0:
        # Write the meta-shard install spec and the registration spec that
        # bootstrap.py consumes later.
        metaf = open(r'clustermgr/%s' % my_metaname,'w')
        json.dump(meta, metaf, indent=4)
        metaf.close()
        metaf = open(r'clustermgr/%s' % reg_metaname, 'w')
        objs = []
        for node in meta['nodes']:
            mach = machines.get(node['ip'])
            obj = {}
            obj['is_primary'] = node.get('is_primary', False)
            obj['data_dir_path'] = node['data_dir_path']
            obj['nodemgr_bin_path'] = "%s/%s/bin" % (mach['basedir'], nodemgrdir)
            obj['ip'] = node['ip']
            obj['port'] = node['port']
            obj['user'] = "pgx"
            obj['password'] = "<PASSWORD>"
            objs.append(obj)
        json.dump(objs, metaf, indent=4)
        metaf.close()
    cmdpat = '%spython2 install-mysql.py --config=./%s --target_node_index=%d --cluster_id=%s --shard_id=%s'
    if args.small:
        cmdpat += ' --dbcfg=./template-small.cnf'
    # commands like:
    # python2 install-mysql.py --config=./mysql_meta.json --target_node_index=0
    shard_id = 'meta'
    pries = []
    secs = []
    i = 0
    for node in meta['nodes']:
        targetdir='%s/%s/dba_tools' % (node['program_dir'], storagedir)
        node['nodemgr'] = nodemgrmaps.get(node['ip'])
        mach = machines.get(node['ip'])
        # Source the per-instance env file (if present) before installing.
        absenvfname = '%s/env.sh.%d' % (mach['basedir'], node['nodemgr']['brpc_http_port'])
        envpfx = "test -f %s && . %s;" % (absenvfname, absenvfname)
        addNodeToFilesListMap(filesmap, node, reg_metaname, "%s/%s/scripts" % (node['program_dir'], serverdir))
        addNodeToFilesListMap(filesmap, node, my_metaname, targetdir)
        cmd = cmdpat % (envpfx, my_metaname, i, cluster_name, shard_id)
        # Primaries are queued before secondaries (see the two loops below).
        if node.get('is_primary', False):
            pries.append([node['ip'], targetdir, cmd])
        else:
            secs.append([node['ip'], targetdir, cmd])
        addToDirMap(dirmap, node['ip'], node['data_dir_path'])
        addToDirMap(dirmap, node['ip'], node['log_dir_path'])
        addToDirMap(dirmap, node['ip'], node['innodb_log_dir_path'])
        generate_storage_startstop(args, machines, node, i, filesmap)
        if args.autostart:
            generate_storage_service(args, machines, commandslist, node, i, filesmap)
        i+=1
    for item in pries:
        addToCommandsList(commandslist, item[0], item[1], item[2] + extraopt)
    for item in secs:
        addToCommandsList(commandslist, item[0], item[1], item[2] + extraopt)
    # bootstrap the cluster
    if len(meta['nodes']) > 0:
        firstmeta = meta['nodes'][0]
        targetdir='%s/%s/scripts' % (firstmeta['program_dir'], serverdir)
        cmdpat=r'python2 bootstrap.py --config=./%s --bootstrap_sql=./meta_inuse.sql' + extraopt
        addToCommandsList(commandslist, firstmeta['ip'], targetdir, cmdpat % reg_metaname, "storage")
    if len(nodemgr['nodes']) > 0:
        # Register all node managers with the metadata cluster, using the
        # first available node (meta, else nodemgr, else clustermgr) as the
        # machine that runs modify_servernodes.py.
        nodemgrjson = "nodemgr.json"
        nodemgrf = open('clustermgr/%s' % nodemgrjson, 'w')
        json.dump(nodemgr['nodes'], nodemgrf, indent=4)
        nodemgrf.close()
        worknode = None
        if len(meta['nodes']) > 0:
            worknode = meta['nodes'][0]
        elif len(nodemgr['nodes']) > 0:
            worknode = nodemgr['nodes'][0]
        else:
            worknode = clustermgr['nodes'][0]
        if worknode is not None:
            ip = worknode['ip']
            mach = machines.get(ip)
            addNodeToFilesListMap(filesmap, worknode, 'modify_servernodes.py', '.')
            addNodeToFilesListMap(filesmap, worknode, nodemgrjson, '.')
            addToCommandsList(commandslist, ip, machines.get(worknode['ip'])['basedir'],
                "python2 modify_servernodes.py --config %s --action=add --seeds=%s" % (nodemgrjson, metaseeds))
    i = 0
    nodemgrips = set()
    for node in nodemgr['nodes']:
        nodemgrips.add(node['ip'])
        setup_nodemgr_commands(args, i, machines, node, commandslist, dirmap, filesmap, metaseeds)
        generate_nodemgr_env(args, machines, node, i, filesmap)
        generate_nodemgr_startstop(args, machines, node, i, filesmap)
        if args.autostart:
            generate_nodemgr_service(args, machines, commandslist, node, i, filesmap)
        i += 1
    i = 0
    for node in clustermgr['nodes']:
        # NOTE(review): `ip` here is left over from the worknode block
        # above, so `ip not in nodemgrips` is the same for every
        # cluster-manager node — `node['ip'] not in nodemgrips` looks like
        # the intent; verify before changing.
        setup_clustermgr_commands(args, i, machines, node, commandslist, dirmap, filesmap, metaseeds, initmember, ip not in nodemgrips)
        if args.autostart:
            generate_clustermgr_service(args, machines, commandslist, node, i, filesmap)
        i += 1
    # start the nodemgr and clustermgr process finally.
    for node in nodemgr['nodes']:
        addToCommandsList(commandslist, node['ip'], ".", "bash start-nodemgr-%d.sh </dev/null >& run.log &" % node['brpc_http_port'])
    for node in clustermgr['nodes']:
        addToCommandsList(commandslist, node['ip'], "%s/bin" % clustermgrdir, "bash start_cluster_mgr.sh </dev/null >& start.log &")
    # Machines hosting a node manager or cluster manager get the shared base
    # environment, helper scripts and the MySQL connector build.
    workips = set()
    workips.update(nodemgrips)
    workips.update(clustermgrips)
    print "workips:%s" % str(workips)
    for ip in workips:
        mach = machines.get(ip)
        if args.sudo:
            process_command_noenv(comf, args, machines, ip, '/',
                'sudo mkdir -p %s && sudo chown -R %s:\`id -gn %s\` %s' % (mach['basedir'],
                mach['user'], mach['user'], mach['basedir']))
        else:
            process_command_noenv(comf, args, machines, ip, '/', 'mkdir -p %s' % mach['basedir'])
        process_file(comf, args, machines, ip, 'env.sh.template', mach['basedir'])
        extstr = "sed -s 's#KUNLUN_BASEDIR#%s#g' env.sh.template > env.sh" % mach['basedir']
        process_command_noenv(comf, args, machines, ip, mach['basedir'], extstr)
        extstr = "sed -i 's#KUNLUN_VERSION#%s#g' env.sh" % args.product_version
        process_command_noenv(comf, args, machines, ip, mach['basedir'], extstr)
        process_file(comf, args, machines, ip, 'install/process_deps.sh', mach['basedir'])
        process_file(comf, args, machines, ip, 'install/change_config.sh', mach['basedir'])
        process_file(comf, args, machines, ip, 'install/build_driver_formysql.sh', mach['basedir'])
        process_file(comf, args, machines, ip, 'clustermgr/mysql-connector-python-2.1.3.tar.gz', mach['basedir'])
        process_command_noenv(comf, args, machines, ip, mach['basedir'], 'bash ./build_driver_formysql.sh %s' % mach['basedir'])
    # setup env for meta
    for node in meta['nodes']:
        install_meta_env(comf, node, machines, args)
    # setup env for nodemgr
    for ip in nodemgrips:
        mach = machines.get(ip)
        install_nodemgr_env(comf, mach, machines, args)
    # setup env for clustermgr
    for ip in clustermgrips:
        mach = machines.get(ip)
        install_clustermgr_env(comf, mach, machines, args)
    # dir making
    for ip in dirmap:
        mach = machines.get(ip)
        dirs=dirmap[ip]
        for d in dirs:
            if args.sudo:
                process_command_noenv(comf, args, machines, ip, '/',
                    'sudo mkdir -p %s && sudo chown -R %s:\`id -gn %s\` %s' % (d, mach['user'], mach['user'], d))
            else:
                process_command_noenv(comf, args, machines, ip, '/', 'mkdir -p %s' % d)
    # files copy.
    for ip in filesmap:
        mach = machines.get(ip)
        fmap = filesmap[ip]
        for fpair in fmap:
            process_file(comf, args, machines, ip, 'clustermgr/%s' % fpair[0], '%s/%s' % (mach['basedir'], fpair[1]))
    # The reason for not using commands map is that, we need to keep the order for the commands.
    process_commandslist_setenv(comf, args, machines, commandslist)
def generate_systemctl_clean(servname, ip, commandslist):
    """Queue the commands that stop, disable and remove a systemd unit.

    The order matters: stop the service, disable it, then delete the unit
    file from /usr/lib/systemd/system.
    """
    for template in (
        "sudo systemctl stop %s",
        "sudo systemctl disable %s",
        "sudo rm -f /usr/lib/systemd/system/%s",
    ):
        addToCommandsList(commandslist, ip, '/', template % servname)
def clean_with_config(jscfg, comf, machines, args):
meta = jscfg['meta']
clustermgr = jscfg['cluster_manager']
nodemgr = jscfg['node_manager']
storagedir = "kunlun-storage-%s" % args.product_version
clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
nodemgrdir = "kunlun-node-manager-%s" % args.product_version
sudopfx=""
if args.sudo:
sudopfx="sudo "
filesmap = {}
commandslist = []
dirmap = {}
meta_addrs = []
for node in meta['nodes']:
meta_addrs.append("%s:%s" % (node['ip'], str(node['port'])))
metaseeds = meta.get('group_seeds', '')
if metaseeds == '':
metaseeds=",".join(meta_addrs)
# clean the nodemgr processes
for node in nodemgr['nodes']:
mach = machines.get(node['ip'])
addToCommandsList(commandslist, node['ip'], "%s/bin" % nodemgrdir, "bash stop_node_mgr.sh")
#for item in ["server_datadirs", "storage_datadirs", "storage_logdirs", "storage_waldirs"]:
# nodedirs = node[item].strip()
# for d in nodedirs.split(","):
# cmdpat = '%srm -fr %s/*'
# addToCommandsList(commandslist, node['ip'], "/", cmdpat % (sudopfx, d))
addNodeToFilesListMap(filesmap, node, 'clear_instances.sh', '.')
addToCommandsList(commandslist, node['ip'], ".", 'bash ./clear_instances.sh %d %s %s >& clear.log || true' % (
node['brpc_http_port'], mach['basedir'], args.product_version))
addToCommandsList(commandslist, node['ip'], "", '%srm -fr %s/%s' % (sudopfx, mach['basedir'], nodemgrdir))
if args.autostart:
servname = 'kunlun-node-manager-%d.service' % node['brpc_http_port']
generate_systemctl_clean(servname, node['ip'], commandslist)
# clean the nodemgr processes
for node in clustermgr['nodes']:
addToCommandsList(commandslist, node['ip'], "%s/bin" % clustermgrdir, "bash stop_cluster_mgr.sh")
addToCommandsList(commandslist, node['ip'], "", '%srm -fr %s/%s' % (sudopfx, mach['basedir'], clustermgrdir))
if args.autostart:
servname = 'kunlun-cluster-manager-%d.service' % node['brpc_raft_port']
generate_systemctl_clean(servname, node['ip'], commandslist)
if len(nodemgr['nodes']) > 0 and meta.has_key('group_seeds'):
nodemgrjson = "nodemgr.json"
nodemgrf = open('clustermgr/%s' % nodemgrjson, 'w')
json.dump(nodemgr['nodes'], nodemgrf, indent=4)
nodemgrf.close()
worknode = None
if len(meta['nodes']) > 0:
worknode = meta['nodes'][0]
elif len(nodemgr['nodes']) > 0:
worknode = nodemgr['nodes'][0]
else:
worknode = clustermgr['nodes'][0]
if worknode is not None:
ip = worknode['ip']
mach = machines.get(ip)
addNodeToFilesListMap(filesmap, worknode, 'modify_servernodes.py', '.')
addNodeToFilesListMap(filesmap, worknode, nodemgrjson, '.')
addToCommandsList(commandslist, ip, machines.get(worknode['ip'])['basedir'],
"python2 modify_servernodes.py --config %s --action=remove --seeds=%s" % (nodemgrjson, metaseeds))
# clean the meta nodes
for node in meta['nodes']:
targetdir='%s/%s/dba_tools' % (node['program_dir'], storagedir)
cmdpat = r'bash stopmysql.sh %d'
addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")
cmdpat = r'%srm -fr %s'
addToCommandsList(commandslist, node['ip'], ".", cmdpat % (sudopfx, node['log_dir_path']))
addToCommandsList(commandslist, node['ip'], ".", cmdpat % (sudopfx, node['data_dir_path']))
addToCommandsList(commandslist, node['ip'], ".", cmdpat % (sudopfx, node['innodb_log_dir_path']))
addToCommandsList(commandslist, node['ip'], ".", cmdpat % (sudopfx, node['program_dir']))
if | |
<gh_stars>0
#!/bin/env python
# Python
import re
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
# Parser
from genie.libs.parser.iosxe.show_vrf import ShowVrf, \
ShowVrfDetail
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError
# ================================
# Unit test for
# * 'show vrf'
# * 'show vrf {vrf}'
# * 'show vrf detail'
# * 'show vrf detail <vrf>'
# ================================
class TestShowVrf(unittest.TestCase):
    """Unit tests for the ShowVrf parser ('show vrf' and 'show vrf {vrf}')."""

    device = Device(name='aDevice')
    # Empty device output must make the parser raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}
    # Raw device output of a plain 'show vrf'.
    golden_output = {'execute.return_value': '''
        [2019-05-10 06:56:37,856] +++ R1_xe: executing command 'show vrf' +++
        show vrf
        Name                             Default RD            Protocols   Interfaces
        Mgmt-intf                        <not set>             ipv4,ipv6   Gi1
        VRF1                             65000:1               ipv4,ipv6   Tu1
                                                                        Lo300
                                                                        Gi2.390
                                                                        Gi2.410
                                                                        Gi2.415
                                                                        Gi2.420
                                                                        Gi3.390
                                                                        Gi3.410
                                                                        Gi3.415
                                                                        Tu3
                                                                        Tu4
                                                                        Tu6
                                                                        Tu8
                                                                        Gi3.420
    '''}

    # Expected parse of golden_output.  Note the parser expands abbreviated
    # interface names (Gi1 -> GigabitEthernet1, Tu1 -> Tunnel1, ...).
    golden_parsed_output = {
        'vrf': {
            'Mgmt-intf': {
                'protocols': ['ipv4', 'ipv6'],
                'interfaces': ['GigabitEthernet1'],
            },
            'VRF1': {
                'route_distinguisher': '65000:1',
                'protocols': ['ipv4', 'ipv6'],
                'interfaces': ['Tunnel1',
                               'Loopback300',
                               'GigabitEthernet2.390',
                               'GigabitEthernet2.410',
                               'GigabitEthernet2.415',
                               'GigabitEthernet2.420',
                               'GigabitEthernet3.390',
                               'GigabitEthernet3.410',
                               'GigabitEthernet3.415',
                               'Tunnel3',
                               'Tunnel4',
                               'Tunnel6',
                               'Tunnel8',
                               'GigabitEthernet3.420'],
            }
        }
    }

    # Raw output of 'show vrf VRF1' (single-VRF variant of the command).
    golden_output_vrf = {'execute.return_value': '''
        [2019-05-10 06:56:43,272] +++ R1_xe: executing command 'show vrf VRF1' +++
        show vrf VRF1
        Name                             Default RD            Protocols   Interfaces
        VRF1                             65000:1               ipv4,ipv6   Tu1
                                                                        Lo300
                                                                        Gi2.390
                                                                        Gi2.410
                                                                        Gi2.415
                                                                        Gi2.420
                                                                        Gi3.390
                                                                        Gi3.410
                                                                        Gi3.415
                                                                        Tu3
                                                                        Tu4
                                                                        Tu6
                                                                        Tu8
                                                                        Gi3.420
    '''}

    # Expected parse of golden_output_vrf.
    golden_parsed_output_vrf = {
        'vrf': {
            'VRF1': {
                'route_distinguisher': '65000:1',
                'protocols': ['ipv4', 'ipv6'],
                'interfaces': ['Tunnel1',
                               'Loopback300',
                               'GigabitEthernet2.390',
                               'GigabitEthernet2.410',
                               'GigabitEthernet2.415',
                               'GigabitEthernet2.420',
                               'GigabitEthernet3.390',
                               'GigabitEthernet3.410',
                               'GigabitEthernet3.415',
                               'Tunnel3',
                               'Tunnel4',
                               'Tunnel6',
                               'Tunnel8',
                               'GigabitEthernet3.420'],
            }
        }
    }

    def test_empty(self):
        """Empty output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        obj = ShowVrf(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()

    def test_golden(self):
        """'show vrf' output parses into the expected structure."""
        self.device = Mock(**self.golden_output)
        obj = ShowVrf(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output, self.golden_parsed_output)

    def test_golden_vrf(self):
        """'show vrf VRF1' output parses into the expected structure."""
        self.device = Mock(**self.golden_output_vrf)
        obj = ShowVrf(device=self.device)
        parsed_output = obj.parse(vrf='VRF1')
        self.assertEqual(parsed_output, self.golden_parsed_output_vrf)
class TestShowVrfDetail(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"Mgmt-vrf": {
"vrf_id": 1,
"interfaces": [
"GigabitEthernet0/0"
],
"interface": {
"GigabitEthernet0/0": {'vrf': 'Mgmt-vrf'}
},
"address_family": {
"ipv4 unicast": {
"table_id": "0x1",
"flags": "0x0",
"vrf_label": {
'allocation_mode': 'per-prefix'
}
},
"ipv6 unicast": {
"table_id": "0x1E000001",
"flags": "0x0",
"vrf_label": {
"allocation_mode": "per-prefix",
}
}
},
"flags": "0x1808",
"cli_format": "New",
"support_af": "multiple address-families",
},
"VRF1": {
"interfaces": [
"GigabitEthernet0/0"
],
"interface": {
"GigabitEthernet0/0": {'vrf': 'VRF1'}
},
"address_family": {
"ipv4 unicast": {
"export_to_global": {
"export_to_global_map": "export_to_global_map",
"prefix_limit": 1000
},
"import_from_global": {
"prefix_limit": 1000,
"import_from_global_map": "import_from_global_map"
},
"table_id": "0x1",
"routing_table_limit": {
"routing_table_limit_action": {
"enable_alert_limit_number": {
"alert_limit_number": 10000
}
}
},
"route_targets": {
"200:1": {
"rt_type": "both",
"route_target": "200:1"
},
"100:1": {
"rt_type": "both",
"route_target": "100:1"
}
},
"flags": "0x2100",
"vrf_label": {
'allocation_mode': 'per-prefix'
}
},
"ipv6 unicast": {
"export_to_global": {
"export_to_global_map": "export_to_global_map",
"prefix_limit": 1000
},
"table_id": "0x1E000001",
"routing_table_limit": {
"routing_table_limit_action": {
"enable_alert_percent": {
"alert_percent_value": 70
},
"enable_alert_limit_number": {
"alert_limit_number": 7000
}
},
"routing_table_limit_number": 10000
},
"route_targets": {
"200:1": {
"rt_type": "import",
"route_target": "200:1"
},
"400:1": {
"rt_type": "import",
"route_target": "400:1"
},
"300:1": {
"rt_type": "export",
"route_target": "300:1"
},
"100:1": {
"rt_type": "export",
"route_target": "100:1"
}
},
"flags": "0x100",
"vrf_label": {
'allocation_mode': 'per-prefix'
}
}
},
"flags": "0x180C",
"cli_format": "New",
"support_af": "multiple address-families",
"route_distinguisher": "100:1",
"vrf_id": 1
}
}
golden_output = {'execute.return_value': '''
VRF VRF1 (VRF Id = 1); default RD 100:1; default VPNID <not set>
New CLI format, supports multiple address-families
Flags: 0x180C
Interfaces:
Gi0/0
Address family ipv4 unicast (Table ID = 0x1):
Flags: 0x2100
Export VPN route-target communities
RT:100:1 RT:200:1
Import VPN route-target communities
RT:100:1 RT:200:1
Import route-map for ipv4 unicast: import_from_global_map (prefix limit: 1000)
Global export route-map for ipv4 unicast: export_to_global_map (prefix limit: 1000)
No export route-map
Route warning limit 10000, current count 0
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
Address family ipv6 unicast (Table ID = 0x1E000001):
Flags: 0x100
Export VPN route-target communities
RT:100:1 RT:300:1
Import VPN route-target communities
RT:200:1 RT:400:1
No import route-map
Global export route-map for ipv6 unicast: export_to_global_map (prefix limit: 1000)
No export route-map
Route limit 10000, warning limit 70% (7000), current count 1
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
Address family ipv4 multicast not active
VRF Mgmt-vrf (VRF Id = 1); default RD <not set>; default VPNID <not set>
New CLI format, supports multiple address-families
Flags: 0x1808
Interfaces:
Gi0/0
Address family ipv4 unicast (Table ID = 0x1):
Flags: 0x0
No Export VPN route-target communities
No Import VPN route-target communities
No import route-map
No global export route-map
No export route-map
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
Address family ipv6 unicast (Table ID = 0x1E000001):
Flags: 0x0
No Export VPN route-target communities
No Import VPN route-target communities
No import route-map
No global export route-map
No export route-map
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
Address family ipv4 multicast not active
Address family ipv6 multicast not active
'''}
golden_parsed_output1 = {
'Mgmt-intf': {
'vrf_id': 1,
'flags': '0x1808',
"cli_format": "New",
"support_af": "multiple address-families",
'interface': {
'GigabitEthernet1': {
'vrf': 'Mgmt-intf',
},
},
'interfaces': ['GigabitEthernet1'],
'address_family': {
'ipv4 unicast': {
'flags': '0x0',
'table_id': '0x1',
'vrf_label': {
'allocation_mode': 'per-prefix',
}
}
}
}
}
golden_output1 = {'execute.return_value': '''
VRF Mgmt-intf (VRF Id = 1); default RD <not set>; default VPNID <not set>
New CLI format, supports multiple address-families
Flags: 0x1808
Interfaces:
Gi1
Address family ipv4 unicast (Table ID = 0x1):
Flags: 0x0
No Export VPN route-target communities
No Import VPN route-target communities
No import route-map
No global export route-map
No export route-map
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
'''
}
golden_parsed_output2 = {
"GENIE": {
"address_family": {
"ipv4 unicast": {
"flags": "0x0",
"route_targets": {
"65109:1": {
"route_target": "65109:1",
"rt_type": "export"
},
"65109:110": {
"route_target": "65109:110",
"rt_type": "both"
},
"65109:4094": {
"route_target": "65109:4094",
"rt_type": "import"
}
},
"table_id": "0x11",
"vrf_label": {
"allocation_mode": "per-prefix"
}
}
},
"cli_format": "New",
"description": "VPN for GENIE parser",
"flags": "0x180C",
"interface": {
"GigabitEthernet0/0/0.110": {
"vrf": "GENIE"
},
"TenGigabitEthernet0/1/2.1042": {
"vrf": "GENIE"
},
"vasileft110": {
"vrf": "GENIE"
}
},
"interfaces": [
"GigabitEthernet0/0/0.110",
"TenGigabitEthernet0/1/2.1042",
"vasileft110"
],
"route_distinguisher": "65109:110",
"support_af": "multiple address-families",
"vrf_id": 17,
}
}
golden_output2 = {'execute.return_value': '''
VRF GENIE (VRF Id = 17); default RD 65109:110; default VPNID <not set>
Description: VPN for GENIE parser
New CLI format, supports multiple address-families
Flags: 0x180C
Interfaces:
Gi0/0/0.110 Te0/1/2.1042 vl110
Address family ipv4 unicast (Table ID = 0x11):
Flags: 0x0
Export VPN route-target communities
RT:65109:1 RT:65109:110
Import VPN route-target communities
RT:65109:4094 RT:65109:110
No import route-map
No global export route-map
No export route-map
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
Address family ipv6 unicast not active
Address family ipv4 multicast not active
Address family ipv6 multicast not active
'''}
golden_output_3 = {'execute.return_value': '''
VRF GENIE-BACKUP (VRF Id = 12); default RD 50998:106; default VPNID <not set>
Description: VPN for CHRH (Backup network)
New CLI format, supports multiple address-families
Flags: 0x180C
Interfaces:
BD106
Address family ipv4 unicast (Table ID = 0xC):
Flags: 0x0
Export VPN route-target communities
RT:50998:1 RT:50998:106
Import VPN route-target communities
RT:50998:106 RT:50998:4094
No import route-map
No global export route-map
No export route-map
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
Address family ipv6 unicast not active
Address family ipv4 multicast not active
Address family ipv6 multicast not active
VRF GENIE-LAB (VRF Id = 76); default RD 50998:11; default VPNID <not set>
Description: VPN for Internet Direct Link Out (Internal FW)
New CLI format, supports multiple address-families
Flags: 0x180C
Interfaces:
Te0/1/1.11 vr92 vr110
Address family ipv4 unicast (Table ID = 0x4C):
Flags: 0x0
Export VPN route-target communities
RT:50998:11
Import VPN route-target communities
RT:50998:11
No import route-map
No global export route-map
No export route-map
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
Address family ipv6 unicast (Table ID = 0x1E000003):
Flags: 0x0
Export VPN route-target communities
RT:50998:11
Import VPN route-target communities
RT:50998:11
No import route-map
No global export route-map
No export route-map
VRF label distribution protocol: not configured
VRF label allocation mode: per-prefix
Address family ipv4 multicast not active
Address family ipv6 multicast not | |
radius = 0.0
transforms = []
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
if bone.parent:
mat *= transforms[bone.parent.index]
transforms.append(mat)
for i, mat in enumerate(transforms):
transforms[i] = invbase[i] * mat
for mesh in meshes:
for v in mesh.verts:
pos = Blender.Mathutils.Vector(0.0, 0.0, 0.0)
for (weight, bone) in v.weights:
if weight > 0:
pos += (v.coord * transforms[bone]) * (weight / 255.0)
if bbmin:
bbmin.x = min(bbmin.x, pos.x)
bbmin.y = min(bbmin.y, pos.y)
bbmin.z = min(bbmin.z, pos.z)
bbmax.x = max(bbmax.x, pos.x)
bbmax.y = max(bbmax.y, pos.y)
bbmax.z = max(bbmax.z, pos.z)
else:
bbmin = pos.copy()
bbmax = pos.copy()
pradius = pos.x*pos.x + pos.y*pos.y
if pradius > xyradius:
xyradius = pradius
pradius += pos.z*pos.z
if pradius > radius:
radius = pradius
if bbmin:
xyradius = math.sqrt(xyradius)
radius = math.sqrt(radius)
else:
bbmin = bbmax = Blender.Mathutils.Vector(0.0, 0.0, 0.0)
return IQM_BOUNDS.pack(bbmin.x, bbmin.y, bbmin.z, bbmax.x, bbmax.y, bbmax.z, xyradius, radius)
    def boundsData(self, bones, meshes):
        """Return the packed per-frame bounding-box records for this animation.

        Inverted copies of every bone's matrix are computed once ('invbase')
        and reused for each frame; frameBoundsData() produces one packed
        record per frame and the results are concatenated into one string.
        """
        invbase = []
        for bone in bones:
            invbase.append(bone.matrix.copy().invert())
        data = ''
        for i, frame in enumerate(self.frames):
            print "Calculating bounding box for %s:%d" % (self.name, i)
            data += self.frameBoundsData(bones, meshes, frame, invbase)
        return data
class IQMFile:
    """In-memory model of an Inter-Quake Model (IQM) file.

    Meshes, joints and animations are accumulated first; export() then
    computes the absolute offset of every section and serializes the whole
    file in the IQM version-2 binary layout.
    """
    def __init__(self):
        # Offset of each interned string inside textdata.
        self.textoffsets = {}
        # '\x00'-separated string pool written as the text section.
        self.textdata = ''
        self.meshes = []
        self.meshdata = []
        self.numverts = 0
        self.numtris = 0
        self.joints = []
        self.jointdata = []
        self.numframes = 0
        self.framesize = 0
        self.anims = []
        self.posedata = []
        self.animdata = []
        self.framedata = []
        self.vertdata = []

    def addText(self, str):
        """Intern 'str' in the text pool and return its byte offset.

        The pool is lazily initialized with the empty string at offset 0.
        NOTE(review): the parameter shadows the builtin 'str'.
        """
        if not self.textdata:
            self.textdata += '\x00'
            self.textoffsets[''] = 0
        try:
            return self.textoffsets[str]
        # NOTE(review): bare except; 'except KeyError' would be safer.
        except:
            offset = len(self.textdata)
            self.textoffsets[str] = offset
            self.textdata += str + '\x00'
            return offset

    def addJoints(self, bones):
        """Register joints; joint records are emitted only when meshes exist."""
        for bone in bones:
            self.joints.append(bone)
            if self.meshes:
                self.jointdata.append(bone.jointData(self))

    def addMeshes(self, meshes):
        """Append meshes, assigning each its first vertex/triangle index."""
        self.meshes += meshes
        for mesh in meshes:
            mesh.firstvert = self.numverts
            mesh.firsttri = self.numtris
            self.meshdata.append(mesh.meshData(self))
            self.numverts += len(mesh.verts)
            self.numtris += len(mesh.tris)

    def addAnims(self, anims):
        """Append animations, assigning each its first frame index."""
        self.anims += anims
        for anim in anims:
            anim.firstframe = self.numframes
            self.animdata.append(anim.animData(self))
            self.numframes += len(anim.frames)

    def calcFrameSize(self):
        """Compute the per-frame channel count and collect per-joint pose data."""
        for anim in self.anims:
            anim.calcFrameLimits(self.joints)
        # framesize = total number of animated channels over all joints.
        self.framesize = 0
        for joint in self.joints:
            self.framesize += joint.calcChannelMask()
        for joint in self.joints:
            if self.anims:
                self.posedata.append(joint.poseData(self))
        print 'Exporting %d frames of size %d' % (self.numframes, self.framesize)

    def writeVerts(self, file, offset):
        """Write the vertex array headers followed by the vertex data.

        'offset' is the absolute file offset where the first attribute's data
        begins; it is advanced past each attribute array in turn.
        """
        if self.numverts <= 0:
            return
        # One IQM_VERTEXARRAY header per attribute, pointing at its data.
        file.write(IQM_VERTEXARRAY.pack(IQM_POSITION, 0, IQM_FLOAT, 3, offset))
        offset += self.numverts * struct.calcsize('<3f')
        file.write(IQM_VERTEXARRAY.pack(IQM_TEXCOORD, 0, IQM_FLOAT, 2, offset))
        offset += self.numverts * struct.calcsize('<2f')
        file.write(IQM_VERTEXARRAY.pack(IQM_NORMAL, 0, IQM_FLOAT, 3, offset))
        offset += self.numverts * struct.calcsize('<3f')
        file.write(IQM_VERTEXARRAY.pack(IQM_TANGENT, 0, IQM_FLOAT, 4, offset))
        offset += self.numverts * struct.calcsize('<4f')
        if self.joints:
            file.write(IQM_VERTEXARRAY.pack(IQM_BLENDINDEXES, 0, IQM_UBYTE, 4, offset))
            offset += self.numverts * struct.calcsize('<4B')
            file.write(IQM_VERTEXARRAY.pack(IQM_BLENDWEIGHTS, 0, IQM_UBYTE, 4, offset))
            offset += self.numverts * struct.calcsize('<4B')
        # Data is stored non-interleaved: one full array per attribute,
        # in the same order as the headers above.
        for mesh in self.meshes:
            for v in mesh.verts:
                file.write(struct.pack('<3f', *v.coord))
        for mesh in self.meshes:
            for v in mesh.verts:
                file.write(struct.pack('<2f', *v.uv))
        for mesh in self.meshes:
            for v in mesh.verts:
                file.write(struct.pack('<3f', *v.normal))
        for mesh in self.meshes:
            for v in mesh.verts:
                # The 4th tangent component stores the bitangent sign/value.
                file.write(struct.pack('<4f', v.tangent.x, v.tangent.y, v.tangent.z, v.bitangent))
        if self.joints:
            # v.weights holds (weight, bone-index) pairs: indexes first ...
            for mesh in self.meshes:
                for v in mesh.verts:
                    file.write(struct.pack('<4B', v.weights[0][1], v.weights[1][1], v.weights[2][1], v.weights[3][1]))
            # ... then the weights themselves.
            for mesh in self.meshes:
                for v in mesh.verts:
                    file.write(struct.pack('<4B', v.weights[0][0], v.weights[1][0], v.weights[2][0], v.weights[3][0]))

    def calcNeighbors(self):
        """For each triangle, find its neighboring triangle across each edge.

        An edge shared by exactly two triangles links them as neighbors;
        any other edge (boundary or shared by more than two) yields -1.
        """
        # Map edge key -> list of triangle indices using that edge.
        edges = {}
        for mesh in self.meshes:
            for i, (v0, v1, v2) in enumerate(mesh.tris):
                e0 = v0.neighborKey(v1)
                e1 = v1.neighborKey(v2)
                e2 = v2.neighborKey(v0)
                tri = mesh.firsttri + i
                try: edges[e0].append(tri)
                except: edges[e0] = [tri]
                try: edges[e1].append(tri)
                except: edges[e1] = [tri]
                try: edges[e2].append(tri)
                except: edges[e2] = [tri]
        neighbors = []
        for mesh in self.meshes:
            for i, (v0, v1, v2) in enumerate(mesh.tris):
                e0 = edges[v0.neighborKey(v1)]
                e1 = edges[v1.neighborKey(v2)]
                e2 = edges[v2.neighborKey(v0)]
                tri = mesh.firsttri + i
                match0 = match1 = match2 = -1
                # Exactly two triangles on an edge: the neighbor is the one
                # that is not 'tri' (index flipped with XOR 1).
                if len(e0) == 2: match0 = e0[e0.index(tri)^1]
                if len(e1) == 2: match1 = e1[e1.index(tri)^1]
                if len(e2) == 2: match2 = e2[e2.index(tri)^1]
                neighbors.append((match0, match1, match2))
        self.neighbors = neighbors

    def writeTris(self, file):
        """Write the triangle index array, then the neighbor array."""
        for mesh in self.meshes:
            for (v0, v1, v2) in mesh.tris:
                file.write(struct.pack('<3I', v0.index + mesh.firstvert, v1.index + mesh.firstvert, v2.index + mesh.firstvert))
        for (n0, n1, n2) in self.neighbors:
            # "No neighbor" (-1) is stored as the unsigned sentinel 0xFFFFFFFF.
            if n0 < 0: n0 = 0xFFFFFFFF
            if n1 < 0: n1 = 0xFFFFFFFF
            if n2 < 0: n2 = 0xFFFFFFFF
            file.write(struct.pack('<3I', n0, n1, n2))

    def export(self, file, usebbox = True):
        """Serialize the model to 'file' in the IQM version-2 binary layout.

        Pass 1 computes the absolute offset of every section (text, meshes,
        vertex arrays, triangles, neighbors, joints, poses, anims, frames,
        bounds); pass 2 writes the header and each section in that order.
        When 'usebbox' is set and there is animated geometry, a per-frame
        bounding-box section is appended.
        """
        # --- Pass 1: section offsets -------------------------------------
        self.filesize = IQM_HEADER.size
        if self.textdata:
            # Pad the text pool to a 4-byte boundary.
            while len(self.textdata) % 4:
                self.textdata += '\x00'
            ofs_text = self.filesize
            self.filesize += len(self.textdata)
        else:
            ofs_text = 0
        if self.meshdata:
            ofs_meshes = self.filesize
            self.filesize += len(self.meshdata) * IQM_MESH.size
        else:
            ofs_meshes = 0
        if self.numverts > 0:
            ofs_vertexarrays = self.filesize
            # 4 base attributes; +2 (blend indexes/weights) when skinned.
            num_vertexarrays = 4
            if self.joints:
                num_vertexarrays += 2
            self.filesize += num_vertexarrays * IQM_VERTEXARRAY.size
            ofs_vdata = self.filesize
            self.filesize += self.numverts * struct.calcsize('<3f2f3f4f')
            if self.joints:
                self.filesize += self.numverts * struct.calcsize('<4B4B')
        else:
            ofs_vertexarrays = 0
            num_vertexarrays = 0
            ofs_vdata = 0
        if self.numtris > 0:
            ofs_triangles = self.filesize
            self.filesize += self.numtris * IQM_TRIANGLE.size
            # Neighbor records have the same shape as triangle records.
            ofs_neighbors = self.filesize
            self.filesize += self.numtris * IQM_TRIANGLE.size
        else:
            ofs_triangles = 0
            ofs_neighbors = 0
        if self.jointdata:
            ofs_joints = self.filesize
            self.filesize += len(self.jointdata) * IQM_JOINT.size
        else:
            ofs_joints = 0
        if self.posedata:
            ofs_poses = self.filesize
            self.filesize += len(self.posedata) * IQM_POSE.size
        else:
            ofs_poses = 0
        if self.animdata:
            ofs_anims = self.filesize
            self.filesize += len(self.animdata) * IQM_ANIMATION.size
        else:
            ofs_anims = 0
        falign = 0
        if self.framesize * self.numframes > 0:
            ofs_frames = self.filesize
            self.filesize += self.framesize * self.numframes * struct.calcsize('<H')
            # Pad the frame data so the following section is 4-byte aligned.
            falign = (4 - (self.filesize % 4)) % 4
            self.filesize += falign
        else:
            ofs_frames = 0
        if usebbox and self.numverts > 0 and self.numframes > 0:
            ofs_bounds = self.filesize
            self.filesize += self.numframes * IQM_BOUNDS.size
        else:
            ofs_bounds = 0
        # --- Pass 2: write header + sections -----------------------------
        file.write(IQM_HEADER.pack('INTERQUAKEMODEL', 2, self.filesize, 0, len(self.textdata), ofs_text, len(self.meshdata), ofs_meshes, num_vertexarrays, self.numverts, ofs_vertexarrays, self.numtris, ofs_triangles, ofs_neighbors, len(self.jointdata), ofs_joints, len(self.posedata), ofs_poses, len(self.animdata), ofs_anims, self.numframes, self.framesize, ofs_frames, ofs_bounds, 0, 0, 0, 0))
        file.write(self.textdata)
        for mesh in self.meshdata:
            file.write(IQM_MESH.pack(*mesh))
        self.writeVerts(file, ofs_vdata)
        self.writeTris(file)
        for joint in self.jointdata:
            file.write(IQM_JOINT.pack(*joint))
        for pose in self.posedata:
            file.write(IQM_POSE.pack(*pose))
        for anim in self.animdata:
            file.write(IQM_ANIMATION.pack(*anim))
        for anim in self.anims:
            file.write(anim.frameData(self.joints))
        file.write('\x00' * falign)
        if usebbox and self.numverts > 0 and self.numframes > 0:
            for anim in self.anims:
                file.write(anim.boundsData(self.joints, self.meshes))
def findArmature():
    """Return the last selected Blender object whose data is an armature.

    Returns None when no armature is among the currently selected objects.
    """
    found = None
    for candidate in Blender.Object.GetSelected():
        if type(candidate.getData()) is Blender.Types.ArmatureType:
            found = candidate
    return found
def collectBones(armature, scale):
    """Collect all bones of the armature into a name -> Bone mapping.

    Bones are visited parent-first (breadth-first from the root bones), so a
    bone's parent is already in the mapping when the bone itself is created.
    Bone matrices are combined with the armature's world matrix, and their
    translation row is multiplied by 'scale'.
    """
    data = armature.getData()
    bones = {}
    matrix = armature.getMatrix('worldspace')
    # Start from the root bones; children are appended during iteration, so
    # the enumerate() below walks the hierarchy breadth-first.
    worklist = [ bone for bone in data.bones.values() if not bone.parent ]
    for index, bone in enumerate(worklist):
        bmatrix = bone.matrix['ARMATURESPACE'] * matrix
        if scale != 1.0:
            # Scale only the translation row of the 4x4 matrix.
            bmatrix[3][0] *= scale
            bmatrix[3][1] *= scale
            bmatrix[3][2] *= scale
        # Parent lookup is safe because parents are enqueued before children.
        bones[bone.name] = Bone(bone.name, index, bone.parent and bones.get(bone.parent.name), bmatrix)
        for child in bone.children:
            if child not in worklist:
                worklist.append(child)
    print 'Collected %d bones' % len(worklist)
    return bones
def collectAnim(armature, scale, bones, action, startframe = None, endframe = None):
    """Sample a Blender action frame-by-frame into per-bone pose tuples.

    For every frame in [startframe, endframe] the scene is advanced to that
    frame and, for each bone in 'bones' order, a (loc, quat, scale, matrix)
    tuple is recorded.  Pose matrices are expressed relative to the parent
    bone, or in world space for root bones.  Returns a list of frames, each
    a list of those tuples.
    """
    # Default the frame range to the extent of the action's own keyframes.
    # NOTE(review): frame number 0 is falsy and would be replaced by the
    # action's minimum/maximum here — confirm that is intended.
    if not startframe or not endframe:
        frames = action.getFrameNumbers()
        if not startframe:
            startframe = min(frames)
        if not endframe:
            endframe = max(frames)
    print 'Exporting action "%s" frames %d-%d' % (action.getName(), startframe, endframe)
    scene = Blender.Scene.GetCurrent()
    context = scene.getRenderingContext()
    worldmatrix = armature.getMatrix('worldspace')
    action.setActive(armature)
    outdata = []
    for time in xrange(startframe, endframe+1):
        # Advance the scene to this frame so getPose() reflects it.
        context.currentFrame(int(time))
        scene.makeCurrent()
        Blender.Set('curframe', time)
        Blender.Window.Redraw()
        pose = armature.getPose()
        outframe = []
        for bone in bones:
            posematrix = pose.bones[bone.name].poseMatrix
            if bone.parent:
                # Express the pose relative to the parent bone's pose.
                posematrix *= pose.bones[bone.parent.name].poseMatrix.copy().invert()
            else:
                # Root bones are expressed in world space.
                posematrix *= worldmatrix
            if scale != 1.0:
                # Scale only the translation row.
                posematrix[3][0] *= scale
                posematrix[3][1] *= scale
                posematrix[3][2] *= scale
            loc = posematrix.translationPart()
            quat = posematrix.toQuat().normalize()
            # Normalize the quaternion sign (flip when w > 0) so equivalent
            # rotations are encoded consistently.
            if quat.w > 0:
                quat.negate()
            # Snap scale components to a 1/65536 grid to suppress float noise.
            pscale = posematrix.scalePart()
            pscale.x = round(pscale.x*0x10000)/0x10000
            pscale.y = round(pscale.y*0x10000)/0x10000
            pscale.z = round(pscale.z*0x10000)/0x10000
            outframe.append((loc, quat, pscale, posematrix))
        outdata.append(outframe)
    return outdata
def collectAnims(armature, scale, bones, animspecs):
    """Build Animation objects from a comma-separated list of anim specs.

    Each spec has the form 'name[:startframe[:endframe[:fps[:flags]]]]'.
    Missing or unparsable fields fall back to defaults (full frame range,
    fps 0.0, flags 0).  Specs naming an unknown action are skipped with a
    printed warning.
    """
    actions = Blender.Armature.NLA.GetActions()
    animspecs = map(lambda spec: spec.strip(), animspecs.split(','))
    anims = []
    for animspec in animspecs:
        animspec = map(lambda arg: arg.strip(), animspec.split(':'))
        animname = animspec[0]
        if animname not in actions:
            print 'Action "%s" not found in current armature' % animname
            continue
        # Optional fields: absent or malformed values fall back to defaults.
        # NOTE(review): bare excepts also swallow IndexError for short specs,
        # which is relied upon here.
        try:
            startframe = int(animspec[1])
        except:
            startframe = None
        try:
            endframe = int(animspec[2])
        except:
            endframe = None
        try:
            fps = float(animspec[3])
        except:
            fps = 0.0
        try:
            flags = int(animspec[4])
        except:
            flags = 0
        framedata = collectAnim(armature, scale, bones, actions[animname], startframe, endframe)
        anims.append(Animation(animname, framedata, fps, flags))
    return anims
def collectMeshes(bones, scale, useskel = True, filetype = 'IQM'):
vertwarn = []
meshes = []
for obj in Blender.Object.GetSelected():
data = | |
# -*- coding: utf-8 -*-
"""
This module defines :class:`DataObject`, the abstract base class
used by all :mod:`neo.core` classes that can contain data (i.e. are not container classes).
It contains basic functionality that is shared among all those data objects.
"""
from copy import deepcopy
import warnings
import quantities as pq
import numpy as np
from neo.core.baseneo import BaseNeo, _check_annotations
def _normalize_array_annotations(value, length):
    """Check consistency of array annotations and normalize them.

    Recursively check that value is either an array or list containing only
    "simple" types (number, string, date/time) or is a dict of those.
    Scalars are wrapped into length-1 arrays; lists are converted to numpy
    arrays.

    Args:
        :value: (np.ndarray, list or dict) value to be checked for consistency
        :length: (int) required length of the array annotation
    Returns:
        np.ndarray The array_annotations from value in correct form
    Raises:
        ValueError: In case value is not accepted as array_annotation(s)
    """
    # First stage, resolve dict of annotations into single annotations
    if isinstance(value, dict):
        for key in value.keys():
            if isinstance(value[key], dict):
                raise ValueError("Nested dicts are not allowed as array annotations")
            value[key] = _normalize_array_annotations(value[key], length)
    elif value is None:
        raise ValueError("Array annotations must not be None")
    # If not array annotation, pass on to regular check and make it a list, that is checked again
    # This covers array annotations with length 1
    elif not isinstance(value, (list, np.ndarray)) or (
            isinstance(value, pq.Quantity) and value.shape == ()):
        _check_annotations(value)
        # Recurse with the scalar wrapped in a length-1 array.
        value = _normalize_array_annotations(np.array([value]), length)
    # If array annotation, check for correct length, only single dimension and allowed data
    else:
        # Get length that is required for array annotations, which is equal to the length
        # of the object's data
        own_length = length
        # Escape check if empty array or list and just annotate an empty array (length 0)
        # This enables the user to easily create dummy array annotations that will be filled
        # with data later on
        if len(value) == 0:
            if not isinstance(value, np.ndarray):
                value = np.ndarray((0,))
            val_length = own_length
        else:
            # Note: len(o) also works for np.ndarray, it then uses the first dimension,
            # which is exactly the desired behaviour here
            val_length = len(value)
        if not own_length == val_length:
            raise ValueError(
                "Incorrect length of array annotation: {} != {}".format(val_length, own_length))
        # Local function used to check single elements of a list or an array
        # They must not be lists or arrays and fit the usual annotation data types
        def _check_single_elem(element):
            # Nested array annotations not allowed currently
            # If element is a list or a np.ndarray, it's not conform except if it's a quantity of
            # length 1
            if isinstance(element, list) or (isinstance(element, np.ndarray) and not (
                    isinstance(element, pq.Quantity) and (
                    element.shape == () or element.shape == (1,)))):
                raise ValueError("Array annotations should only be 1-dimensional")
            if isinstance(element, dict):
                raise ValueError("Dictionaries are not supported as array annotations")
            # Perform regular check for elements of array or list
            _check_annotations(element)
        # Arrays only need testing of single element to make sure the others are the same
        if isinstance(value, np.ndarray):
            # Type of first element is representative for all others
            # Thus just performing a check on the first element is enough
            # Even if it's a pq.Quantity, which can be scalar or array, this is still true
            # Because a np.ndarray cannot contain scalars and sequences simultaneously
            # If length of data is 0, then nothing needs to be checked
            if len(value):
                # Perform check on first element
                _check_single_elem(value[0])
            return value
        # In case of list, it needs to be ensured that all data are of the same type
        else:
            # Conversion to numpy array makes all elements same type
            # Converts elements to most general type
            try:
                value = np.array(value)
            # Except when scalar and non-scalar values are mixed, this causes conversion to fail
            except ValueError as e:
                msg = str(e)
                if "setting an array element with a sequence." in msg:
                    raise ValueError("Scalar values and arrays/lists cannot be "
                                     "combined into a single array annotation")
                else:
                    raise e
            # If most specialized data type that possibly fits all elements is object,
            # raise an Error with a telling error message, because this means the elements
            # are not compatible
            if value.dtype == object:
                raise ValueError("Cannot convert list of incompatible types into a single"
                                 " array annotation")
            # Check the first element for correctness
            # If its type is correct for annotations, all others are correct as well
            # Note: Empty lists cannot reach this point
            _check_single_elem(value[0])
    return value
class DataObject(BaseNeo, pq.Quantity):
'''
This is the base class from which all objects containing data inherit
It contains common functionality for all those objects and handles array_annotations.
Common functionality that is not included in BaseNeo includes:
- duplicating with new data
- rescaling the object
- copying the object
- returning it as pq.Quantity or np.ndarray
- handling of array_annotations
Array_annotations are a kind of annotation that contains metadata for every data point,
i.e. per timestamp (in SpikeTrain, Event and Epoch) or signal channel (in AnalogSignal
and IrregularlySampledSignal).
They can contain the same data types as regular annotations, but are always represented
as numpy arrays of the same length as the number of data points of the annotated neo object.
Args:
name (str, optional): Name of the Neo object
description (str, optional): Human readable string description of the Neo object
file_origin (str, optional): Origin of the data contained in this Neo object
array_annotations (dict, optional): Dictionary containing arrays / lists which annotate
individual data points of the Neo object.
kwargs: regular annotations stored in a separate annotation dictionary
'''
def __init__(self, name=None, description=None, file_origin=None, array_annotations=None,
**annotations):
"""
This method is called by each data object and initializes the newly created object by
adding array annotations and calling __init__ of the super class, where more annotations
and attributes are processed.
"""
if not hasattr(self, 'array_annotations') or not self.array_annotations:
self.array_annotations = ArrayDict(self._get_arr_ann_length())
if array_annotations is not None:
self.array_annotate(**array_annotations)
BaseNeo.__init__(self, name=name, description=description, file_origin=file_origin,
**annotations)
def array_annotate(self, **array_annotations):
"""
Add array annotations (annotations for individual data points) as arrays to a Neo data
object.
Example:
>>> obj.array_annotate(code=['a', 'b', 'a'], category=[2, 1, 1])
>>> obj.array_annotations['code'][1]
'b'
"""
self.array_annotations.update(array_annotations)
def array_annotations_at_index(self, index):
"""
Return dictionary of array annotations at a given index or list of indices
:param index: int, list, numpy array: The index (indices) from which the annotations
are extracted
:return: dictionary of values or numpy arrays containing all array annotations
for given index/indices
Example:
>>> obj.array_annotate(code=['a', 'b', 'a'], category=[2, 1, 1])
>>> obj.array_annotations_at_index(1)
{code='b', category=1}
"""
# Taking only a part of the array annotations
# Thus not using ArrayDict here, because checks for length are not needed
index_annotations = {}
# Use what is given as an index to determine the corresponding annotations,
# if not possible, numpy raises an Error
for ann in self.array_annotations.keys():
# NO deepcopy, because someone might want to alter the actual object using this
try:
index_annotations[ann] = self.array_annotations[ann][index]
except IndexError as e:
# IndexError caused by 'dummy' array annotations should not result in failure
# Taking a slice from nothing results in nothing
if len(self.array_annotations[ann]) == 0 and not self._get_arr_ann_length() == 0:
index_annotations[ann] = self.array_annotations[ann]
else:
raise e
return index_annotations
def _merge_array_annotations(self, other):
'''
Merges array annotations of 2 different objects.
The merge happens in such a way that the result fits the merged data
In general this means concatenating the arrays from the 2 objects.
If an annotation is only present in one of the objects, it will be omitted
:return Merged array_annotations
'''
merged_array_annotations = {}
omitted_keys_self = []
# Concatenating arrays for each key
for key in self.array_annotations:
try:
value = deepcopy(self.array_annotations[key])
other_value = deepcopy(other.array_annotations[key])
# Quantities need to be rescaled to common unit
if isinstance(value, pq.Quantity):
try:
other_value = other_value.rescale(value.units)
except ValueError:
raise ValueError("Could not merge array annotations "
"due to different units")
merged_array_annotations[key] = np.append(value, other_value) * value.units
else:
merged_array_annotations[key] = np.append(value, other_value)
except KeyError:
# Save the omitted keys to be able to print them
omitted_keys_self.append(key)
continue
# | |
<filename>definitions_exercises.py
# -*- coding: utf-8 -*-
"""
Created on Sat May 04 14:20:51 2013
@author: VHOEYS
"""
#Load packages for calculation and making plots
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
#IMPORT NORMAL DISTRIBUTION:
from scipy.stats import norm
from matplotlib import cm, colors
import matplotlib.axes as maxes
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid import make_axes_locatable
#------------------------------------------------------------------------------
#DEFINITIONS TO RUN THE MODEL--------------------------------------------------
def deriv_works(u, t, Pars, Const):
    '''
    Right-hand side of the respirometric model ODE system.

    u     : state vector [biomass X, substrate Ss, oxygen O]
    t     : current time
    Pars  : model parameters (mumax, Y, Ks, tau)
    Const : model constants (b, kla, SOeq)

    Returns the derivatives [dX/dt, dSs/dt, dO/dt] as a numpy array.
    '''
    # Unpack parameters and constants as 64-bit floats
    mumax = np.float64(Pars[0])
    Y = np.float64(Pars[1])
    Ks = np.float64(Pars[2])
    tau = np.float64(Pars[3])

    b = np.float64(Const[0])
    kla = np.float64(Const[1])
    SOeq = np.float64(Const[2])

    monod = mumax * (u[1]) / (u[1] + Ks)   # Monod growth kinetics
    startup = 1.0 - np.exp(-t / tau)       # exponential start-up help function

    biomass_rate = (startup * monod - b) * u[0]                              # biomass
    substrate_rate = -(1.0 / Y) * startup * monod * u[0]                     # substrate
    oxygen_rate = kla * (SOeq - u[2]) - ((1 - Y) / Y) * startup * monod * u[0]  # oxygen
    return np.array([biomass_rate, substrate_rate, oxygen_rate])
def RespiroModel(Pars, Init_unc, time):
    '''
    Run the respirometric model over the given time grid.

    Pars     : model parameters (mumax, Y, Ks, tau)
    Init_unc : uncertain initial conditions; Init_unc[0] is the initial
               biomass X0
    time     : time points at which the solution is evaluated

    Returns [time, X, Ss, O, OUR_ex, infodic]: the time grid, biomass X,
    substrate Ss, oxygen O, the oxygen uptake rate OUR_ex and the odeint
    info dictionary.
    '''
    # Fixed model constants: decay rate b, oxygen transfer coefficient kla
    # and oxygen saturation concentration SOeq.
    b = 0.62
    kla = 369.7334962
    SOeq = 8.4
    constants = np.array([b, kla, SOeq])

    # Initial conditions: fixed substrate Ss0, uncertain biomass X0 and
    # oxygen starting at saturation.
    Ss0 = 58.4899
    X0 = Init_unc[0]
    initial_state = np.array([X0, Ss0, SOeq])

    # Parameters are needed again below for the oxygen uptake rate.
    mumax = np.float64(Pars[0])
    Y = np.float64(Pars[1])
    Ks = np.float64(Pars[2])
    tau = np.float64(Pars[3])

    # Integrate with the LSODA scheme.
    y, infodic = odeint(deriv_works, initial_state, time, full_output=True,
                        printmessg=False, args=(Pars, constants))

    X = y[:, 0]
    Ss = y[:, 1]
    O = y[:, 2]
    # Exogenous oxygen uptake rate; the division by 24*60 is presumably a
    # per-day -> per-minute conversion — confirm with the data's units.
    OUR_ex = ((1 - np.exp(-time / tau)) * mumax * (1 - Y) / Y * Ss / (Ss + Ks) * X) / (24 * 60)
    return [time, X, Ss, O, OUR_ex, infodic]
#END OF DEFINITIONS TO RUN THE MODEL-------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#DEFINITIONS TO RUN THE SAMPLING-----------------------------------------------
def segments_part(nsegments):
    '''lhs help function

    Draw one uniform random sample from each of `nsegments` equal-width
    segments of [0, 1], then shuffle the samples (for higher-order pairing
    of several parameters).

    Parameters
    -----------
    nsegments : int
        The number of samples to take

    Returns
    --------
    samples : narray
        An array with the different samples taken (in each segment between [0,1])
    '''
    # Segment edges: nsegments+1 equally spaced points on [0, 1]
    edges = np.linspace(0, 1., nsegments + 1)
    # One uniform draw per segment
    samples = np.array([np.random.uniform(edges[i], edges[i + 1])
                        for i in range(nsegments)])
    # Shuffle so successive parameters are paired randomly
    np.random.shuffle(samples)
    return samples
#function for Latin-hypercube sampling of uniform distribution
def lhs_uniform(nsamples, pmin, pmax):
    '''Latin-hypercube sampling of a uniform distribution on [pmin, pmax].
    '''
    # Stratified samples on [0, 1], one per segment
    base = segments_part(nsamples)
    # Rescale to the requested interval
    return pmin + base * (pmax - pmin)
#function for Latin-hypercube sampling of normal distribution
def lhs_normal(nsamples, ploc, pscale):
    '''Latin-hypercube sampling of a normal distribution N(ploc, pscale).
    '''
    # Stratified samples on [0, 1], one per segment
    base = segments_part(nsamples)
    # Map through the inverse CDF of the requested normal distribution
    return norm.ppf(base, ploc, pscale)
#END OF DEFINITIONS TO RUN THE SAMPLING----------------------------------------
#------------------------------------------------------------------------------
#RIVER MODEL
#------------------------------------------------------------------------------
#DEFINITIONS TO RUN THE MODEL--------------------------------------------------
def deriv_river(u, t, Pars, dummy):
    '''
    Right-hand side of the river contamination ODEs.

    u : state vector [BZV, DO]
    t : time (unused; the model is autonomous)
    Pars : sequence (k1, k2, BZVin, DOsat)
    dummy : placeholder so the signature matches odeint's args convention
    '''
    k1 = np.float64(Pars[0])
    k2 = np.float64(Pars[1])
    BZVin = np.float64(Pars[2])
    DOsat = np.float64(Pars[3])
    dBZVdt = BZVin - k1 * u[0]               # organic load (BZV)
    dDOdt = k2 * (DOsat - u[1]) - k1 * u[0]  # dissolved oxygen (DO)
    return np.array([dBZVdt, dDOdt])
def RiverModel(Pars, time):
    '''
    Integrate the river contamination model over the given time points.

    Pars : sequence (k1, k2, BZVin, DOsat)
    time : array of output time points

    Returns [time, BZV, DO, infodic].
    '''
    # Fixed initial conditions for BZV and DO
    yinit = np.array([7.33, 8.5])
    # Integrate with the LSODA scheme
    y, infodic = odeint(deriv_river, yinit, time, full_output=True,
                        printmessg=False, args=(Pars, []))
    BZV = y[:, 0]
    DO = y[:, 1]
    return [time, BZV, DO, infodic]
#END OF DEFINITIONS TO RUN THE MODEL-------------------------------------------
#------------------------------------------------------------------------------
#GLUE VISUALISATION
def Scatter_hist(data1, data2, data1b=False, data2b=False, xbinwidth = 0.5,
ybinwidth=0.5, SSE=None, SSEb=None, vmax=1000., colormaps=cm.YlOrBr,
cleanstyle = False, roodlichter=0.5, *args, **kwargs):
'''
Three-parts plot with the two parameter sitdirbutions plotted on the sides
Parameters
-----------
data1: ndarray
dataset 1 for x-axis
data2: ndarray
dataset 2 for y-axis
data1b: ndarray
dataset to plot along the data 1 set
data2b: ndarray
dataset to plot along the data 2 set
binwidth: float
defines the width of the bins relative to the data used
cleanstyle: bool True|False
if True, a more minimalistic version of the plot is given
*args, **kwargs: args
arguments given toi the scatter plot
example: s=15, marker='o', edgecolors= 'k',facecolor = 'white'
Returns
---------
fig: matplotlib.figure.Figure object
the resulting figure
axScatter: matplotlib.axes.AxesSubplot object
the scatter plot with the datapoints, can be used to add labels or
change the current ticks settings
axHistx: matplotlib.axes.AxesSubplot object
the x-axis histogram
axHisty: matplotlib.axes.AxesSubplot object
the y-axis histogram
Examples
----------
>>> nMC = 1000
>>> parval1 = np.random.gamma(5.5,size=nMC)
>>> parval2 = np.random.gamma(8.0,size=nMC)
>>> parnames = ['par1','par2']
>>> fig,axScatter,axHistx,axHisty = Scatter_hist(parval1,parval2,
cleanstyle = True, s=48,
marker='o', edgecolors= 'k',
facecolor = 'none',alpha=0.7)
>>> parval1b = np.random.uniform(low=0.0, high=30.0,size=nMC)
>>> parval2b = np.random.uniform(low=0.0, high=30.0,size=nMC)
>>> fig,axScatter,axHistx,axHisty = Scatter_hist(parval1,parval2,parval1b,
parval2b, cleanstyle = True,
s=48, marker='o',
edgecolors= 'k',
facecolor = 'none',
alpha=0.7)
Notes
------
Typical application is to check the dependency of two posterior
parameter distrbutions, eventually compared with their selected posteriors
If a second dataset is added to compare, the style options of the scatter
plot are fixed and the *args, **kwargs have no influence
'''
if not isinstance(data1, np.ndarray):
raise Exception('dataset 1 need to be numpy ndarray')
if not isinstance(data2, np.ndarray):
raise Exception('dataset 2 need to be numpy ndarray')
if isinstance(data1b, np.ndarray):
if not isinstance(data2b, np.ndarray):
raise Exception('Always combine the data of both')
if isinstance(data2b, np.ndarray):
if not isinstance(data1b, np.ndarray):
raise Exception('Always combine the data of both')
fig = plt.figure(figsize=(10,10))
axScatter = plt.subplot(111)
divider = make_axes_locatable(axScatter)
#axScatter.set_aspect('equal')
axScatter.set_autoscale_on(True)
# create a new axes with above the axScatter
axHistx = divider.new_vertical(1.5, pad=0.0001, sharex=axScatter)
# create a new axes on the right side of the
# axScatter
axHisty = divider.new_horizontal(1.5, pad=0.0001, sharey=axScatter)
fig.add_axes(axHistx)
fig.add_axes(axHisty)
# now determine nice limits by hand:
# binwidth = binwidth
xmin = np.min(data1)
xmax = np.max(data1)
ymin = np.min(data2)
ymax = np.max(data2)
#xymax = np.max( [np.max(np.fabs(data1)), np.max(np.fabs(data2))] )
#lim = (int(xymax/binwidth) + 1) * binwidth
binsx = np.arange(xmin, xmax + xbinwidth, xbinwidth)
binsy = np.arange(ymin, ymax + ybinwidth, ybinwidth)
#bins = np.arange(-lim, lim + binwidth, binwidth)
# the scatter plot:
if isinstance(data1b, np.ndarray): #TWO DATA ENTRIES
if SSE == None:
print '*args, **kwargs do not have any influcence when using two\
options'
axScatter.scatter(data1, data2, facecolor = 'none',
edgecolor='k',s=25)
axScatter.scatter(data1b, data2b, facecolor='none',
edgecolor='grey',s=25)
xminb = np.min(data1b)
xmaxb = np.max(data1b)
yminb = np.min(data2b)
ymaxb = np.max(data2b)
binsxb = np.arange(xminb, xmaxb + xbinwidth, xbinwidth)
binsyb = np.arange(yminb, ymaxb + ybinwidth, ybinwidth)
axHistx.hist(data1b, bins=binsxb, edgecolor='None',
color='grey',normed=True)
axHisty.hist(data2b, bins=binsyb, orientation='horizontal',
edgecolor='None', color='grey', normed=True)
axHistx.hist(data1, bins=binsx, edgecolor='None',
color='k', normed=True)
axHisty.hist(data2, bins=binsy, orientation='horizontal',
edgecolor='None', color='k', normed=True)
else:
print '*args, **kwargs do not have any influcence when using two\
options'
sc1 = axScatter.scatter(data1b, data2b, c=SSEb, vmax=vmax,alpha=roodlichter,
edgecolors= 'none', cmap = colormaps, *args, **kwargs)
axScatter.scatter(data1, data2, c=SSE, vmax=vmax,
edgecolors= 'none', cmap = colormaps, *args, **kwargs)
xminb = np.min(data1b)
xmaxb = np.max(data1b)
yminb = np.min(data2b)
ymaxb = np.max(data2b)
binsxb = np.arange(xminb, xmaxb + xbinwidth, xbinwidth)
binsyb = np.arange(yminb, ymaxb + ybinwidth, ybinwidth)
axHistx.hist(data1b, bins=binsxb, edgecolor='None',
color=colormaps(1.),normed=True)
axHisty.hist(data2b, bins=binsyb, orientation='horizontal', color=colormaps(1.),
edgecolor='None', normed=True)
axHistx.hist(data1, bins=binsx, edgecolor='None',
color=colormaps(0.), normed=True)
axHisty.hist(data2, bins=binsy, orientation='horizontal',
edgecolor='None', color=colormaps(0.), normed=True)
else: #ONLY ONE DATA1 and DATA2
if SSE == None:
axScatter.scatter(data1, data2, c= 'black', *args, **kwargs)
axHistx.hist(data1, bins=binsx, edgecolor='None', color='k')
axHisty.hist(data2, bins=binsy, orientation='horizontal',
edgecolor='None', color='k')
else:
axScatter.scatter(data1, data2, c=SSE, vmax=vmax,
edgecolors= 'none', cmap = colormaps, *args, **kwargs)
axHistx.hist(data1, bins=binsx, edgecolor='None', color='k')
axHisty.hist(data2, bins=binsy, orientation='horizontal',
edgecolor='None', color='k')
# the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
# thus there is no need to manually adjust the xlim and ylim of these
# axis.
majloc1 = MaxNLocator(nbins=4, prune='lower')
axScatter.yaxis.set_major_locator(majloc1)
majloc2 = MaxNLocator(nbins=4)
axScatter.xaxis.set_major_locator(majloc2)
axScatter.grid(linestyle = 'dashed', color = '0.75',linewidth = 1.)
axScatter.set_axisbelow(True)
axHisty.set_axisbelow(True)
axHistx.set_axisbelow(True)
# The 'clean' environment
if cleanstyle == True:
plt.setp(axHistx.get_xticklabels() | |
_parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_754(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''- 1.0'''
        fmt = '''(G3.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_755(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''1d12'''
        fmt = '''(G3.2E5)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_756(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''1D12'''
        fmt = '''(G3.2E5)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_757(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-1 d12'''
        fmt = '''(G3.2E5)'''
        result = [-1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_758(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''.'''
        fmt = '''(G3.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_759(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''.1'''
        fmt = '''(G3.2E5)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_760(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.1E+200'''
        fmt = '''(G3.2E5)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_761(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''3.'''
        fmt = '''(G4.2E5)'''
        result = [3.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_762(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-3.'''
        fmt = '''(G4.2E5)'''
        result = [-3.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_763(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''10.'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_764(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-10.'''
        fmt = '''(G4.2E5)'''
        result = [-1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_765(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''100.'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_766(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-100.'''
        fmt = '''(G4.2E5)'''
        result = [-1.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_767(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''1000.'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_768(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-1000.'''
        fmt = '''(G4.2E5)'''
        result = [-1.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_769(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''10000.'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_770(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-10000.'''
        fmt = '''(G4.2E5)'''
        result = [-1.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_771(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''100000.'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_772(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-100000.'''
        fmt = '''(G4.2E5)'''
        result = [-1.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_773(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''123456789.'''
        fmt = '''(G4.2E5)'''
        result = [1.2340000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_774(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.1'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_775(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-0.1'''
        fmt = '''(G4.2E5)'''
        result = [-1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_776(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.01'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_777(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-0.01'''
        fmt = '''(G4.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_778(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.001'''
        fmt = '''(G4.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_779(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-0.001'''
        fmt = '''(G4.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_780(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.0001'''
        fmt = '''(G4.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_781(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-0.0001'''
        fmt = '''(G4.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_782(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-1.96e-16'''
        fmt = '''(G4.2E5)'''
        result = [-1.8999999999999999e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_783(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''3.14159'''
        fmt = '''(G4.2E5)'''
        result = [3.1400000000000001e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_784(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''- 1.0'''
        fmt = '''(G4.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_785(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''1d12'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000000e+10]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_786(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''1D12'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000000e+10]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_787(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-1 d12'''
        fmt = '''(G4.2E5)'''
        result = [-1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_788(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''.'''
        fmt = '''(G4.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_789(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''.1'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_790(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.1E+200'''
        fmt = '''(G4.2E5)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_791(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''3.'''
        fmt = '''(G5.2E5)'''
        result = [3.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_792(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-3.'''
        fmt = '''(G5.2E5)'''
        result = [-3.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_793(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''10.'''
        fmt = '''(G5.2E5)'''
        result = [1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_794(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-10.'''
        fmt = '''(G5.2E5)'''
        result = [-1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_795(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''100.'''
        fmt = '''(G5.2E5)'''
        result = [1.0000000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_796(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-100.'''
        fmt = '''(G5.2E5)'''
        result = [-1.0000000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_797(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''1000.'''
        fmt = '''(G5.2E5)'''
        result = [1.0000000000000000e+03]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_798(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-1000.'''
        fmt = '''(G5.2E5)'''
        result = [-1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_799(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''10000.'''
        fmt = '''(G5.2E5)'''
        result = [1.0000000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_800(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-10000.'''
        fmt = '''(G5.2E5)'''
        result = [-1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_801(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''100000.'''
        fmt = '''(G5.2E5)'''
        result = [1.0000000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_802(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-100000.'''
        fmt = '''(G5.2E5)'''
        result = [-1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_803(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''123456789.'''
        fmt = '''(G5.2E5)'''
        result = [1.2345000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_804(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.1'''
        fmt = '''(G5.2E5)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_805(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-0.1'''
        fmt = '''(G5.2E5)'''
        result = [-1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_806(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.01'''
        fmt = '''(G5.2E5)'''
        result = [1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_807(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-0.01'''
        fmt = '''(G5.2E5)'''
        result = [-1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_808(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.001'''
        fmt = '''(G5.2E5)'''
        result = [1.0000000000000000e-03]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_809(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''-0.001'''
        fmt = '''(G5.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
    def test_g_ed_input_810(self):
        """Characterization: recorded result of reading `inp` with Fortran edit format `fmt`."""
        inp = '''0.0001'''
        fmt = '''(G5.2E5)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='G')
def test_g_ed_input_811(self):
inp = '''-0.0001'''
fmt = '''(G5.2E5)'''
result = [0.0000000000000000e+00]
eds, rev_eds = | |
TEMPLATE_SUBSTITUTIONS
]
# Field names with the surrounding braces stripped, derived from the
# TEMPLATE_SUBSTITUTIONS_* tables defined above
PATHLIB_SUBSTITUTIONS = [
    field.replace("{", "").replace("}", "") for field in TEMPLATE_SUBSTITUTIONS_PATHLIB
]
MULTI_VALUE_SUBSTITUTIONS = [
    field.replace("{", "").replace("}", "")
    for field in TEMPLATE_SUBSTITUTIONS_MULTI_VALUED
]
# All recognized template field names (single-valued, multi-valued, pathlib)
FIELD_NAMES = (
    SINGLE_VALUE_SUBSTITUTIONS + MULTI_VALUE_SUBSTITUTIONS + PATHLIB_SUBSTITUTIONS
)
# default values for string manipulation template options
INPLACE_DEFAULT = ","
# PATH_SEP_DEFAULT = os.path.sep
# Punctuation names usable in templates, mapped to their literal characters
PUNCTUATION = {
    "comma": ",",
    "semicolon": ";",
    "pipe": "|",
    "openbrace": "{",
    "closebrace": "}",
    "openparens": "(",
    "closeparens": ")",
    "openbracket": "[",
    "closebracket": "]",
    "questionmark": "?",
    "newline": "\n",
    "lf": "\n",
    "cr": "\r",
    "crlf": "\r\n",
}
@dataclass
class RenderOptions:
    """Options for PhotoTemplate.render

    tag: tag name being processed
    none_str: str to use default for None values (defaults to an internal sentinel)
    expand_inplace: expand multi-valued substitutions in-place as a single string
    instead of returning individual strings
    inplace_sep: optional string to use as separator between multi-valued keywords
    with expand_inplace; default is ','
    filename: if True, template output will be sanitized to produce valid file name
    dirname: if True, template output will be sanitized to produce valid directory name
    strip: if True, strips leading/trailing whitespace from rendered templates
    export_dir: set to the export directory if you want to evaluate {export_dir} template
    dest_path: set to the destination path of the photo (for use by {function} template), only valid with --filename
    filepath: set to value for filepath of the photo being processed if you want to evaluate {filepath} template
    quote: quote path templates for execution in the shell
    exiftool: an ExifToolCaching instance

    NOTE(review): an earlier version of this docstring documented a `path_sep`
    option, but no such field exists on the dataclass (path_sep handling is
    commented out elsewhere) -- confirm before re-adding.
    """

    tag: Optional[str] = None
    none_str: str = NONE_STR_SENTINEL
    expand_inplace: bool = False
    inplace_sep: Optional[str] = INPLACE_DEFAULT
    filename: bool = False
    dirname: bool = False
    strip: bool = False
    export_dir: Optional[str] = None
    dest_path: Optional[str] = None
    filepath: Optional[str] = None
    quote: bool = False
    exiftool: Optional[ExifTool] = None
class PhotoTemplateParser:
    """Singleton parser wrapping the textX metamodel for the template grammar"""

    # implemented as Singleton
    def __new__(cls, *args, **kwargs):
        """Return the shared instance, creating it on first use."""
        if not getattr(cls, "instance", None):
            cls.instance = super().__new__(cls)
        return cls.instance

    def __init__(self):
        """Load the metamodel exactly once; later calls are no-ops on the singleton."""
        if hasattr(self, "metamodel"):
            return
        self.metamodel = metamodel_from_file(OTL_GRAMMAR_MODEL, skipws=False)

    def parse(self, template_statement):
        """Parse a template statement string into a textX model."""
        return self.metamodel.model_from_str(template_statement)

    def fields(self, template_statement):
        """List the fields found in a template statement; fields are not validated."""
        parsed = self.parse(template_statement)
        return [ts.template.field for ts in parsed.template_strings if ts.template]
class PhotoTemplate:
"""PhotoTemplate class to render a template string from a PhotoInfo object"""
    def __init__(self, photopath: str, exiftool_path: Optional[str] = None):
        """Inits PhotoTemplate class with photo

        Args:
            photopath: path to the photo file; must exist.
            exiftool_path: optional path to exiftool for use with {exiftool:} template; if not provided, will look for exiftool in $PATH

        Raises:
            FileNotFoundError: if photopath or exiftool_path does not exist.
        """
        if not pathlib.Path(photopath).exists():
            raise FileNotFoundError(f"Photo path {photopath} does not exist")
        if exiftool_path and not pathlib.Path(exiftool_path).exists():
            raise FileNotFoundError(f"Exiftool path {exiftool_path} does not exist")
        self.photopath = photopath
        self.exiftool_path = exiftool_path

        # holds value of current date/time for {today.x} fields
        # gets initialized in get_template_value
        self.today = None

        # get parser singleton
        self.parser = PhotoTemplateParser()

        # initialize render options from defaults; render() overwrites these,
        # but some lookup functions are called directly in testing
        options = RenderOptions()
        self.options = options
        self.tag = options.tag
        self.group, self.tagname = "", ""
        self.inplace_sep = options.inplace_sep
        self.none_str = options.none_str
        self.expand_inplace = options.expand_inplace
        self.filename = options.filename
        self.dirname = options.dirname
        self.strip = options.strip
        self.export_dir = options.export_dir
        self.filepath = options.filepath
        self.quote = options.quote
        self.dest_path = options.dest_path
        # fall back to a caching exiftool wrapper for this photo when the
        # options do not supply an ExifTool instance
        self.exiftool = options.exiftool or ExifToolCaching(
            self.photopath, exiftool=self.exiftool_path
        )
def render(
self,
template: str,
options: RenderOptions,
):
"""Render a filename or directory template
Args:
template: str template
options: a RenderOptions instance
Returns:
([rendered_strings], [unmatched]): tuple of list of rendered strings and list of unmatched template values
"""
if type(template) is not str:
raise TypeError(f"template must be type str, not {type(template)}")
self.options = options
self.tag = options.tag
self.group, self.tagname = split_group_tag(self.tag) if self.tag else ("", "")
self.inplace_sep = options.inplace_sep
self.none_str = options.none_str
self.expand_inplace = options.expand_inplace
self.filename = options.filename
self.dirname = options.dirname
self.strip = options.strip
self.export_dir = options.export_dir
self.dest_path = options.dest_path
self.filepath = options.filepath
self.quote = options.quote
self.dest_path = options.dest_path
self.exiftool = options.exiftool or self.exiftool
try:
model = self.parser.parse(template)
except TextXSyntaxError as e:
raise ValueError(f"SyntaxError: {e}")
if not model:
# empty string
return [], []
rendered, unmatched = self._render_statement(model)
rendered = [r for r in rendered if NONE_STR_SENTINEL not in r]
return rendered, unmatched
    def _render_statement(
        self,
        statement,
    ):
        """Render every template string in a parsed statement model.

        The (results, unmatched) lists are threaded through each successive
        call to _render_template_string, so each template string expands
        against the output accumulated so far.

        Returns:
            (rendered_strings, unmatched): list of rendered strings and list
            of unmatched template field names.
        """
        results = []
        unmatched = []
        for ts in statement.template_strings:
            results, unmatched = self._render_template_string(
                ts,
                results=results,
                unmatched=unmatched,
            )
        rendered_strings = results

        # sanitize for file-name use when requested
        # NOTE(review): the dirname flag is not applied here -- confirm
        # directory-name sanitizing happens elsewhere
        if self.filename:
            rendered_strings = [
                sanitize_filename(rendered_str) for rendered_str in rendered_strings
            ]

        if self.strip:
            rendered_strings = [
                rendered_str.strip() for rendered_str in rendered_strings
            ]

        return rendered_strings, unmatched
def _render_template_string(
self,
ts,
results=None,
unmatched=None,
):
"""Render a TemplateString object"""
results = results or [""]
unmatched = unmatched or []
if ts.template:
# have a template field to process
field = ts.template.field
field_part = field.split(".")[0]
# if field not in FIELD_NAMES and field_part not in FIELD_NAMES:
# unmatched.append(field)
# return [], unmatched
subfield = ts.template.subfield
# process filters
filters = []
if ts.template.filter is not None:
filters = ts.template.filter.value
# # process path_sep
# if ts.template.pathsep is not None:
# path_sep = ts.template.pathsep.value
# process delim
if ts.template.delim is not None:
# if value is None, means format was {+field}
delim = ts.template.delim.value or ""
else:
delim = None
if ts.template.bool is not None:
is_bool = True
if ts.template.bool.value is not None:
bool_val, u = self._render_statement(
ts.template.bool.value,
)
unmatched.extend(u)
else:
# blank bool value
bool_val = [""]
else:
is_bool = False
bool_val = None
# process default
if ts.template.default is not None:
# default is also a TemplateString
if ts.template.default.value is not None:
default, u = self._render_statement(
ts.template.default.value,
)
unmatched.extend(u)
else:
# blank default value
default = [""]
else:
default = []
# process conditional
if ts.template.conditional is not None:
operator = ts.template.conditional.operator
negation = ts.template.conditional.negation
if ts.template.conditional.value is not None:
# conditional value is also a TemplateString
conditional_value, u = self._render_statement(
ts.template.conditional.value,
# path_sep=path_sep,
)
unmatched.extend(u)
else:
# this shouldn't happen
conditional_value = [""]
else:
operator = None
negation = None
conditional_value = []
vals = []
if (
field in SINGLE_VALUE_SUBSTITUTIONS
or field.split(".")[0] in SINGLE_VALUE_SUBSTITUTIONS
):
vals = self.get_template_value(
field,
default=default,
subfield=subfield,
# delim=delim or self.inplace_sep,
# path_sep=path_sep,
)
# elif field == "function":
# if subfield is None:
# raise ValueError(
# "SyntaxError: filename and function must not be null with {function::filename.py:function_name}"
# )
# vals = self.get_template_value_function(
# subfield,
# )
elif field in MULTI_VALUE_SUBSTITUTIONS:
vals = self.get_template_value_multi(field, subfield, default=default)
elif field.split(".")[0] in PATHLIB_SUBSTITUTIONS:
vals = self.get_template_value_pathlib(field)
else:
# assume it's an exif field in form "tag" or "group:tag"
exiftag = f"{field}:{subfield}" if subfield else f"{field}"
vals = self.get_template_value_exiftool(tag=exiftag)
vals = [val for val in vals if val is not None]
if self.expand_inplace or delim is not None:
sep = delim if delim is not None else self.inplace_sep
vals = [sep.join(sorted(vals))] if vals else []
for filter_ in filters:
vals = self.get_template_value_filter(filter_, vals)
# process find/replace
if ts.template.findreplace:
new_vals = []
for val in vals:
for pair in ts.template.findreplace.pairs:
find = pair.find or ""
repl = pair.replace or ""
val = val.replace(find, repl)
new_vals.append(val)
vals = new_vals
if operator:
# have a conditional operator
def string_test(test_function):
"""Perform string comparison using test_function; closure to capture conditional_value, vals, negation"""
match = False
for c in conditional_value:
for v in vals:
if test_function(v, c):
match = True
break
if match:
break
if (match and not negation) or (negation and not match):
return ["True"]
else:
return []
def comparison_test(test_function):
"""Perform numerical comparisons using test_function; closure to capture conditional_val, vals, negation"""
if len(vals) != 1 or len(conditional_value) != 1:
raise ValueError(
f"comparison operators may only be used with a single value: {vals} {conditional_value}"
)
try:
match = bool(
test_function(float(vals[0]), float(conditional_value[0]))
)
if (match and not negation) or (negation and not match):
return ["True"]
else:
return []
except ValueError as e:
raise ValueError(
f"comparison operators may only be used with values that can be converted to numbers: {vals} {conditional_value}"
| |
# =============================================================================
#
# Main file for CellProfiler-Analyst
#
# Run python setup.py py2app to build a dmg
#
# =============================================================================
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
import sys
import os
import os.path
import logging
import re
import javabridge
import bioformats
from cpa import __version__
import cpa.helpmenu
from cpa.properties import Properties
from cpa.dbconnect import DBConnect
class FuncLog(logging.Handler):
    '''A logging handler that sends formatted log records to a callback
    function.
    '''
    def __init__(self, update):
        logging.Handler.__init__(self)
        # callback invoked with each formatted message string
        self.update = update

    def emit(self, record):
        '''Format the record and forward the resulting text to the callback.'''
        self.update(self.format(record))
def setup_frozen_logging():
    '''Redirect stdout/stderr to a lazily opened log file beside the executable.

    py2exe has a version of this in boot_common.py, but it causes an
    error window to appear if any messages are actually written.
    '''
    class Stderr(object):
        softspace = 0  # python uses this for printing
        _file = None
        _error = None

        def write(self, text, fname=sys.executable + '.log'):
            # open the log file on first write; remember a failure so we
            # only attempt the open once
            if self._file is None and self._error is None:
                try:
                    self._file = open(fname, 'w')
                except Exception as details:
                    self._error = details
            if self._file is None:
                return
            self._file.write(text)
            self._file.flush()

        def flush(self):
            if self._file is not None:
                self._file.flush()

    # send everything (stdout and stderr) to the logfile
    sys.stderr = Stderr()
    sys.stdout = sys.stderr
# Frozen (py2exe) Windows builds have no console, so route stdout/stderr to
# a log file; on Mac, output goes to the console instead.
if hasattr(sys, 'frozen') and sys.platform.startswith('win'):
    # on windows, log to a file (Mac goes to console)
    setup_frozen_logging()

logging.basicConfig(level=logging.DEBUG)

# Handles args to MacOS "Apps"
# (Launch Services passes a '-psn_...' process-serial-number argument to
# app bundles; strip it so it isn't mistaken for a properties file path)
if len(sys.argv) > 1 and sys.argv[1].startswith('-psn'):
    del sys.argv[1]

if len(sys.argv) > 1:
    # Load a properties file if passed in args
    p = Properties.getInstance()
    if sys.argv[1] == '--incell':
        # GE Incell xml wrapper
        # LOOP
        # argv[2]/argv[3] and the remaining args are forwarded verbatim —
        # their exact meaning is defined by Properties.LoadIncellFiles
        p.LoadIncellFiles(sys.argv[2], sys.argv[3], sys.argv[4:])
    else:
        p.LoadFile(sys.argv[1])
import threading
from cpa.classifier import Classifier
from cpa.tableviewer import TableViewer
from cpa.plateviewer import PlateViewer
from cpa.imageviewer import ImageViewer
from cpa.imagegallery import ImageGallery
from cpa.boxplot import BoxPlot
from cpa.scatter import Scatter
from cpa.histogram import Histogram
from cpa.density import Density
from cpa.querymaker import QueryMaker
from cpa.normalizationtool import NormalizationUI
import cpa.icons
import cpa.cpaprefs
from cpa.cpatool import CPATool
import inspect
from cpa.icons import get_cpa_icon
import cpa.multiclasssql
# ---
import wx
# Unique window/tool IDs used to bind toolbar buttons and menu items to the
# launcher methods of MainGUI (see the EVT_TOOL/EVT_MENU bindings below).
ID_CLASSIFIER = wx.NewId()
ID_IMAGE_GALLERY = wx.NewId()
ID_PLATE_VIEWER = wx.NewId()
ID_TABLE_VIEWER = wx.NewId()
ID_IMAGE_VIEWER = wx.NewId()
ID_SCATTER = wx.NewId()
ID_HISTOGRAM = wx.NewId()
ID_DENSITY = wx.NewId()
ID_BOXPLOT = wx.NewId()
ID_NORMALIZE = wx.NewId()
def get_cpatool_subclasses():
    '''returns a list of CPATool subclasses.
    '''
    members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    # keep every strict subclass of CPATool defined (or imported) here
    return [cls for _, cls in members
            if issubclass(cls, CPATool) and cls is not CPATool]
class MainGUI(wx.Frame):
'''Main GUI frame for CellProfiler Analyst
'''
def __init__(self, properties, parent, id=-1, **kwargs):
    """Build the main CPA frame: toolbar, menus, log console and bindings.

    properties -- the loaded Properties instance for this session
    parent     -- parent window (may be None)
    """
    #wx.Frame.__init__(self, parent, id=id, title='CellProfiler Analyst 2.1.0 (r%s)'%(__version__), **kwargs)
    wx.Frame.__init__(self, parent, id=id, title='CellProfiler Analyst %s'%(__version__), **kwargs)
    self.properties = properties
    self.SetIcon(get_cpa_icon())
    self.tbicon = None
    self.SetName('CPA')
    self.Center(wx.HORIZONTAL)
    self.CreateStatusBar()
    #
    # Setup toolbar
    #
    tb = self.CreateToolBar(wx.TB_HORZ_TEXT|wx.TB_FLAT)
    tb.SetToolBitmapSize((32,32))
    tb.SetSize((-1,132))
    tb.AddLabelTool(ID_IMAGE_GALLERY, 'Image Gallery', cpa.icons.image_gallery.ConvertToBitmap(), shortHelp='Image Gallery', longHelp='Launch Image Gallery Viewer')
    tb.AddLabelTool(ID_CLASSIFIER, 'Classifier', cpa.icons.classifier.ConvertToBitmap(), shortHelp='Classifier', longHelp='Launch Classifier')
    # tb.AddLabelTool(ID_CLASSIFIER, 'PixelClassifier', cpa.icons.pixelclassifier.ConvertToBitmap(), shortHelp='Pixel-based Classifier', longHelp='Launch pixel-based Classifier')
    tb.AddLabelTool(ID_PLATE_VIEWER, 'Plate Viewer', cpa.icons.platemapbrowser.ConvertToBitmap(), shortHelp='Plate Viewer', longHelp='Launch Plate Viewer')
    # tb.AddLabelTool(ID_IMAGE_VIEWER, 'ImageViewer', cpa.icons.image_viewer.ConvertToBitmap(), shortHelp='Image Viewer', longHelp='Launch ImageViewer')
    tb.AddLabelTool(ID_SCATTER, 'Scatter Plot', cpa.icons.scatter.ConvertToBitmap(), shortHelp='Scatter Plot', longHelp='Launch Scatter Plot')
    tb.AddLabelTool(ID_HISTOGRAM, 'Histogram', cpa.icons.histogram.ConvertToBitmap(), shortHelp='Histogram', longHelp='Launch Histogram')
    tb.AddLabelTool(ID_DENSITY, 'Density Plot', cpa.icons.density.ConvertToBitmap(), shortHelp='Density Plot', longHelp='Launch Density Plot')
    tb.AddLabelTool(ID_BOXPLOT, 'Box Plot', cpa.icons.boxplot.ConvertToBitmap(), shortHelp='Box Plot', longHelp='Launch Box Plot')
    tb.AddLabelTool(ID_TABLE_VIEWER, 'Table Viewer', cpa.icons.data_grid.ConvertToBitmap(), shortHelp='Table Viewer', longHelp='Launch Table Viewer')
    # tb.AddLabelTool(ID_NORMALIZE, 'Normalize', cpa.icons.normalize.ConvertToBitmap(), shortHelp='Normalization Tool', longHelp='Launch Feature Normalization Tool')
    tb.Realize()
    # TODO: IMG-1071 - The following was meant to resize based on the toolbar size but GetEffectiveMinSize breaks on Macs
    # Not the Case anymore with wx.Python 3
    # self.SetDimensions(-1, -1, tb.GetEffectiveMinSize().width, -1, wx.SIZE_USE_EXISTING)
    #
    # Setup menu items
    #
    self.SetMenuBar(wx.MenuBar())
    fileMenu = wx.Menu()
    savePropertiesMenuItem = fileMenu.Append(-1, 'Save properties\tCtrl+S', help='Save the properties.')
    ## loadWorkspaceMenuItem = fileMenu.Append(-1, 'Load properties\tCtrl+O', help='Open another properties file.')
    fileMenu.AppendSeparator()
    saveWorkspaceMenuItem = fileMenu.Append(-1, 'Save workspace\tCtrl+Shift+S', help='Save the currently open plots and settings.')
    loadWorkspaceMenuItem = fileMenu.Append(-1, 'Load workspace\tCtrl+Shift+O', help='Open plots saved in a previous workspace.')
    fileMenu.AppendSeparator()
    saveLogMenuItem = fileMenu.Append(-1, 'Save log', help='Save the contents of the log window.')
    fileMenu.AppendSeparator()
    self.exitMenuItem = fileMenu.Append(wx.ID_EXIT, 'Exit\tCtrl+Q', help='Exit classifier')
    self.GetMenuBar().Append(fileMenu, 'File')
    toolsMenu = wx.Menu()
    imageGalleryMenuItem = toolsMenu.Append(ID_IMAGE_GALLERY, 'Image Gallery Viewer\tCtrl+Shift+I', help='Launches the Image Gallery')
    classifierMenuItem = toolsMenu.Append(ID_CLASSIFIER, 'Classifier\tCtrl+Shift+C', help='Launches Classifier.')
    plateMapMenuItem = toolsMenu.Append(ID_PLATE_VIEWER, 'Plate Viewer\tCtrl+Shift+P', help='Launches the Plate Viewer tool.')
    #imageViewerMenuItem = toolsMenu.Append(ID_IMAGE_VIEWER, 'Image Viewer\tCtrl+Shift+I', help='Launches the ImageViewer tool.')
    scatterMenuItem = toolsMenu.Append(ID_SCATTER, 'Scatter Plot\tCtrl+Shift+A', help='Launches the Scatter Plot tool.')
    histogramMenuItem = toolsMenu.Append(ID_HISTOGRAM, 'Histogram Plot\tCtrl+Shift+H', help='Launches the Histogram Plot tool.')
    densityMenuItem = toolsMenu.Append(ID_DENSITY, 'Density Plot\tCtrl+Shift+D', help='Launches the Density Plot tool.')
    boxplotMenuItem = toolsMenu.Append(ID_BOXPLOT, 'Box Plot\tCtrl+Shift+B', help='Launches the Box Plot tool.')
    # NOTE(review): the next two items share the Ctrl+Shift+T accelerator —
    # probably unintended; confirm which should own the shortcut
    dataTableMenuItem = toolsMenu.Append(ID_TABLE_VIEWER, 'Table Viewer\tCtrl+Shift+T', help='Launches the Table Viewer tool.')
    normalizeMenuItem = toolsMenu.Append(ID_NORMALIZE, 'Normalization Tool\tCtrl+Shift+T', help='Launches a tool for generating normalized values for measurement columns in your tables.')
    self.GetMenuBar().Append(toolsMenu, 'Tools')
    logMenu = wx.Menu()
    debugMenuItem = logMenu.AppendRadioItem(-1, 'Debug\tCtrl+1', help='Logging window will display debug-level messages.')
    infoMenuItem = logMenu.AppendRadioItem(-1, 'Info\tCtrl+2', help='Logging window will display info-level messages.')
    warnMenuItem = logMenu.AppendRadioItem(-1, 'Warnings\tCtrl+3', help='Logging window will display warning-level messages.')
    errorMenuItem = logMenu.AppendRadioItem(-1, 'Errors\tCtrl+4', help='Logging window will display error-level messages.')
    criticalMenuItem = logMenu.AppendRadioItem(-1, 'Critical\tCtrl+5', help='Logging window will only display critical messages.')
    infoMenuItem.Check()
    self.GetMenuBar().Append(logMenu, 'Logging')
    advancedMenu = wx.Menu()
    #normalizeMenuItem = advancedMenu.Append(-1, 'Launch feature normalization tool', help='Launches a tool for generating normalized values for measurement columns in your tables.')
    queryMenuItem = advancedMenu.Append(-1, 'Launch SQL query tool', help='Opens a tool for making SQL queries to the CPA database. Advanced users only.')
    clearTableLinksMenuItem = advancedMenu.Append(-1, 'Clear table linking information', help='Removes the tables from your database that tell CPA how to link your tables.')
    self.GetMenuBar().Append(advancedMenu, 'Advanced')
    self.GetMenuBar().Append(cpa.helpmenu.make_help_menu(self), 'Help')
    # console and logging
    self.console = wx.TextCtrl(self, -1, '', style=wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_RICH2)
    self.console.SetFont(wx.Font(10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
    # Black background and white font
    self.console.SetDefaultStyle(wx.TextAttr(wx.WHITE,wx.BLACK))
    self.console.SetBackgroundColour('#000000')
    log_level = logging.INFO # INFO is the default log level
    self.logr = logging.getLogger()
    self.set_log_level(log_level)
    self.log_text = ''
    def update(x):
        self.log_text += x+'\n'
    hdlr = FuncLog(update)
    # hdlr.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
    # hdlr.setFormatter(logging.Formatter('%(levelname)s | %(name)s | %(message)s [@ %(asctime)s in %(filename)s:%(lineno)d]'))
    self.logr.addHandler(hdlr)
    # log_levels are 10,20,30,40,50; use floor division so the menu index is
    # an int under Python 3 as well (true division would produce a float
    # index and raise TypeError; the result is unchanged under Python 2)
    logMenu.GetMenuItems()[(log_level//10)-1].Check()
    self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.DEBUG), debugMenuItem)
    self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.INFO), infoMenuItem)
    self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.WARN), warnMenuItem)
    self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.ERROR), errorMenuItem)
    self.Bind(wx.EVT_MENU, lambda _:self.set_log_level(logging.CRITICAL), criticalMenuItem)
    self.Bind(wx.EVT_MENU, self.on_save_properties, savePropertiesMenuItem)
    self.Bind(wx.EVT_MENU, self.on_save_workspace, saveWorkspaceMenuItem)
    self.Bind(wx.EVT_MENU, self.on_load_workspace, loadWorkspaceMenuItem)
    self.Bind(wx.EVT_MENU, self.save_log, saveLogMenuItem)
    self.Bind(wx.EVT_MENU, self.launch_normalization_tool, normalizeMenuItem)
    self.Bind(wx.EVT_MENU, self.clear_link_tables, clearTableLinksMenuItem)
    self.Bind(wx.EVT_MENU, self.launch_query_maker, queryMenuItem)
    self.Bind(wx.EVT_TOOL, self.launch_classifier, id=ID_CLASSIFIER)
    self.Bind(wx.EVT_TOOL, self.launch_plate_map_browser, id=ID_PLATE_VIEWER)
    self.Bind(wx.EVT_TOOL, self.launch_table_viewer, id=ID_TABLE_VIEWER)
    self.Bind(wx.EVT_TOOL, self.launch_image_viewer, id=ID_IMAGE_VIEWER)
    self.Bind(wx.EVT_TOOL, self.launch_image_gallery, id=ID_IMAGE_GALLERY)
    self.Bind(wx.EVT_TOOL, self.launch_scatter_plot, id=ID_SCATTER)
    self.Bind(wx.EVT_TOOL, self.launch_histogram_plot, id=ID_HISTOGRAM)
    self.Bind(wx.EVT_TOOL, self.launch_density_plot, id=ID_DENSITY)
    self.Bind(wx.EVT_TOOL, self.launch_box_plot, id=ID_BOXPLOT)
    self.Bind(wx.EVT_TOOL, self.launch_normalization_tool, id=ID_NORMALIZE)
    self.Bind(wx.EVT_MENU, self.on_close, self.exitMenuItem)
    self.Bind(wx.EVT_CLOSE, self.on_close)
    self.Bind(wx.EVT_IDLE, self.on_idle)
def launch_classifier(self, evt=None):
    """Open a new Classifier window.

    Earlier builds enforced a single Classifier instance (via
    wx.FindWindowById/FindWindowByName); multiple instances are now allowed.
    """
    window = Classifier(parent=self, properties=self.properties)
    window.Show(True)
def launch_plate_map_browser(self, evt=None):
    """Open a Plate Viewer window (kept on self.pv)."""
    self.pv = PlateViewer(parent=self)
    self.pv.Show(True)
def launch_table_viewer(self, evt=None):
    """Open a Table Viewer pre-populated with a blank 100x10 table."""
    viewer = TableViewer(parent=self)
    viewer.new_blank_table(100,10)
    viewer.Show(True)
def launch_scatter_plot(self, evt=None):
    """Open a new Scatter Plot window."""
    plot = Scatter(parent=self)
    plot.Show(True)
def launch_histogram_plot(self, evt=None):
    """Open a new Histogram window."""
    plot = Histogram(parent=self)
    plot.Show(True)
def launch_density_plot(self, evt=None):
    """Open a new Density Plot window."""
    plot = Density(parent=self)
    plot.Show(True)
def launch_image_viewer(self, evt=None):
    """Open a new Image Viewer window."""
    viewer = ImageViewer(parent=self)
    viewer.Show(True)
def launch_image_gallery(self, evt=None):
    """Open a new Image Gallery window."""
    gallery = ImageGallery(parent=self, properties=self.properties)
    gallery.Show(True)
def launch_box_plot(self, evt=None):
    """Open a new Box Plot window."""
    plot = BoxPlot(parent=self)
    plot.Show(True)
def launch_query_maker(self, evt=None):
    """Open the SQL query tool window."""
    tool = QueryMaker(parent=self)
    tool.Show(True)
def launch_normalization_tool(self, evt=None):
    """Open the feature normalization tool window."""
    tool = NormalizationUI(parent=self)
    tool.Show(True)
def on_save_properties(self, evt):
    """Prompt for a destination path and save the current properties file.

    Defaults to the directory/name/extension of the currently loaded
    properties file.
    """
    p = Properties.getInstance()
    dirname, filename = os.path.split(p._filename)
    ext = os.path.splitext(p._filename)[-1]
    dlg = wx.FileDialog(self, message="Save properties as...", defaultDir=dirname,
                        defaultFile=filename, wildcard=ext,
                        style=wx.SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_OK:
        p.save_file(dlg.GetPath())
    # modal dialogs are not destroyed automatically in wx; previously this
    # dialog's native resources were leaked on every invocation
    dlg.Destroy()
def on_save_workspace(self, evt):
    """Prompt for a path and save the open plots/settings as a workspace."""
    props = Properties.getInstance()
    # default file name combines the properties file stem and the image table
    default_name = '%s_%s.workspace'%(os.path.splitext(os.path.split(props._filename)[1])[0], props.image_table)
    dlg = wx.FileDialog(self, message="Save workspace as...", defaultDir=os.getcwd(),
                        defaultFile=default_name,
                        style=wx.SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_OK:
        wx.GetApp().save_workspace(dlg.GetPath())
def on_load_workspace(self, evt):
    """Prompt for a workspace file and restore the plots it describes."""
    dlg = wx.FileDialog(self, "Select the file containing your CPAnalyst workspace...",
                        wildcard="Workspace file (*.workspace)|*.workspace",
                        defaultDir=os.getcwd(), style=wx.OPEN|wx.FD_CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_OK:
        wx.GetApp().load_workspace(dlg.GetPath())
def save_log(self, evt=None):
    """Prompt for a destination path and write the log console contents to it."""
    dlg = wx.FileDialog(self, message="Save log as...", defaultDir=os.getcwd(),
                        defaultFile='CPA_log.txt', wildcard='txt',
                        style=wx.SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_OK:
        filename = dlg.GetPath()
        # use a context manager so the handle is closed (it was leaked before)
        with open(filename, 'w') as f:
            f.write(self.console.Value)
        logging.info('Log saved to "%s"'%filename)
    # modal dialogs must be destroyed explicitly in wx
    dlg.Destroy()
def set_log_level(self, level):
    """Set the root logger's level and announce the change in the console."""
    level_name = logging.getLevelName(level)
    self.logr.setLevel(level)
    # cheat the logger so these always get displayed
    self.console.AppendText('Logging level: %s\n' % level_name)
def clear_link_tables(self, evt=None):
    """Drop CPA's table-linking metadata tables after user confirmation.

    CPA recreates these tables automatically as it rediscovers how the
    database tables are linked.
    """
    props = Properties.getInstance()
    confirm = wx.MessageDialog(self, 'This will delete the tables '
                '"%s" and "%s" from your database. '
                'CPA will automatically recreate these tables as it '
                'discovers how your database is linked. Are you sure you '
                'want to proceed?'
                %(props.link_tables_table, props.link_columns_table),
                'Clear table linking information?',
                wx.YES_NO|wx.NO_DEFAULT|wx.ICON_QUESTION)
    if confirm.ShowModal() != wx.ID_YES:
        return
    db = DBConnect.getInstance()
    db.execute('DROP TABLE IF EXISTS %s'%(props.link_tables_table))
    db.execute('DROP TABLE IF EXISTS %s'%(props.link_columns_table))
    db.Commit()
def on_close(self, evt=None):
# Classifier needs to be told to close so it can clean up it's threads
classifier = wx.FindWindowById(ID_CLASSIFIER) or wx.FindWindowByName('Classifier')
if classifier and classifier.Close() == False:
return
if any(wx.GetApp().get_plots()):
dlg = wx.MessageDialog(self, | |
for lines in pipfunctionlines:
pipelinesfile += ' ' + lines + '\n'
with open(scraperpipelinefile, 'w') as f:
f.write(pipelinesfile)
# creating a settings.py file for scraper
with open(basepath + '/scrapy_templates/settings.py.tmpl', 'r') as f:
settingspy = Template(f.read()).substitute(project_name=projectnameonfile)
settingspy += '\n' + project.settings_scraper
settingspy += '\nSCHEDULER = "%s"' % (projectnameonfile + settings.SCHEDULER)
settingspy += '\nSCHEDULER_PERSIST = %s' % settings.SCHEDULER_PERSIST
settingspy += '\nRABBITMQ_HOST = "%s"' % settings.RABBITMQ_HOST
settingspy += '\nRABBITMQ_PORT = %s' % settings.RABBITMQ_PORT
settingspy += '\nRABBITMQ_USERNAME = "%s"' % settings.RABBITMQ_USERNAME
settingspy += '\nRABBITMQ_PASSWORD = "%s"' % settings.RABBITMQ_PASSWORD
settingspy += '\nMONGODB_URI = "%s"' % settings.MONGODB_URI
settingspy += '\nMONGODB_SHARDED = %s' % settings.MONGODB_SHARDED
settingspy += '\nMONGODB_BUFFER_DATA = %s' % settings.MONGODB_BUFFER_DATA
settingspy += '\nMONGODB_USER = "%s"' % settings.MONGODB_USER
settingspy += '\nMONGODB_PASSWORD = "%s"' % settings.MONGODB_PASSWORD
settingspy += '\nITEM_PIPELINES = { "%s.mongodb.scrapy_mongodb.MongoDBPipeline": 999, \n' % projectnameonfile
for key in pipelinedict:
settingspy += '"%s.pipelines.%s": %s, \n' % (projectnameonfile, key, pipelinedict[key])
settingspy += '}'
with open(scrapersettingsfile, 'w') as f:
f.write(settingspy)
#putting setup.py files in appropriate folders
with open(basepath + '/scrapy_templates/setup.py', 'r') as f:
setuppy = Template(f.read()).substitute(projectname=projectnameonfile)
with open(linkgenouterfolder + '/setup.py', 'w') as f:
f.write(setuppy)
with open(scraperouterfolder + '/setup.py', 'w') as f:
f.write(setuppy)
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # expand a leading '~' up front so chdir gets a concrete path
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # remember where we were so __exit__ can return there
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # always restore the original directory, even on error
        os.chdir(self.savedPath)
with cd(linkgenouterfolder):
os.system("python setup.py bdist_egg")
with cd(scraperouterfolder):
os.system("python setup.py bdist_egg")
linkgeneggfile = glob.glob(linkgenouterfolder + "/dist/*.egg")
scrapereggfile = glob.glob(scraperouterfolder + "/dist/*.egg")
linkgenlastdeploy = LinkgenDeploy.objects.filter(project=project).order_by('-version')[:1]
if linkgenlastdeploy:
linkgenlastdeploy = linkgenlastdeploy[0].version
else:
linkgenlastdeploy = 0
scraperslastdeploy = ScrapersDeploy.objects.filter(project=project).order_by('-version')[:1]
if scraperslastdeploy:
scraperslastdeploy = scraperslastdeploy[0].version
else:
scraperslastdeploy = 0
try:
with open(linkgeneggfile[0], 'rb') as f:
files = {'egg': f}
payload = {'project': '%s' % (projectnameonfile), 'version': (linkgenlastdeploy + 1)}
r = requests.post('%s/addversion.json' % settings.LINK_GENERATOR, data=payload, files=files, timeout=(3, None))
result = r.json()
deploylinkgen = LinkgenDeploy()
deploylinkgen.project = project
deploylinkgen.version = linkgenlastdeploy + 1
if result["status"] != "ok":
deploylinkgen.success = False
else:
deploylinkgen.success = True
deploylinkgen.save()
except:
deploylinkgen = LinkgenDeploy()
deploylinkgen.project = project
deploylinkgen.version = linkgenlastdeploy + 1
deploylinkgen.success = False
deploylinkgen.save()
with open(scrapereggfile[0], 'rb') as f:
eggfile = f.read()
files = {'egg' : eggfile}
payload = {'project': '%s' % (projectnameonfile), 'version': (scraperslastdeploy + 1)}
deployscraper = ScrapersDeploy()
deployscraper.project = project
deployscraper.version = scraperslastdeploy + 1
deployedscraperslist = []
scrapercounter = 1
for onescraper in settings.SCRAPERS:
try:
r = requests.post('%s/addversion.json' % onescraper, data=payload, files=files, timeout=(3, None))
result = r.json()
if result['status'] == 'ok':
deployedscraperslist.append("worker%s" %scrapercounter)
except:
pass
scrapercounter += 1
deployscraper.success = json.dumps(deployedscraperslist)
deployscraper.save()
return HttpResponseRedirect(reverse('deploystatus', args=(projectname,)))
@login_required
def deployment_status(request, projectname):
    """Render the deployment-status page with placeholder rows for every
    worker; live values are filled in later via AJAX."""
    try:
        Project.objects.get(user=request.user, project_name=projectname)
    except Project.DoesNotExist:
        return HttpResponseNotFound('Nothing is here.')
    workers = [{'name': 'linkgenerator', 'status': 'Loading...', 'version': 'Loading...'}]
    for index, _ in enumerate(settings.SCRAPERS, start=1):
        workers.append({'name': 'worker%s' % index, 'status': 'Loading...', 'version': 'Loading...'})
    return render(request, "deployment_status.html",
                  {'project': projectname, 'username': request.user.username, 'workers': workers})
def _poll_worker_status(name, address, uniqueprojectname):
    """Query one scrapyd instance and summarize the project's state there.

    Returns a dict with 'name', 'status' and 'version' keys.  Network or
    parse failures are reported as 'unreachable'/'unknown' instead of being
    raised, since this feeds a best-effort status dashboard.  The polling
    logic was previously duplicated verbatim for the link generator and
    every scraper; it is factored out here.
    """
    workerstatus = {'name': name}
    try:
        r = requests.get('%s/listprojects.json' % address, timeout=(3, None))
        result = r.json()
        if uniqueprojectname in result['projects']:
            workerstatus['status'] = 'ready'
            try:
                q = requests.get('%s/listversions.json' % address,
                                 params={'project': uniqueprojectname}, timeout=(3, None))
                qresult = q.json()
                workerstatus['version'] = qresult['versions'][-1]
            except:
                workerstatus['version'] = 'unknown'
            try:
                s = requests.get('%s/listjobs.json' % address,
                                 params={'project': uniqueprojectname}, timeout=(3, None))
                sresult = s.json()
                # later checks win: running > pending > finished
                if sresult['finished']:
                    workerstatus['status'] = 'finished'
                if sresult['pending']:
                    workerstatus['status'] = 'pending'
                if sresult['running']:
                    workerstatus['status'] = 'running'
            except:
                workerstatus['status'] = 'unknown'
        else:
            # NOTE: 'delpoyed' typo preserved — clients may match this string
            workerstatus['status'] = 'not delpoyed'
            workerstatus['version'] = 'unknown'
    except:
        workerstatus['status'] = 'unreachable'
        workerstatus['version'] = 'unknown'
    return workerstatus


@login_required
def get_project_status_from_all_workers(request, projectname):
    """Return JSON status (POST only) for the link generator and every
    scraper worker configured in settings.SCRAPERS."""
    uniqueprojectname = request.user.username + '_' + projectname
    try:
        Project.objects.get(user=request.user, project_name=projectname)
    except Project.DoesNotExist:
        return HttpResponseNotFound('Nothing is here.')
    if request.method == 'POST':
        allworkers = [_poll_worker_status('linkgenerator', settings.LINK_GENERATOR,
                                          uniqueprojectname)]
        for counter, worker in enumerate(settings.SCRAPERS, start=1):
            allworkers.append(_poll_worker_status('worker%s' % counter, worker,
                                                  uniqueprojectname))
        return JsonResponse(allworkers, safe=False)
@login_required
def start_project(request, projectname, worker):
    """Schedule the project's spider on the requested worker (POST only).

    Scheduling failures are deliberately swallowed; the response only
    acknowledges that the signal was sent.
    """
    uniqueprojectname = request.user.username + '_' + projectname
    try:
        Project.objects.get(user=request.user, project_name=projectname)
    except Project.DoesNotExist:
        return HttpResponseNotFound('Nothing is here.')
    if request.method == 'POST':
        if 'linkgenerator' in worker:
            address = settings.LINK_GENERATOR
        elif 'worker' in worker:
            # 'workerN' -> index N into settings.SCRAPERS (1-based)
            workernumber = int(''.join(ch for ch in worker if ch.isdigit()))
            address = settings.SCRAPERS[workernumber - 1]
        else:
            address = None
        if address is not None:
            try:
                requests.post('%s/schedule.json' % address,
                              data={'project': uniqueprojectname, 'spider': uniqueprojectname},
                              timeout=(3, None))
            except:
                pass
    return HttpResponse('sent start signal')
@login_required
def stop_project(request, projectname, worker):
    """Cancel the project's currently running job on the requested worker
    (POST only).  Failures are deliberately swallowed."""
    uniqueprojectname = request.user.username + '_' + projectname
    try:
        Project.objects.get(user=request.user, project_name=projectname)
    except Project.DoesNotExist:
        return HttpResponseNotFound('Nothing is here.')
    if request.method == 'POST':
        if 'linkgenerator' in worker:
            address = settings.LINK_GENERATOR
        elif 'worker' in worker:
            # 'workerN' -> index N into settings.SCRAPERS (1-based)
            workernumber = int(''.join(ch for ch in worker if ch.isdigit()))
            address = settings.SCRAPERS[workernumber - 1]
        else:
            address = None
        if address is not None:
            try:
                r = requests.get('%s/listjobs.json' % address,
                                 params={'project': uniqueprojectname}, timeout=(3, None))
                result = r.json()
                jobid = result['running'][0]['id']
                requests.post('%s/cancel.json' % address,
                              params={'project': uniqueprojectname, 'job': jobid},
                              timeout=(3, None))
            except:
                pass
    return HttpResponse('sent stop signal')
@login_required
def see_log_file(request, projectname, worker):
    """Fetch and return the latest finished job's log from a worker (GET).

    Fixes a latent NameError: previously a worker name matching neither
    'linkgenerator' nor 'worker' (or a non-GET request) reached the final
    ``HttpResponse(log.text, ...)`` with ``log`` unbound; such requests now
    get the same 'could not retrieve' message as other failures.
    """
    uniqueprojectname = request.user.username + '_' + projectname
    try:
        Project.objects.get(user=request.user, project_name=projectname)
    except Project.DoesNotExist:
        return HttpResponseNotFound('Nothing is here.')
    if request.method == 'GET':
        if 'linkgenerator' in worker:
            address = settings.LINK_GENERATOR
        elif 'worker' in worker:
            # 'workerN' -> index N into settings.SCRAPERS (1-based)
            workernumber = int(''.join(ch for ch in worker if ch.isdigit()))
            address = settings.SCRAPERS[workernumber - 1]
        else:
            return HttpResponse('could not retrieve the log file')
        try:
            r = requests.get('%s/listjobs.json' % address,
                             params={'project': uniqueprojectname}, timeout=(3, None))
            result = r.json()
            jobid = result['finished'][-1]['id']
            log = requests.get('%s/logs/%s/%s/%s.log' % (address, uniqueprojectname, uniqueprojectname, jobid))
        except:
            return HttpResponse('could not retrieve the log file')
        return HttpResponse(log.text, content_type='text/plain')
    return HttpResponse('could not retrieve the log file')
@login_required
def gather_status_for_all_projects(request):
    """Aggregate per-project stats (POST) across all workers by scraping each
    worker's stats.log; returns a JSON object keyed by project name.

    For every project of the requesting user, each worker's
    ``logs/<project>/<project>/stats.log`` is fetched and folded into a
    per-project summary: finished/running/error counters, per-item counts,
    and the earliest/latest start and finish timestamps seen on any worker.
    Any request that is not POST (or a user without projects) gets '{}'.
    """
    projectsdict = {}
    workers = []
    for worker in settings.SCRAPERS:
        workers.append(worker)
    workers.append(settings.LINK_GENERATOR)
    # map project name -> list of its item names (used for per-item counters)
    projects = Project.objects.filter(user=request.user)
    for project in projects:
        projectsdict[project.project_name] = []
        project_items = Item.objects.filter(project=project)
        for item in project_items:
            projectsdict[project.project_name].append(item.item_name)
    if request.method == 'POST':
        if projectsdict:
            allprojectdata = {}
            for key in projectsdict:
                workerstatus = {}
                earliest_start_time = None
                earliest_finish_time = None
                latest_start_time = None
                latest_finish_time = None
                uniqueprojectname = request.user.username + '_' + key
                for worker in workers:
                    try:
                        log = requests.get('%s/logs/%s/%s/stats.log' % (worker, uniqueprojectname, uniqueprojectname), timeout=(3, None))
                        if log.status_code == 200:
                            # stats.log holds a Python-repr dict; swap quotes so
                            # it parses as JSON (breaks if values contain quotes)
                            result = json.loads(log.text.replace("'", '"'))
                            if result.get('project_stopped', 0):
                                workerstatus['finished'] = workerstatus.get('finished', 0) + 1
                            else:
                                workerstatus['running'] = workerstatus.get('running', 0) + 1
                            if result.get('log_count/ERROR', 0):
                                workerstatus['errors'] = workerstatus.get('errors', 0) + result.get('log_count/ERROR', 0)
                            # accumulate per-item scrape counts across workers
                            for item in projectsdict[key]:
                                if result.get(item, 0):
                                    workerstatus['item-%s' % item] = workerstatus.get('item-%s' % item, 0) + result.get(item, 0)
                            # track min/max start times across all workers
                            if result.get('start_time', False):
                                start_time = dateutil.parser.parse(result['start_time'])
                                if earliest_start_time is None:
                                    earliest_start_time = start_time
                                else:
                                    if start_time < earliest_start_time:
                                        earliest_start_time = start_time
                                if latest_start_time is None:
                                    latest_start_time = start_time
                                else:
                                    if start_time > latest_start_time:
                                        latest_start_time = start_time
                            # track min/max finish times across all workers
                            if result.get('finish_time', False):
                                finish_time = dateutil.parser.parse(result['finish_time'])
                                if earliest_finish_time is None:
                                    earliest_finish_time = finish_time
                                else:
                                    if finish_time < earliest_finish_time:
                                        earliest_finish_time = finish_time
                                if latest_finish_time is None:
                                    latest_finish_time = finish_time
                                else:
                                    if finish_time > latest_finish_time:
                                        latest_finish_time = finish_time
                        elif log.status_code == 404:
                            # no stats.log: the spider never ran on this worker
                            workerstatus['hasntlaunched'] = workerstatus.get('hasntlaunched', 0) + 1
                        else:
                            workerstatus['unknown'] = workerstatus.get('unknown', 0) + 1
                    except:
                        # network/parse failure: count the worker as unknown
                        workerstatus['unknown'] = workerstatus.get('unknown', 0) + 1
                if earliest_start_time is not None:
                    workerstatus['earliest_start_time'] = earliest_start_time.strftime("%B %d, %Y %H:%M:%S")
                if earliest_finish_time is not None:
                    workerstatus['earliest_finish_time'] = earliest_finish_time.strftime("%B %d, %Y %H:%M:%S")
                if latest_start_time is not None:
                    workerstatus['latest_start_time'] = latest_start_time.strftime("%B %d, %Y %H:%M:%S")
                if latest_finish_time is not None:
                    workerstatus['latest_finish_time'] = latest_finish_time.strftime("%B %d, %Y %H:%M:%S")
                allprojectdata[key] = workerstatus
            return JsonResponse(allprojectdata, safe=True)
    return HttpResponse('{}')
@login_required
def | |
+ m.x291) - m.x137 + m.x138 == 0)
# Finite-difference discretisation constraints. The generated statements follow
# four arithmetic index patterns keyed to the constraint number c:
#   c137..c151: -0.5*x460*(x_{c+154} + x_{c+155}) - x_{c+1} + x_{c+2} == 0
#   c152..c201: -0.1*x460*(x_{c+155} + x_{c+156}) - x_{c+2} + x_{c+3} == 0
#   c202..c251: -0.5*(x_{c+157}/x_{c+261} + x_{c+156}/x_{c+260})*x460 - x_{c+3} + x_{c+4} == 0
#   c252..c263: -0.5*(x_{c+158}/x_{c+262} + x_{c+157}/x_{c+261})*x460 - x_{c+4} + x_{c+5} == 0
# The loops below build exactly the same constraint objects, in the same order,
# with the same expression trees as the expanded statements they replace.
def _x(i):
    # Fetch model variable m.x<i> by number.
    return getattr(m, 'x%d' % i)

for c in range(137, 152):
    setattr(m, 'c%d' % c, Constraint(
        expr=-0.5*m.x460*(_x(c + 154) + _x(c + 155)) - _x(c + 1) + _x(c + 2) == 0))
for c in range(152, 202):
    setattr(m, 'c%d' % c, Constraint(
        expr=-0.1*m.x460*(_x(c + 155) + _x(c + 156)) - _x(c + 2) + _x(c + 3) == 0))
for c in range(202, 252):
    setattr(m, 'c%d' % c, Constraint(
        expr=-0.5*(_x(c + 157)/_x(c + 261) + _x(c + 156)/_x(c + 260))*m.x460 - _x(c + 3) + _x(c + 4) == 0))
for c in range(252, 264):
    setattr(m, 'c%d' % c, Constraint(
        expr=-0.5*(_x(c + 158)/_x(c + 262) + _x(c + 157)/_x(c + 261))*m.x460 - _x(c + 4) + _x(c + 5) == 0))
m.c264 = Constraint(expr=-0.5*(m.x422/m.x526 | |
+ col] = (df.S * df['exi_xgta_ημ_' + col]).shift(1, fill_value=0).cumsum() * self.bs
# END add_exa ==================================================================================================
self.last_update = np.datetime64('now')
# invalidate stored functions
self._linear_quantile_function = None
self.q_temp = None
self._cdf = None
def trim_df(self):
    """
    Trim out unwanted technical columns from density_df: anything starting
    e_ or exi_xlea, and the ημ-suffixed helper columns.

    epd used in graphics

    :return:
    """
    unwanted = self.density_df.filter(regex='^e_|^exi_xlea|^[a-z_]+ημ').columns
    self.density_df = self.density_df.drop(columns=list(unwanted))
def gradient(self, epsilon=1 / 128, kind='homog', method='forward', distortion=None, remove_fuzz=True,
             extra_columns=None, do_swap=True):
    """
    Compute the gradient of various quantities relative to a change in the volume of each
    portfolio component.

    Focus is on the quantities used in rate calculations: S, gS, p_total, exa, exag, exi_xgta, exi_xeqq,
    exeqa, exgta etc.

    homog:

    inhomog:

    :param epsilon: the increment to use; scale is 1+epsilon
    :param kind: homog[ogeneous] or inhomog: homog computes impact of f((1+epsilon)X_i)-f(X_i). Inhomog
           scales the frequency and recomputes. Note inhomog will have a slight scale issues with
           E[Severity]
    :param method: forward, central (using epsilon/2) or backwards
    :param distortion: if included derivatives of statistics using the distortion, such as exag are also
           computed
    :param remove_fuzz: passed as ``force`` to ``self.remove_fuzz`` to strip numerical noise from the
           incremented densities before differencing
    :param extra_columns: extra columns to compute dervs of. Note there is virtually no overhead of adding additional
           columns
    :param do_swap: force the step to replace line with line+epsilon in all not line2's line2!=line1; whether you need
           this or not depends on which variables are to be differentiated. E.g. if you ask for exa_total only you don't need
           to swap. But if you want exa_A, exa_B you do, otherwise the d/dA exa_B won't be correct. TODO: replace with code!
    :return: DataFrame of gradients and audit_df in an Answer class
    """
    # Reject the unimplemented / inadmissible option combinations up front.
    if kind == 'inhomog' or kind[:7] == 'inhomog':
        raise NotImplementedError(f'kind=={kind} not yet implemented')
    if method == 'central':
        raise NotImplementedError(f'method=={method} not yet implemented')
    if method not in ('forward', 'backwards', 'central'):
        raise ValueError('Inadmissible option passed to gradient.')
    if self.tilt_amount:
        raise ValueError('Gradients do not allow tilts')

    # central = run this code forwards and backwards with epsilon / 2 and average?!
    # Forwards or backwards difference: sign of the step and label used in audit row names.
    if method == 'forward':
        delta = 1 + epsilon
        dx = epsilon
        pm = '+'
    else:
        delta = 1 - epsilon
        dx = -epsilon
        pm = '-'

    # FFT functions for use in exa calculations; padding needs to be consistent with agg
    def loc_ft(x):
        return ft(x, self.padding, None)

    def loc_ift(x):
        return ift(x, self.padding, None)

    # setup (compare self.update)
    xs = self.density_df['loss'].values
    tilt_vector = None

    # (1+e)X computed for each line
    agg_epsilon_df = pd.DataFrame(index=xs)

    # compute the individual line (1+epsilon)X_i and then the revised total
    new_aggs = {}
    for base_agg in self.agg_list:
        agg = base_agg.rescale(delta, kind)
        new_aggs[base_agg.name] = agg
        _a = agg.update(xs, self.padding, tilt_vector, 'exact' if agg.n < self.approx_freq_ge else self.approx_type,
                        self.sev_calc, self.discretization_calc, verbose=False)
        agg_epsilon_df[f'p_{agg.name}'] = agg.agg_density
        # the total with the line incremented: convolve incremented line with the not-line density
        agg_epsilon_df[f'p_total_{agg.name}'] = \
            np.real(loc_ift(agg.ftagg_density * loc_ft(self.density_df[f'ημ_{agg.name}'])))

    self.remove_fuzz(df=agg_epsilon_df, force=remove_fuzz, log='gradient')

    # Empirical audit statistics for each incremented density.
    percentiles = [0.9, 0.95, 0.99, 0.996, 0.999, 0.9999, 1 - 1e-6]
    audit_df = pd.DataFrame(
        columns=['Sum probs', 'EmpMean', 'EmpCV', 'EmpSkew', 'EmpEX1', 'EmpEX2', 'EmpEX3'] +
                ['P' + str(100 * i) for i in percentiles])
    # 949 = epsilon 916 Delta (Greek letters used in audit row labels)
    ep = chr(949)
    D = chr(916)
    for col in agg_epsilon_df.columns:
        sump = np.sum(agg_epsilon_df[col])
        t = agg_epsilon_df[col] * xs
        ex1 = np.sum(t)
        t *= xs
        ex2 = np.sum(t)
        t *= xs
        ex3 = np.sum(t)
        m, cv, s = MomentAggregator.static_moments_to_mcvsk(ex1, ex2, ex3)
        ps = np.zeros((len(percentiles)))
        temp = agg_epsilon_df[col].cumsum()
        for i, p in enumerate(percentiles):
            ps[i] = (temp > p).idxmax()
        audit_df.loc[f'{col[2:]}{pm}{ep}', :] = [sump, m, cv, s, ex1, ex2, ex3] + list(ps)
    for l in self.line_names_ex:
        audit_df.loc[l, :] = self.audit_df.loc[l, :]
    # differences between incremented and base statistics
    for l in self.line_names:
        audit_df.loc[f'{l}{D}', :] = audit_df.loc[f'{l}{pm}{ep}'] - audit_df.loc[l]
        audit_df.loc[f'total_{l}{D}', :] = audit_df.loc[f'total_{l}{pm}{ep}'] - audit_df.loc['total']
    audit_df = audit_df.sort_index()

    # now need to iterate through each line to compute differences
    # variables we want to differentiate
    # note asking for derv of exa_A makes things a lot more complex...see swap function below
    # may want to default to not including that?
    columns_of_interest = ['S'] + [f'exa_{line}' for line in self.line_names_ex]
    if extra_columns:
        columns_of_interest += extra_columns

    # these are the columns add_exa expects
    columns_p_only = ['loss'] + [f'p_{line}' for line in self.line_names_ex] + \
                     [f'ημ_{line}' for line in self.line_names]

    # first, need a base and add exag to coi
    if distortion:
        _x = self.apply_distortion(distortion, create_augmented=False)
        base = _x.augmented_df
        columns_of_interest.extend(['gS'] + [f'exag_{line}' for line in self.line_names_ex])
    else:
        base = self.density_df

    # and then a holder for the answer
    answer = pd.DataFrame(index=pd.Index(xs, name='loss'),
                          columns=pd.MultiIndex.from_arrays(((), ()), names=('partial_wrt', 'line')))
    answer.columns.name = 'derivatives'

    # the exact same as add exa; same padding no tilt
    def ae_ft(x):
        return ft(x, 1, None)

    def swap(adjust_line):
        """
        in the not line swap A for Ae

        E.g. X = A + B + C and adjust_line = A. Then not A is the same, but for not B and not C you
        need to swap A with A+epsilon. This function accomplishes the swap.

        :param adjust_line: name of the line being incremented by epsilon
        :return: collection of all not lines with adjusted adjust_line
            adjusted_not_fft[line] is fft of not_line with adjust_line swapped out for line + epsilon
        """
        # look if there are just two lines then this is easy......but want to see if this works too...
        adjusted_not_fft = {}
        adjust_line_ft = ae_ft(agg_epsilon_df[f'p_{adjust_line}'])
        base_line_ft = ae_ft(base[f'p_{adjust_line}'])
        adj_factor = adjust_line_ft / base_line_ft
        adj_factor[np.logical_and(base_line_ft == 0, adjust_line_ft == 0)] = 0
        n_and = np.sum(np.logical_and(base_line_ft == 0, adjust_line_ft == 0))
        n_or = np.sum(np.logical_or(base_line_ft == 0, adjust_line_ft == 0))
        # TODO sort this out...often not actually the same...
        # BUG FIX: the second half of this message was a plain (non-f) string, so
        # '{adjust_line}' was logged literally instead of being interpolated.
        logger.info(f'SAME? And={n_and} Or={n_or}; Zeros in fft(line) and '
                    f'fft(line + epsilon for {adjust_line}.')
        for line in self.line_names:
            if line == adjust_line:
                # nothing changes, adjust_line not in not adjust_line it doesn't need to change
                adjusted_not_fft[line] = ae_ft(base[f'ημ_{line}'])
            else:
                adjusted_not_fft[line] = ae_ft(base[f'ημ_{line}']) * adj_factor
        return adjusted_not_fft

    # finally perform iteration and compute differences
    for line in self.line_names:
        gradient_df = base[columns_p_only].copy()
        gradient_df[f'p_{line}'] = agg_epsilon_df[f'p_{line}']
        gradient_df['p_total'] = agg_epsilon_df[f'p_total_{line}']
        if do_swap:
            # we also need to update ημ_lines whenever it includes line (i.e. always
            # call original add_exa function, operates on gradient_df in-place
            self.add_exa(gradient_df, details=False, ft_nots=swap(line))
        else:
            self.add_exa(gradient_df, details=False)
        if distortion is not None:
            # apply to line + epsilon
            gradient_df = self.apply_distortion(distortion, df_in=gradient_df, create_augmented=False).augmented_df
        # compute differentials and store answer!
        answer[[(line, i) for i in columns_of_interest]] = (gradient_df[columns_of_interest] -
                                                            base[columns_of_interest]) / dx

    return Answer(gradient=answer, audit=audit_df, new_aggs=new_aggs)
def report(self, report_list='quick'):
    """
    Display the standard reports named in report_list for this portfolio.

    :param report_list:
    :return:
    """
    full_report_list = ['statistics', 'quick', 'audit', 'priority_capital', 'priority_analysis']
    if report_list == 'all':
        report_list = full_report_list
    for r in full_report_list:
        # membership test against either the list or the string report_list
        if r not in report_list:
            continue
        html_title(f'{r} Report for {self.name}', 1)
        if r == 'priority_capital':
            if self.priority_capital_df is None:
                html_title(f'Report {r} not generated', 2)
            else:
                display(self.priority_capital_df.loc[1e-3:1e-2, :].style)
        elif r == 'quick':
            if self.audit_df is None:
                html_title(f'Report {r} not generated', 2)
            else:
                quick_cols = ['Mean', 'EmpMean', 'MeanErr', 'CV', 'EmpCV', 'CVErr', 'P99.0']
                df = self.audit_df[quick_cols]
                display(df.style)
        else:
            df = getattr(self, r + '_df', None)
            if df is None:
                html_title(f'Report {r} not generated', 2)
            else:
                # some frames cannot be styled; fall back to a plain display
                try:
                    display(df.style)
                except ValueError:
                    display(df)
def plot(self, kind='density', line='all', p=0.99, c=0, a=0, axiter=None, figsize=None, height=2,
aspect=1, **kwargs):
"""
kind = density
simple plotting of line density or not line density;
input single line or list of lines;
log underscore appended as appropriate
kind = audit
Miscellaneous audit graphs
kind = priority
LEV EXA, E2Pri and combined plots by line
kind = quick
four bar charts of EL etc.
kind = collateral
plot to illustrate bivariate density of line vs not line with indicated asset a and capital c
:param kind: density | audit | priority | quick | collateral
:param line: lines to use, defaults to all
:param p: for graphics audit, x-axis scale has maximum q(p)
:param c: collateral amount
:param a: asset amount
:param axiter: optional, pass in to use existing ``axiter``
:param figsize: arguments passed to axis_factory if no axiter
:param height:
:param aspect:
:param kwargs: passed to pandas plot routines
:return:
"""
| |
# Source repo: khuyentran1401/bnlearn
"""Bayesian techniques for structure learning, parameter learning, inference and sampling."""
# ------------------------------------
# Name : bnlearn.py
# Author : E.Taskesen
# Contact : <EMAIL>
# Licence : See licences
# ------------------------------------
# %% Libraries
import os
import copy
import wget
import zipfile
import itertools
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from pathlib import Path
from tqdm import tqdm
from pgmpy.models import BayesianModel, NaiveBayes
from pgmpy.factors.discrete import TabularCPD
from pgmpy.sampling import BayesianModelSampling # GibbsSampling
from pgmpy import readwrite
from ismember import ismember
import pypickle
import bnlearn
# %% Convert adjmat to bayesian model
def to_bayesianmodel(model, verbose=3):
    """Convert adjacency matrix to BayesianModel.

    Description
    -----------
    Convert a adjacency to a BayesianModel. This is required as some of the
    functionalities, such as ``structure_learning`` output a DAGmodel.
    If the output of ``structure_learning`` is provided, the adjmat is extracted and processed.

    Parameters
    ----------
    model : pd.DataFrame()
        Adjacency matrix.

    Raises
    ------
    Exception
        The input should not be None and if a model (as dict) is provided, the key 'adjmat' should be included.

    Returns
    -------
    bayesianmodel : Object
        BayesianModel that can be used in ``parameter_learning.fit``.

    """
    # Accept either a raw adjmat or a result dict that carries one.
    adjmat = model.get('adjmat', None) if isinstance(model, dict) else model
    if adjmat is None:
        raise Exception('[bnlearn] >Error: input for "to_bayesianmodel" should be adjmat or a dict containing a key "adjmat".')
    if verbose>=3:
        print('[bnlearn] >Conversion of adjmat to BayesianModel.')
    # Adjacency matrix -> list of [source, target] edges -> BayesianModel
    edge_list = adjmat2vec(adjmat)[['source', 'target']].values.tolist()
    bayesianmodel = BayesianModel(edge_list)
    return bayesianmodel
# %% Make DAG
def make_DAG(DAG, CPD=None, methodtype='bayes', checkmodel=True, verbose=3):
    """Create Directed Acyclic Graph based on list.

    Parameters
    ----------
    DAG : list
        list containing source and target in the form of [('A','B'), ('B','C')].
    CPD : list, array-like
        Containing TabularCPD for each node.
    methodtype : str (default: 'bayes')
        * 'bayes': Bayesian model
        * 'nb' or 'naivebayes': Special case of Bayesian Model where the only edges in the model are from the feature variables to the dependent variable. Or in other words, each tuple should start with the same variable name such as: edges = [('A', 'B'), ('A', 'C'), ('A', 'D')]
    checkmodel : bool
        Check the validity of the model. The default is True
    verbose : int, optional
        Print progress to screen. The default is 3.
        0: None, 1: ERROR, 2: WARN, 3: INFO (default), 4: DEBUG, 5: TRACE

    Returns
    -------
    dict keys:
        * 'adjmat': Adjacency matrix
        * 'model': pgmpy.models
        * 'methodtype': methodtype
        * 'model_edges': Edges

    Examples
    --------
    >>> import bnlearn as bn
    >>> edges = [('A', 'B'), ('A', 'C'), ('A', 'D')]
    >>> DAG = bn.make_DAG(edges, methodtype='naivebayes')
    >>> bn.plot(DAG)

    """
    # Normalise CPD input to a list so it can be iterated below.
    if (CPD is not None) and (not isinstance(CPD, list)):
        CPD=[CPD]
    # 'nb' is shorthand for 'naivebayes'.
    if methodtype=='nb': methodtype='naivebayes'
    # A result dict from another bnlearn call: unwrap the model it contains.
    if isinstance(DAG, dict):
        DAG = DAG.get('model', None)
    if (not isinstance(DAG, list)) and ('pgmpy' not in str(type(DAG))):
        raise Exception("[bnlearn] >Error: Input DAG should be a list. in the form [('A','B'), ('B','C')] or a <pgmpy.models.BayesianModel.BayesianModel>")
    elif ('pgmpy' in str(type(DAG))):
        # Extract methodtype from existing model.
        if ('bayesianmodel' in str(type(DAG)).lower()):
            methodtype='bayes'
        elif('naivebayes' in str(type(DAG)).lower()):
            methodtype='naivebayes'
        if verbose>=3: print('[bnlearn] >No changes made to existing %s DAG.' %(methodtype))
    elif isinstance(DAG, list) and methodtype=='naivebayes':
        # Build a NaiveBayes model from the edge list.
        if verbose>=3: print('[bnlearn] >%s DAG created.' %(methodtype))
        edges=DAG
        DAG = NaiveBayes()
        DAG.add_edges_from(edges)
        # modeel.add_nodes_from(DAG)
    elif isinstance(DAG, list) and methodtype=='bayes':
        # Build a BayesianModel from the edge list.
        if verbose>=3: print('[bnlearn] >%s DAG created.' %(methodtype))
        DAG = BayesianModel(DAG)

    # Attach any supplied CPDs and (optionally) validate the resulting model.
    if CPD is not None:
        for cpd in CPD:
            DAG.add_cpds(cpd)
            if verbose>=3: print('[bnlearn] >Add CPD: %s' %(cpd.variable))
        # Check model
        if checkmodel:
            _check_model(DAG, verbose=verbose)

    # Create adjacency matrix from DAG
    out = {}
    out['adjmat'] = _dag2adjmat(DAG)
    out['model'] = DAG
    out['methodtype'] = methodtype
    out['model_edges'] = DAG.edges()
    return out
# %% Print DAG
def print_CPD(DAG, checkmodel=False):
    """Print DAG-model to screen.

    Parameters
    ----------
    DAG : pgmpy.models.BayesianModel.BayesianModel
        model of the DAG.
    checkmodel : bool
        Check the validity of the model. The default is True

    Returns
    -------
    None.

    """
    # config = None
    if isinstance(DAG, dict):
        DAG = DAG.get('model', None)

    # Print CPDs
    # if config['method']=='ml' or config['method']=='maximumlikelihood':
    try:
        if 'MaximumLikelihood' in str(type(DAG)):
            # print CPDs using Maximum Likelihood Estimators
            for node in DAG.state_names:
                print(DAG.estimate_cpd(node))
        elif ('bayesianmodel' in str(type(DAG)).lower()) or ('naivebayes' in str(type(DAG)).lower()):
            # print CPDs using Bayesian Parameter Estimation
            # Raising here is deliberate: the except-branch below prints the
            # "no CPDs" hint. (A dead `return` after the raise was removed.)
            if len(DAG.get_cpds())==0:
                raise Exception('[bnlearn] >Error! This is a Bayesian DAG containing only edges, and no CPDs. Tip: you need to specify or learn the CPDs. Try: DAG=bn.parameter_learning.fit(DAG, df). At this point you can make a plot with: bn.plot(DAG).')
            for cpd in DAG.get_cpds():
                print("CPD of {variable}:".format(variable=cpd.variable))
                print(cpd)
            if ('bayesianmodel' in str(type(DAG)).lower()):
                print('[bnlearn] >Independencies:\n%s' %(DAG.get_independencies()))
            print('[bnlearn] >Nodes: %s' %(DAG.nodes()))
            print('[bnlearn] >Edges: %s' %(DAG.edges()))

        if checkmodel:
            _check_model(DAG, verbose=3)
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only genuine runtime errors should trigger the hint.
    except Exception:
        print('[bnlearn] >No CPDs to print. Hint: Add CPDs as following: <bn.make_DAG(DAG, CPD=[cpd_A, cpd_B, etc])> and use bnlearn.plot(DAG) to make a plot.')
# %%
def _check_model(DAG, verbose=3):
if verbose>=3: print('[bnlearn] >Checking CPDs..')
for cpd in DAG.get_cpds():
# print(cpd)
if not np.all(cpd.values.sum(axis=0)==1):
print('[bnlearn] >Warning: CPD [%s] does not add up to 1 but is: %s' %(cpd.variable, cpd.values.sum(axis=0)))
if verbose>=3:
print('[bnlearn] >Check for DAG structure. Correct: %s' %(DAG.check_model()))
# %% Convert DAG into adjacency matrix
def _dag2adjmat(model, verbose=3):
adjmat = None
if hasattr(model, 'nodes') and hasattr(model, 'edges'):
adjmat = pd.DataFrame(data=False, index=model.nodes(), columns=model.nodes()).astype('bool')
# Fill adjmat with edges
edges = model.edges()
# Run over the edges
for edge in edges:
adjmat.loc[edge[0], edge[1]]=True
adjmat.index.name='source'
adjmat.columns.name='target'
else:
if verbose>=1: print('[bnlearn] >Could not convert to adjmat because nodes and/or edges were missing.')
return(adjmat)
# %% Convert adjacency matrix to vector
def vec2adjmat(source, target, weights=None, symmetric=True):
    """Convert source and target into adjacency matrix.

    Parameters
    ----------
    source : list
        The source node.
    target : list
        The target node.
    weights : list of int
        The Weights between the source-target values
    symmetric : bool, optional
        Make the adjacency matrix symmetric with the same number of rows as columns. The default is True.

    Returns
    -------
    pd.DataFrame
        adjacency matrix.

    Examples
    --------
    >>> source=['Cloudy','Cloudy','Sprinkler','Rain']
    >>> target=['Sprinkler','Rain','Wet_Grass','Wet_Grass']
    >>> vec2adjmat(source, target)
    >>> weights=[1,2,1,3]
    >>> vec2adjmat(source, target, weights=weights)

    """
    # Consistency fix: the message carried a '[hnet]' prefix copied from the hnet
    # package; every other message in this module uses '[bnlearn]'.
    if len(source)!=len(target): raise ValueError('[bnlearn] >Source and Target should have equal elements.')
    if weights is None: weights = [1] *len(source)

    df = pd.DataFrame(np.c_[source, target], columns=['source', 'target'])
    # Make adjacency matrix; duplicate (source, target) pairs have their weights summed.
    adjmat = pd.crosstab(df['source'], df['target'], values=weights, aggfunc='sum').fillna(0)
    # Get all unique nodes
    nodes = np.unique(list(adjmat.columns.values) +list(adjmat.index.values))
    # nodes = np.unique(np.c_[adjmat.columns.values, adjmat.index.values].flatten())

    # Make the adjacency matrix symmetric
    if symmetric:
        # Add missing columns
        node_columns = np.setdiff1d(nodes, adjmat.columns.values)
        for node in node_columns:
            adjmat[node]=0
        # Add missing rows
        node_rows = np.setdiff1d(nodes, adjmat.index.values)
        adjmat=adjmat.T
        for node in node_rows:
            adjmat[node]=0
        adjmat=adjmat.T
        # Sort to make ordering of columns and rows similar
        [IA, IB] = ismember(adjmat.columns.values, adjmat.index.values)
        adjmat = adjmat.iloc[IB, :]

    adjmat.index.name='source'
    adjmat.columns.name='target'
    return(adjmat)
# %% Convert adjacency matrix to vector
def adjmat2vec(adjmat, min_weight=1):
    """Convert adjacency matrix into vector with source and target.

    Parameters
    ----------
    adjmat : pd.DataFrame()
        Adjacency matrix.
    min_weight : float
        edges are returned with a minimum weight.

    Returns
    -------
    pd.DataFrame()
        nodes that are connected based on source and target

    Examples
    --------
    >>> source=['Cloudy','Cloudy','Sprinkler','Rain']
    >>> target=['Sprinkler','Rain','Wet_Grass','Wet_Grass']
    >>> adjmat = vec2adjmat(source, target)
    >>> vector = adjmat2vec(adjmat)

    """
    # Long-format edge list: one row per (source, target) cell of the matrix.
    edges = adjmat.stack().reset_index()
    edges.columns = ['source', 'target', 'weight']
    # Keep only real connections: no self loops, weight at or above threshold.
    keep = (edges['source'] != edges['target']) & (edges['weight'] >= min_weight)
    edges = edges.loc[keep, :]
    edges.reset_index(drop=True, inplace=True)
    return(edges)
# %%
def adjmat2dict(adjmat):
    """Convert adjacency matrix to dict.

    Parameters
    ----------
    adjmat : pd.DataFrame
        Adjacency matrix.

    Returns
    -------
    graph : dict
        Graph.

    """
    boolean_adj = adjmat.astype(bool)
    node_labels = boolean_adj.index.values
    graph = {}
    # Each node maps to the list of nodes it has a True entry for.
    for node in node_labels:
        row_mask = boolean_adj.loc[node, :]
        graph[node] = list(node_labels[row_mask])
    return graph
# %% Sampling from model
def sampling(DAG, n=1000, verbose=3):
"""Generate sample(s) using forward sampling from joint distribution of the bayesian network.
Parameters
----------
DAG : dict
Contains model and adjmat of the DAG.
n : int, optional
Number of samples to generate. The default is 1000.
verbose : int, optional
Print progress to screen. The default is 3.
0: None, 1: ERROR, 2: WARN, 3: INFO (default), 4: DEBUG, 5: TRACE
Returns
-------
df : pd.DataFrame().
Dataframe containing sampled data from the input DAG model.
Example
-------
>>> import bnlearn
>>> DAG = bnlearn.import_DAG('sprinkler')
>>> df = bnlearn.sampling(DAG, n=1000)
"""
if n<=0: raise ValueError('n must be 1 or larger')
if 'BayesianModel' not in str(type(DAG['model'])): raise ValueError('DAG must contain BayesianModel.')
if verbose>=3: print('[bnlearn] >Forward | |
# File: waterpy/geospatial.py
from osgeo import ogr, gdal, osr, gdalconst
import os
import numpy as np
import pandas as pd
import json
import pycrs
import pyproj
import netCDF4
import datetime as dt
class Shp:
    """
    Contains various functions and metadata desc in init related in SHP objects.

    While titled SHP, currently this should only be used with polygons. Will incorporate fun things like
    points in future versions. I currently see no reason to incorporate lines.

    Outside reliance on the daymet.prj (included in ./static/geospatial) to transform things into daymet to build the
    temp/precip series.
    """

    def __init__(self, path):
        # Open the shapefile and cache layer / projection / first-feature metadata.
        self.path = path
        self.shp = ogr.Open(self.path)
        self.lyr = self.shp.GetLayer()
        self.prj = self.shp.GetLayer().GetSpatialRef()
        self.lyr_0 = self.shp.GetLayer(0)
        self.prj4 = self.prj.ExportToProj4()
        self.feature = self.shp.GetLayer(0).GetFeature(0)
        # (minX, maxX, minY, maxY) envelope of the first feature's geometry.
        self.extent = self.feature.GetGeometryRef().GetEnvelope()
        self.x_cen, self.y_cen = self._centroid()
        # Centroid reprojected into the Daymet CRS.
        self.daymet_x, self.daymet_y = self.daymet_proj()
        self.karst_flag = 0

    @classmethod
    def _clean(cls, path):
        """Open *path* in update mode, drop stale Shape_Area/Shape_Leng fields, and return a fresh Shp."""
        ds = ogr.Open(path, 1)
        lyr = ds.GetLayer()
        defn = lyr.GetLayerDefn()
        # BUG FIX: iterate in reverse. OGR re-indexes the remaining fields after
        # DeleteField(i), so a forward scan over range(GetFieldCount()) skips the
        # field that slides into slot i and can read past the shrunken field list.
        for i in reversed(range(defn.GetFieldCount())):
            if defn.GetFieldDefn(i).GetName() in ("Shape_Area", "Shape_Leng"):
                lyr.DeleteField(i)
        # Release the datasource so edits are flushed before re-opening read-only.
        ds = None
        clean_shp = Shp(path=path)
        return clean_shp

    def _centroid(self):
        # Centroid of the first feature, extracted via a GeoJSON round-trip.
        centroid = json.loads(
            self.feature.GetGeometryRef().Centroid().ExportToJson())
        center_x = centroid['coordinates'][0]
        center_y = centroid['coordinates'][1]
        return center_x, center_y

    def daymet_proj(self):
        """Return the shape centroid transformed into the Daymet projection."""
        daymet_proj = pycrs.load.from_file("database//climate//Daymet.prj")
        transformer = pyproj.Transformer.from_crs(self.prj4, daymet_proj.to_proj4())
        return transformer.transform(self.x_cen, self.y_cen)
class dbShp:
    """
    Lightweight variant of Shp for shapefiles shipped with the database or
    produced by the code itself (no extent/daymet caching in __init__).
    """

    def __init__(self, path):
        self.path = path
        self.shp = ogr.Open(self.path)
        self.lyr = self.shp.GetLayer()
        self.prj = self.shp.GetLayer().GetSpatialRef()
        self.prj4 = self.prj.ExportToProj4()
        self.feature = self.shp.GetLayer(0).GetFeature(0)
        self.x_cen, self.y_cen = self._centroid()

    def _centroid(self):
        """Return the centroid (x, y) of the first feature."""
        geojson = json.loads(
            self.feature.GetGeometryRef().Centroid().ExportToJson())
        x, y = geojson['coordinates'][0], geojson['coordinates'][1]
        return x, y

    def daymet_proj(self):
        """Project the centroid into the Daymet coordinate system."""
        daymet_crs = pycrs.load.from_file("database//climate//Daymet.prj")
        to_daymet = pyproj.Transformer.from_crs(self.prj4, daymet_crs.to_proj4())
        return to_daymet.transform(self.x_cen, self.y_cen)
class Raster:
    """
    Wraps a GDAL-readable raster and caches its first band and projection.
    WaterPy internals (./static/geospatal/rasters) use tifs, but this class
    works with any osgeo/gdal-compliant raster format.
    """
    def __init__(self, path):
        self.path = path
        self.data = gdal.Open(self.path)
        self.band_1 = self.data.GetRasterBand(1)
        # NOTE(review): no parentheses, so `gt` stores the *bound method*,
        # not the geotransform tuple. Callers in this module rely on that
        # and invoke it as raster.gt() -- keep as-is.
        self.gt = self.data.GetGeoTransform
        self.prj = osr.SpatialReference(wkt=self.data.GetProjection())
        self.prj4 = self.prj.ExportToProj4()
def bbox_to_pixel_offsets(gt, bbox):
    """
    Snap a polygon's bounding box onto a raster's pixel grid.

    :param gt: geotransform tuple from gdal Dataset.GetGeoTransform().
    :param bbox: (xmin, xmax, ymin, ymax) from ogr feature.GetExtent().
    :return: (x_offset, y_offset, x_size, y_size) pixel window suitable
        for band.ReadAsArray(*window).
    """
    origin_x, pixel_width = gt[0], gt[1]
    origin_y, pixel_height = gt[3], gt[5]
    # Columns: truncate the left edge, extend one past the right edge.
    col_start = int((bbox[0] - origin_x) / pixel_width)
    col_stop = int((bbox[1] - origin_x) / pixel_width) + 1
    # Rows: pixel_height is negative for north-up rasters, so ymax maps
    # to the first row and ymin to the last.
    row_start = int((bbox[3] - origin_y) / pixel_height)
    row_stop = int((bbox[2] - origin_y) / pixel_height) + 1
    return col_start, row_start, col_stop - col_start, row_stop - row_start
def karst_detection(raster, shp):
    """
    Check whether the basin polygon overlaps any karst cells.

    :param raster: Raster class object built from karst raster.
    :param shp: SHP class object from entire basin.
    :return: 1 if any raster cell inside the polygon is > 0, else 0
        (caller assigns the result to Shp.karst_flag).
    """
    r_data = raster.data
    r_band = r_data.GetRasterBand(1)
    r_geotransform = raster.gt()  # gt is a bound method; call it
    v_data = shp.shp
    v_feature = v_data.GetLayer(0)
    sourceprj = v_feature.GetSpatialRef()
    targetprj = osr.SpatialReference(wkt=r_data.GetProjection())
    # Reproject the polygon into the raster's CRS when they differ,
    # building an in-memory layer that replaces the original.
    if sourceprj.ExportToProj4() != targetprj.ExportToProj4():
        to_fill = ogr.GetDriverByName('Memory')
        ds = to_fill.CreateDataSource("project")
        outlayer = ds.CreateLayer('poly', targetprj, ogr.wkbPolygon)
        feature = v_feature.GetFeature(0)
        transform = osr.CoordinateTransformation(sourceprj, targetprj)
        transformed = feature.GetGeometryRef()
        transformed.Transform(transform)
        geom = ogr.CreateGeometryFromWkb(transformed.ExportToWkb())
        defn = outlayer.GetLayerDefn()
        feat = ogr.Feature(defn)
        feat.SetGeometry(geom)
        outlayer.CreateFeature(feat.Clone())
        feat = None
        v_feature = outlayer
    # Read only the raster window covering the polygon's bounding box.
    src_offset = bbox_to_pixel_offsets(r_geotransform, v_feature.GetExtent())
    src_array = r_band.ReadAsArray(*src_offset)
    # Geotransform of the clipped window (origin shifted by the offset).
    new_gt = (
        (r_geotransform[0] + (src_offset[0] * r_geotransform[1])),
        r_geotransform[1], 0.0,
        (r_geotransform[3] + (src_offset[1] * r_geotransform[5])),
        0.0, r_geotransform[5]
    )
    stats = []  # Keeping this unless there are several features in the same shapefile.
    # Burn the polygon into an in-memory byte raster aligned to the window.
    driver = gdal.GetDriverByName('MEM')
    v_to_r = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
    v_to_r.SetGeoTransform(new_gt)
    gdal.RasterizeLayer(v_to_r, [1], v_feature, burn_values=[1])
    v_to_r_array = v_to_r.ReadAsArray()
    # Mask off everything outside the burned polygon.
    masked = np.ma.MaskedArray(
        src_array,
        mask=np.logical_not(v_to_r_array)
    )
    if masked.max() > 0:
        return 1
    else:
        return 0
def zonal_stats(raster, shp):
    """
    Converts a shp file into a raster mask. Masks off a polygon and extracts statistics from the area within the mask.
    Currently this only works with a shp file with one feature, however, it's written so that it could be adjusted to
    handle multiple features.
    :param raster: Raster class object.
    :param shp: Shp class object.
    :return: float, mean of the raster cells inside the polygon (nodata
        cells excluded). NOTE: despite the docstring, only the mean is
        returned; the full stats dict is built but not exposed.
    """
    r_data = raster.data
    r_band = r_data.GetRasterBand(1)
    nodata_value = r_band.GetNoDataValue()
    r_geotransform = raster.gt()  # gt is a bound method; call it
    v_data = shp.shp
    v_feature = v_data.GetLayer(0)
    sourceprj = v_feature.GetSpatialRef()
    targetprj = osr.SpatialReference(wkt=r_data.GetProjection())
    # Reproject the polygon into the raster's CRS when they differ.
    if sourceprj.ExportToProj4() != targetprj.ExportToProj4():
        to_fill = ogr.GetDriverByName('Memory')
        ds = to_fill.CreateDataSource("project")
        outlayer = ds.CreateLayer('poly', targetprj, ogr.wkbPolygon)
        feature = v_feature.GetFeature(0)
        transform = osr.CoordinateTransformation(sourceprj, targetprj)
        transformed = feature.GetGeometryRef()
        transformed.Transform(transform)
        geom = ogr.CreateGeometryFromWkb(transformed.ExportToWkb())
        defn = outlayer.GetLayerDefn()
        feat = ogr.Feature(defn)
        feat.SetGeometry(geom)
        outlayer.CreateFeature(feat.Clone())
        v_feature = outlayer
    # Read only the raster window covering the polygon's bounding box.
    src_offset = bbox_to_pixel_offsets(r_geotransform, v_feature.GetExtent())
    src_array = r_band.ReadAsArray(*src_offset)
    # Geotransform of the clipped window (origin shifted by the offset).
    new_gt = (
        (r_geotransform[0] + (src_offset[0] * r_geotransform[1])),
        r_geotransform[1], 0.0,
        (r_geotransform[3] + (src_offset[1] * r_geotransform[5])),
        0.0, r_geotransform[5]
    )
    driver = gdal.GetDriverByName('MEM')
    stats = []
    # Burn the polygon into an in-memory byte raster aligned to the window.
    v_to_r = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
    v_to_r.SetGeoTransform(new_gt)
    gdal.RasterizeLayer(v_to_r, [1], v_feature, burn_values=[1])
    v_to_r_array = v_to_r.ReadAsArray()
    src_array = np.array(src_array, dtype=float)
    v_to_r_array = np.array(v_to_r.ReadAsArray(), dtype=float)
    # Mask nodata cells and everything outside the burned polygon.
    masked = np.ma.MaskedArray(
        src_array,
        mask=np.logical_or(
            src_array == nodata_value,
            np.logical_not(v_to_r_array)
        ),
        fill_value=np.nan
    )
    feature_stats = {
        'source': str(raster.path),
        'min': float(masked.min()),
        'mean': float(masked.mean()),
        'max': float(masked.max()),
        'std': float(masked.std()),
    }
    ds = None  # release the in-memory datasource (no-op if no reprojection)
    stats.append(feature_stats)
    return feature_stats['mean']
def zonal_area(raster, shp):
    """
    Rasterize the basin polygon onto the raster grid and count the raster
    cells that fall inside it, excluding nodata cells and zero-valued cells.

    :param raster: Raster class object.
    :param shp: Shp class object.
    :return: float, number of counted cells * 100 (presumably the per-cell
        area in m^2 for a 10 m grid -- TODO confirm raster resolution).
    """
    r_data = raster.data
    r_band = r_data.GetRasterBand(1)
    r_geotransform = raster.gt()  # gt is a bound method; call it
    v_data = shp.shp
    v_feature = v_data.GetLayer(0)
    nodata_value = r_band.GetNoDataValue()
    sourceprj = v_feature.GetSpatialRef()
    targetprj = osr.SpatialReference(wkt=r_data.GetProjection())
    # Reproject the polygon into the raster's CRS when they differ.
    if sourceprj.ExportToProj4() != targetprj.ExportToProj4():
        to_fill = ogr.GetDriverByName('Memory')
        ds = to_fill.CreateDataSource("project")
        outlayer = ds.CreateLayer('poly', targetprj, ogr.wkbPolygon)
        feature = v_feature.GetFeature(0)
        transform = osr.CoordinateTransformation(sourceprj, targetprj)
        transformed = feature.GetGeometryRef()
        transformed.Transform(transform)
        geom = ogr.CreateGeometryFromWkb(transformed.ExportToWkb())
        defn = outlayer.GetLayerDefn()
        feat = ogr.Feature(defn)
        feat.SetGeometry(geom)
        outlayer.CreateFeature(feat.Clone())
        feat = None
        v_feature = outlayer
    # Read only the raster window covering the polygon's bounding box.
    src_offset = bbox_to_pixel_offsets(r_geotransform, v_feature.GetExtent())
    src_array = r_band.ReadAsArray(*src_offset)
    # Geotransform of the clipped window (origin shifted by the offset).
    new_gt = (
        (r_geotransform[0] + (src_offset[0] * r_geotransform[1])),
        r_geotransform[1], 0.0,
        (r_geotransform[3] + (src_offset[1] * r_geotransform[5])),
        0.0, r_geotransform[5]
    )
    # Burn the polygon into an in-memory byte raster aligned to the window.
    driver = gdal.GetDriverByName('MEM')
    v_to_r = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
    v_to_r.SetGeoTransform(new_gt)
    gdal.RasterizeLayer(v_to_r, [1], v_feature, burn_values=[1])
    src_array = np.array(src_array, dtype=float)
    v_to_r_array = np.array(v_to_r.ReadAsArray(), dtype=float)
    # BUG FIX: the previous call np.logical_or(a, b, c) treated the third
    # positional argument as the `out` buffer, so the outside-polygon
    # condition was silently dropped (and the rasterized array clobbered).
    # Reduce over all three conditions instead.
    masked = np.ma.MaskedArray(
        src_array,
        mask=np.logical_or.reduce([
            src_array == nodata_value,
            src_array == 0,
            np.logical_not(v_to_r_array),
        ]),
        fill_value=np.nan
    )
    return float(masked.count() * 100)
def twi_bins(raster, shp, nbins=30):
    """
    Bin the topographic wetness index (TWI) raster within the basin polygon.

    :param raster: Raster class object built from the TWI raster.
    :param shp: Shp class object for the basin.
    :param nbins: number of histogram bins (default 30).
    :return: pd.DataFrame indexed by bin number (1..nbins) with columns
        'twi' (bin midpoint value) and 'proportion' (fraction of cells).
        The TWI mean per bin is the TopModel input of interest.
    """
    r_data = raster.data
    r_band = r_data.GetRasterBand(1)
    r_geotransform = raster.gt()  # gt is a bound method; call it
    v_data = shp.shp
    v_feature = v_data.GetLayer(0)
    sourceprj = v_feature.GetSpatialRef()
    targetprj = osr.SpatialReference(wkt=r_data.GetProjection())
    # Reproject the polygon into the raster's CRS when they differ.
    if sourceprj.ExportToProj4() != targetprj.ExportToProj4():
        to_fill = ogr.GetDriverByName('Memory')
        ds = to_fill.CreateDataSource("project")
        outlayer = ds.CreateLayer('poly', targetprj, ogr.wkbPolygon)
        feature = v_feature.GetFeature(0)
        transform = osr.CoordinateTransformation(sourceprj, targetprj)
        transformed = feature.GetGeometryRef()
        transformed.Transform(transform)
        geom = ogr.CreateGeometryFromWkb(transformed.ExportToWkb())
        defn = outlayer.GetLayerDefn()
        feat = ogr.Feature(defn)
        feat.SetGeometry(geom)
        outlayer.CreateFeature(feat.Clone())
        feat = None
        v_feature = outlayer
    # Read only the raster window covering the polygon's bounding box.
    src_offset = bbox_to_pixel_offsets(r_geotransform, v_feature.GetExtent())
    src_array = r_band.ReadAsArray(*src_offset)
    # Geotransform of the clipped window (origin shifted by the offset).
    new_gt = (
        (r_geotransform[0] + (src_offset[0] * r_geotransform[1])),
        r_geotransform[1], 0.0,
        (r_geotransform[3] + (src_offset[1] * r_geotransform[5])),
        0.0, r_geotransform[5]
    )
    # Burn the polygon into an in-memory byte raster aligned to the window.
    driver = gdal.GetDriverByName('MEM')
    v_to_r = driver.Create('', src_offset[2], src_offset[3], 1, gdal.GDT_Byte)
    v_to_r.SetGeoTransform(new_gt)
    gdal.RasterizeLayer(v_to_r, [1], v_feature, burn_values=[1])
    v_to_r_array = v_to_r.ReadAsArray()
    src_array = np.array(src_array, dtype=float)
    v_to_r_array = np.array(v_to_r.ReadAsArray(), dtype=float)
    # Mask cells outside the polygon and negative TWI values.
    masked = np.ma.MaskedArray(
        src_array,
        mask=np.logical_or(
            np.logical_not(v_to_r_array),
            src_array < 0)
    )
    mx = masked.max()
    mn = masked.min()
    mean = masked.mean()
    # nbins+1 edge spacing over [mn, mx) gives nbins histogram bins.
    intvl = (mx - mn) / (nbins + 1)
    edges = np.arange(mn, mx, intvl)
    histo = np.histogram(masked, bins=edges)
    # need mean of each bin. Get the rest of the stats while there.
    # TWI Mean is the value we need for TopModel Input.
    bins = []
    for i in range(nbins):
        line = []
        bin = i + 1
        if i == 0:
            # NOTE(review): first bin midpoint is edge/2, not the midpoint
            # of [edges[0], edges[1]] -- asymmetric with the other bins.
            twi_val = histo[1][i] / 2
        else:
            twi_val = (histo[1][i] + histo[1][i-1]) / 2
        proportion = histo[0][i]/np.sum(histo[0])
        line.append(bin)
        line.append(twi_val)
        line.append(proportion)
        bins.append(line)
    df = pd.DataFrame(bins, columns=['bin', 'twi', 'proportion'])
    df.set_index('bin', inplace=True)
    return df
def simplify(src):
    """Write a simplified (100 map-unit tolerance) copy of *src*.

    The output is written next to the source as <name>_simple.shp,
    replacing any previous copy, and reopened as a dbShp object.
    """
    shp_driver = ogr.GetDriverByName("ESRI Shapefile")
    src_ds = shp_driver.Open(src.path, 0)
    src_layer = src_ds.GetLayer()
    out_path = src.path[:-4] + '_simple.shp'
    srs = osr.SpatialReference()
    srs.ImportFromProj4(src.prj4)
    # Replace any stale output from a previous run.
    if os.path.exists(out_path):
        shp_driver.DeleteDataSource(out_path)
    out_ds = shp_driver.CreateDataSource(out_path)
    out_layer = out_ds.CreateLayer('FINAL', srs=srs, geom_type=ogr.wkbPolygon)
    simplified_geom = src_layer.GetFeature(0).geometry().Simplify(100.0)
    new_feature = ogr.Feature(out_layer.GetLayerDefn())
    new_feature.SetGeometry(simplified_geom)
    out_layer.CreateFeature(new_feature)
    new_feature = None
    out_ds = None  # flush to disk before reopening
    return dbShp(path=out_path)
def clip(src, shp):
| |
<reponame>laserson/vdj<gh_stars>1-10
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import copy
import numpy as np
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation
import vdj
import refseq
import seqtools
import alignmentcore
warnings.simplefilter('always')
class vdj_aligner(object):
def __init__(self,**kw):
self.numCrudeVCandidates = 5
self.numCrudeDCandidates = 10
self.numCrudeJCandidates = 2
self.minVscore = 100 # derived from calibration data 20090710
self.minDscore = 4
self.minJscore = 13
if kw.has_key('rigorous') and kw['rigorous'] == True:
self.numCrudeVCandidates = 10000
self.numCrudeDCandidates = 10000
self.numCrudeJCandidates = 10000
self.minVscore = 20
self.minDscore = 1
self.minJscore = 5
# Define seed patterns
patternA='111011001011010111'
patternB='1111000100010011010111'
patternC='111111111111'
patternD='110100001100010101111'
patternE='1110111010001111'
self.seedpatterns = [patternA,patternB,patternC,patternD,patternE]
self.miniseedpatterns = ['111011','110111']
self.patternPos = '111111111111'
# set reference sequences (locus) and generate hashes from ref data
self.locus = kw['locus']
self.refV = refseq.__getattribute__(self.locus+'V')
refV_seqs = dict([(allele,record.seq.tostring()) for (allele,record) in self.refV.iteritems()])
self.Vseqlistkeys = vdj_aligner.seqdict2kmers( refV_seqs, self.seedpatterns )
self.refJ = refseq.__getattribute__(self.locus+'J')
refJ_seqs = dict([(allele,record.seq.tostring()) for (allele,record) in self.refJ.iteritems()])
self.Jseqlistkeys = vdj_aligner.seqdict2kmers( refJ_seqs, self.seedpatterns )
try: # this locus may not have D segments
self.refD = refseq.__getattribute__(self.locus+'D')
refD_seqs = dict([(allele,record.seq.tostring()) for (allele,record) in self.refD.iteritems()])
self.Dseqlistkeysmini = vdj_aligner.seqdict2kmers( refD_seqs, self.miniseedpatterns )
except AttributeError:
pass
# Generate reference data for positive sequence ID
posVseqlistkeys = vdj_aligner.seqdict2kmers( refV_seqs, [self.patternPos] )
posJseqlistkeys = vdj_aligner.seqdict2kmers( refJ_seqs, [self.patternPos] )
negVseqlistkeys = vdj_aligner.seqdict2kmers( vdj_aligner.seqdict2revcompseqdict(refV_seqs), [self.patternPos] )
negJseqlistkeys = vdj_aligner.seqdict2kmers( vdj_aligner.seqdict2revcompseqdict(refJ_seqs), [self.patternPos] )
# collect possible keys
posset = set([])
for key in posVseqlistkeys.keys():
posset.update(posVseqlistkeys[key][self.patternPos])
for key in posJseqlistkeys.keys():
posset.update(posJseqlistkeys[key][self.patternPos])
negset = set([])
for key in negVseqlistkeys.keys():
negset.update(negVseqlistkeys[key][self.patternPos])
for key in negJseqlistkeys.keys():
negset.update(negJseqlistkeys[key][self.patternPos])
# get keys unique to positive or negative versions of reference set
possetnew = posset - negset
negsetnew = negset - posset
self.posset = possetnew
self.negset = negsetnew
    def Valign_chain(self,chain,verbose=False):
        """Align the best-matching V segment to `chain` in place.

        Candidate V segments are pre-filtered by shared-k-mer counts, then
        aligned with Needleman-Wunsch. On success, reference features and a
        gapped alignment are copied onto the chain and per-letter mutation
        annotations are updated. Returns the best V alignment score.
        """
        # compute hashes from query seq
        querykeys = vdj_aligner.seq2kmers(chain.seq.tostring(),self.seedpatterns)
        # for each reference V segment and each pattern, how many shared k-mers are there?
        Vscores_hash = vdj_aligner.hashscore(self.Vseqlistkeys,querykeys)
        # get numCrudeVCandidates highest scores in Vscores and store their names in descending order
        goodVseglist = sorted(self.refV.keys(),key=lambda k: Vscores_hash[k],reverse=True)[0:self.numCrudeVCandidates]
        goodVsegdict = dict([(seg,self.refV[seg].seq.tostring()) for seg in goodVseglist])
        # Needleman-Wunsch of V segment
        (bestVseg,bestVscore,bestVscoremat,bestVtracemat) = vdj_aligner.bestalignNW(goodVsegdict,chain.seq.tostring(),self.minVscore)
        # if successful alignment
        if bestVseg is not None:
            # copy features from ref to query
            Vrefaln,Vqueryaln = vdj_aligner.construct_alignment( self.refV[bestVseg].seq.tostring(), chain.seq.tostring(), bestVscoremat, bestVtracemat )
            coord_mapping = vdj_aligner.ungapped_coord_mapping(Vrefaln, Vqueryaln)
            seqtools.copy_features(self.refV[bestVseg], chain, coord_mapping, erase=['translation'], replace=False)
            # store gapped aln
            chain.annotations['gapped_query'] = Vqueryaln
            chain.annotations['gapped_reference'] = Vrefaln
            # annotate mutations; strip deletion marks and trim leading/
            # trailing insertion runs before splicing into the annotation
            curr_annot = chain.letter_annotations['alignment']
            aln_annot = vdj_aligner.alignment_annotation(Vrefaln,Vqueryaln)
            aln_annot = aln_annot.translate(None,'D')
            lNER = len(aln_annot) - len(aln_annot.lstrip('I'))
            rNER = len(aln_annot.rstrip('I'))
            chain.letter_annotations['alignment'] = curr_annot[:lNER] + aln_annot[lNER:rNER] + curr_annot[rNER:]
            # perform some curating; esp, CDR3-IMGT is annotated in V
            # references, though it's not complete. I will recreate that
            # annotation manually.
            chain._update_feature_dict()
            try: # some reference entries do not have CDR3 annotations
                chain.features.pop(chain._features['CDR3-IMGT'][0])
                chain._features.pop('CDR3-IMGT')
                chain._update_feature_dict()
            except KeyError:
                pass
            # update codon_start of V-REGION anchored to the CDR3 2nd-CYS
            cys = chain.features[ chain._features['2nd-CYS'][0] ]
            v_reg = chain.features[ chain._features['V-REGION'][0] ]
            v_reg.qualifiers['codon_start'] = [cys.location.start.position % 3 + 1]
        return bestVscore
    def Jalign_chain(self,chain,verbose=False):
        """Align the best-matching J segment to `chain` in place.

        If a 2nd-CYS feature exists (from a prior V alignment), the query is
        pruned to the region downstream of it. On success, reference
        features, the gapped alignment, and mutation annotations are
        updated. Returns the best J alignment score.
        """
        # try pruning off V region for J alignment
        try:
            second_cys = chain.__getattribute__('2nd-CYS')
            second_cys_offset = second_cys.location.end.position
            query = chain.seq.tostring()[second_cys_offset:]
        except AttributeError:
            query = chain.seq.tostring()
            second_cys_offset = 0
        # compute hashes from query seq
        querykeys = vdj_aligner.seq2kmers(query,self.seedpatterns)
        # for each reference J segment and each pattern, how many shared k-mers are there?
        Jscores_hash = vdj_aligner.hashscore(self.Jseqlistkeys,querykeys)
        # get numCrudeJCandidates highest scores in Jscores and store their names in descending order
        goodJseglist = sorted(self.refJ.keys(),key=lambda k: Jscores_hash[k],reverse=True)[0:self.numCrudeJCandidates]
        goodJsegdict = dict([(seg,self.refJ[seg].seq.tostring()) for seg in goodJseglist])
        # Needleman-Wunsch of J segment
        (bestJseg,bestJscore,bestJscoremat,bestJtracemat) = vdj_aligner.bestalignNW(goodJsegdict,query,self.minJscore)
        # if successful alignment
        if bestJseg is not None:
            # copy features from ref to query (offset back into full-chain coords)
            Jrefaln,Jqueryaln = vdj_aligner.construct_alignment( self.refJ[bestJseg].seq.tostring(), query, bestJscoremat, bestJtracemat )
            coord_mapping = vdj_aligner.ungapped_coord_mapping(Jrefaln, Jqueryaln)
            seqtools.copy_features(self.refJ[bestJseg], chain, coord_mapping, offset=second_cys_offset, erase=['translation'], replace=False)
            chain._update_feature_dict()
            # update gapped aln: splice the J alignment after the V-aligned part
            gapped_query = chain.annotations.get('gapped_query','')
            gapped_reference = chain.annotations.get('gapped_reference','')
            gapped_CDR3_offset = vdj_aligner.ungapped2gapped_coord(chain.seq.tostring(),gapped_query,second_cys_offset)
            gapped_Vref_aln_end = len(gapped_reference.rstrip('-'))
            chain.annotations['gapped_query'] = gapped_query[:gapped_Vref_aln_end] + Jqueryaln[gapped_Vref_aln_end-gapped_CDR3_offset:]
            chain.annotations['gapped_reference'] = gapped_reference[:gapped_Vref_aln_end] + Jrefaln[gapped_Vref_aln_end-gapped_CDR3_offset:]
            # annotate mutations; strip deletion marks and trim leading/
            # trailing insertion runs before splicing into the annotation
            curr_annot = chain.letter_annotations['alignment']
            aln_annot = vdj_aligner.alignment_annotation(Jrefaln,Jqueryaln)
            aln_annot = aln_annot.translate(None,'D')
            lNER = len(aln_annot) - len(aln_annot.lstrip('I'))
            rNER = len(aln_annot.rstrip('I'))
            chain.letter_annotations['alignment'] = curr_annot[:second_cys_offset+lNER] + aln_annot[lNER:rNER] + curr_annot[second_cys_offset+rNER:]
        return bestJscore
    def Dalign_chain(self,chain,verbose=False):
        """Align the best-matching D segment to the chain's junction.

        Only called after a well-defined CDR3 exists; uses Smith-Waterman
        over the junction region. Returns the best D alignment score.
        """
        # prune off V and J regions for D alignment
        # we should not be attempting D alignment unless we have
        # a well-defined CDR3
        query = chain.junction
        # compute hashes from query seq
        querykeys = vdj_aligner.seq2kmers(query,self.miniseedpatterns)
        # for each reference D segment and each pattern, how many shared k-mers are there?
        Dscores_hash = vdj_aligner.hashscore(self.Dseqlistkeysmini,querykeys)
        # get numCrudeDCandidates highest scores in Dscores and store their names in descending order
        goodDseglist = sorted(self.refD.keys(),key=lambda k: Dscores_hash[k],reverse=True)[0:self.numCrudeDCandidates]
        goodDsegdict = dict([(seg,self.refD[seg].seq.tostring()) for seg in goodDseglist])
        # Smith-Waterman of D segment (local alignment, unlike V/J)
        (bestDseg,bestDscore,bestDscoremat,bestDtracemat) = vdj_aligner.bestalignSW(goodDsegdict,query,self.minDscore)
        # if successful alignment
        if bestDseg is not None:
            # TEMPORARY SOLUTION: store only the segment name, no features
            chain.annotations['D-REGION'] = bestDseg
        return bestDscore
    def align_chain(self,chain,verbose=False,debug=False):
        """Run the full V/J(/D) alignment pipeline on `chain` in place.

        Requires an uppercase sequence; warns when the chain is not tagged
        as coding/positive strand. When both V and J align, a CDR3-IMGT
        feature is created manually and, for loci with D segments, a D
        alignment is attempted. Returns a dict of alignment scores keyed by
        'v', 'j' (and 'd' when attempted).
        """
        if debug:
            import pdb
            pdb.set_trace()
        if chain.seq.tostring() != chain.seq.tostring().upper():
            raise ValueError, "aligner requires all uppercase alphabet."
        if not chain.has_tag('positive') and not chain.has_tag('coding'):
            warnings.warn('chain %s may not be the correct strand' % chain.id)
        # insert letter annotations for alignment annotation
        chain.letter_annotations["alignment"] = '_' * len(chain)
        scores = {}
        scores['v'] = self.Valign_chain(chain,verbose)
        scores['j'] = self.Jalign_chain(chain,verbose)
        # manually annotate CDR3-IMGT, only if V and J alns are successful
        try:
            if chain.v and chain.j:
                # CDR3 runs from the end of 2nd-CYS to the start of J-PHE
                # (or J-TRP for loci that use tryptophan as the anchor)
                cdr3_start = chain.__getattribute__('2nd-CYS').location.end.position
                try:
                    cdr3_end = chain.__getattribute__('J-PHE').location.start.position
                except AttributeError:
                    cdr3_end = chain.__getattribute__('J-TRP').location.start.position
                cdr3_feature = SeqFeature(location=FeatureLocation(cdr3_start,cdr3_end),type='CDR3-IMGT',strand=1)
                chain.features.append(cdr3_feature)
                chain._update_feature_dict()
                # erase alignment annotations in CDR3. can't tell SHM from TdT at this point
                curr_annot = chain.letter_annotations['alignment']
                chain.letter_annotations['alignment'] = curr_annot[:cdr3_start] + '3' * (cdr3_end-cdr3_start) + curr_annot[cdr3_end:]
                # if I am in a locus with D segments, try aligning that as well
                if self.locus in ['IGH','TRB','TRD']:
                    scores['d'] = self.Dalign_chain(chain,verbose)
        except AttributeError: # chain.v or chain.j raised an error
            pass
        return scores
def coding_chain(self,chain,verbose=False):
strand = self.seq2coding(chain.seq.tostring())
if strand == -1:
chain.seq = chain.seq.reverse_complement()
chain.add_tag('revcomp')
chain.add_tag('coding')
def seq2coding(self,seq):
seqkeys = vdj_aligner.seq2kmers(seq,[self.patternPos])
seqwords = seqkeys[self.patternPos]
strandid = 1
if len(self.negset & seqwords) > len(self.posset & seqwords):
strandid = -1
return strandid
    @staticmethod
    def seq2kmers(seq,patterns):
        """Given sequence and patterns, for each pattern, compute all corresponding k-mers from sequence.

        A pattern is a string of '0'/'1'; at each position of `seq`, the
        characters under the '1' positions are concatenated into a key
        (a gapped k-mer).
        The result is seqkeys[pattern] = set([kmers]).
        """
        seqkeys = {}
        patlens = []
        for pattern in patterns:
            patlens.append(len(pattern))
            seqkeys[pattern] = set()
        maxpatlen = max(patlens)
        # slide a window of the longest pattern length across the sequence
        for i in xrange(len(seq)):
            word = seq[i:i+maxpatlen]
            for pattern in patterns:
                patlen = len(pattern)
                if len(word) >= patlen:   # skip truncated windows at seq end
                    key = ''
                    for j in xrange(patlen):
                        if pattern[j] == '1':
                            key += word[j]
                    seqkeys[pattern].add(key)
        return seqkeys
    @staticmethod
    def seqdict2kmers(seqdict,patterns):
        """Apply seq2kmers to every sequence in a {name: seq} dict.

        Returns {name: {pattern: set(kmers)}}.
        """
        seqlistkeys = {}
        for seq in seqdict.iteritems():
            seqlistkeys[seq[0]] = vdj_aligner.seq2kmers(seq[1],patterns)
        return seqlistkeys
@staticmethod
def hashscore(refkeys,querykeys):
"""Compute number of common keys for each reference sequence.
querykeys is dict of sets, where dict keys are patterns
reference keys is dict of ref seqs, where each elt is a
dict of patterns with sets as values. the patterns must be
the same
"""
scores = {}
for seg in refkeys.iterkeys():
| |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 5 01:34:00 2021
@author: yrc2
"""
import biosteam as bst
import biorefineries.oilcane as oc
from biosteam.utils import CABBI_colors, colors
from thermosteam.utils import set_figure_size, set_font, roundsigfigs
from thermosteam.units_of_measure import format_units
from colorpalette import Palette
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from warnings import warn
import numpy as np
import pandas as pd
from matplotlib.gridspec import GridSpec
from . import _variable_mockups as variables
from ._variable_mockups import (
tea_monte_carlo_metric_mockups,
tea_monte_carlo_derivative_metric_mockups,
lca_monte_carlo_metric_mockups,
lca_monte_carlo_derivative_metric_mockups,
MFPP, TCI, electricity_production, natural_gas_consumption,
ethanol_production, biodiesel_production,
GWP_ethanol, GWP_biodiesel, GWP_electricity,
GWP_ethanol_allocation, GWP_biodiesel_allocation,
GWP_economic, MFPP_derivative,
TCI_derivative,
ethanol_production_derivative,
biodiesel_production_derivative,
electricity_production_derivative,
natural_gas_consumption_derivative,
GWP_ethanol_derivative,
)
from ._load_data import (
images_folder,
get_monte_carlo,
spearman_file,
)
import os
from._parse_configuration import format_name
__all__ = (
'plot_all',
'plot_montecarlo_main_manuscript',
'plot_breakdowns',
'plot_montecarlo_feedstock_comparison',
'plot_montecarlo_configuration_comparison',
'plot_montecarlo_agile_comparison',
'plot_montecarlo_derivative',
'plot_montecarlo_absolute',
'plot_spearman_tea',
'plot_spearman_lca',
'plot_spearman_tea_short',
'plot_spearman_lca_short',
'plot_monte_carlo_across_coordinate',
'monte_carlo_box_plot',
'plot_monte_carlo',
'plot_spearman',
'plot_configuration_breakdown',
'plot_TCI_areas_across_oil_content',
'plot_heatmap_comparison',
'plot_feedstock_conventional_comparison_kde',
'plot_feedstock_cellulosic_comparison_kde',
'plot_configuration_comparison_kde',
'plot_open_comparison_kde',
'plot_feedstock_comparison_kde',
'plot_crude_configuration_comparison_kde',
'plot_agile_comparison_kde',
'plot_separated_configuration_comparison_kde',
'area_colors',
'area_hatches',
)
# Color assignments for biorefinery process areas; short and long labels
# map to the same color so abbreviated and full breakdowns stay consistent.
area_colors = {
    'Feedstock handling': CABBI_colors.teal,
    'Juicing': CABBI_colors.green_dirty,
    'EtOH prod.': CABBI_colors.blue,
    'Ethanol production': CABBI_colors.blue,
    'Oil ext.': CABBI_colors.brown,
    'Oil extraction': CABBI_colors.brown,
    'Biod. prod.': CABBI_colors.orange,
    'Biodiesel production': CABBI_colors.orange,
    'Pretreatment': CABBI_colors.green,
    'Wastewater treatment': colors.purple,
    'CH&P': CABBI_colors.yellow,
    'Co-Heat and Power': CABBI_colors.yellow,
    'Utilities': colors.red,
    'Storage': CABBI_colors.grey,
    'HXN': colors.orange,
    'Heat exchanger network': colors.orange,
}
# Matplotlib hatch patterns per process area (same dual short/long keys
# as area_colors); empty string means no hatching.
area_hatches = {
    'Feedstock handling': 'x',
    'Juicing': '-',
    'EtOH prod.': '/',
    'Ethanol production': '/',
    'Oil ext.': '\\',
    'Oil extraction': '\\',
    'Biod. prod.': '/|',
    'Biodiesel production': '/|',
    'Pretreatment': '//',
    'Wastewater treatment': r'\\',
    'CH&P': '',
    'Co-Heat and Power': '',
    'Utilities': '\\|',
    'Storage': '',
    'HXN': '+',
    'Heat exchanger network': '+',
}
# Lighten every area color by 20% and expose the set as a named palette.
for i in area_colors: area_colors[i] = area_colors[i].tint(20)
palette = Palette(**area_colors)
letter_color = colors.neutral.shade(25).RGBn
# GWP axis-label units (kg CO2-eq per liter); "small" variant uses grams.
GWP_units_L = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{L}^{-1}$'
GWP_units_L_small = GWP_units_L.replace('kg', 'g')
CABBI_colors.orange_hatch = CABBI_colors.orange.copy(hatch='////')
ethanol_over_biodiesel = bst.MockVariable('Ethanol over biodiesel', 'L/MT', 'Biorefinery')
GWP_ethanol_displacement = variables.GWP_ethanol_displacement
production = (ethanol_production, biodiesel_production)
# Monte Carlo plot settings: key -> (metric or tuple of metrics, axis
# label, optional scale factor applied to values; None = no scaling).
mc_metric_settings = {
    'MFPP': (MFPP, f"MFPP\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI, f"TCI\n[{format_units('10^6*USD')}]", None),
    'production': (production, f"Production\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production, f"Elec. prod.\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption, f"NG cons.\n[{format_units('m^3/MT')}]", None),
    'GWP_ethanol_displacement': (GWP_ethanol_displacement, "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None),
    'GWP_economic': ((GWP_ethanol, GWP_biodiesel), "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None),
    'GWP_energy': ((GWP_ethanol_allocation, GWP_biodiesel_allocation), "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None),
}
# Same metrics expressed as deltas for pairwise configuration comparisons.
mc_comparison_settings = {
    'MFPP': (MFPP, r"$\Delta$" + f"MFPP\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI, r"$\Delta$" + f"TCI\n[{format_units('10^6*USD')}]", None),
    'production': (production, r"$\Delta$" + f"Production\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production, r"$\Delta$" + f"Elec. prod.\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption, r"$\Delta$" + f"NG cons.\n[{format_units('m^3/MT')}]", None),
    'GWP_ethanol_displacement': (GWP_ethanol_displacement, r"$\Delta$" + "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None),
    'GWP_economic': (GWP_ethanol, r"$\Delta$" + "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None),
    'GWP_energy': (GWP_ethanol_allocation, r"$\Delta$" + "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None),
    'GWP_property_allocation': ((GWP_ethanol, GWP_ethanol_allocation), r"$\Delta$" + f"GWP\n[{GWP_units_L}]", None),
}
# Metric derivatives with respect to feedstock oil content (OC); the GWP
# entry scales by 1000 to match its gram-based units.
mc_derivative_metric_settings = {
    'MFPP': (MFPP_derivative, r"$\Delta$" + format_units(r"MFPP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI_derivative, r"$\Delta$" + format_units(r"TCI/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('10^6*USD')}]", None),
    'production': ((ethanol_production_derivative, biodiesel_production_derivative), r"$\Delta$" + format_units(r"Prod./OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production_derivative, r"$\Delta$" + format_units(r"EP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption_derivative, r"$\Delta$" + format_units(r"NGC/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('m^3/MT')}]", None),
    'GWP_economic': (GWP_ethanol_derivative, r"$\Delta$" + r"GWP $\cdot \Delta \mathrm{OC}^{-1}$" f"\n[{GWP_units_L_small}]", 1000),
}
# KDE plots reuse the same settings, re-keyed by the metric object itself.
kde_metric_settings = {j[0]: j for j in mc_metric_settings.values()}
kde_comparison_settings = {j[0]: j for j in mc_comparison_settings.values()}
kde_derivative_settings = {j[0]: j for j in mc_derivative_metric_settings.values()}
# %% Plots for publication
def plot_all():
    """Generate the full set of publication figures, in order."""
    # plot_montecarlo_main_manuscript()
    for make_figure in (plot_montecarlo_absolute,
                        plot_spearman_tea,
                        plot_spearman_lca,
                        plot_breakdowns):
        make_figure()
def plot_montecarlo_main_manuscript():
    """Compose the main-manuscript Monte Carlo figure.

    Lays out a 4x3 grid holding three comparison panels (feedstock,
    configuration, and agile/oilsorghum), titles each panel, shows the
    figure, and saves it as SVG and PNG in the images folder.
    """
    set_font(size=8)
    set_figure_size(aspect_ratio=0.85)
    fig = plt.figure()
    everything = GridSpec(4, 3, fig, hspace=1.5, wspace=0.7,
                          top=0.90, bottom=0.05,
                          left=0.11, right=0.97)
    def spec2axes(spec, x, y, hspace=0, wspace=0.7, **kwargs):
        # Split a gridspec slot into an x-by-y object array of axes.
        subspec = spec.subgridspec(x, y, hspace=hspace, wspace=wspace, **kwargs)
        return np.array([[fig.add_subplot(subspec[i, j]) for j in range(y)] for i in range(x)], object)
    gs_feedstock_comparison = everything[:2, :]
    gs_configuration_comparison = everything[2:, :2]
    gs_agile_comparison = everything[2:, 2]
    axes_feedstock_comparison = spec2axes(gs_feedstock_comparison, 2, 3)
    axes_configuration_comparison = spec2axes(gs_configuration_comparison, 2, 2)
    axes_agile_comparison = spec2axes(gs_agile_comparison, 2, 1)
    plot_montecarlo_feedstock_comparison(axes_feedstock_comparison, letters='ABCDEFG')
    plot_montecarlo_configuration_comparison(axes_configuration_comparison, letters='ABCDEFG')
    plot_montecarlo_agile_comparison(axes_agile_comparison, letters='ABCDEFG')
    def add_title(gs, title):
        # Title a panel via an invisible axes spanning the whole gridspec.
        ax = fig.add_subplot(gs)
        ax._frameon = False
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        ax.set_title(
            title, color=letter_color,
            horizontalalignment='center',verticalalignment='center',
            fontsize=12, fontweight='bold', y=1.1
        )
    add_title(gs_feedstock_comparison, '(I) Impact of opting to process oilcane over sugarcane')
    add_title(gs_configuration_comparison, '(II) Impact of cellulosic ethanol integration')
    add_title(gs_agile_comparison, '(III) Impact of\noilsorghum\nintegration')
    plt.show()
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_main_manuscript.{i}')
        plt.savefig(file, transparent=True)
def plot_montecarlo_feedstock_comparison(axes_box=None, letters=None,
                                         single_column=True):
    """Plot Monte Carlo distributions comparing oilcane vs. sugarcane
    (O1 - S1 and O2 - S2); when drawn stand-alone, SVG and PNG copies are
    saved to ``images_folder``.

    Parameters
    ----------
    axes_box : array of axes, optional
        Pre-built axes to draw into (used by the composed main figure);
        when None, a fresh stand-alone figure is created and saved.
    letters : str, optional
        Panel letters; defaults to 'ABCDEFGH'.
    single_column : bool
        Use the half-width, single-column journal layout when True.
    """
    if single_column:
        width = 'half'
        aspect_ratio = 2.25
        ncols = 1
        left = 0.255
        bottom = 0.05
    else:
        width = None
        aspect_ratio = 0.75
        left = 0.105
        bottom = 0.12
        ncols = 3
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=width, aspect_ratio=aspect_ratio)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box,
        labels=[
            'Direct Cogeneration',
            'Integrated Co-Fermentation',
            # 'Direct Cogeneration',
            # 'Integrated Co-Fermentation',
        ],
        comparison_names=['O1 - S1', 'O2 - S2'],
        metrics = ['MFPP', 'TCI', 'production', 'GWP_property_allocation',
                   'natural_gas_consumption', 'electricity_production'],
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green',
            'orange', 'orange_hatch', 'grey', 'brown',
        ])
    )
    # Letter each panel near its upper-right corner.
    for ax, letter in zip(axes, 'ABCDEFGH' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
        # if axes_box is None and letter in 'DH':
        #     x = 0.5
        #     plt.text(x, ylb - (yub - ylb) * 0.3,
        #              'Impact of processing\noilcane over sugarcane',
        #              horizontalalignment='center',verticalalignment='center',
        #              fontsize=8)
    if axes_box is None:
        plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_feedstock_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_configuration_comparison(axes_box=None, letters=None,
                                             single_column=True):
    """Plot Monte Carlo distributions for the impact of cellulosic ethanol
    integration (configuration comparison O2 - O1); when drawn stand-alone,
    SVG and PNG copies are saved to ``images_folder``.

    Parameters
    ----------
    axes_box : array of axes, optional
        Pre-built axes to draw into (used by the composed main figure).
    letters : str, optional
        Panel letters; defaults to 'ABCDEF'.
    single_column : bool
        Use the half-width layout (with two extra metrics) when True.
    """
    if single_column:
        width = 'half'
        aspect_ratio = 2.25
        ncols = 1
        left = 0.255
        bottom = 0.05
        x = 1.65  # x-position for panel letters
        metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation',
                  'natural_gas_consumption', 'electricity_production']
    else:
        width = None
        aspect_ratio = 0.75
        left = 0.105
        bottom = 0.12
        ncols = 2
        x = 0.58
        metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation']
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=width, aspect_ratio=aspect_ratio)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box,
        labels=[
            'Oilcane',
            # 'Sugarcane',
        ],
        comparison_names=[
            'O2 - O1',
            # 'S2 - S1'
        ],
        metrics=metrics,
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green',
            'orange', 'orange_hatch',
        ])
    )
    # Letter each panel.
    for ax, letter in zip(axes, 'ABCDEF' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(x, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    if axes_box is None:
        plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_configuration_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_agile_comparison(axes_box=None, letters=None):
    """Plot Monte Carlo MFPP and TCI distributions for the impact of
    integrating oilsorghum at an agile oilcane biorefinery; when drawn
    stand-alone, SVG and PNG copies are saved to ``images_folder``.
    """
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=3.3071, aspect_ratio=1.0)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile_only=True, ncols=1,
        labels=[
            'Direct Cogeneration',
            'Integrated Co-Fermentation'
        ],
        metrics=['MFPP', 'TCI'],
        axes_box=axes_box,
    )
    # Letter each panel; the stand-alone figure also gets a caption under
    # the last panel.
    for ax, letter in zip(axes, 'AB' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
        if axes_box is None and letter == 'B':
            plt.text(0.5, ylb - (yub - ylb) * 0.25,
                     'Impact of integrating oilsorghum\nat an agile oilcane biorefinery',
                     horizontalalignment='center',verticalalignment='center',
                     fontsize=8)
    if axes_box is None:
        plt.subplots_adjust(right=0.9, left=0.2, wspace=0.5, top=0.98, bottom=0.15)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_agile_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_derivative():
    """Plot Monte Carlo distributions of all metric derivatives with
    respect to oil content and save SVG/PNG copies to ``images_folder``.
    """
    set_font(size=8)
    set_figure_size(
        aspect_ratio=0.5,
        # width=3.3071, aspect_ratio=1.85
    )
    fig, axes = plot_monte_carlo(
        derivative=True, absolute=True,
        comparison=False, agile=False,
        ncols=3,
        # tickmarks=np.array([
        #     [-3, -2, -1, 0, 1, 2, 3, 4, 5],
        #     [-9, -6, -3, 0, 3, 6, 9, 12, 15],
        #     [-2.0, -1.5, -1.0, -0.5, 0, 0.5, 1.0, 1.5, 2],
        #     [-16, -8, 0, 8, 16, 24, 32, 40, 48],
        #     [-400, -300, -200, -100, 0, 100, 200, 300, 400],
        #     [-300, -225, -150, -75, 0, 75, 150, 225, 300]
        # ], dtype=object),
        labels=['DC', 'ICF'],
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown',
            'orange',
        ])
    )
    # Letter each panel near its upper-right corner.
    for ax, letter in zip(axes, 'ABCDEFGH'):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    plt.subplots_adjust(
        hspace=0, wspace=0.7,
        top=0.95, bottom=0.1,
        left=0.12, right=0.96
    )
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_derivative.{i}')
        plt.savefig(file, transparent=True)
def plot_montecarlo_absolute():
    """Plot absolute Monte Carlo distributions of all metrics across the
    eight feedstock/configuration scenarios and save SVG/PNG copies to
    ``images_folder``.
    """
    set_font(size=8)
    set_figure_size(aspect_ratio=1.05)
    fig, axes = plot_monte_carlo(
        absolute=True, comparison=False, ncols=2,
        expand=0.1,
        labels=['Sugarcane\nDC', 'Oilcane\nDC',
                'Sugarcane\nICF', 'Oilcane\nICF',
                'Sugarcane &\nSorghum DC', 'Oilcane &\nOil-sorghum DC',
                'Sugarcane &\nSorghum ICF', 'Oilcane &\nOil-sorghum ICF'],
        xrot=90,
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown',
            'orange', 'orange', 'green', 'orange', 'green',
        ])
    )
    # Letter each panel near its upper-right corner.
    for ax, letter in zip(axes, 'ABCDEFGHIJ'):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(7.8, ylb + (yub - ylb) * 0.92, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    plt.subplots_adjust(left=0.12, right=0.95, wspace=0.40, top=0.98, bottom=0.2)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_absolute.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_tea(with_units=None, aspect_ratio=0.8, **kwargs):
set_font(size=8)
set_figure_size(aspect_ratio=aspect_ratio)
plot_spearman(
configurations=[
'O1', 'O1*',
'O2', 'O2*',
],
labels=[
'DC', 'Oil-sorghum int., DC',
'ICF', 'Oil-sorghum int., ICF',
],
kind='TEA',
with_units=with_units,
cutoff=0.03,
**kwargs
)
plt.subplots_adjust(left=0.45, right=0.975, top=0.98, | |
shutil.copyfile(io_config, dconfig)
def make_data_splits(samples, params, RESULTSDIR, num_experiments):
    """
    Make train/validation splits from list of samples, or load in a specific
    list of sampleIDs if desired.

    Parameters
    ----------
    samples : array of str
        SampleIDs of the form "{experiment}_{id}". Must support fancy
        indexing with a list of indices (i.e. a numpy array) — TODO confirm
        against callers.
    params : dict
        Uses "load_valid" (directory holding saved splits, or None) and
        "num_validation_per_exp" (validation samples drawn per experiment).
    RESULTSDIR : str
        Directory where the split pickles are written.
    num_experiments : int
        Number of experiments represented in `samples`.

    Returns
    -------
    dict
        Partition with "train_sampleIDs" and "valid_sampleIDs".
    """
    # TODO: Switch to .mat from .pickle so that these lists are easier to read
    # and change.
    partition = {}
    if params["load_valid"] is None:
        all_inds = np.arange(len(samples))

        # extract random inds from each set for validation
        v = params["num_validation_per_exp"]
        valid_inds = []
        if params["num_validation_per_exp"] > 0:  # if 0, do not perform validation
            for e in range(num_experiments):
                # Indices of samples belonging to experiment e (sampleIDs are
                # prefixed with the experiment number).
                tinds = [
                    i for i in range(len(samples)) if int(samples[i].split("_")[0]) == e
                ]
                valid_inds = valid_inds + list(
                    np.random.choice(tinds, (v,), replace=False)
                )
                valid_inds = list(np.sort(valid_inds))

        train_inds = [i for i in all_inds if i not in valid_inds]
        # The two splits must be disjoint.
        assert (set(valid_inds) & set(train_inds)) == set()

        partition["valid_sampleIDs"] = samples[valid_inds]
        partition["train_sampleIDs"] = samples[train_inds]

        # Save train/val inds
        with open(os.path.join(RESULTSDIR, "val_samples.pickle"), "wb") as f:
            cPickle.dump(partition["valid_sampleIDs"], f)

        with open(os.path.join(RESULTSDIR, "train_samples.pickle"), "wb") as f:
            cPickle.dump(partition["train_sampleIDs"], f)
    else:
        # Load validation samples from elsewhere
        with open(os.path.join(params["load_valid"], "val_samples.pickle"), "rb",) as f:
            partition["valid_sampleIDs"] = cPickle.load(f)
        partition["train_sampleIDs"] = [
            f for f in samples if f not in partition["valid_sampleIDs"]
        ]

    return partition
def rename_weights(traindir, kkey, mon):
    """
    At the end of DANNCe or COM training, rename the best weights file with
    the epoch # and value of the monitored quantity.
    """
    # Parse the training log (CSV with a header row) written during training.
    log = np.genfromtxt(
        os.path.join(traindir, "training.csv"), delimiter=",", names=True
    )
    monitored = log[mon]
    best_value = np.min(monitored)
    best_epoch = int(log["epoch"][np.argmin(monitored)])
    # e.g. weights.12-0.01234.hdf5
    newname = "weights.{}-{:.5f}.hdf5".format(best_epoch, best_value)
    os.rename(os.path.join(traindir, kkey), os.path.join(traindir, newname))
def make_paths_safe(params):
    """Replace any "/" or "\\" in string values of *params* with ``os.sep``
    to promote OS agnosticism. The dictionary is mutated in place and also
    returned.
    """
    for key, value in params.items():
        if isinstance(value, str):
            params[key] = value.replace("/", os.sep).replace("\\", os.sep)
    return params
def trim_COM_pickle(fpath, start_sample, end_sample, opath=None):
    """Trim dictionary entries to the range [start_sample, end_sample].

    Parameters
    ----------
    fpath : str
        Path of the COM pickle file to read.
    start_sample, end_sample : int
        Inclusive sampleID range to keep.
    opath : str, optional
        Output path for saving the trimmed COM dictionary, if desired.
        When None (the default), nothing is written to disk.

    Returns
    -------
    dict
        The trimmed COM dictionary.
    """
    with open(fpath, "rb") as f:
        save_data = cPickle.load(f)
    sd = {}
    for key in save_data:
        if start_sample <= key <= end_sample:
            sd[key] = save_data[key]
    # BUGFIX: only write when an output path was provided; previously the
    # default opath=None crashed in open(None, "wb") despite the docstring
    # describing saving as optional.
    if opath is not None:
        with open(opath, "wb") as f:
            cPickle.dump(sd, f)
    return sd
def save_params(outdir, params):
    """
    Save copy of params to outdir as .mat file
    """
    # Params must first be converted to .mat-safe values (callables replaced
    # by their names, None replaced by "None") via prepare_save_metadata.
    sio.savemat(os.path.join(outdir, "copy_params.mat"), prepare_save_metadata(params))
    return True
def make_none_safe(pdict):
    """Recursively replace None values with the string "None".

    Dicts are mutated in place and recursed into. A bare None, or a
    list/tuple containing None, collapses to the string "None"; any other
    value passes through unchanged.
    """
    if isinstance(pdict, dict):
        for key in pdict:
            pdict[key] = make_none_safe(pdict[key])
        return pdict
    holds_none = isinstance(pdict, (list, tuple)) and None in pdict
    if pdict is None or holds_none:
        return "None"
    return pdict
def prepare_save_metadata(params):
    """
    To save metadata, i.e. the prediction param values associated with COM or
    DANNCE output, convert loss, metrics, and net entries into their names
    and remove the 'experiment' field.
    """
    # Need to convert None to string but still want to conserve the metadata
    # structure format, so we don't want to convert the whole dict to a string.
    meta = params.copy()
    meta.pop("experiment", None)
    for key in ("loss", "net"):
        if key in meta:
            meta[key] = meta[key].__name__
    if key := "metric":
        if key in meta:
            meta[key] = [
                m if isinstance(m, str) else m.__name__ for m in meta[key]
            ]
    return make_none_safe(meta.copy())
def save_COM_dannce_mat(params, com3d, sampleID):
    """
    Instead of saving 3D COM to com3d.mat, save it into the dannce.mat file, which
    streamlines subsequent dannce access.
    """
    # Bundle the COM payload with run metadata for traceability.
    com = {}
    com["com3d"] = com3d
    com["sampleID"] = sampleID
    com["metadata"] = prepare_save_metadata(params)

    # Open dannce.mat file, add com and re-save
    print("Saving COM predictions to " + params["label3d_file"])
    rr = sio.loadmat(params["label3d_file"])
    # For safety, save old file to temp and delete it at the end
    sio.savemat(params["label3d_file"] + ".temp", rr)
    rr["com"] = com
    sio.savemat(params["label3d_file"], rr)
    os.remove(params["label3d_file"] + ".temp")
def save_COM_checkpoint(
    save_data, RESULTSDIR, datadict_, cameras, params, file_name="com3d"
):
    """
    Saves COM pickle and matfiles.

    Parameters
    ----------
    save_data : dict
        Undistorted 2D COMs, written verbatim to "{file_name}.pickle".
    RESULTSDIR : str
        Output directory.
    datadict_ : dict
        Per-sample data keyed by "{experimentID}_{sampleID}" strings.
    cameras : dict
        Camera matrices used for 3D triangulation.
    params : dict
        Run parameters; "n_instances" and "n_channels_out" select the
        triangulation/linking method.
    file_name : str
        Basename (no extension) for the .pickle and .mat outputs.
    """
    # Save undistorted 2D COMs and their 3D triangulations.
    # Use a context manager so the file is closed even if dump() raises.
    with open(os.path.join(RESULTSDIR, file_name + ".pickle"), "wb") as f:
        cPickle.dump(save_data, f)

    # We need to remove the eID in front of all the keys in datadict
    # for prepare_COM to run properly
    datadict_save = {}
    for key in datadict_:
        datadict_save[int(float(key.split("_")[-1]))] = datadict_[key]

    if params["n_instances"] > 1:
        if params["n_channels_out"] > 1:
            linking_method = "multi_channel"
        else:
            linking_method = "euclidean"
        _, com3d_dict = serve_data_DANNCE.prepare_COM_multi_instance(
            os.path.join(RESULTSDIR, file_name + ".pickle"),
            datadict_save,
            comthresh=0,
            weighted=False,
            camera_mats=cameras,
            linking_method=linking_method,
        )
    else:
        # (Removed an unused `prepare_func` assignment here.)
        _, com3d_dict = serve_data_DANNCE.prepare_COM(
            os.path.join(RESULTSDIR, file_name + ".pickle"),
            datadict_save,
            comthresh=0,
            weighted=False,
            camera_mats=cameras,
            method="median",
        )

    cfilename = os.path.join(RESULTSDIR, file_name + ".mat")
    print("Saving 3D COM to {}".format(cfilename))
    samples_keys = list(com3d_dict.keys())

    # Multi-instance runs triangulate one 3D COM per instance.
    if params["n_instances"] > 1:
        c3d = np.zeros((len(samples_keys), 3, params["n_instances"]))
    else:
        c3d = np.zeros((len(samples_keys), 3))

    for i in range(len(samples_keys)):
        c3d[i] = com3d_dict[samples_keys[i]]

    sio.savemat(
        cfilename,
        {
            "sampleID": samples_keys,
            "com": c3d,
            "metadata": prepare_save_metadata(params),
        },
    )

    # Also save a copy into the label3d file
    # save_COM_dannce_mat(params, c3d, samples_keys)
def inherit_config(child, parent, keys):
    """
    If a key in keys does not exist in child, assigns the key-value in parent
    to child. Returns the (mutated) child dictionary.
    """
    missing = (k for k in keys if k not in child)
    for k in missing:
        child[k] = parent[k]
        print(
            "{} not found in io.yaml file, falling back to main config".format(k)
        )
    return child
def grab_predict_label3d_file(defaultdir=""):
    """
    Finds the paths to the training experiment yaml files.
    """
    search_dir = os.path.join(".", defaultdir)
    # Collect every *dannce.mat file and keep a deterministic order.
    matches = sorted(
        os.path.join(search_dir, name)
        for name in os.listdir(search_dir)
        if "dannce.mat" in name
    )
    if not matches:
        raise Exception("Did not find any *dannce.mat file in {}".format(search_dir))
    print("Using the following *dannce.mat files: {}".format(matches[0]))
    return matches[0]
def load_expdict(params, e, expdict, _DEFAULT_VIDDIR):
    """
    Load in camnames and video directories and label3d files for a single
    experiment during training.

    Parameters
    ----------
    params : dict
        Base configuration; copied and specialized for this experiment.
    e : int
        Experiment index (prefixes the chunk keys).
    expdict : dict
        Experiment entry from io.yaml; must contain "label3d_file" and may
        override "viddir" and "camnames".
    _DEFAULT_VIDDIR : str
        Default video directory name, relative to the label3d file's folder.
    """
    _DEFAULT_NPY_DIR = 'npy_volumes'
    exp = params.copy()
    exp = make_paths_safe(exp)
    exp["label3d_file"] = expdict["label3d_file"]
    exp["base_exp_folder"] = os.path.dirname(exp["label3d_file"])
    if "viddir" not in expdict:
        # if the videos are not at the _DEFAULT_VIDDIR, then it must
        # be specified in the io.yaml experiment portion
        exp["viddir"] = os.path.join(exp["base_exp_folder"], _DEFAULT_VIDDIR)
    else:
        exp["viddir"] = expdict["viddir"]
    print("Experiment {} using videos in {}".format(e, exp["viddir"]))

    # Camera names: an explicit io.yaml entry wins over the names recorded
    # in the label3d file.
    l3d_camnames = io.load_camnames(expdict["label3d_file"])
    if "camnames" in expdict:
        exp["camnames"] = expdict["camnames"]
    elif l3d_camnames is not None:
        exp["camnames"] = l3d_camnames
    print("Experiment {} using camnames: {}".format(e, exp["camnames"]))

    # Use the camnames to find the chunks for each video
    chunks = {}
    for name in exp["camnames"]:
        if exp["vid_dir_flag"]:
            # Videos live directly inside the camera folder.
            camdir = os.path.join(exp["viddir"], name)
        else:
            # Videos are nested one folder below the camera folder.
            camdir = os.path.join(exp["viddir"], name)
            intermediate_folder = os.listdir(camdir)
            camdir = os.path.join(camdir, intermediate_folder[0])
        video_files = os.listdir(camdir)
        video_files = [f for f in video_files if ".mp4" in f]
        # Chunk boundaries are the numeric video basenames, sorted.
        video_files = sorted(video_files, key=lambda x: int(x.split(".")[0]))
        chunks[str(e) + "_" + name] = np.sort(
            [int(x.split(".")[0]) for x in video_files]
        )
    exp["chunks"] = chunks
    print(chunks)

    # For npy volume training
    if params["use_npy"]:
        exp["npy_vol_dir"] = os.path.join(exp["base_exp_folder"], _DEFAULT_NPY_DIR)

    return exp
def batch_rgb2gray(imstack):
    """Convert an image batch to grayscale, one frame at a time.

    The batch dimension is first; returns a float32 array of shape
    (batch, height, width).
    """
    batch, height, width = imstack.shape[0], imstack.shape[1], imstack.shape[2]
    grayim = np.zeros((batch, height, width), "float32")
    for frame_idx, frame in enumerate(imstack):
        grayim[frame_idx] = rgb2gray(frame.astype("uint8"))
    return grayim
def return_tile(imstack, fac=2):
    """Crop a larger image into smaller tiles without any overlap.

    Each image of the batch is split into fac*fac tiles; the output batch
    dimension is imstack.shape[0] * fac * fac.
    """
    tile_h = imstack.shape[1] // fac
    tile_w = imstack.shape[2] // fac
    tiles = np.zeros(
        (imstack.shape[0] * fac * fac, tile_h, tile_w, imstack.shape[3]), "float32"
    )
    idx = 0
    for image in imstack:
        for row in np.arange(0, imstack.shape[1], tile_h):
            for col in np.arange(0, imstack.shape[2], tile_w):
                tiles[idx] = image[row : row + tile_h, col : col + tile_w]
                idx += 1
    return tiles
def tile2im(imstack, fac=2):
    """Reconstruct the larger image from non-overlapping tiles (the inverse
    of return_tile)."""
    tile_h = imstack.shape[1]
    tile_w = imstack.shape[2]
    merged = np.zeros(
        (imstack.shape[0] // (fac * fac), tile_h * fac, tile_w * fac, imstack.shape[3]),
        "float32",
    )
    idx = 0
    for img_num in range(merged.shape[0]):
        for row in np.arange(0, merged.shape[1], tile_h):
            for col in np.arange(0, merged.shape[2], tile_w):
                merged[img_num, row : row + tile_h, col : col + tile_w, :] = imstack[idx]
                idx += 1
    return merged
def downsample_batch(imstack, fac=2, method="PIL"):
"""Downsample each image in a batch."""
if method == "PIL":
out = np.zeros(
(
imstack.shape[0],
imstack.shape[1] // fac,
imstack.shape[2] // fac,
imstack.shape[3],
),
"float32",
)
if out.shape[-1] == 3:
# this is just an RGB image, so no need to loop over channels with PIL
for i in range(imstack.shape[0]):
| |
<filename>reverse_engineering/premium_4/captures/mcu_main_to_sub/decode.py
import csv
import gzip
import struct
import sys
def signed_char(raw):
    """Interpret a single byte as a signed 8-bit integer (-128..127).

    (Was a lambda assignment; PEP 8 prefers a def for named callables.)
    """
    return struct.unpack('b', raw)[0]
class SubMCU(object):
    def __init__(self):
        # No up-front state: `packet`, `screen_num`, and `message` are set
        # by process() for each incoming packet.
        pass
# cmd, pict, screen, param 0, param 1, param 2
    def process(self, packet):
        """Render one display packet and echo the decoded text to stdout.

        Packet layout: cmd, pict, screen, param 0, param 1, param 2.
        The screen byte selects a template from ``self.messages``; the
        matching ``_msg_*`` handler patches parameter digits into it.
        """
        sys.stdout.write(hexdump(packet) + " -> ")
        self.packet = packet
        self.screen_num = packet[2]
        # Work on a mutable copy of the template so digits can be patched
        # in place without touching the template table.
        self.message = bytearray(self.messages[self.screen_num])
        self._dispatch()
        sys.stdout.write(self.message.decode('utf-8'))
        sys.stdout.write("\n")
def _dispatch(self):
prefix = '_msg_%02x_' % self.screen_num
for funcname in dir(self):
if funcname.startswith(prefix):
f = getattr(self, funcname)
f()
return
raise NotImplementedError("0x%02x" % self.screen_num)
    def _msg_01_cd_tr(self):
        # Buffer: 'CD...TR....'
        # Example: 'CD 1 TR 03 '
        #
        # Param 0 High Nibble = Unused
        # Param 1 Low Nibble = CD number
        # Param 1 Byte = Track number
        # Param 2 Byte = Unused
        # Template shown as-is for now; parameter digits not yet patched in.
        pass

    # TODO finish me
    # 0x01: 'CD...TR....',
    # 0x02: 'CUE........',
    # 0x03: 'REV........',
    # 0x04: 'SCANCD.TR..',
    # 0x05: 'NO..CHANGER',
    # 0x06: 'NO..MAGAZIN',
    # 0x07: '....NO.DISC',
    # 0x08: 'CD...ERROR.',
    # 0x09: 'CD.........',
    # 0x0a: 'CD....MAX..',
    # 0x0b: 'CD....MIN..',
    # 0x0c: 'CHK.MAGAZIN',
    # 0x0d: 'CD..CD.ERR.',
    # 0x0e: 'CD...ERROR.',
    # 0x0f: 'CD...NO.CD.',

    def _msg_10_set_onvol_y(self):
        # Buffer: 'SET.ONVOL.Y'
        # Example: 'SET ONVOL Y'
        #
        # No params
        pass

    def _msg_11_set_onvol_n(self):
        # Buffer: 'SET.ONVOL.N'
        # Example: 'SET ONVOL N'
        #
        # No params
        pass
def msg_12_set_onvol_(self):
# TODO
# 0x12: 'SET.ONVOL..',
pass
def msg_13_set_cdmix1(self):
# Buffer: 'SET.CD.MIX1'
# Example: 'SET CD MIX1'
#
# No params
pass
def msg_14_set_cdmix6(self):
# Buffer: 'SET.CD.MIX6'
# Example: 'SET CD MIX6'
#
# No params
pass
def msg_15_tape_skip_y(self):
# Buffer: 'TAPE.SKIP.Y'
# Example: 'TAPE SKIP Y'
#
# No params
pass
def msg_16_tape_skip_n(self):
# Buffer: 'TAPE.SKIP.N'
# Example: 'TAPE SKIP N'
#
# No params
pass
def _msg_40_fm_mhz(self):
# Buffer: 'FM......MHZ'
# Example: 'FM261389MHZ'
#
# Param 0 High Nibble = FM mode number (1, 2 for FM1, FM2)
# Param 0 Low Nibble = Preset number (0=none, 1-6)
# Param 1 Byte = FM Frequency Index (0=87.9 MHz, 0xFF=138.9 MHz)
# Param 2 Byte = Unused
fm_mode = (self.packet[3] & 0xf0) >> 8
self.message[2] = fm_mode + 0x30
preset = self.packet[3] & 0x0f
self.message[3] = preset + 0x30
freq_index = self.packet[4]
freq_str = str(879 + (2 * freq_index)).rjust(4, "0")
for i, digit in enumerate(freq_str):
if (i == 0) and digit == '0':
continue
self.message[4 + i] = digit
def _msg_41_am_khz(self):
# Buffer: 'AM......KHZ'
# Example: 'AM 2 540KHZ'
#
# Param 0 High Nibble = Unused
# Param 0 Low Nibble = Preset number (0=none, 1-6)
# Param 1 Byte = AM Frequency Index (0=540 kHz, 3080 kHz)
# Param 2 Byte = Unused
preset = self.packet[3] & 0x0f
self.message[3] = preset + 0x30
freq_index = self.packet[4]
freq_str = str(530 + (10 * freq_index)).rjust(4, "0")
for i, digit in enumerate(freq_str):
if (i == 0) and digit == '0':
continue
self.message[4 + i] = digit
def _msg_42_fm_mhz(self):
# 'SCAN....MHZ'
freq_index = self.packet[4]
freq_str = str(879 + (2 * freq_index)).rjust(4, "0")
for i, digit in enumerate(freq_str):
if (i == 0) and digit == '0':
continue
self.message[4 + i] = digit
def _msg_43_scan_khz(self):
# Buffer: 'AM......KHZ'
# Example: 'AM 2 540KHZ'
#
# Param 0 High Nibble = Unused
# Param 0 Low Nibble = Preset number (0=none, 1-6)
# Param 1 Byte = AM Frequency Index (0=540 kHz, 3080 kHz)
# Param 2 Byte = Unused
freq_index = self.packet[4]
freq_str = str(530 + (10 * freq_index)).rjust(4, "0")
for i, digit in enumerate(freq_str):
if (i == 0) and digit == '0':
continue
self.message[4 + i] = digit
    # Screens 0x44-0x47: static templates, no parameter digits to patch.
    def _msg_44_fm_max(self):
        # 'FM....MAX..'
        pass

    def _msg_45_fm_min(self):
        # 'FM....MIN..'
        pass

    def _msg_46_am_max(self):
        # 'AM....MAX..'
        pass

    def _msg_47_am_min(self):
        # 'AM....MIN..'
        pass
    # Screens 0x50-0x5d (tape deck) and 0x60/0x61: the templates are shown
    # as-is, so the handlers below only document each buffer layout.
    def _msg_50_tape_play_a(self):
        # Buffer: 'TAPE.PLAY.A'
        # Example: 'TAPE PLAY A'
        #
        # No params
        pass

    def _msg_51_tape_play_b(self):
        # Buffer: 'TAPE.PLAY.B'
        # Example: 'TAPE PLAY B'
        #
        # No params
        pass

    def _msg_52_tape_ff(self):
        # Buffer: 'TAPE..FF...'
        # Example: 'TAPE FF '
        #
        # No params
        pass

    def _msg_53_tape_rew(self):
        # Buffer: 'TAPE..REW..'
        # Example: 'TAPE REW '
        #
        # No params
        pass

    def _msg_54_tape_mss_ff(self):
        # Buffer: 'TAPEMSS.FF.'
        # Example: 'TAPEMSS FF '
        #
        # No params
        pass

    def _msg_55_tape_mss_rew(self):
        # Buffer: 'TAPEMSS.REW'
        # Example: 'TAPEMSS REW'
        #
        # No params
        pass

    def _msg_56_tape_scan_a(self):
        # Buffer: 'TAPE.SCAN.A'
        # Example: 'TAPE SCAN A'
        #
        # No params
        pass

    def _msg_57_tape_scan_b(self):
        # Buffer: 'TAPE.SCAN.B'
        # Example: 'TAPE SCAN B'
        #
        # No params
        pass

    def _msg_58_tape_metal(self):
        # Buffer: 'TAPE.METAL.'
        # Example: 'TAPE METAL '
        #
        # No params
        pass

    def _msg_59_tape_bls(self):
        # Buffer: 'TAPE..BLS..'
        # Example: 'TAPE BLS '
        #
        # No params
        pass

    def _msg_5a_no_tape(self):
        # Buffer: '....NO.TAPE'
        # Example: ' NO TAPE'
        #
        # No params
        pass

    def _msg_5b_tape_error(self):
        # Buffer: 'TAPE.ERROR.'
        # Example: 'TAPE ERROR '
        #
        # No params
        pass

    def _msg_5c_tape_max(self):
        # Buffer: 'TAPE..MAX..'
        # Example: 'TAPE MAX '
        #
        # No params
        pass

    def _msg_5d_tape_min(self):
        # Buffer: 'TAPE..MIN..'
        # Example: 'TAPE MIN '
        #
        # No params
        pass

    def _msg_60_max(self):
        # '.....MAX...'
        pass

    def _msg_61_min(self):
        # '.....MIN...'
        pass
def _msg_62_bass(self):
# Buffer: 'BASS.......'
# Example: 'BASS 0 '
# Example: 'BASS +9 '
# Example: 'BASS -9 '
#
# Param 0 Byte = Signed binary number
# Param 1 Byte = Unused
# Param 2 Byte = Unused
level = signed_char(self.packet[3:3+1])
if level < 0:
self.message[6] = '-'
elif level > 0:
self.message[6] = '+'
self.message[7] = abs(level) + 0x30
    def _msg_63_treb(self):
        # Buffer: 'TREB.......'
        # Example: 'TREB 0 '
        # Example: 'TREB +9 '
        # Example: 'TREB -9 '
        #
        # Param 0 Byte = Signed binary number
        # Param 1 Byte = Unused
        # Param 2 Byte = Unused
        # Same layout as BASS (sign at offset 6, digit at offset 7), so the
        # bass handler can do the patching on this screen's template.
        self._msg_62_bass()
    def _msg_64_bal_left(self):
        # Buffer: 'BAL.LEFT...'
        # Example: 'BAL LEFT 9'
        # Example: 'BAL LEFT 1'
        #
        # Param 0 Byte = Signed binary number (always positive)
        # Param 1 Byte = Unused
        # Param 2 Byte = Unused
        level = abs(signed_char(self.packet[3:3+1]))
        # Single level digit at offset 10 (0x30 = ASCII '0').
        self.message[10] = level + 0x30

    def _msg_65_bal_right(self):
        # Buffer: 'BAL.RIGHT..'
        # Example: 'BAL RIGHT 9'
        # Example: 'BAL RIGHT 1'
        #
        # Param 0 Byte = Signed binary number (always negative)
        # Param 1 Byte = Unused
        # Param 2 Byte = Unused
        level = abs(signed_char(self.packet[3:3+1]))
        self.message[10] = level + 0x30

    def _msg_66_bal_center(self):
        # Static template; no parameters to patch.
        pass

    def _msg_67_fadefront(self):
        # Same layout as balance: level digit at offset 10.
        level = abs(signed_char(self.packet[3:3+1]))
        self.message[10] = level + 0x30

    def _msg_68_faderear(self):
        level = abs(signed_char(self.packet[3:3+1]))
        self.message[10] = level + 0x30

    def _msg_69_fade_center(self):
        # Static template; no parameters to patch.
        pass
    def _msg_80_no_code(self):
        # Buffer: '....NO.CODE'
        # Example: ' NO CODE'
        #
        # No params
        pass

    def _msg_81_code(self):
        # Buffer: '.....CODE..'
        # Example: ' CODE '
        #
        # No params
        pass

    def _msg_82_code_entry(self):
        # cmd, pict, screen, param 0, param 1, param 2
        # Buffer: '...........'
        # Example: '2 1234 '
        #
        # Param 0 High Nibble = Unused
        # Param 0 Low Nibble = Attempt number
        # Param 1 Byte = Safe code high byte (BCD)
        # Param 2 Byte = Safe code low byte (BCD)
        attempt = self.packet[3] & 0x0f
        if attempt != 0:
            # Attempt counter digit at offset 0 (0x30 = ASCII '0').
            self.message[0] = attempt + 0x30
        # Unpack the four BCD code nibbles into ASCII digits at offsets 5-8.
        self.message[5] = ((self.packet[4] >> 4) & 0x0f) + 0x30
        self.message[6] = (self.packet[4] & 0x0f) + 0x30
        self.message[7] = ((self.packet[5] >> 4) & 0x0f) + 0x30
        self.message[8] = (self.packet[5] & 0x0f) + 0x30

    def _msg_83_safe(self):
        # Buffer: '.....SAFE..'
        # Example: '2....SAFE..'
        #
        # Param 0 High Nibble = Unused
        # Param 0 Low Nibble = Attempt number
        # Param 1 Byte = Unused
        # Param 2 Byte = Unused
        attempt = self.packet[3] & 0x0f
        # Attempt counter digit at offset 0 (0x30 = ASCII '0').
        self.message[0] = attempt + 0x30

    def _msg_84_initial(self):
        # Buffer: '....INITIAL'
        # Example: ' INITIAL'
        #
        # No params
        pass

    def _msg_85_no_code(self):
        # Buffer: '....NO.CODE'
        # Example: ' NO CODE'
        #
        # No params
        pass

    def _msg_87_clear(self):
        # Buffer: '....CLEAR..'
        # Example: ' CLEAR '
        #
        # No params
        pass

    def _msg_b0_diag(self):
        # Buffer: '.....DIAG..'
        # Example: ' DIAG '
        #
        # No params
        pass

    def _msg_b1_testdisplay(self):
        # Buffer: 'TESTDISPLAY'
        # Example: 'TESTDISPLAY'
        #
        # No params
        pass
def _msg_c0_bose(self):
# Buffer: '.....BOSE..'
# Example: ' BOSE '
#
# No | |
!= 0)]
stereo_m2_data = stereo_m2_data[np.where(stereo_m2_data != 0)]
stereo_numbers = np.intersect1d(stereo_m1_data, stereo_m2_data)
# because of IDs equal to 0, we must find indices in a slight different way
# see https://stackoverflow.com/questions/8251541/numpy-for-every-element-in-one-array-find-the-index-in-another-array
index_m1 = np.argsort(self.event_data['M1']['stereo_event_number'])
index_m2 = np.argsort(self.event_data['M2']['stereo_event_number'])
sort_stereo_events_m1 = self.event_data['M1']['stereo_event_number'][index_m1]
sort_stereo_events_m2 = self.event_data['M2']['stereo_event_number'][index_m2]
sort_index_m1 = np.searchsorted(sort_stereo_events_m1, stereo_numbers)
sort_index_m2 = np.searchsorted(sort_stereo_events_m2, stereo_numbers)
m1_ids = np.take(index_m1, sort_index_m1)
m2_ids = np.take(index_m2, sort_index_m2)
stereo_ids = list(zip(m1_ids, m2_ids))
return stereo_ids
    def _find_stereo_mc_events(self):
        """
        This internal method identifies stereo-triggered MC events in the
        run, separately for each telescope.

        Returns
        -------
        dict:
            A dictionary with 'M1' and 'M2' keys, each holding the list of
            event indices whose stereo_event_number is non-zero.
            (The previous docstring claimed a list of (M1_id, M2_id) pairs,
            which is what _find_stereo_events returns — not this method.)
        """
        # NOTE(review): despite the local name "mono_ids", argwhere() selects
        # the indices of *non-zero* stereo event numbers per telescope.
        mono_ids = dict()
        mono_ids['M1'] = []
        mono_ids['M2'] = []

        m1_ids = np.argwhere(self.event_data['M1']['stereo_event_number'])
        m2_ids = np.argwhere(self.event_data['M2']['stereo_event_number'])

        mono_ids['M1'] = list(m1_ids.flatten())
        mono_ids['M2'] = list(m2_ids.flatten())

        return mono_ids
    def _find_mono_events(self):
        """
        This internal method identifies the IDs (order numbers) of the
        mono events in the run — events triggered in one telescope only.
        (The previous docstring said "pedestal events", which was wrong.)

        Returns
        -------
        dict:
            A dictionary of mono event IDs in M1/2 separately.
        """
        mono_ids = dict()
        mono_ids['M1'] = []
        mono_ids['M2'] = []

        n_m1_events = len(self.event_data['M1']['stereo_event_number'])
        n_m2_events = len(self.event_data['M2']['stereo_event_number'])

        if not self.is_mc:
            if (n_m1_events != 0) and (n_m2_events != 0):
                # Stereo event numbers of data-triggered events, per telescope.
                m1_data = self.event_data['M1']['stereo_event_number'][np.where(self.event_data['M1']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)]
                m2_data = self.event_data['M2']['stereo_event_number'][np.where(self.event_data['M2']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)]

                m1_ids_data = np.where(self.event_data['M1']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)[0]
                m2_ids_data = np.where(self.event_data['M2']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)[0]

                # Events recorded by both telescopes are stereo, not mono.
                stereo_numbers = np.intersect1d(m1_data, m2_data)

                # NOTE(review): searchsorted assumes stereo_event_number is
                # sorted — TODO confirm for real data runs.
                m1_ids_stereo = np.searchsorted(self.event_data['M1']['stereo_event_number'], stereo_numbers)
                m2_ids_stereo = np.searchsorted(self.event_data['M2']['stereo_event_number'], stereo_numbers)

                # remove ids that have stereo trigger from the array of ids of data events
                # see: https://stackoverflow.com/questions/52417929/remove-elements-from-one-array-if-present-in-another-array-keep-duplicates-nu
                sidx1 = m1_ids_stereo.argsort()
                idx1 = np.searchsorted(m1_ids_stereo,m1_ids_data,sorter=sidx1)
                idx1[idx1==len(m1_ids_stereo)] = 0
                m1_ids_mono = m1_ids_data[m1_ids_stereo[sidx1[idx1]] != m1_ids_data]

                sidx2 = m2_ids_stereo.argsort()
                idx2 = np.searchsorted(m2_ids_stereo,m2_ids_data,sorter=sidx2)
                idx2[idx2==len(m2_ids_stereo)] = 0
                m2_ids_mono = m2_ids_data[m2_ids_stereo[sidx2[idx2]] != m2_ids_data]

                mono_ids['M1'] = m1_ids_mono.tolist()
                mono_ids['M2'] = m2_ids_mono.tolist()
            elif (n_m1_events != 0) and (n_m2_events == 0):
                # Only M1 recorded events: every data-triggered event is mono.
                m1_ids_data = np.where(self.event_data['M1']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)[0]
                mono_ids['M1'] = m1_ids_data.tolist()
            elif (n_m1_events == 0) and (n_m2_events != 0):
                m2_ids_data = np.where(self.event_data['M2']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)[0]
                mono_ids['M2'] = m2_ids_data.tolist()
        else:
            # just find ids where event stereo number is 0 (which is given to mono events) and pattern is MC trigger
            m1_mono_mask = np.logical_and(self.event_data['M1']['trigger_pattern'] == MC_STEREO_TRIGGER_PATTERN, self.event_data['M1']['stereo_event_number'] == 0)
            m2_mono_mask = np.logical_and(self.event_data['M2']['trigger_pattern'] == MC_STEREO_TRIGGER_PATTERN, self.event_data['M2']['stereo_event_number'] == 0)

            m1_ids = np.where(m1_mono_mask == True)[0].tolist()
            m2_ids = np.where(m2_mono_mask == True)[0].tolist()

            mono_ids['M1'] = m1_ids
            mono_ids['M2'] = m2_ids

        return mono_ids
def _get_pedestal_file_num(self, pedestal_event_num, telescope):
"""
This internal method identifies the M1/2 file number of the
given pedestal event in M1/2 file lists, corresponding to this run.
Parameters
----------
pedestal_event_num: int
Order number of the event in the list of pedestal events
of the specified telescope, corresponding to this run.
telescope: str
The name of the telescope to which this event corresponds.
May be "M1" or "M2".
Returns
-------
file_num:
Order number of the corresponding file in the M1 or M2 file list.
"""
event_id = self.pedestal_ids[telescope][pedestal_event_num]
file_num = np.digitize(
[event_id], self.event_data[telescope]['file_edges'])
file_num = file_num[0] - 1
return file_num
def _get_stereo_file_num(self, stereo_event_num):
"""
This internal method identifies the M1/2 file numbers of the
given stereo event in M1/2 file lists, corresponding to this run.
Parameters
----------
stereo_event_num: int
Order number of the event in the list of stereo events corresponding
to this run.
Returns
-------
m1_file_num:
Order number of the corresponding file in the M1 file list.
m2_file_num:
Order number of the corresponding file in the M2 file list.
"""
m1_id = self.stereo_ids[stereo_event_num][0]
m2_id = self.stereo_ids[stereo_event_num][1]
m1_file_num = np.digitize([m1_id], self.event_data['M1']['file_edges'])
m2_file_num = np.digitize([m2_id], self.event_data['M2']['file_edges'])
m1_file_num = m1_file_num[0] - 1
m2_file_num = m2_file_num[0] - 1
return m1_file_num, m2_file_num
def _get_mono_file_num(self, mono_event_num, telescope):
    """
    Locate the file, within this run's M1/M2 file list, that contains
    the specified mono event.

    Parameters
    ----------
    mono_event_num: int
        Order number of the event in the list of mono events of the
        specified telescope, corresponding to this run.
    telescope: str
        The name of the telescope to which this event corresponds.
        May be "M1" or "M2".

    Returns
    -------
    file_num:
        Order number of the corresponding file in the M1 or M2 file list.
    """
    mono_id = self.mono_ids[telescope][mono_event_num]
    edges = self.event_data[telescope]['file_edges']
    # np.digitize yields 1-based bin indices; shift to a 0-based file index.
    return np.digitize([mono_id], edges)[0] - 1
def get_pedestal_event_data(self, pedestal_event_num, telescope):
    """
    Read the per-pixel photon content and arrival times of the specified
    pedestal event, together with the telescope pointing data.

    Parameters
    ----------
    pedestal_event_num: int
        Order number of the event in the list of pedestal events for the
        given telescope, corresponding to this run.
    telescope: str
        The name of the telescope to which this event corresponds.
        May be "M1" or "M2".

    Returns
    -------
    dict:
        The output has the following structure:
        'image' - photon_content in requested telescope
        'pulse_time' - arrival_times in requested telescope
        'pointing_az' - pointing azimuth [degrees]
        'pointing_zd' - pointing zenith angle [degrees]
        'pointing_ra' - pointing right ascension [degrees]
        'pointing_dec' - pointing declination [degrees]
        'unix' - event arrival time [unix]
    """
    file_num = self._get_pedestal_file_num(pedestal_event_num, telescope)
    tel_data = self.event_data[telescope]
    event_id = self.pedestal_ids[telescope][pedestal_event_num]
    # Event index relative to the start of the file it lives in.
    id_in_file = event_id - tel_data['file_edges'][file_num]
    n_pixels = self.n_camera_pixels
    event_data = {
        'image': np.array(tel_data['charge'][file_num][id_in_file][:n_pixels]),
        'pulse_time': np.array(tel_data['arrival_time'][file_num][id_in_file][:n_pixels]),
        'pointing_az': tel_data['pointing_az'][event_id],
        'pointing_zd': tel_data['pointing_zd'][event_id],
        'pointing_ra': tel_data['pointing_ra'][event_id],
        'pointing_dec': tel_data['pointing_dec'][event_id],
        'unix': tel_data['unix'][event_id],
    }
    return event_data
def get_stereo_event_data(self, stereo_event_num):
    """
    Read the per-pixel photon content and arrival times of the specified
    stereo event, together with the pointing data of both telescopes.

    Parameters
    ----------
    stereo_event_num: int
        Order number of the event in the list of stereo events corresponding
        to this run.

    Returns
    -------
    dict:
        The output has the following structure:
        'm1_image' - M1 photon_content
        'm1_pulse_time' - M1 arrival_times
        'm2_image' - M2 photon_content
        'm2_pulse_time' - M2 arrival_times
        'm1_pointing_az' / 'm1_pointing_zd' / 'm1_pointing_ra' /
        'm1_pointing_dec' - M1 pointing [degrees]
        'm2_pointing_az' / 'm2_pointing_zd' / 'm2_pointing_ra' /
        'm2_pointing_dec' - M2 pointing [degrees]
        'm1_unix' / 'm2_unix' - event arrival times [unix] (real data)
        'true_*' - MC truth quantities (simulated data)
    """
    m1_file_num, m2_file_num = self._get_stereo_file_num(stereo_event_num)
    file_nums = {'M1': m1_file_num, 'M2': m2_file_num}
    ids = {'M1': self.stereo_ids[stereo_event_num][0],
           'M2': self.stereo_ids[stereo_event_num][1]}
    event_data = dict()
    for telescope in ('M1', 'M2'):
        prefix = telescope.lower()
        tel_data = self.event_data[telescope]
        event_id = ids[telescope]
        # Event index relative to the start of the file it lives in.
        id_in_file = event_id - tel_data['file_edges'][file_nums[telescope]]
        pixels = slice(None, self.n_camera_pixels)
        event_data[prefix + '_image'] = np.array(
            tel_data['charge'][file_nums[telescope]][id_in_file][pixels])
        event_data[prefix + '_pulse_time'] = np.array(
            tel_data['arrival_time'][file_nums[telescope]][id_in_file][pixels])
        for quantity in ('pointing_az', 'pointing_zd',
                         'pointing_ra', 'pointing_dec'):
            event_data['{}_{}'.format(prefix, quantity)] = \
                tel_data[quantity][event_id]
    if self.is_mc:
        # MC truth is stored identically for both telescopes; read from M1.
        m1_data = self.event_data['M1']
        m1_id = ids['M1']
        for quantity in ('true_energy', 'true_zd', 'true_az',
                         'true_shower_primary_id', 'true_h_first_int',
                         'true_core_x', 'true_core_y'):
            event_data[quantity] = m1_data[quantity][m1_id]
    else:
        event_data['m1_unix'] = self.event_data['M1']['unix'][ids['M1']]
        event_data['m2_unix'] = self.event_data['M2']['unix'][ids['M2']]
    return event_data
def get_mono_event_data(self, mono_event_num, telescope):
    """
    Read the photon content and arrival time (per pixel)
    for the specified mono event. Also returned is the event telescope
    pointing data.

    Parameters
    ----------
    mono_event_num: int
        Order number of the event in the list of mono events for the
        given telescope, corresponding to this run.
    telescope: str
        The name of the telescope to which this event corresponds.
        May be "M1" or "M2".

    Returns
    -------
    dict:
        The output has the following structure:
        'image' - photon_content in requested telescope
        'pulse_time' - arrival_times in requested telescope
        'pointing_az' - pointing azimuth [degrees]
        'pointing_zd' - pointing zenith angle [degrees]
        'pointing_ra' - pointing right ascension [degrees]
        'pointing_dec' - pointing declination [degrees]
        'unix' - event arrival time [unix] (real data only)
        'true_*' - MC truth quantities (simulated data only)
    """
    file_num = self._get_mono_file_num(mono_event_num, telescope)
    tel_data = self.event_data[telescope]
    event_id = self.mono_ids[telescope][mono_event_num]
    # Event index relative to the start of the file it lives in.
    id_in_file = event_id - tel_data['file_edges'][file_num]
    photon_content = tel_data['charge'][file_num][id_in_file][:self.n_camera_pixels]
    arrival_times = tel_data['arrival_time'][file_num][id_in_file][:self.n_camera_pixels]
    event_data = dict()
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # float is the exact equivalent (both mean float64 here).
    event_data['image'] = np.array(photon_content, dtype=float)
    event_data['pulse_time'] = np.array(arrival_times, dtype=float)
    event_data['pointing_az'] = tel_data['pointing_az'][event_id]
    event_data['pointing_zd'] = tel_data['pointing_zd'][event_id]
    event_data['pointing_ra'] = tel_data['pointing_ra'][event_id]
    event_data['pointing_dec'] = tel_data['pointing_dec'][event_id]
    if not self.is_mc:
        event_data['unix'] = tel_data['unix'][event_id]
    else:
        for quantity in ('true_energy', 'true_zd', 'true_az',
                         'true_shower_primary_id', 'true_h_first_int',
                         'true_core_x', 'true_core_y'):
            event_data[quantity] = tel_data[quantity][event_id]
    return event_data
class PixelStatusContainer(Container):
"""
Container for pixel status information
It contains masks obtained by several data analysis steps
At r0/r1 level only the hardware_mask is initialized
"""
sample_time_range = Field(
[], "Range of time of the pedestal events [t_min, t_max]", unit=u.s
)
hardware_failing_pixels = Field(
None,
"Boolean np | |
<gh_stars>0
import os
from compat import iteritems
def add_source_files(self, sources, filetype, lib_env=None, shared=False):
    """Append SCons Object nodes for the given sources to *sources*.

    Parameters
    ----------
    sources: list
        Mutated in place: one Object node is appended per source file.
    filetype: str or iterable
        Either a glob pattern (e.g. "*.cpp") resolved relative to the
        current SCons directory, or an explicit iterable of file names.
    lib_env:
        Optional alternative environment; defaults to *self*.
    shared: bool
        Unused here; kept for call-site backward compatibility.
    """
    import glob

    if not lib_env:
        lib_env = self
    # isinstance replaces the fragile `type(filetype) == type("")` check.
    if isinstance(filetype, str):
        # Pattern form: expand the glob relative to the current directory.
        # (Renamed locals: the original shadowed the builtins `dir` and
        # `list`.)
        base_dir = self.Dir('.').abspath
        for f in glob.glob(base_dir + "/" + filetype):
            sources.append(self.Object(f))
    else:
        # Explicit-list form: wrap each named file directly.
        for f in filetype:
            sources.append(self.Object(f))
class LegacyGLHeaderStruct:
    """Accumulator for state gathered while parsing a legacy GL shader
    file: vertex/fragment source lines, uniforms, attributes, feedbacks,
    enums, texture units, UBOs and per-section line offsets."""

    # Attributes that start out as independent empty lists.
    _LIST_FIELDS = (
        "vertex_lines", "fragment_lines", "uniforms", "attributes",
        "feedbacks", "fbos", "conditionals", "texunits", "texunit_names",
        "ubos", "ubo_names", "vertex_included_files",
        "fragment_included_files",
    )

    def __init__(self):
        for field in self._LIST_FIELDS:
            # A fresh list per attribute so they never alias each other.
            setattr(self, field, [])
        self.enums = {}
        self.reading = ""
        self.line_offset = 0
        self.vertex_offset = 0
        self.fragment_offset = 0
def include_file_in_legacygl_header(filename, header_data, depth):
    """Parse one legacy GLSL file into *header_data*, recursing into #includes.

    Scans the file line by line, switching section on "[vertex]"/"[fragment]"
    markers, expanding "#include" directives (recursively, at depth + 1),
    and collecting uniforms, texture units, UBOs, attributes, transform
    feedbacks, conditionals and _EN_ enums. Cleaned lines are appended to
    header_data.vertex_lines / header_data.fragment_lines. Returns
    header_data.
    """
    fs = open(filename, "r")
    line = fs.readline()
    while(line):
        # Section markers: everything after "[vertex]"/"[fragment]" goes to
        # the corresponding lines list; record the section's start offset.
        if (line.find("[vertex]") != -1):
            header_data.reading = "vertex"
            line = fs.readline()
            header_data.line_offset += 1
            header_data.vertex_offset = header_data.line_offset
            continue
        if (line.find("[fragment]") != -1):
            header_data.reading = "fragment"
            line = fs.readline()
            header_data.line_offset += 1
            header_data.fragment_offset = header_data.line_offset
            continue
        # Expand #include directives; each included file is parsed in place
        # (once per section) before we continue with the next physical line.
        while(line.find("#include ") != -1):
            includeline = line.replace("#include ", "").strip()[1:-1]
            import os.path
            included_file = os.path.relpath(os.path.dirname(filename) + "/" + includeline)
            if (not included_file in header_data.vertex_included_files and header_data.reading == "vertex"):
                header_data.vertex_included_files += [included_file]
                if(include_file_in_legacygl_header(included_file, header_data, depth + 1) == None):
                    print("Error in file '" + filename + "': #include " + includeline + "could not be found!")
            elif (not included_file in header_data.fragment_included_files and header_data.reading == "fragment"):
                header_data.fragment_included_files += [included_file]
                if(include_file_in_legacygl_header(included_file, header_data, depth + 1) == None):
                    print("Error in file '" + filename + "': #include " + includeline + "could not be found!")
            line = fs.readline()
        # Conditional compilation: #ifdef FOO / #elif defined(FOO) become
        # "conditionals"; names containing _EN_ are grouped into enums keyed
        # by the prefix before _EN_.
        if (line.find("#ifdef ") != -1 or line.find("#elif defined(") != -1):
            if (line.find("#ifdef ") != -1):
                ifdefline = line.replace("#ifdef ", "").strip()
            else:
                ifdefline = line.replace("#elif defined(", "").strip()
                ifdefline = ifdefline.replace(")", "").strip()
            if (line.find("_EN_") != -1):
                enumbase = ifdefline[:ifdefline.find("_EN_")]
                ifdefline = ifdefline.replace("_EN_", "_")
                # NOTE: `line` itself is rewritten so the emitted shader text
                # also carries the _EN_ -> _ substitution.
                line = line.replace("_EN_", "_")
                if (enumbase not in header_data.enums):
                    header_data.enums[enumbase] = []
                if (ifdefline not in header_data.enums[enumbase]):
                    header_data.enums[enumbase].append(ifdefline)
            elif (not ifdefline in header_data.conditionals):
                header_data.conditionals += [ifdefline]
        # Uniform declarations tagged with "// texunit:N" (or "auto").
        if (line.find("uniform") != -1 and line.lower().find("texunit:") != -1):
            # texture unit
            texunitstr = line[line.find(":") + 1:].strip()
            if (texunitstr == "auto"):
                texunit = "-1"
            else:
                texunit = str(int(texunitstr))
            uline = line[:line.lower().find("//")]
            uline = uline.replace("uniform", "")
            uline = uline.replace("highp", "")
            uline = uline.replace(";", "")
            lines = uline.split(",")
            for x in lines:
                # Keep only the identifier (last whitespace-separated token,
                # with any array suffix stripped).
                x = x.strip()
                x = x[x.rfind(" ") + 1:]
                if (x.find("[") != -1):
                    # unfiorm array
                    x = x[:x.find("[")]
                if (not x in header_data.texunit_names):
                    header_data.texunits += [(x, texunit)]
                    header_data.texunit_names += [x]
        # Uniform declarations tagged with "// ubo:N".
        elif (line.find("uniform") != -1 and line.lower().find("ubo:") != -1):
            # uniform buffer object
            ubostr = line[line.find(":") + 1:].strip()
            ubo = str(int(ubostr))
            uline = line[:line.lower().find("//")]
            uline = uline[uline.find("uniform") + len("uniform"):]
            uline = uline.replace("highp", "")
            uline = uline.replace(";", "")
            uline = uline.replace("{", "").strip()
            lines = uline.split(",")
            for x in lines:
                x = x.strip()
                x = x[x.rfind(" ") + 1:]
                if (x.find("[") != -1):
                    # unfiorm array
                    x = x[:x.find("[")]
                if (not x in header_data.ubo_names):
                    header_data.ubos += [(x, ubo)]
                    header_data.ubo_names += [x]
        # Plain uniform declarations (no block, terminated by ';').
        elif (line.find("uniform") != -1 and line.find("{") == -1 and line.find(";") != -1):
            uline = line.replace("uniform", "")
            uline = uline.replace(";", "")
            lines = uline.split(",")
            for x in lines:
                x = x.strip()
                x = x[x.rfind(" ") + 1:]
                if (x.find("[") != -1):
                    # unfiorm array
                    x = x[:x.find("[")]
                if (not x in header_data.uniforms):
                    header_data.uniforms += [x]
        # Vertex attributes tagged with "// attrib:BINDING".
        if (line.strip().find("attribute ") == 0 and line.find("attrib:") != -1):
            uline = line.replace("in ", "")
            uline = uline.replace("attribute ", "")
            uline = uline.replace("highp ", "")
            uline = uline.replace(";", "")
            uline = uline[uline.find(" "):].strip()
            if (uline.find("//") != -1):
                name, bind = uline.split("//")
                if (bind.find("attrib:") != -1):
                    name = name.strip()
                    bind = bind.replace("attrib:", "").strip()
                    header_data.attributes += [(name, bind)]
        # Transform-feedback outputs tagged with "// tfb:BINDING".
        if (line.strip().find("out ") == 0 and line.find("tfb:") != -1):
            uline = line.replace("out ", "")
            uline = uline.replace("highp ", "")
            uline = uline.replace(";", "")
            uline = uline[uline.find(" "):].strip()
            if (uline.find("//") != -1):
                name, bind = uline.split("//")
                if (bind.find("tfb:") != -1):
                    name = name.strip()
                    bind = bind.replace("tfb:", "").strip()
                    header_data.feedbacks += [(name, bind)]
        # Strip line endings and record the line in the current section.
        line = line.replace("\r", "")
        line = line.replace("\n", "")
        if (header_data.reading == "vertex"):
            header_data.vertex_lines += [line]
        if (header_data.reading == "fragment"):
            header_data.fragment_lines += [line]
        line = fs.readline()
        header_data.line_offset += 1
    fs.close()
    return header_data
def build_legacygl_header(filename, include, class_suffix, output_attribs, gles2=False):
header_data = LegacyGLHeaderStruct()
include_file_in_legacygl_header(filename, header_data, 0)
out_file = filename + ".gen.h"
fd = open(out_file, "w")
enum_constants = []
fd.write("/* WARNING, THIS FILE WAS GENERATED, DO NOT EDIT */\n")
out_file_base = out_file
out_file_base = out_file_base[out_file_base.rfind("/") + 1:]
out_file_base = out_file_base[out_file_base.rfind("\\") + 1:]
out_file_ifdef = out_file_base.replace(".", "_").upper()
fd.write("#ifndef " + out_file_ifdef + class_suffix + "_120\n")
fd.write("#define " + out_file_ifdef + class_suffix + "_120\n")
out_file_class = out_file_base.replace(".glsl.gen.h", "").title().replace("_", "").replace(".", "") + "Shader" + class_suffix
fd.write("\n\n")
fd.write("#include \"" + include + "\"\n\n\n")
fd.write("class " + out_file_class + " : public Shader" + class_suffix + " {\n\n")
fd.write("\t virtual String get_shader_name() const { return \"" + out_file_class + "\"; }\n")
fd.write("public:\n\n")
if (len(header_data.conditionals)):
fd.write("\tenum Conditionals {\n")
for x in header_data.conditionals:
fd.write("\t\t" + x.upper() + ",\n")
fd.write("\t};\n\n")
if (len(header_data.uniforms)):
fd.write("\tenum Uniforms {\n")
for x in header_data.uniforms:
fd.write("\t\t" + x.upper() + ",\n")
fd.write("\t};\n\n")
fd.write("\t_FORCE_INLINE_ int get_uniform(Uniforms p_uniform) const { return _get_uniform(p_uniform); }\n\n")
if (len(header_data.conditionals)):
fd.write("\t_FORCE_INLINE_ void set_conditional(Conditionals p_conditional,bool p_enable) { _set_conditional(p_conditional,p_enable); }\n\n")
fd.write("\t#define _FU if (get_uniform(p_uniform)<0) return; ERR_FAIL_COND( get_active()!=this );\n\n ")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, double p_value) { _FU glUniform1f(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int8_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int16_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, uint32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, int32_t p_value) { _FU glUniform1i(get_uniform(p_uniform),p_value); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Color& p_color) { _FU GLfloat col[4]={p_color.r,p_color.g,p_color.b,p_color.a}; glUniform4fv(get_uniform(p_uniform),1,col); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector2& p_vec2) { _FU GLfloat vec2[2]={p_vec2.x,p_vec2.y}; glUniform2fv(get_uniform(p_uniform),1,vec2); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Vector3& p_vec3) { _FU GLfloat vec3[3]={p_vec3.x,p_vec3.y,p_vec3.z}; glUniform3fv(get_uniform(p_uniform),1,vec3); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b) { _FU glUniform2f(get_uniform(p_uniform),p_a,p_b); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c) { _FU glUniform3f(get_uniform(p_uniform),p_a,p_b,p_c); }\n\n")
fd.write("\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, float p_a, float p_b, float p_c, float p_d) { _FU glUniform4f(get_uniform(p_uniform),p_a,p_b,p_c,p_d); }\n\n")
fd.write("""\t_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform& p_transform) { _FU
const Transform &tr = p_transform;
GLfloat matrix[16]={ /* build a 16x16 matrix */
tr.basis.elements[0][0],
tr.basis.elements[1][0],
tr.basis.elements[2][0],
0,
tr.basis.elements[0][1],
tr.basis.elements[1][1],
tr.basis.elements[2][1],
0,
tr.basis.elements[0][2],
tr.basis.elements[1][2],
tr.basis.elements[2][2],
0,
tr.origin.x,
tr.origin.y,
tr.origin.z,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""")
fd.write("""_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const Transform2D& p_transform) { _FU
const Transform2D &tr = p_transform;
GLfloat matrix[16]={ /* build a 16x16 matrix */
tr.elements[0][0],
tr.elements[0][1],
0,
0,
tr.elements[1][0],
tr.elements[1][1],
0,
0,
0,
0,
1,
0,
tr.elements[2][0],
tr.elements[2][1],
0,
1
};
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
}
""")
fd.write("""_FORCE_INLINE_ void set_uniform(Uniforms p_uniform, const CameraMatrix& p_matrix) { _FU
GLfloat matrix[16];
for (int i=0;i<4;i++) {
for (int j=0;j<4;j++) {
matrix[i*4+j]=p_matrix.matrix[i][j];
}
}
glUniformMatrix4fv(get_uniform(p_uniform),1,false,matrix);
} """)
fd.write("\n\n#undef _FU\n\n\n")
fd.write("\tvirtual void init() {\n\n")
enum_value_count = 0
if (len(header_data.enums)):
fd.write("\t\t//Written using math, given nonstandarity of 64 bits integer constants..\n")
fd.write("\t\tstatic const Enum _enums[]={\n")
bitofs = len(header_data.conditionals)
enum_vals = []
for xv in header_data.enums:
x = header_data.enums[xv]
bits = 1
amt = len(x)
while(2**bits < amt):
bits += 1
strs = "{"
for i in range(amt):
strs += "\"#define " + x[i] + "\\n\","
v = {}
v["set_mask"] = "uint64_t(" + str(i) + ")<<" + str(bitofs)
v["clear_mask"] = "((uint64_t(1)<<40)-1) ^ (((uint64_t(1)<<" + str(bits) + ") - 1)<<" + str(bitofs) + ")"
enum_vals.append(v)
enum_constants.append(x[i])
strs += "NULL}"
fd.write("\t\t\t{(uint64_t(1<<" + str(bits) + ")-1)<<" + str(bitofs) + "," + str(bitofs) + "," + strs + "},\n")
bitofs += bits
fd.write("\t\t};\n\n")
fd.write("\t\tstatic const EnumValue _enum_values[]={\n")
enum_value_count = len(enum_vals)
for x in enum_vals:
fd.write("\t\t\t{" + x["set_mask"] + "," + x["clear_mask"] + "},\n")
fd.write("\t\t};\n\n")
conditionals_found = []
if (len(header_data.conditionals)):
fd.write("\t\tstatic const char* _conditional_strings[]={\n")
if (len(header_data.conditionals)):
for x in header_data.conditionals:
fd.write("\t\t\t\"#define " + x + "\\n\",\n")
conditionals_found.append(x)
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const char **_conditional_strings=NULL;\n")
if (len(header_data.uniforms)):
fd.write("\t\tstatic const char* _uniform_strings[]={\n")
if (len(header_data.uniforms)):
for x in header_data.uniforms:
fd.write("\t\t\t\"" + x + "\",\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic const char **_uniform_strings=NULL;\n")
if output_attribs:
if (len(header_data.attributes)):
fd.write("\t\tstatic AttributePair _attribute_pairs[]={\n")
for x in header_data.attributes:
fd.write("\t\t\t{\"" + x[0] + "\"," + x[1] + "},\n")
fd.write("\t\t};\n\n")
else:
fd.write("\t\tstatic AttributePair *_attribute_pairs=NULL;\n")
feedback_count = 0
if (not gles2 and len(header_data.feedbacks)):
fd.write("\t\tstatic const Feedback _feedbacks[]={\n")
for x in header_data.feedbacks:
| |
# that.
if instr:
regex += 'instruction=%s\n' % GlobToRegex(instr)
for line in stack:
if line == ELLIPSIS:
regex += '(.*\n)*'
elif '!' in line:
(mod, func) = line.split('!')
if func == ELLIPSIS: # mod!ellipsis frame
regex += '(%s\!.*\n)+' % GlobToRegex(mod, ignore_case=True)
else: # mod!func frame
# Ignore case for the module match, but not the function match.
regex += '%s\!%s\n' % (GlobToRegex(mod, ignore_case=True),
GlobToRegex(func, ignore_case=False))
else:
regex += GlobToRegex(line)
regex += '\n'
regex += '(.*\n)*' # Match anything left in the stack.
regex += '}'
return super(DrMemorySuppression, self).__init__(name, report_type, stack,
defined_at, regex)
def __str__(self):
    """Render the suppression back into DrMemory's text format."""
    parts = [self.type + "\n"]
    if self.description:
        parts.append("name=%s\n" % self.description)
    if self.instr:
        parts.append("instruction=%s\n" % self.instr)
    parts.append("\n".join(self.stack))
    parts.append("\n")
    return "".join(parts)
# Possible DrMemory error report types. Keep consistent with suppress_name
# array in drmemory/drmemory/report.c.
DRMEMORY_ERROR_TYPES = [
    'UNADDRESSABLE ACCESS',
    'UNINITIALIZED READ',
    'INVALID HEAP ARGUMENT',
    'GDI USAGE ERROR',
    'HANDLE LEAK',
    'LEAK',
    'POSSIBLE LEAK',
    'WARNING',
]
# Regexes to match valid drmemory frames. Every stack line of a suppression
# must match one of these patterns to be accepted by
# ReadDrMemorySuppressions below.
DRMEMORY_FRAME_PATTERNS = [
    re.compile(r"^.*\!.*$"),               # mod!func
    re.compile(r"^.*!\.\.\.$"),            # mod!ellipsis
    re.compile(r"^\<.*\+0x.*\>$"),         # <mod+0xoffs>
    re.compile(r"^\<not in a module\>$"),  # frame outside any module
    re.compile(r"^system call .*$"),       # system call frame
    re.compile(r"^\*$"),                   # wildcard (any single frame)
    re.compile(r"^\.\.\.$"),               # ellipsis (any frame run)
]
def ReadDrMemorySuppressions(lines, supp_descriptor):
    """Given a list of lines, returns a list of DrMemory suppressions.

    Args:
      lines: a list of lines containing suppressions.
      supp_descriptor: should typically be a filename.
          Used only when parsing errors happen.

    Raises:
      SuppressionError: on an unknown error type, an invalid stack-frame
          pattern, an empty stack, or a stack ending in an ellipsis.
    """
    lines = StripAndSkipCommentsIterator(lines)
    suppressions = []
    for (line_no, line) in lines:
        if not line:
            continue
        if line not in DRMEMORY_ERROR_TYPES:
            raise SuppressionError('Expected a DrMemory error type, '
                                   'found %r instead\n Valid error types: %s' %
                                   (line, ' '.join(DRMEMORY_ERROR_TYPES)),
                                   "%s:%d" % (supp_descriptor, line_no))
        # Suppression starts here.
        report_type = line
        name = ''
        instr = None
        stack = []
        defined_at = "%s:%d" % (supp_descriptor, line_no)
        found_stack = False
        # NOTE: this inner loop shares the iterator with the outer loop, so
        # it consumes the body of the current suppression; when it breaks on
        # the blank terminator, the outer loop resumes at the next record.
        for (line_no, line) in lines:
            if not found_stack and line.startswith('name='):
                name = line.replace('name=', '')
            elif not found_stack and line.startswith('instruction='):
                instr = line.replace('instruction=', '')
            else:
                # Unrecognized prefix indicates start of stack trace.
                found_stack = True
                if not line:
                    # Blank line means end of suppression.
                    break
                if not any([regex.match(line) for regex in DRMEMORY_FRAME_PATTERNS]):
                    raise SuppressionError(
                        ('Unexpected stack frame pattern at line %d\n' +
                         'Frames should be one of the following:\n' +
                         ' module!function\n' +
                         ' module!...\n' +
                         ' <module+0xhexoffset>\n' +
                         ' <not in a module>\n' +
                         ' system call Name\n' +
                         ' *\n' +
                         ' ...\n') % line_no, defined_at)
                stack.append(line)
        if len(stack) == 0:  # In case we hit EOF or blank without any stack frames.
            raise SuppressionError('Suppression "%s" has no stack frames, ends at %d'
                                   % (name, line_no), defined_at)
        if stack[-1] == ELLIPSIS:
            raise SuppressionError('Suppression "%s" ends in an ellipsis on line %d' %
                                   (name, line_no), defined_at)
        suppressions.append(
            DrMemorySuppression(name, report_type, instr, stack, defined_at))
    return suppressions
def ParseSuppressionOfType(lines, supp_descriptor, def_line_no, report_type):
    """Parse the suppression starting on this line.

    Suppressions start with a type, have an optional name and instruction, and a
    stack trace that ends in a blank line.
    """
    # NOTE(review): no implementation is present here -- the function body
    # appears to have been lost (it currently returns None). Confirm against
    # the upstream suppressions tool before relying on this entry point.
def TestStack(stack, positive, negative, suppression_parser=None):
    """SelfTest() helper: check one stack against suppression lists.

    Args:
      stack: the stack to match the suppressions.
      positive: suppressions that must match the given stack.
      negative: suppressions that must not match the given stack.
      suppression_parser: optional parser function; defaults to
          ReadValgrindStyleSuppressions.
    """
    parser = suppression_parser or ReadValgrindStyleSuppressions
    stack_lines = stack.split("\n")
    for supp in positive:
        parsed = parser(supp.split("\n"), "positive_suppression")
        assert parsed[0].Match(stack_lines), (
            "Suppression:\n%s\ndidn't match stack:\n%s" % (supp, stack))
    for supp in negative:
        parsed = parser(supp.split("\n"), "negative_suppression")
        assert not parsed[0].Match(stack_lines), (
            "Suppression:\n%s\ndid match stack:\n%s" % (supp, stack))
def TestFailPresubmit(supp_text, error_text, suppression_parser=None):
    """A helper function for SelfTest() that verifies a presubmit check fires.

    Args:
      supp_text: suppression text to parse.
      error_text: text of the presubmit error we expect to find.
      suppression_parser: optional arg for the suppression parser, default is
          ReadValgrindStyleSuppressions.
    """
    if not suppression_parser:
        suppression_parser = ReadValgrindStyleSuppressions
    try:
        supps = suppression_parser(supp_text.split("\n"), "<presubmit suppression>")
    # `except X as e` works on Python 2.6+ and 3; the old comma form
    # (`except X, e`) is a syntax error on Python 3.
    except SuppressionError as e:
        # If parsing raised an exception, match the error text here.
        assert error_text in str(e), (
            "presubmit text %r not in SuppressionError:\n%r" %
            (error_text, str(e)))
    else:
        # Otherwise, run the presubmit checks over the supps. We expect a single
        # error that has text matching error_text.
        errors = PresubmitCheckSuppressions(supps)
        assert len(errors) == 1, (
            "expected exactly one presubmit error, got:\n%s" % errors)
        assert error_text in str(errors[0]), (
            "presubmit text %r not in SuppressionError:\n%r" %
            (error_text, str(errors[0])))
def SelfTest():
"""Tests the Suppression.Match() capabilities."""
test_memcheck_stack_1 = """{
test
Memcheck:Leak
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
test_memcheck_stack_2 = """{
test
Memcheck:Uninitialized
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
test_memcheck_stack_3 = """{
test
Memcheck:Unaddressable
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
test_memcheck_stack_4 = """{
test
Memcheck:Addr4
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
positive_memcheck_suppressions_1 = [
"{\nzzz\nMemcheck:Leak\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*ly\n}",
"{\nzzz\nMemcheck:Leak\nfun:absolutly\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\nfun:absolutly\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*ly\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\n...\nobj:condition\n}",
"{\nzzz\nMemcheck:Leak\n...\nobj:condition\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:brilliant\nobj:condition\n}",
]
positive_memcheck_suppressions_2 = [
"{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Uninitialized\nfun:ab*ly\n}",
"{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\nfun:brilliant\n}",
# Legacy suppression types
"{\nzzz\nMemcheck:Value1\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Cond\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Value8\nfun:absolutly\nfun:brilliant\n}",
]
positive_memcheck_suppressions_3 = [
"{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\nfun:brilliant\n}",
# Legacy suppression types
"{\nzzz\nMemcheck:Addr1\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Addr8\n...\nfun:detection\n}",
]
positive_memcheck_suppressions_4 = [
"{\nzzz\nMemcheck:Addr4\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Addr4\nfun:absolutly\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Unaddressable\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Addr4\n...\nfun:detection\n}",
]
negative_memcheck_suppressions_1 = [
"{\nzzz\nMemcheck:Leak\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Leak\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
]
negative_memcheck_suppressions_2 = [
"{\nzzz\nMemcheck:Cond\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Value2\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Uninitialized\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:brilliant\n}",
]
negative_memcheck_suppressions_3 = [
"{\nzzz\nMemcheck:Addr1\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Addr2\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
]
negative_memcheck_suppressions_4 = [
"{\nzzz\nMemcheck:Addr1\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Addr4\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Addr1\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Addr2\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
]
TestStack(test_memcheck_stack_1,
positive_memcheck_suppressions_1,
negative_memcheck_suppressions_1)
TestStack(test_memcheck_stack_2,
positive_memcheck_suppressions_2,
negative_memcheck_suppressions_2)
TestStack(test_memcheck_stack_3,
positive_memcheck_suppressions_3,
negative_memcheck_suppressions_3)
TestStack(test_memcheck_stack_4,
positive_memcheck_suppressions_4,
negative_memcheck_suppressions_4)
# TODO(timurrrr): add TestFailPresubmit tests.
### DrMemory self tests.
# http://crbug.com/96010 suppression.
stack_96010 = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
*!TestingProfile::FinishInit
*!TestingProfile::TestingProfile
*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody
*!testing::Test::Run
}"""
suppress_96010 = [
"UNADDRESSABLE ACCESS\nname=zzz\n...\n*!testing::Test::Run\n",
("UNADDRESSABLE ACCESS\nname=zzz\n...\n" +
"*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody\n"),
"UNADDRESSABLE ACCESS\nname=zzz\n...\n*!BrowserAboutHandlerTest*\n",
"UNADDRESSABLE ACCESS\nname=zzz\n*!TestingProfile::FinishInit\n",
# No name should be needed
"UNADDRESSABLE ACCESS\n*!TestingProfile::FinishInit\n",
# Whole trace
("UNADDRESSABLE ACCESS\n" +
"*!TestingProfile::FinishInit\n" +
"*!TestingProfile::TestingProfile\n" +
"*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody\n" +
"*!testing::Test::Run\n"),
]
negative_96010 = [
# Wrong type
"UNINITIALIZED READ\nname=zzz\n*!TestingProfile::FinishInit\n",
# No ellipsis
"UNADDRESSABLE ACCESS\nname=zzz\n*!BrowserAboutHandlerTest*\n",
]
TestStack(stack_96010, suppress_96010, negative_96010,
suppression_parser=ReadDrMemorySuppressions)
# Invalid heap arg
stack_invalid = """{
INVALID HEAP ARGUMENT
name=asdf
*!foo
}"""
suppress_invalid = [
"INVALID HEAP ARGUMENT\n*!foo\n",
]
negative_invalid = [
"UNADDRESSABLE ACCESS\n*!foo\n",
]
TestStack(stack_invalid, suppress_invalid, negative_invalid,
suppression_parser=ReadDrMemorySuppressions)
# Suppress only ntdll
stack_in_ntdll = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
ntdll.dll!RtlTryEnterCriticalSection
}"""
stack_not_ntdll = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
notntdll.dll!RtlTryEnterCriticalSection
}"""
suppress_in_ntdll = [
"UNADDRESSABLE ACCESS\nntdll.dll!RtlTryEnterCriticalSection\n",
]
suppress_in_any = [
"UNADDRESSABLE ACCESS\n*!RtlTryEnterCriticalSection\n",
]
TestStack(stack_in_ntdll, suppress_in_ntdll + suppress_in_any, [],
suppression_parser=ReadDrMemorySuppressions)
# Make sure we don't wildcard away the "not" part and match ntdll.dll by
# accident.
TestStack(stack_not_ntdll, suppress_in_any, suppress_in_ntdll,
suppression_parser=ReadDrMemorySuppressions)
# Suppress a POSSIBLE LEAK with LEAK.
stack_foo_possible = """{
POSSIBLE LEAK
name=foo possible
*!foo
}"""
suppress_foo_possible = [ "POSSIBLE LEAK\n*!foo\n" ]
suppress_foo_leak = [ "LEAK\n*!foo\n" ]
TestStack(stack_foo_possible, suppress_foo_possible + suppress_foo_leak, [],
suppression_parser=ReadDrMemorySuppressions)
# Don't suppress LEAK with POSSIBLE LEAK.
stack_foo_leak = """{
LEAK
name=foo leak
*!foo
}"""
TestStack(stack_foo_leak, suppress_foo_leak, suppress_foo_possible,
suppression_parser=ReadDrMemorySuppressions)
# Test case insensitivity of module names.
stack_user32_mixed_case = """{
LEAK
name=<insert>
USER32.dll!foo
user32.DLL!bar
user32.dll!baz
}"""
suppress_user32 = [ # Module name case doesn't matter.
"LEAK\nuser32.dll!foo\nuser32.dll!bar\nuser32.dll!baz\n",
"LEAK\nUSER32.DLL!foo\nUSER32.DLL!bar\nUSER32.DLL!baz\n",
]
no_suppress_user32 = [ # Function name case matters.
"LEAK\nuser32.dll!FOO\nuser32.dll!BAR\nuser32.dll!BAZ\n",
"LEAK\nUSER32.DLL!FOO\nUSER32.DLL!BAR\nUSER32.DLL!BAZ\n",
]
TestStack(stack_user32_mixed_case, suppress_user32, no_suppress_user32,
suppression_parser=ReadDrMemorySuppressions)
# Test mod!... frames.
stack_kernel32_through_ntdll = """{
LEAK
name=<insert>
kernel32.dll!foo
KERNEL32.dll!bar
kernel32.DLL!baz
ntdll.dll!quux
}"""
suppress_mod_ellipsis = [
"LEAK\nkernel32.dll!...\nntdll.dll!quux\n",
"LEAK\nKERNEL32.DLL!...\nntdll.dll!quux\n",
]
no_suppress_mod_ellipsis = [
# Need one or more matching frames, not zero, unlike regular ellipsis.
"LEAK\nuser32.dll!...\nkernel32.dll!...\nntdll.dll!quux\n",
]
TestStack(stack_kernel32_through_ntdll, suppress_mod_ellipsis,
no_suppress_mod_ellipsis,
suppression_parser=ReadDrMemorySuppressions)
# Test that the presubmit checks work.
forgot_to_name = """
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
ntdll.dll!RtlTryEnterCriticalSection
"""
TestFailPresubmit(forgot_to_name, 'forgotten to put a suppression',
suppression_parser=ReadDrMemorySuppressions)
named_twice = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
*!foo
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
*!bar
"""
TestFailPresubmit(named_twice, 'defined more than once',
suppression_parser=ReadDrMemorySuppressions)
forgot_stack = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
"""
TestFailPresubmit(forgot_stack, 'has no stack frames',
suppression_parser=ReadDrMemorySuppressions)
ends_in_ellipsis = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
ntdll.dll!RtlTryEnterCriticalSection
...
"""
TestFailPresubmit(ends_in_ellipsis, 'ends in an ellipsis',
suppression_parser=ReadDrMemorySuppressions)
bad_stack_frame = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
fun:memcheck_style_frame
"""
TestFailPresubmit(bad_stack_frame, 'Unexpected stack frame pattern',
suppression_parser=ReadDrMemorySuppressions)
# Test FilenameToTool.
filenames_to_tools = {
"tools/valgrind/drmemory/suppressions.txt": "drmemory",
"tools/valgrind/drmemory/suppressions_full.txt": "drmemory",
"tools/valgrind/memcheck/suppressions.txt": "memcheck",
"tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
"asdf/tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
"foo/bar/baz/tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
"foo/bar/baz/tools/valgrind/suppressions.txt": None,
"tools/valgrind/suppressions.txt": None,
}
for (filename, expected_tool) in filenames_to_tools.items():
filename.replace('/', os.sep) # Make the path look native.
tool = FilenameToTool(filename)
assert tool == expected_tool, (
"failed | |
--
The parent image of the image recipe.
dateCreated (string) --
The date on which this image recipe was created.
tags (dict) --
The tags of the image recipe.
(string) --
(string) --
nextToken (string) --
The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.
Exceptions
imagebuilder.Client.exceptions.ServiceException
imagebuilder.Client.exceptions.ClientException
imagebuilder.Client.exceptions.ServiceUnavailableException
imagebuilder.Client.exceptions.InvalidRequestException
imagebuilder.Client.exceptions.InvalidPaginationTokenException
imagebuilder.Client.exceptions.ForbiddenException
imagebuilder.Client.exceptions.CallRateLimitExceededException
:return: {
'requestId': 'string',
'imageRecipeSummaryList': [
{
'arn': 'string',
'name': 'string',
'platform': 'Windows'|'Linux',
'owner': 'string',
'parentImage': 'string',
'dateCreated': 'string',
'tags': {
'string': 'string'
}
},
],
'nextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
def list_images(owner=None, filters=None, maxResults=None, nextToken=None):
    """Returns the list of image build versions for the specified semantic version.

    See also: AWS API Documentation.

    :example: response = client.list_images(
        owner='Self'|'Shared'|'Amazon',
        filters=[{'name': 'string', 'values': ['string']}],
        maxResults=123,
        nextToken='string'
    )

    :type owner: string
    :param owner: The owner defines which images you want to list. By default, this
        request will only show images owned by your account. You can use this field
        to specify if you want to view images owned by yourself, by Amazon, or those
        images that have been shared with you by other customers.

    :type filters: list
    :param filters: The filters. Each filter is a dict with a ``name`` (string, the
        case-sensitive filter name) and ``values`` (list of case-sensitive filter
        values).

    :type maxResults: integer
    :param maxResults: The maximum items to return in a request.

    :type nextToken: string
    :param nextToken: A token to specify where to start paginating. This is the
        NextToken from a previously truncated response.

    :rtype: dict
    :return: A dict with this shape::

        {
            'requestId': 'string',        # unique request ID
            'imageVersionList': [         # list of image semantic versions
                {
                    'arn': 'string',          # ARN of the image version
                    'name': 'string',
                    'version': 'string',      # semantic version
                    'platform': 'Windows'|'Linux',
                    'osVersion': 'string',    # e.g. Amazon Linux 2, Ubuntu 18
                    'owner': 'string',
                    'dateCreated': 'string'
                },
            ],
            'nextToken': 'string'         # present when more results are available
        }

    :raises: imagebuilder.Client.exceptions.ServiceException,
        imagebuilder.Client.exceptions.ClientException,
        imagebuilder.Client.exceptions.ServiceUnavailableException,
        imagebuilder.Client.exceptions.InvalidRequestException,
        imagebuilder.Client.exceptions.InvalidPaginationTokenException,
        imagebuilder.Client.exceptions.ForbiddenException,
        imagebuilder.Client.exceptions.CallRateLimitExceededException
    """
    pass
def list_infrastructure_configurations(filters=None, maxResults=None, nextToken=None):
    """Returns a list of infrastructure configurations.

    See also: AWS API Documentation.

    :example: response = client.list_infrastructure_configurations(
        filters=[{'name': 'string', 'values': ['string']}],
        maxResults=123,
        nextToken='string'
    )

    :type filters: list
    :param filters: The filters. Each filter is a dict with a ``name`` (string, the
        case-sensitive filter name) and ``values`` (list of case-sensitive filter
        values).

    :type maxResults: integer
    :param maxResults: The maximum items to return in a request.

    :type nextToken: string
    :param nextToken: A token to specify where to start paginating. This is the
        NextToken from a previously truncated response.

    :rtype: dict
    :return: A dict with this shape::

        {
            'requestId': 'string',   # unique request ID
            'infrastructureConfigurationSummaryList': [
                {
                    'arn': 'string',           # ARN of the infrastructure configuration
                    'name': 'string',
                    'description': 'string',
                    'dateCreated': 'string',
                    'dateUpdated': 'string',
                    'tags': {'string': 'string'}
                },
            ],
            'nextToken': 'string'    # present when more results are available
        }

    :raises: imagebuilder.Client.exceptions.ServiceException,
        imagebuilder.Client.exceptions.ClientException,
        imagebuilder.Client.exceptions.ServiceUnavailableException,
        imagebuilder.Client.exceptions.InvalidRequestException,
        imagebuilder.Client.exceptions.InvalidPaginationTokenException,
        imagebuilder.Client.exceptions.ForbiddenException,
        imagebuilder.Client.exceptions.CallRateLimitExceededException
    """
    pass
def list_tags_for_resource(resourceArn=None):
    """Returns the list of tags for the specified resource.

    See also: AWS API Documentation.

    :example: response = client.list_tags_for_resource(resourceArn='string')

    :type resourceArn: string
    :param resourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the resource
        whose tags you want to retrieve.

    :rtype: dict
    :return: ``{'tags': {'string': 'string'}}`` -- the tags for the specified
        resource.

    :raises: imagebuilder.Client.exceptions.ServiceException,
        imagebuilder.Client.exceptions.InvalidParameterException,
        imagebuilder.Client.exceptions.ResourceNotFoundException
    """
    pass
def put_component_policy(componentArn=None, policy=None):
    """Applies a policy to a component.

    We recommend that you call the RAM API CreateResourceShare to share resources.
    If you call the Image Builder API PutComponentPolicy, you must also call the
    RAM API PromoteResourceShareCreatedFromPolicy in order for the resource to be
    visible to all principals with whom the resource is shared.

    See also: AWS API Documentation.

    :example: response = client.put_component_policy(
        componentArn='string',
        policy='string'
    )

    :type componentArn: string
    :param componentArn: [REQUIRED] The Amazon Resource Name (ARN) of the component
        that this policy should be applied to.

    :type policy: string
    :param policy: [REQUIRED] The policy to apply.

    :rtype: dict
    :return: A dict with this shape::

        {
            'requestId': 'string',     # unique request ID
            'componentArn': 'string'   # ARN of the component the policy was applied to
        }

    :raises: imagebuilder.Client.exceptions.ServiceException,
        imagebuilder.Client.exceptions.ClientException,
        imagebuilder.Client.exceptions.ServiceUnavailableException,
        imagebuilder.Client.exceptions.InvalidRequestException,
        imagebuilder.Client.exceptions.InvalidParameterValueException,
        imagebuilder.Client.exceptions.ResourceNotFoundException,
        imagebuilder.Client.exceptions.ForbiddenException,
        imagebuilder.Client.exceptions.CallRateLimitExceededException
    """
    pass
def put_image_policy(imageArn=None, policy=None):
    """Applies a policy to an image.

    We recommend that you call the RAM API CreateResourceShare to share resources.
    If you call the Image Builder API PutImagePolicy, you must also call the RAM
    API PromoteResourceShareCreatedFromPolicy in order for the resource to be
    visible to all principals with whom the resource is shared.

    See also: AWS API Documentation.

    :example: response = client.put_image_policy(
        imageArn='string',
        policy='string'
    )

    :type imageArn: string
    :param imageArn: [REQUIRED] The Amazon Resource Name (ARN) of the image that
        this policy should be applied to.

    :type policy: string
    :param policy: [REQUIRED] The policy to apply.

    :rtype: dict
    :return: A dict with this shape::

        {
            'requestId': 'string',  # unique request ID
            'imageArn': 'string'    # ARN of the image the policy was applied to
        }

    :raises: imagebuilder.Client.exceptions.ServiceException,
        imagebuilder.Client.exceptions.ClientException,
        imagebuilder.Client.exceptions.ServiceUnavailableException,
        imagebuilder.Client.exceptions.InvalidRequestException,
        imagebuilder.Client.exceptions.InvalidParameterValueException,
        imagebuilder.Client.exceptions.ResourceNotFoundException,
        imagebuilder.Client.exceptions.ForbiddenException,
        imagebuilder.Client.exceptions.CallRateLimitExceededException
    """
    pass
def put_image_recipe_policy(imageRecipeArn=None, policy=None):
"""
Applies a policy to an image recipe. We recommend that you call the RAM API CreateResourceShare to share resources. If you call the Image Builder API PutImageRecipePolicy , you must also call the RAM API PromoteResourceShareCreatedFromPolicy in order for the resource to be visible to all principals with whom the resource is shared.
See also: | |
or the default value if either is defined, `None` otherwise."""
return self.fixed if self.fixed is not None else self.default
# Global element's exclusive properties
@property
def abstract(self):
    """The XSD 'abstract' flag; an element reference delegates to its target."""
    if self.ref is not None:
        return self.ref.abstract
    return self._abstract
@property
def final(self):
    """The effective 'final' value: ref target first, then own value, then schema default."""
    if self.ref is not None:
        return self.ref.final
    value = self._final
    return value if value is not None else self.schema.final_default
@property
def block(self):
    """The effective 'block' value: ref target first, then own value, then schema default."""
    if self.ref is not None:
        return self.ref.block
    value = self._block
    return value if value is not None else self.schema.block_default
@property
def nillable(self):
    """Whether the element accepts xsi:nil; an element reference delegates to its target."""
    if self.ref is None:
        return self._nillable
    return self.ref.nillable
@property
def substitution_group(self):
    """The substitution group of the element; a reference delegates to its target."""
    if self.ref is None:
        return self._substitution_group
    return self.ref.substitution_group
@property
def default(self):
    """The 'default' attribute read from the XSD element node, or the ref target's value."""
    if self.ref is not None:
        return self.ref.default
    return self.elem.get('default')
@property
def fixed(self):
    """The 'fixed' attribute read from the XSD element node, or the ref target's value."""
    if self.ref is not None:
        return self.ref.fixed
    return self.elem.get('fixed')
@property
def form(self):
    """The 'form' value of the element; a reference delegates to its target."""
    if self.ref is None:
        return self._form
    return self.ref.form
def get_attribute(self, name):
    """Return the XSD attribute of the element's type matching *name*.

    *name* may be an extended QName (``{uri}local``) or a local name; a local
    name is expanded with the type's target namespace before the lookup.
    """
    if name[0] == '{':
        key = name
    else:
        key = get_qname(self.type.target_namespace, name)
    return self.type.attributes[key]
def get_type(self, elem, inherited=None):
    """Return the XSD type for decoding: the head type when set, the element's type otherwise.

    The *elem* and *inherited* arguments are unused here; they are accepted for
    interface compatibility with overriding implementations.
    """
    return self.type if not self._head_type else self._head_type
def get_attributes(self, xsd_type):
    """Return the attribute group of *xsd_type*.

    When *xsd_type* has no ``attributes``, fall back to the element's own
    attribute group if it is the element's type, otherwise to an empty
    attribute group built by the schema.
    """
    try:
        return xsd_type.attributes
    except AttributeError:
        pass
    if xsd_type is self.type:
        return self.attributes
    return self.schema.create_empty_attribute_group(self)
def get_path(self, ancestor=None, reverse=False):
    """Return the XPath expression locating the element.

    The path is relative to the containing schema instance, or to *ancestor*
    when one is given; in that case `None` is returned if *ancestor* is not
    actually an ancestor of the element.

    :param ancestor: optional XSD component of the same schema that may be \
    an ancestor of the element.
    :param reverse: if set to `True` returns the reverse path, from the element to ancestor.
    """
    steps = []
    component = self
    while component is not None:
        if component is ancestor:
            return '/'.join(reversed(steps)) or '.'
        if hasattr(component, 'tag'):
            steps.append('..' if reverse else component.name)
        component = component.parent
    if ancestor is None:
        return '/'.join(reversed(steps)) or '.'
    return None
def iter_components(self, xsd_classes=None):
    """Iterate the element, its identity constraints and, for a non-reference
    element with a locally defined type, the components of that type.

    :param xsd_classes: optional class or tuple of classes used to filter the
        yielded components.
    """
    if xsd_classes is None or isinstance(self, xsd_classes):
        yield self
    for obj in self.identities.values():
        if xsd_classes is None or isinstance(obj, xsd_classes):
            yield obj
    if self.ref is None and self.type.parent is not None:
        yield from self.type.iter_components(xsd_classes)
def iter_substitutes(self):
    """Yield the non-abstract elements that can substitute this element.

    Only global elements and element references have substitutes; the walk
    follows the substitution group hierarchy recursively.
    """
    if self.parent is not None and self.ref is None:
        return
    for member in self.maps.substitution_groups.get(self.name, ()):
        if not member.abstract:
            yield member
        for substitute in member.iter_substitutes():
            if not substitute.abstract:
                yield substitute
def data_value(self, elem):
    """Return the decoded data value of *elem*, as XPath fn:data().

    An empty element falls back to the fixed value, then to the default value;
    `None` is returned when no text is available at all.
    """
    text = elem.text
    if text is None:
        text = self.fixed if self.fixed is not None else self.default
        if text is None:
            return None
    return self.type.text_decode(text)
def check_dynamic_context(self, elem, **kwargs):
    """Check and apply the xsi:schemaLocation hints found on *elem*.

    Only active when the caller passes a ``locations`` dict in *kwargs*,
    mapping each started namespace to its schema location URL (or to ``None``
    when the namespace was started without a location hint).

    :raises XMLSchemaValidationError: if a location hint arrives after its \
    namespace has already been started, or if the dynamically loaded schema \
    is not already built (it would change the assessment).
    """
    try:
        locations = kwargs['locations']
    except KeyError:
        # No location tracking requested by the caller: nothing to check.
        return
    for ns, url in etree_iter_location_hints(elem):
        if ns not in locations:
            locations[ns] = url
        elif locations[ns] is None:
            # Namespace already started without a hint: a late declaration
            # cannot be honored.
            reason = "schemaLocation declaration after namespace start"
            raise XMLSchemaValidationError(self, elem, reason)
        if ns == self.target_namespace:
            schema = self.schema.include_schema(url, self.schema.base_url)
        else:
            schema = self.schema.import_namespace(ns, url, self.schema.base_url)
        if not schema.built:
            reason = "dynamic loaded schema change the assessment"
            raise XMLSchemaValidationError(self, elem, reason)
    # Register the namespaces of qualified attribute names and of the element
    # tag as started with no location hint, so later hints for them are rejected.
    if elem.attrib:
        for name in elem.attrib:
            if name[0] == '{':
                ns = get_namespace(name)
                if ns not in locations:
                    locations[ns] = None
    if elem.tag[0] == '{':
        ns = get_namespace(elem.tag)
        if ns not in locations:
            locations[ns] = None
def start_identities(self, identities):
    """Activate tracking of this element's identity constraints, resetting any
    counter already registered and creating missing ones.

    :param identities: a dictionary containing the identities counters.
    """
    for constraint in self.identities.values():
        if constraint in identities:
            identities[constraint].clear()
        else:
            identities[constraint] = constraint.get_counter()
def stop_identities(self, identities):
    """Deactivate tracking of this element's identity constraints, disabling
    registered counters and creating missing ones already disabled.

    :param identities: a dictionary containing the identities counters.
    """
    for identity in self.identities.values():
        if identity in identities:
            identities[identity].enabled = False
        else:
            identities[identity] = identity.get_counter(enabled=False)
def iter_decode(self, elem, validation='lax', **kwargs):
"""
Creates an iterator for decoding an Element instance.
:param elem: the Element that has to be decoded.
:param validation: the validation mode, can be 'lax', 'strict' or 'skip'.
:param kwargs: keyword arguments for the decoding process.
:return: yields a decoded object, eventually preceded by a sequence of \
validation or decoding errors.
"""
if self.abstract:
reason = "cannot use an abstract element for validation"
yield self.validation_error(validation, reason, elem, **kwargs)
try:
namespaces = kwargs['namespaces']
except KeyError:
namespaces = None
try:
level = kwargs['level']
except KeyError:
level = kwargs['level'] = 0
try:
identities = kwargs['identities']
except KeyError:
identities = kwargs['identities'] = {}
self.start_identities(identities)
try:
converter = kwargs['converter']
except KeyError:
converter = kwargs['converter'] = self.schema.get_converter(**kwargs)
else:
if not isinstance(converter, XMLSchemaConverter) and converter is not None:
converter = kwargs['converter'] = self.schema.get_converter(**kwargs)
try:
pass # self.check_dynamic_context(elem, **kwargs) TODO: dynamic schema load
except XMLSchemaValidationError as err:
yield self.validation_error(validation, err, elem, **kwargs)
inherited = kwargs.get('inherited')
value = content = attributes = None
nilled = False
# Get the instance effective type
xsd_type = self.get_type(elem, inherited)
if XSI_TYPE in elem.attrib:
type_name = elem.attrib[XSI_TYPE].strip()
try:
xsd_type = self.maps.get_instance_type(type_name, xsd_type, namespaces)
except (KeyError, TypeError) as err:
yield self.validation_error(validation, err, elem, **kwargs)
if xsd_type.is_blocked(self):
reason = "usage of %r is blocked" % xsd_type
yield self.validation_error(validation, reason, elem, **kwargs)
if xsd_type.abstract:
yield self.validation_error(validation, "%r is abstract", elem, **kwargs)
if xsd_type.is_complex() and self.xsd_version == '1.1':
kwargs['id_list'] = [] # Track XSD 1.1 multiple xs:ID attributes/children
content_decoder = xsd_type.content if xsd_type.is_complex() else xsd_type
# Decode attributes
attribute_group = self.get_attributes(xsd_type)
for result in attribute_group.iter_decode(elem.attrib, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
attributes = result
if self.inheritable and any(name in self.inheritable for name in elem.attrib):
if inherited:
inherited = inherited.copy()
inherited.update((k, v) for k, v in elem.attrib.items() if k in self.inheritable)
else:
inherited = {k: v for k, v in elem.attrib.items() if k in self.inheritable}
kwargs['inherited'] = inherited
# Checks the xsi:nil attribute of the instance
if XSI_NIL in elem.attrib:
xsi_nil = elem.attrib[XSI_NIL].strip()
if not self.nillable:
reason = "element is not nillable."
yield self.validation_error(validation, reason, elem, **kwargs)
elif xsi_nil not in {'0', '1', 'false', 'true'}:
reason = "xsi:nil attribute must have a boolean value."
yield self.validation_error(validation, reason, elem, **kwargs)
elif xsi_nil in ('0', 'false'):
pass
elif self.fixed is not None:
reason = "xsi:nil='true' but the element has a fixed value."
yield self.validation_error(validation, reason, elem, **kwargs)
elif elem.text is not None or len(elem):
reason = "xsi:nil='true' but the element is not empty."
yield self.validation_error(validation, reason, elem, **kwargs)
else:
nilled = True
if xsd_type.is_empty() and elem.text:
reason = "character data is not allowed because content is empty"
yield self.validation_error(validation, reason, elem, **kwargs)
if nilled:
pass
elif xsd_type.model_group is not None:
for assertion in xsd_type.assertions:
for error in assertion(elem, **kwargs):
yield self.validation_error(validation, error, **kwargs)
for result in content_decoder.iter_decode(elem, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
else:
content = result
if len(content) == 1 and content[0][0] == 1:
value, content = content[0][1], None
if self.fixed is not None and \
(len(elem) > 0 or value is not None and self.fixed != value):
reason = "must have the fixed value %r." % self.fixed
yield self.validation_error(validation, reason, elem, **kwargs)
else:
if len(elem):
reason = "a simple content element can't have child elements."
yield self.validation_error(validation, reason, elem, **kwargs)
text = elem.text
if self.fixed is not None:
if text is None:
text = self.fixed
elif text == self.fixed:
pass
elif not strictly_equal(xsd_type.text_decode(text),
xsd_type.text_decode(self.fixed)):
reason = "must have the fixed value %r." % self.fixed
yield self.validation_error(validation, reason, elem, **kwargs)
elif not text and kwargs.get('use_defaults') and self.default is not None:
text = self.default
if xsd_type.is_complex():
for assertion in xsd_type.assertions:
for error in assertion(elem, value=text, **kwargs):
yield self.validation_error(validation, error, **kwargs)
if text and content_decoder.is_list():
value = text.split()
else:
value = text
elif xsd_type.is_notation():
if xsd_type.name == XSD_NOTATION_TYPE:
msg = "cannot validate against xs:NOTATION directly, " \
"only against a subtype with an enumeration facet"
yield self.validation_error(validation, msg, text, **kwargs)
elif not xsd_type.enumeration:
msg = "missing enumeration facet in xs:NOTATION subtype"
yield self.validation_error(validation, msg, text, **kwargs)
if text is None:
for result in content_decoder.iter_decode('', validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
if 'filler' in kwargs:
value = kwargs['filler'](self)
else:
for result in content_decoder.iter_decode(text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self.validation_error(validation, result, elem, **kwargs)
elif result is None and 'filler' in kwargs:
value = kwargs['filler'](self)
else:
value = result
if isinstance(value, Decimal):
try:
| |
= spars['calculation_type']
else: ftype = 'No Data'
dec = 'No Data'
inc = 'No Data'
mad = 'No Data'
dang = 'No Data'
a95 = 'No Data'
sk = 'No Data'
sr2 = 'No Data'
else:
if 'measurement_step_min' in list(pars.keys()): fmin = str(fit.tmin)
else: fmin = "N/A"
if 'measurement_step_max' in list(pars.keys()): fmax = str(fit.tmax)
else: fmax = "N/A"
if 'specimen_n' in list(pars.keys()): n = str(pars['specimen_n'])
else: n = "N/A"
if 'calculation_type' in list(pars.keys()): ftype = pars['calculation_type']
else: ftype = "N/A"
if 'specimen_dec' in list(pars.keys()): dec = "%.1f"%pars['specimen_dec']
else: dec = "N/A"
if 'specimen_inc' in list(pars.keys()): inc = "%.1f"%pars['specimen_inc']
else: inc = "N/A"
if 'specimen_mad' in list(pars.keys()): mad = "%.1f"%pars['specimen_mad']
else: mad = "N/A"
if 'specimen_dang' in list(pars.keys()): dang = "%.1f"%pars['specimen_dang']
else: dang = "N/A"
if 'specimen_alpha95' in list(pars.keys()): a95 = "%.1f"%pars['specimen_alpha95']
else: a95 = "N/A"
if 'specimen_k' in list(pars.keys()): sk = "%.1f"%pars['specimen_k']
else: sk = "N/A"
if 'specimen_r' in list(pars.keys()): sr2 = "%.1f"%pars['specimen_r']
else: sr2 = "N/A"
if self.search_query != "":
entry = (specimen+name+fmin+fmax+n+ftype+dec+inc+mad+dang+a95+sk+sr2).replace(" ","").lower()
if self.search_query not in entry:
self.fit_list.pop(i)
if i < self.logger.GetItemCount():
self.logger.DeleteItem(i)
return "s"
for e in (specimen,name,fmin,fmax,n,ftype,dec,inc,mad,dang,a95,sk,sr2):
if e not in self.search_choices:
self.search_choices.append(e)
if i < self.logger.GetItemCount():
self.logger.DeleteItem(i)
self.logger.InsertItem(i, str(specimen))
self.logger.SetItem(i, 1, name)
self.logger.SetItem(i, 2, fmin)
self.logger.SetItem(i, 3, fmax)
self.logger.SetItem(i, 4, n)
self.logger.SetItem(i, 5, ftype)
self.logger.SetItem(i, 6, dec)
self.logger.SetItem(i, 7, inc)
self.logger.SetItem(i, 8, mad)
self.logger.SetItem(i, 9, dang)
self.logger.SetItem(i, 10, a95)
self.logger.SetItem(i, 11, sk)
self.logger.SetItem(i, 12, sr2)
self.logger.SetItemBackgroundColour(i,"WHITE")
a,b = False,False
if fit in self.parent.bad_fits:
self.logger.SetItemBackgroundColour(i,"red")
b = True
if self.parent.current_fit == fit:
self.logger.SetItemBackgroundColour(i,"LIGHT BLUE")
self.logger_focus(i)
self.current_fit_index = i
a = True
if a and b:
self.logger.SetItemBackgroundColour(i,"red")
def update_current_fit_data(self):
"""
updates the current_fit of the parent Zeq_GUI entry in the case of it's data being changed
"""
if self.current_fit_index:
self.update_logger_entry(self.current_fit_index)
def change_selected(self,new_fit):
"""
updates passed in fit or index as current fit for the editor (does not affect parent),
if no parameters are passed in it sets first fit as current
@param: new_fit -> fit object to highlight as selected
"""
if len(self.fit_list)==0: return
if self.search_query and self.parent.current_fit not in [x[0] for x in self.fit_list]: return
if self.current_fit_index == None:
if not self.parent.current_fit: return
for i,(fit,specimen) in enumerate(self.fit_list):
if fit == self.parent.current_fit:
self.current_fit_index = i
break
i = 0
if isinstance(new_fit, Fit):
for i, (fit,speci) in enumerate(self.fit_list):
if fit == new_fit:
break
elif type(new_fit) is int:
i = new_fit
elif new_fit != None:
print(('cannot select fit of type: ' + str(type(new_fit))))
if self.current_fit_index != None and \
len(self.fit_list) > 0 and \
self.fit_list[self.current_fit_index][0] in self.parent.bad_fits:
self.logger.SetItemBackgroundColour(self.current_fit_index,"")
else:
self.logger.SetItemBackgroundColour(self.current_fit_index,"WHITE")
self.current_fit_index = i
if self.fit_list[self.current_fit_index][0] in self.parent.bad_fits:
self.logger.SetItemBackgroundColour(self.current_fit_index,"red")
else:
self.logger.SetItemBackgroundColour(self.current_fit_index,"LIGHT BLUE")
def logger_focus(self,i,focus_shift=16):
"""
focuses the logger on an index 12 entries below i
@param: i -> index to focus on
"""
if self.logger.GetItemCount()-1 > i+focus_shift:
i += focus_shift
else:
i = self.logger.GetItemCount()-1
self.logger.Focus(i)
def OnClick_listctrl(self, event):
"""
Edits the logger and the Zeq_GUI parent object to select the fit that was newly selected by a double click
@param: event -> wx.ListCtrlEvent that triggered this function
"""
i = event.GetIndex()
if self.parent.current_fit == self.fit_list[i][0]: return
self.parent.initialize_CART_rot(self.fit_list[i][1])
si = self.parent.specimens.index(self.fit_list[i][1])
self.parent.specimens_box.SetSelection(si)
self.parent.select_specimen(self.fit_list[i][1])
self.change_selected(i)
fi = 0
while (self.parent.s == self.fit_list[i][1] and i >= 0): i,fi = (i-1,fi+1)
self.parent.update_fit_box()
self.parent.fit_box.SetSelection(fi-1)
self.parent.update_selection()
def OnRightClickListctrl(self, event):
"""
Edits the logger and the Zeq_GUI parent object so that the selected interpretation is now marked as bad
@param: event -> wx.ListCtrlEvent that triggered this function
"""
i = event.GetIndex()
fit,spec = self.fit_list[i][0],self.fit_list[i][1]
if fit in self.parent.bad_fits:
if not self.parent.mark_fit_good(fit,spec=spec): return
if i == self.current_fit_index:
self.logger.SetItemBackgroundColour(i,"LIGHT BLUE")
else:
self.logger.SetItemBackgroundColour(i,"WHITE")
else:
if not self.parent.mark_fit_bad(fit): return
if i == self.current_fit_index:
self.logger.SetItemBackgroundColour(i,"red")
else:
self.logger.SetItemBackgroundColour(i,"red")
self.parent.calculate_high_levels_data()
self.parent.plot_high_levels_data()
self.logger_focus(i)
##################################Search Bar Functions###############################
def on_enter_search_bar(self,event):
self.search_query = self.search_bar.GetValue().replace(" ","").lower()
self.update_editor()
# def on_complete_search_bar(self,event):
# self.search_bar.AutoComplete(self.search_choices)
###################################ComboBox Functions################################
def update_bounds_boxes(self,B_list):
self.tmin_box.SetItems(B_list)
self.tmax_box.SetItems(B_list)
def add_new_color(self,event):
new_color = self.color_box.GetValue()
if ':' in new_color:
color_list = new_color.split(':')
color_name = color_list[0]
if len(color_list[1])==7 and color_list[1].startswith('#'):
for c in color_list[1][1:]:
if ord(c) < 48 or ord(c) > 70:
self.parent.user_warning('invalid hex color must be of form #0F0F0F');return
color_val = color_list[1]
elif '(' in color_list[1] and ')' in color_list[1]:
color_val = list(map(eval, tuple(color_list[1].strip('( )').split(','))))
for val in color_val:
if val > 1 or val < 0: self.parent.user_warning("invalid RGB sequence"); return
else: self.parent.user_warning("colors must be given as a valid hex color or rgb tuple"); return
else:
self.parent.user_warning("New colors must be passed in as $colorname:$colorval where $colorval is a valid hex color or rgb tuple"); return
self.color_dict[color_name] = color_val
#clear old box
self.color_box.Clear()
#update fit box
self.color_box.SetItems([''] + sorted(self.color_dict.keys()))
def on_select_coordinates(self,event):
self.parent.coordinates_box.SetStringSelection(self.coordinates_box.GetStringSelection())
self.parent.onSelect_coordinates(event)
def on_select_show_box(self,event):
"""
Changes the type of mean shown on the high levels mean plot so that single dots represent one of whatever the value of this box is.
@param: event -> the wx.COMBOBOXEVENT that triggered this function
"""
self.parent.UPPER_LEVEL_SHOW=self.show_box.GetValue()
self.parent.calculate_high_levels_data()
self.parent.plot_high_levels_data()
def on_select_high_level(self,event,called_by_parent=False):
"""
alters the possible entries in level_names combobox to give the user selections for which specimen interpretations to display in the logger
@param: event -> the wx.COMBOBOXEVENT that triggered this function
"""
UPPER_LEVEL=self.level_box.GetValue()
if UPPER_LEVEL=='sample':
self.level_names.SetItems(self.parent.samples)
self.level_names.SetStringSelection(self.parent.Data_hierarchy['sample_of_specimen'][self.parent.s])
if UPPER_LEVEL=='site':
self.level_names.SetItems(self.parent.sites)
self.level_names.SetStringSelection(self.parent.Data_hierarchy['site_of_specimen'][self.parent.s])
if UPPER_LEVEL=='location':
self.level_names.SetItems(self.parent.locations)
self.level_names.SetStringSelection(self.parent.Data_hierarchy['location_of_specimen'][self.parent.s])
if UPPER_LEVEL=='study':
self.level_names.SetItems(['this study'])
self.level_names.SetStringSelection('this study')
if not called_by_parent:
self.parent.level_box.SetStringSelection(UPPER_LEVEL)
self.parent.onSelect_high_level(event,True)
self.on_select_level_name(event)
def on_select_level_name(self,event,called_by_parent=False):
"""
change this objects specimens_list to control which specimen interpretatoins are displayed in this objects logger
@param: event -> the wx.ComboBoxEvent that triggered this function
"""
high_level_name=str(self.level_names.GetValue())
if self.level_box.GetValue()=='sample':
self.specimens_list=self.parent.Data_hierarchy['samples'][high_level_name]['specimens']
elif self.level_box.GetValue()=='site':
self.specimens_list=self.parent.Data_hierarchy['sites'][high_level_name]['specimens']
elif self.level_box.GetValue()=='location':
self.specimens_list=self.parent.Data_hierarchy['locations'][high_level_name]['specimens']
elif self.level_box.GetValue()=='study':
self.specimens_list=self.parent.Data_hierarchy['study']['this study']['specimens']
if not called_by_parent:
self.parent.level_names.SetStringSelection(high_level_name)
self.parent.onSelect_level_name(event,True)
self.specimens_list.sort(key=spec_key_func)
self.update_editor()
def on_select_mean_type_box(self, event):
"""
set parent Zeq_GUI to reflect change in this box and change the
@param: event -> the wx.ComboBoxEvent that triggered this function
"""
new_mean_type = self.mean_type_box.GetValue()
if new_mean_type == "None":
self.parent.clear_high_level_pars()
self.parent.mean_type_box.SetStringSelection(new_mean_type)
self.parent.onSelect_mean_type_box(event)
def on_select_mean_fit_box(self, event):
"""
set parent Zeq_GUI to reflect the change in this box then replot the high level means plot
@param: event -> the wx.COMBOBOXEVENT that triggered this function
"""
new_mean_fit = self.mean_fit_box.GetValue()
self.parent.mean_fit_box.SetStringSelection(new_mean_fit)
self.parent.onSelect_mean_fit_box(event)
###################################Button Functions##################################
def on_select_stats_button(self,event):
"""
"""
i = self.switch_stats_button.GetValue()
self.parent.switch_stats_button.SetValue(i)
self.parent.update_high_level_stats()
def add_highlighted_fits(self, evnet):
"""
adds a new interpretation to each specimen highlighted in logger if multiple interpretations are highlighted of the same specimen only one new interpretation is added
@param: event -> the wx.ButtonEvent that triggered this function
"""
specimens = []
next_i = self.logger.GetNextSelected(-1)
if next_i == -1: return
while next_i != -1:
fit,specimen = self.fit_list[next_i]
if specimen in specimens:
next_i = self.logger.GetNextSelected(next_i)
continue
else: specimens.append(specimen)
next_i = self.logger.GetNextSelected(next_i)
for specimen in specimens:
self.add_fit_to_specimen(specimen)
self.update_editor()
self.parent.update_selection()
def add_fit_to_all(self,event):
for specimen in self.parent.specimens:
self.add_fit_to_specimen(specimen)
self.update_editor()
self.parent.update_selection()
def add_fit_to_specimen(self,specimen):
if specimen not in self.parent.pmag_results_data['specimens']:
self.parent.pmag_results_data['specimens'][specimen] = []
new_name = self.name_box.GetLineText(0)
new_color = self.color_box.GetValue()
new_tmin = self.tmin_box.GetValue()
new_tmax = self.tmax_box.GetValue()
if not new_name:
next_fit = str(len(self.parent.pmag_results_data['specimens'][specimen]) + 1)
while ("Fit " + next_fit) in [x.name for x in self.parent.pmag_results_data['specimens'][specimen]]:
next_fit = str(int(next_fit) + 1)
new_name = ("Fit " + next_fit)
if not new_color:
next_fit = str(len(self.parent.pmag_results_data['specimens'][specimen]) + 1)
new_color = self.parent.colors[(int(next_fit)-1) % len(self.parent.colors)]
else: new_color = self.color_dict[new_color]
if not new_tmin: new_tmin = None
if not new_tmax: new_tmax = None
if new_name in [x.name for x in self.parent.pmag_results_data['specimens'][specimen]]:
print(('-E- interpretation called ' + new_name + ' already exsists for specimen ' + specimen))
return
self.parent.add_fit(specimen, new_name, new_tmin, new_tmax, color=new_color,suppress_warnings=True)
def delete_highlighted_fits(self, event):
"""
iterates through all highlighted fits in the logger of this object and removes them from the logger and the Zeq_GUI parent object
@param: event -> the wx.ButtonEvent that triggered this function
"""
next_i = -1
deleted_items = []
while True:
next_i = self.logger.GetNextSelected(next_i)
if next_i == -1:
break
deleted_items.append(next_i)
deleted_items.sort(reverse=True)
for item in deleted_items:
self.delete_entry(index=item)
self.parent.update_selection()
def delete_entry(self, fit = None, index = None):
"""
deletes the single item from the logger of this object that corrisponds to either the passed in fit or index. Note this function mutaits the logger of this object if deleting more than one entry be sure to pass items to delete in from highest index to lowest or else odd things | |
pairs.
Additionally, the entire unparsed log event is returned within ``@message`` .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetLogRecord>`_
**Request Syntax**
::
response = client.get_log_record(
logRecordPointer='string'
)
**Response Syntax**
::
{
'logRecord': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **logRecord** *(dict) --*
The requested log event, as a JSON string.
- *(string) --*
- *(string) --*
:type logRecordPointer: string
:param logRecordPointer: **[REQUIRED]**
The pointer corresponding to the log event record you want to retrieve. You get this from the response of a ``GetQueryResults`` operation. In that response, the value of the ``@ptr`` field for a log event is the value to use as ``logRecordPointer`` to retrieve that complete log event record.
:rtype: dict
:returns:
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_query_results(self, queryId: str) -> Dict:
"""
Returns the results from the specified query. If the query is in progress, partial results of that current execution are returned. Only the fields requested in the query are returned.
``GetQueryResults`` does not start a query execution. To run a query, use .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/GetQueryResults>`_
**Request Syntax**
::
response = client.get_query_results(
queryId='string'
)
**Response Syntax**
::
{
'results': [
[
{
'field': 'string',
'value': 'string'
},
],
],
'statistics': {
'recordsMatched': 123.0,
'recordsScanned': 123.0,
'bytesScanned': 123.0
},
'status': 'Scheduled'|'Running'|'Complete'|'Failed'|'Cancelled'
}
**Response Structure**
- *(dict) --*
- **results** *(list) --*
The log events that matched the query criteria during the most recent time it ran.
The ``results`` value is an array of arrays. Each log event is one object in the top-level array. Each of these log event objects is an array of ``field`` /``value`` pairs.
- *(list) --*
- *(dict) --*
Contains one field from one log event returned by a CloudWatch Logs Insights query, along with the value of that field.
- **field** *(string) --*
The log event field.
- **value** *(string) --*
The value of this field.
- **statistics** *(dict) --*
Includes the number of log events scanned by the query, the number of log events that matched the query criteria, and the total number of bytes in the log events that were scanned.
- **recordsMatched** *(float) --*
The number of log events that matched the query string.
- **recordsScanned** *(float) --*
The total number of log events scanned during the query.
- **bytesScanned** *(float) --*
The total number of bytes in the log events scanned during the query.
- **status** *(string) --*
The status of the most recent running of the query. Possible values are ``Cancelled`` , ``Complete`` , ``Failed`` , ``Running`` , ``Scheduled`` , and ``Unknown`` .
:type queryId: string
:param queryId: **[REQUIRED]**
The ID number of the query.
:rtype: dict
:returns:
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_tags_log_group(self, logGroupName: str) -> Dict:
"""
Lists the tags for the specified log group.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/ListTagsLogGroup>`_
**Request Syntax**
::
response = client.list_tags_log_group(
logGroupName='string'
)
**Response Syntax**
::
{
'tags': {
'string': 'string'
}
}
**Response Structure**
- *(dict) --*
- **tags** *(dict) --*
The tags for the log group.
- *(string) --*
- *(string) --*
:type logGroupName: string
:param logGroupName: **[REQUIRED]**
The name of the log group.
:rtype: dict
:returns:
"""
pass
def put_destination(self, destinationName: str, targetArn: str, roleArn: str) -> Dict:
"""
Creates or updates a destination. A destination encapsulates a physical resource (such as an Amazon Kinesis stream) and enables you to subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents . Currently, the only supported physical resource is a Kinesis stream belonging to the same account as the destination.
Through an access policy, a destination controls what is written to its Kinesis stream. By default, ``PutDestination`` does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after ``PutDestination`` .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestination>`_
**Request Syntax**
::
response = client.put_destination(
destinationName='string',
targetArn='string',
roleArn='string'
)
**Response Syntax**
::
{
'destination': {
'destinationName': 'string',
'targetArn': 'string',
'roleArn': 'string',
'accessPolicy': 'string',
'arn': 'string',
'creationTime': 123
}
}
**Response Structure**
- *(dict) --*
- **destination** *(dict) --*
The destination.
- **destinationName** *(string) --*
The name of the destination.
- **targetArn** *(string) --*
The Amazon Resource Name (ARN) of the physical target to where the log events are delivered (for example, a Kinesis stream).
- **roleArn** *(string) --*
A role for impersonation, used when delivering log events to the target.
- **accessPolicy** *(string) --*
An IAM policy document that governs which AWS accounts can create subscription filters against this destination.
- **arn** *(string) --*
The ARN of this destination.
- **creationTime** *(integer) --*
The creation time of the destination, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
:type destinationName: string
:param destinationName: **[REQUIRED]**
A name for the destination.
:type targetArn: string
:param targetArn: **[REQUIRED]**
The ARN of an Amazon Kinesis stream to which to deliver matching log events.
:type roleArn: string
:param roleArn: **[REQUIRED]**
The ARN of an IAM role that grants CloudWatch Logs permissions to call the Amazon Kinesis PutRecord operation on the destination stream.
:rtype: dict
:returns:
"""
pass
def put_destination_policy(self, destinationName: str, accessPolicy: str):
"""
Creates or updates an access policy associated with an existing destination. An access policy is an `IAM policy document <https://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html>`__ that is used to authorize claims to register a subscription filter against a given destination.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutDestinationPolicy>`_
**Request Syntax**
::
response = client.put_destination_policy(
destinationName='string',
accessPolicy='string'
)
:type destinationName: string
:param destinationName: **[REQUIRED]**
A name for an existing destination.
:type accessPolicy: string
:param accessPolicy: **[REQUIRED]**
An IAM policy document that authorizes cross-account users to deliver their log events to the associated destination.
:returns: None
"""
pass
def put_log_events(self, logGroupName: str, logStreamName: str, logEvents: List, sequenceToken: str = None) -> Dict:
"""
Uploads a batch of log events to the specified log stream.
You must include the sequence token obtained from the response of the previous call. An upload in a newly created log stream does not require a sequence token. You can also get the sequence token using DescribeLogStreams . If you call ``PutLogEvents`` twice within a narrow time period using the same value for ``sequenceToken`` , both calls may be successful, or one may be rejected.
The batch of events must satisfy the following constraints:
* The maximum batch size is 1,048,576 bytes, and this size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
* None of the log events in the batch can be more than 2 hours in the future.
* None of the log events | |
self.get_transform(frames[0], frames[1])
self.bounding_box = CompoundBoundingBox.validate(transform_0, cbbox, selector_args=selector_args,
order='F')
else:
raise NotImplementedError('Compound bounding box is not supported for your version of astropy')
def _get_axes_indices(self):
try:
axes_ind = np.argsort(self.input_frame.axes_order)
except AttributeError:
# the case of a frame being a string
axes_ind = np.arange(self.forward_transform.n_inputs)
return axes_ind
def __str__(self):
from astropy.table import Table
#col1 = [item[0] for item in self._pipeline]
col1 = [step.frame for step in self._pipeline]
col2 = []
for item in self._pipeline[: -1]:
#model = item[1]
model = item.transform
if model.name is not None:
col2.append(model.name)
else:
col2.append(model.__class__.__name__)
col2.append(None)
t = Table([col1, col2], names=['From', 'Transform'])
return str(t)
def __repr__(self):
fmt = "<WCS(output_frame={0}, input_frame={1}, forward_transform={2})>".format(
self.output_frame, self.input_frame, self.forward_transform)
return fmt
def footprint(self, bounding_box=None, center=False, axis_type="all"):
"""
Return the footprint in world coordinates.
Parameters
----------
bounding_box : tuple of floats: (start, stop)
`prop: bounding_box`
center : bool
If `True` use the center of the pixel, otherwise use the corner.
axis_type : str
A supported ``output_frame.axes_type`` or "all" (default).
One of ['spatial', 'spectral', 'temporal'] or a custom type.
Returns
-------
coord : ndarray
Array of coordinates in the output_frame mapping
corners to the output frame. For spatial coordinates the order
is clockwise, starting from the bottom left corner.
"""
def _order_clockwise(v):
return np.asarray([[v[0][0], v[1][0]], [v[0][0], v[1][1]],
[v[0][1], v[1][1]], [v[0][1], v[1][0]]]).T
if bounding_box is None:
if self.bounding_box is None:
raise TypeError("Need a valid bounding_box to compute the footprint.")
bb = self.bounding_box
else:
bb = bounding_box
all_spatial = all([t.lower() == "spatial" for t in self.output_frame.axes_type])
if all_spatial:
vertices = _order_clockwise(bb)
else:
vertices = np.array(list(itertools.product(*bb))).T
if center:
vertices = utils._toindex(vertices)
result = np.asarray(self.__call__(*vertices, **{'with_bounding_box': False}))
axis_type = axis_type.lower()
if axis_type == 'spatial' and all_spatial:
return result.T
if axis_type != "all":
axtyp_ind = np.array([t.lower() for t in self.output_frame.axes_type]) == axis_type
if not axtyp_ind.any():
raise ValueError('This WCS does not have axis of type "{}".'.format(axis_type))
result = np.asarray([(r.min(), r.max()) for r in result[axtyp_ind]])
if axis_type == "spatial":
result = _order_clockwise(result)
else:
result.sort()
result = np.squeeze(result)
return result.T
def fix_inputs(self, fixed):
"""
Return a new unique WCS by fixing inputs to constant values.
Parameters
----------
fixed : dict
Keyword arguments with fixed values corresponding to `self.selector`.
Returns
-------
new_wcs : `WCS`
A new unique WCS corresponding to the values in `fixed`.
Examples
--------
>>> w = WCS(pipeline, selector={"spectral_order": [1, 2]}) # doctest: +SKIP
>>> new_wcs = w.set_inputs(spectral_order=2) # doctest: +SKIP
>>> new_wcs.inputs # doctest: +SKIP
("x", "y")
"""
new_pipeline = []
step0 = self.pipeline[0]
new_transform = fix_inputs(step0[1], fixed)
new_pipeline.append((step0[0], new_transform))
new_pipeline.extend(self.pipeline[1:])
return self.__class__(new_pipeline)
def to_fits_sip(self, bounding_box=None, max_pix_error=0.25, degree=None,
max_inv_pix_error=0.25, inv_degree=None,
npoints=32, crpix=None, projection='TAN',
verbose=False):
"""
Construct a SIP-based approximation to the WCS for the axes
corresponding to the `~gwcs.coordinate_frames.CelestialFrame`
in the form of a FITS header.
The default mode in using this attempts to achieve roughly 0.25 pixel
accuracy over the whole image.
Parameters
----------
bounding_box : tuple, optional
A pair of tuples, each consisting of two numbers
Represents the range of pixel values in both dimensions
((xmin, xmax), (ymin, ymax))
max_pix_error : float, optional
Maximum allowed error over the domain of the pixel array. This
error is the equivalent pixel error that corresponds to the maximum
error in the output coordinate resulting from the fit based on
a nominal plate scale.
degree : int, iterable, None, optional
Degree of the SIP polynomial. Default value `None` indicates that
all allowed degree values (``[1...9]``) will be considered and
the lowest degree that meets accuracy requerements set by
``max_pix_error`` will be returned. Alternatively, ``degree`` can be
an iterable containing allowed values for the SIP polynomial degree.
This option is similar to default `None` but it allows caller to
restrict the range of allowed SIP degrees used for fitting.
Finally, ``degree`` can be an integer indicating the exact SIP degree
to be fit to the WCS transformation. In this case
``max_pixel_error`` is ignored.
max_inv_pix_error : float, optional
Maximum allowed inverse error over the domain of the pixel array
in pixel units. If None, no inverse is generated.
inv_degree : int, iterable, None, optional
Degree of the SIP polynomial. Default value `None` indicates that
all allowed degree values (``[1...9]``) will be considered and
the lowest degree that meets accuracy requerements set by
``max_pix_error`` will be returned. Alternatively, ``degree`` can be
an iterable containing allowed values for the SIP polynomial degree.
This option is similar to default `None` but it allows caller to
restrict the range of allowed SIP degrees used for fitting.
Finally, ``degree`` can be an integer indicating the exact SIP degree
to be fit to the WCS transformation. In this case
``max_inv_pixel_error`` is ignored.
npoints : int, optional
The number of points in each dimension to sample the bounding box
for use in the SIP fit. Minimum number of points is 3.
crpix : list of float, None, optional
Coordinates (1-based) of the reference point for the new FITS WCS.
When not provided, i.e., when set to `None` (default) the reference
pixel will be chosen near the center of the bounding box for axes
corresponding to the celestial frame.
projection : str, `~astropy.modeling.projections.Pix2SkyProjection`, optional
Projection to be used for the created FITS WCS. It can be specified
as a string of three characters specifying a FITS projection code
from Table 13 in
`Representations of World Coordinates in FITS \
<https://doi.org/10.1051/0004-6361:20021326>`_
(Paper I), <NAME>., and <NAME>., A & A, 395,
1061-1075, 2002. Alternatively, it can be an instance of one of the
`astropy's Pix2Sky_* <https://docs.astropy.org/en/stable/modeling/\
reference_api.html#module-astropy.modeling.projections>`_
projection models inherited from
:py:class:`~astropy.modeling.projections.Pix2SkyProjection`.
verbose : bool, optional
Print progress of fits.
Returns
-------
FITS header with all SIP WCS keywords
Raises
------
ValueError
If the WCS is not at least 2D, an exception will be raised. If the
specified accuracy (both forward and inverse, both rms and maximum)
is not achieved an exception will be raised.
Notes
-----
Use of this requires a judicious choice of required accuracies.
Attempts to use higher degrees (~7 or higher) will typically fail due
to floating point problems that arise with high powers.
"""
_, _, celestial_group = self._separable_groups(detect_celestial=True)
if celestial_group is None:
raise ValueError("The to_fits_sip requires an output celestial frame.")
hdr = self._to_fits_sip(
celestial_group=celestial_group,
keep_axis_position=False,
bounding_box=bounding_box,
max_pix_error=max_pix_error,
degree=degree,
max_inv_pix_error=max_inv_pix_error,
inv_degree=inv_degree,
npoints=npoints,
crpix=crpix,
projection=projection,
matrix_type='CD',
verbose=verbose
)
return hdr
def _to_fits_sip(self, celestial_group, keep_axis_position,
bounding_box, max_pix_error, degree,
max_inv_pix_error, inv_degree,
npoints, crpix, projection, matrix_type,
verbose):
r"""
Construct a SIP-based approximation to the WCS for the axes
corresponding to the `~gwcs.coordinate_frames.CelestialFrame`
in the form of a FITS header.
The default mode in using this attempts to achieve roughly 0.25 pixel
accuracy over the whole image.
Below we describe only parameters additional to the ones explained for
`to_fits_sip`.
Other Parameters
----------------
frame : gwcs.coordinate_frames.CelestialFrame
A celestial frame.
celestial_group : list of ``_WorldAxisInfo``
A group of two celestial axes to be represented using standard
image FITS WCS and maybe ``-SIP`` polynomials.
keep_axis_position : bool
This parameter controls whether to keep/preserve output axes
indices in this WCS object when creating FITS WCS and create a FITS
header with ``CTYPE`` axes indices preserved from the ``frame``
object or whether to reset the indices of output celestial axes
to 1 and 2 with ``CTYPE1``, ``CTYPE2``. Default is `False`.
.. warning::
Returned header will have both ``NAXIS`` and ``WCSAXES`` set
to 2. If ``max(axes_mapping) > 2`` this will lead to an invalid
WCS. It is caller's responsibility to adjust NAXIS to a valid
value.
.. note::
The ``lon``/``lat`` order is still preserved regardless of this
setting.
matrix_type : {'CD', 'PC-CDELT1', 'PC-SUM1', 'PC-DET1', 'PC-SCALE'}
Specifies formalism (``PC`` or ``CD``) to be used for the linear
transformation matrix and normalization for the ``PC`` matrix
*when non-linear polynomial terms are not required to achieve
requested accuracy*.
.. note:: ``CD`` matrix is always used when requested SIP
approximation accuracy requires non-linear terms (when
``CTYPE`` ends in ``-SIP``). This parameter is ignored when
non-linear polynomial terms are used.
- ``'CD'``: use ``CD`` matrix;
- ``'PC-CDELT1'``: set | |
in
keys = sorted(row.Events)
arr, basisLength = self._trimGroups(ind, row, keys, station)
if basisLength == 0:
msg = (('subspace %d on %s is failing alignment and '
'trimming, deleting it') % (ind, station))
detex.log(__name__, msg, level='warn')
self._drop_subspace(station, ind)
continue
if normalize:
arr = np.array([x / np.linalg.norm(x) for x in arr])
tparr = np.transpose(arr)
# perform SVD
U, s, Vh = scipy.linalg.svd(tparr, full_matrices=False)
# make dict with sing. value as key and sing. vector as value
for einum, eival in enumerate(s):
svdDict[eival] = U[:, einum]
# asign Parameters back to subspace dataframes
self.subspaces[station].SVD[ind] = svdDict # assign SVD
fracEnergy = self._getFracEnergy(ind, row, svdDict, U)
usedBasis = self._getUsedBasis(ind, row, svdDict, fracEnergy,
selectCriteria, selectValue)
# Add fracEnergy and SVD keys (sing. vals) to main DataFrames
self.subspaces[station].FracEnergy[ind] = fracEnergy
self.subspaces[station].UsedSVDKeys[ind] = usedBasis
self.subspaces[station].SVDdefined[ind] = True
numBas = len(self.subspaces[station].UsedSVDKeys[ind])
self.subspaces[station].NumBasis[ind] = numBas
if len(self.ssStations) > 0:
self._setThresholds(selectCriteria, selectValue, conDatNum,
threshold, basisLength, backupThreshold, kwargs)
if len(self.singStations) > 0 and useSingles:
self.setSinglesThresholds(conDatNum=conDatNum, threshold=threshold,
backupThreshold=backupThreshold,
kwargs=kwargs)
def _drop_subspace(self, station, ssnum):
"""
Drop a subspace that is misbehaving
"""
space = self.subspaces[station]
self.subspaces[station] = space[space.index != int(ssnum)]
def _trimGroups(self, ind, row, keys, station):
"""
function to get trimed subspaces if trim times are defined, and
return an array of the aligned waveforms for the SVD to act on
"""
stkeys = row.SampleTrims.keys()
aliTD = row.AlignedTD
if 'Starttime' in stkeys and 'Endtime' in stkeys:
stim = row.SampleTrims['Starttime']
etim = row.SampleTrims['Endtime']
if stim < 0: # make sure stim is not less than 0
stim = 0
Arr = np.vstack([aliTD[x][stim:etim] -
np.mean(aliTD[x][stim:etim]) for x in keys])
basisLength = Arr.shape[1]
else:
msg = ('No trim times for %s and station %s, try running '
'pickTimes or attachPickTimes' % (row.Name, station))
detex.log(__name__, msg, level='warn', pri=True)
Arr = np.vstack([aliTD[x] - np.mean(aliTD[x]) for x in keys])
basisLength = Arr.shape[1]
return Arr, basisLength
def _checkSelection(self, selectCriteria, selectValue, threshold):
"""
Make sure all user defined values are kosher for SVD call
"""
if selectCriteria in [1, 2, 3]:
if selectValue > 1 or selectValue < 0:
msg = ('When selectCriteria==%d selectValue must be a float'
' between 0 and 1' % selectCriteria)
detex.log(__name__, msg, level='error', e=ValueError)
elif selectCriteria == 4:
if selectValue < 0 or not isinstance(selectValue, int):
msg = ('When selectCriteria==3 selectValue must be an'
'integer greater than 0')
detex.log(__name__, msg, level='error', e=ValueError)
else:
msg = 'selectCriteria of %s is not supported' % selectCriteria
detex.log(__name__, msg, level='error')
if threshold is not None:
if not isinstance(threshold, numbers.Number) or threshold < 0:
msg = 'Unsupported type for threshold, must be None or float'
detex.log(__name__, msg, level='error', e=ValueError)
def _getFracEnergy(self, ind, row, svdDict, U):
"""
calculates the % energy capture for each stubspace for each possible
dimension of rep. (up to # of events that go into the subspace)
"""
fracDict = {}
keys = row.Events
svales = svdDict.keys()
svales.sort(reverse=True)
stkeys = row.SampleTrims.keys() # dict defining sample trims
for key in keys:
aliTD = row.AlignedTD[key] # aligned waveform for event key
if 'Starttime' in stkeys and 'Endtime' in stkeys:
start = row.SampleTrims['Starttime'] # start of trim in samps
end = row.SampleTrims['Endtime'] # end of trim in samps
aliwf = aliTD[start: end]
else:
aliwf = aliTD
Ut = np.transpose(U) # transpose of basis vects
# normalized dot product (mat. mult.)
normUtAliwf = scipy.dot(Ut, aliwf) / scipy.linalg.norm(aliwf)
# add 0% energy capture for dim of 0
repvect = np.insert(np.square(normUtAliwf), 0, 0)
# cumul. energy captured for increasing dim. reps
cumrepvect = [np.sum(repvect[:x + 1]) for x in range(len(repvect))]
fracDict[key] = cumrepvect # add cumul. to keys
# get average and min energy capture, append value to dict
fracDict['Average'] = np.average([fracDict[x] for x in keys], axis=0)
fracDict['Minimum'] = np.min([fracDict[x] for x in keys], axis=0)
return (fracDict)
def _getUsedBasis(self, ind, row, svdDict, cumFracEnergy,
selectCriteria, selectValue):
"""
function to populate the keys of the selected SVD basis vectors
"""
keys = svdDict.keys()
keys.sort(reverse=True)
if selectCriteria in [1, 2, 3]:
# make sure last element is exactly 1
cumFracEnergy['Average'][-1] = 1.00
ndim = np.argmax(cumFracEnergy['Average'] >= selectValue)
selKeys = keys[:ndim] # selected keys
if selectCriteria == 4:
selKeys = keys[:selectValue + 1]
return selKeys
def _setThresholds(self, selectCriteria, selectValue, conDatNum,
threshold, basisLength, backupThreshold, kwargs={}):
if threshold > 0:
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
self.subspaces[station].Threshold[ind] = threshold
elif selectCriteria == 1:
msg = 'selectCriteria 1 currently not supported'
detex.log(__name__, msg, level='error', e=ValueError)
elif selectCriteria in [2, 4]:
# call getFAS to estimate null space dist.
self.getFAS(conDatNum, **kwargs)
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
beta_a, beta_b = row.FAS['betadist'][0:2]
# get threshold from beta dist.
# TODO consider implementing other dist. options as well
th = scipy.stats.beta.isf(self.Pf, beta_a, beta_b, 0, 1)
if th > .9:
th, Pftemp = self._approxThld(beta_a, beta_b, station,
row, self.Pf, 1000, 3,
backupThreshold)
msg = ('Scipy.stats.beta.isf failed with pf=%e, '
'approximated threshold to %f with a Pf of %e '
'for station %s %s using forward grid search' %
(self.Pf, th, Pftemp, station, row.Name))
detex.log(__name__, msg, level='warning')
self.subspaces[station].Threshold[ind] = th
elif selectCriteria == 3:
for station in self.ssStations:
subspa = self.subspaces[station]
for ind, row in subspa.iterrows():
th = row.FracEnergy['Minimum'][row.NumBasis] * selectValue
self.subspaces[station].Threshold[ind] = th
def setSinglesThresholds(self, conDatNum=50, recalc=False,
threshold=None, backupThreshold=None, **kwargs):
"""
Set thresholds for the singletons (unclustered events) by fitting
a beta distribution to estimation of null space
Parameters
----------
condatNum : int
The number of continuous data chunks to use to fit PDF
recalc : boolean
If true recalculate the the False Alarm Statistics
threshold : None or float between 0 and 1
If number, don't call getFAS simply use given threshold
backupThreshold : None or float
If approximate a threshold fails then use backupThreshold. If None
then raise.
Note
----------
Any singles without pick times will not be used. In this way singles
can be rejected
"""
for sta in self.singStations:
sing = self.singles[sta] # singles on station
sampTrims = self.singles[sta].SampleTrims
self.singles[sta].Name = ['SG%d' % x for x in range(len(sing))]
# get singles that have phase picks
singsAccepted = sing[[len(x.keys()) > 0 for x in sampTrims]]
self.singles[sta] = singsAccepted
self.singles[sta].reset_index(inplace=True, drop=True)
if threshold is None:
# get empirical dist unless manual threshold is passed
self.getFAS(conDatNum, useSingles=True,
useSubSpaces=False, **kwargs)
for sta in self.singStations:
for ind, row in self.singles[sta].iterrows():
if len(row.SampleTrims.keys()) < 1: # skip singles with no pick times
continue
if threshold:
th = threshold
else:
beta_a, beta_b = row.FAS[0]['betadist'][0:2]
th = scipy.stats.beta.isf(self.Pf, beta_a, beta_b, 0, 1)
if th > .9:
th, Pftemp = self._approxThld(beta_a, beta_b, sta,
row, self.Pf, 1000, 3,
backupThreshold)
msg = ('Scipy.stats.beta.isf failed with pf=%e, '
'approximated threshold to %f with a Pf of %e '
'for station %s %s using forward grid search' %
(self.Pf, th, Pftemp, sta, row.Name))
detex.log(__name__, msg, level='warning')
self.singles[sta]['Threshold'][ind] = th
def _approxThld(self, beta_a, beta_b, sta, row, target, numint, numloops,
backupThreshold):
"""
Because scipy.stats.beta.isf can break, if it returns a value near 1
when this is obviously wrong initialize grid search algorithm to get
close to desired threshold using forward problem which seems to work
where inverse fails See this bug report:
https://github.com/scipy/scipy/issues/4677
"""
startVal, stopVal = 0, 1
loops = 0
while loops < numloops:
Xs = np.linspace(startVal, stopVal, numint)
pfs = np.array([scipy.stats.beta.sf(x, beta_a, beta_b) for x in Xs])
resids = abs(pfs - target)
minind = resids.argmin()
if minind == 0 or minind == numint - 1:
msg1 = (('Grid search for threshold failing for %s on %s, '
'set it manually or use default') % (sta, row.name))
msg2 = (('Grid search for threshold failing for %s on %s, '
'using backup %.2f') % (sta, row.name, backupThreshold))
if backupThreshold is None:
detex.log(__name__, msg1, level='error', e=ValueError)
else:
detex.log(__name__, msg2, level='warn', pri=True)
return backupThreshold, target
bestPf = pfs[minind]
bestX = Xs[minind]
startVal, stopVal = Xs[minind - 1], Xs[minind + 1]
loops += 1
return | |
ballotpedia_district_id_list:
if one_ballotpedia_district_id not in merged_district_list:
# Build up a list of ballotpedia districts that we need to retrieve races for
merged_district_list.append(one_ballotpedia_district_id)
if success:
polling_locations_with_data += 1
else:
polling_locations_without_data += 1
messages.add_message(request, messages.INFO,
'Electoral data retrieved from Ballotpedia. '
'polling_locations_with_data: {polling_locations_with_data}, '
'polling_locations_without_data: {polling_locations_without_data}. '
''.format(
polling_locations_with_data=polling_locations_with_data,
polling_locations_without_data=polling_locations_without_data))
return HttpResponseRedirect(reverse('electoral_district:electoral_district_list', args=()) +
'?state_code=' + str(state_code) +
'&google_civic_election_id=' + str(google_civic_election_id))
@login_required
def retrieve_ballotpedia_candidates_by_district_from_api_view(request):
"""
Reach out to Ballotpedia API to retrieve candidates.
"""
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
only_retrieve_if_zero_candidates = request.GET.get('only_retrieve_if_zero_candidates', False)
state_code = request.GET.get('state_code', "")
election_manager = ElectionManager()
election_local_id = 0
is_national_election = False
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_local_id = election.id
is_national_election = election.is_national_election
if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
messages.add_message(request, messages.ERROR,
'For National elections, a State Code is required in order to run any '
'Ballotpedia data preparation.')
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
results = retrieve_ballotpedia_candidates_by_district_from_api(google_civic_election_id, state_code,
only_retrieve_if_zero_candidates)
kind_of_batch = ""
if 'kind_of_batch' in results:
kind_of_batch = results['kind_of_batch']
if not positive_value_exists(kind_of_batch):
kind_of_batch = CANDIDATE
batch_header_id = 0
if 'batch_saved' in results and results['batch_saved']:
messages.add_message(request, messages.INFO, 'Import batch for {google_civic_election_id} election saved.'
''.format(google_civic_election_id=google_civic_election_id))
batch_header_id = results['batch_header_id']
elif 'batch_header_id' in results and results['batch_header_id']:
messages.add_message(request, messages.INFO, 'Import batch for {google_civic_election_id} election saved, '
'batch_header_id.'
''.format(google_civic_election_id=google_civic_election_id))
batch_header_id = results['batch_header_id']
else:
messages.add_message(request, messages.ERROR, results['status'])
if positive_value_exists(batch_header_id):
# Go straight to the new batch
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?batch_header_id=" + str(batch_header_id) +
"&kind_of_batch=" + str(kind_of_batch) +
"&google_civic_election_id=" + str(google_civic_election_id))
else:
# Go to the office listing page
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
@login_required
def retrieve_ballotpedia_data_for_polling_locations_view(request, election_local_id=0):
"""
Reach out to Ballotpedia and retrieve (for one election):
1) Polling locations (so we can use those addresses to retrieve a representative set of ballots)
2) Cycle through a portion of those polling locations, enough that we are caching all of the possible ballot items
:param request:
:param election_local_id:
:return:
"""
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
force_district_retrieve_from_ballotpedia = request.GET.get('force_district_retrieve_from_ballotpedia', False)
state_code = request.GET.get('state_code', '')
retrieve_races = positive_value_exists(request.GET.get('retrieve_races', False))
retrieve_measures = positive_value_exists(request.GET.get('retrieve_measures', False))
import_limit = convert_to_int(request.GET.get('import_limit', 500))
polling_location_list = []
polling_location_count = 0
status = ""
try:
if positive_value_exists(election_local_id):
election_on_stage = Election.objects.get(id=election_local_id)
ballotpedia_election_id = election_on_stage.ballotpedia_election_id
google_civic_election_id = election_on_stage.google_civic_election_id
election_state_code = election_on_stage.get_election_state()
election_name = election_on_stage.election_name
is_national_election = election_on_stage.is_national_election
else:
messages.add_message(request, messages.ERROR,
'Could not retrieve Ballotpedia data. Missing election_local_id.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
except Election.MultipleObjectsReturned as e:
messages.add_message(request, messages.ERROR, 'Could not retrieve Ballotpedia data. '
'More than one election found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
except Election.DoesNotExist:
messages.add_message(request, messages.ERROR, 'Could not retrieve Ballotpedia data. '
'Election could not be found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
# Check to see if we have polling location data related to the region(s) covered by this election
# We request the ballot data for each polling location as a way to build up our local data
if not positive_value_exists(state_code) and positive_value_exists(google_civic_election_id):
state_code = election_state_code
if positive_value_exists(is_national_election) and not positive_value_exists(state_code):
messages.add_message(request, messages.ERROR,
'For National elections, a State Code is required in order to run any '
'Ballotpedia data preparation.')
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = polling_location_count_query.filter(state__iexact=state_code)
polling_location_count_query = polling_location_count_query.filter(use_for_bulk_retrieve=True)
polling_location_count_query = polling_location_count_query.filter(polling_location_deleted=False)
polling_location_count = polling_location_count_query.count()
if positive_value_exists(polling_location_count):
polling_location_query = PollingLocation.objects.all()
polling_location_query = polling_location_query.filter(state__iexact=state_code)
polling_location_query = polling_location_query.filter(use_for_bulk_retrieve=True)
polling_location_query = polling_location_query.filter(polling_location_deleted=False)
# We used to have a limit of 500 ballots to pull per election, but now retrieve all
# Ordering by "location_name" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('location_name')[:import_limit]
except Exception as e:
status += "COULD_NOT_FIND_POLLING_LOCATION_LIST " + str(e) + " "
if polling_location_count == 0:
# We didn't find any polling locations marked for bulk retrieve, so just retrieve up to the import_limit
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = \
polling_location_count_query.exclude(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_count_query = \
polling_location_count_query.exclude(Q(zip_long__isnull=True) | Q(zip_long__exact='0') |
Q(zip_long__exact=''))
polling_location_count_query = polling_location_count_query.filter(state__iexact=state_code)
polling_location_count_query = polling_location_count_query.filter(polling_location_deleted=False)
polling_location_count = polling_location_count_query.count()
if positive_value_exists(polling_location_count):
polling_location_query = PollingLocation.objects.all()
polling_location_query = \
polling_location_query.exclude(Q(latitude__isnull=True) | Q(latitude__exact=0.0))
polling_location_query = \
polling_location_query.exclude(Q(zip_long__isnull=True) | Q(zip_long__exact='0') |
Q(zip_long__exact=''))
polling_location_query = polling_location_query.filter(state__iexact=state_code)
polling_location_query = polling_location_query.filter(polling_location_deleted=False)
# Ordering by "location_name" creates a bit of (locational) random order
polling_location_list = polling_location_query.order_by('location_name')[:import_limit]
except PollingLocation.DoesNotExist:
messages.add_message(request, messages.INFO,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations exist for the state \'{state}\'. '
'Data needed from VIP.'.format(
election_name=election_name,
state=state_code))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
if polling_location_count == 0:
messages.add_message(request, messages.ERROR,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations returned for the state \'{state}\'. (error 2)'.format(
election_name=election_name,
state=state_code))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
# If here, we know that we have some polling_locations to use in order to retrieve ballotpedia districts
ballots_retrieved = 0
ballots_not_retrieved = 0
# Step though our set of polling locations, until we find one that contains a ballot. Some won't contain ballots
# due to data quality issues.
if retrieve_races or retrieve_measures or force_district_retrieve_from_ballotpedia:
polling_locations_with_data = 0
polling_locations_without_data = 0
# If here we just want to retrieve the races for this election
merged_district_list = []
for polling_location in polling_location_list:
one_ballot_results = retrieve_ballotpedia_district_id_list_for_polling_location(
google_civic_election_id, polling_location=polling_location,
force_district_retrieve_from_ballotpedia=force_district_retrieve_from_ballotpedia)
success = False
if one_ballot_results['success']:
success = True
ballotpedia_district_id_list = one_ballot_results['ballotpedia_district_id_list']
if len(ballotpedia_district_id_list):
for one_ballotpedia_district_id in ballotpedia_district_id_list:
if one_ballotpedia_district_id not in merged_district_list:
# Build up a list of ballotpedia districts that we need to retrieve races for
merged_district_list.append(one_ballotpedia_district_id)
if success:
polling_locations_with_data += 1
else:
polling_locations_without_data += 1
# Once we have a summary of all ballotpedia districts, we want to request all of the races or measures
if len(merged_district_list):
kind_of_batch = "Unknown"
results = {}
if retrieve_races:
results = retrieve_ballotpedia_offices_by_district_from_api(google_civic_election_id, state_code,
merged_district_list)
kind_of_batch = ""
if 'kind_of_batch' in results:
kind_of_batch = results['kind_of_batch']
if not positive_value_exists(kind_of_batch):
kind_of_batch = CONTEST_OFFICE
status += results['status']
elif retrieve_measures:
results = retrieve_ballotpedia_measures_by_district_from_api(google_civic_election_id, state_code,
merged_district_list)
kind_of_batch = ""
if 'kind_of_batch' in results:
kind_of_batch = results['kind_of_batch']
if not positive_value_exists(kind_of_batch):
kind_of_batch = MEASURE
status += results['status']
batch_header_id = 0
if 'batch_saved' in results and results['batch_saved']:
messages.add_message(request, messages.INFO,
kind_of_batch +
' import batch for {google_civic_election_id} election saved. '
'status: {status}'
''.format(google_civic_election_id=google_civic_election_id,
status=status))
batch_header_id = results['batch_header_id']
elif 'batch_header_id' in results and results['batch_header_id']:
messages.add_message(request, messages.INFO,
kind_of_batch +
' import batch for {google_civic_election_id} election saved, '
'batch_header_id. status: {status}'
''.format(google_civic_election_id=google_civic_election_id,
status=status))
batch_header_id = results['batch_header_id']
else:
messages.add_message(request, messages.ERROR, results['status'])
if positive_value_exists(batch_header_id):
# Go straight to the new batch
return HttpResponseRedirect(reverse('import_export_batches:batch_action_list', args=()) +
"?batch_header_id=" + str(batch_header_id) +
"&kind_of_batch=" + str(kind_of_batch) +
"&google_civic_election_id=" + str(google_civic_election_id))
else:
if retrieve_races:
# Go to the office listing page
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
elif retrieve_measures:
# Go to the measure listing page
return HttpResponseRedirect(reverse('measure:measure_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
messages.add_message(request, messages.INFO,
'Races or measures retrieved from Ballotpedia for the {election_name}. '
'polling_locations_with_data: {polling_locations_with_data}, '
'polling_locations_without_data: {polling_locations_without_data}. '
''.format(
polling_locations_with_data=polling_locations_with_data,
polling_locations_without_data=polling_locations_with_data,
election_name=election_name))
return HttpResponseRedirect(reverse('import_export_batches:batch_set_list', args=()) +
'?kind_of_batch=IMPORT_BALLOTPEDIA_BALLOT_ITEMS' +
'&google_civic_election_id=' + str(google_civic_election_id))
else:
# Create Batch Set for ballot items
import_date = date.today()
batch_set_id = 0
batch_set_name = "Ballotpedia ballot locations for " + election_name + \
" (state " + str(state_code.upper()) + ")" + \
" - ballotpedia: " + str(ballotpedia_election_id) + \
" - " + str(import_date)
# create batch_set object
try:
batch_set = BatchSet.objects.create(batch_set_description_text="", batch_set_name=batch_set_name,
batch_set_source=BATCH_SET_SOURCE_IMPORT_BALLOTPEDIA_BALLOT_ITEMS,
google_civic_election_id=google_civic_election_id,
source_uri=BALLOTPEDIA_API_CONTAINS_URL, import_date=import_date)
batch_set_id = batch_set.id
if positive_value_exists(batch_set_id):
status += " BATCH_SET_SAVED"
success = True
except Exception as e:
# Stop trying to save rows -- break out of the for loop
status += " EXCEPTION_BATCH_SET "
# If here, we assume we have already retrieved races for this election, and now we want to
# put ballot items for this location onto a ballot
for polling_location in polling_location_list:
one_ballot_results = retrieve_ballot_items_from_polling_location(
google_civic_election_id, polling_location=polling_location, batch_set_id=batch_set_id,
state_code=state_code)
success = False
if one_ballot_results['success']:
success = True
if success:
ballots_retrieved += 1
else:
ballots_not_retrieved += 1
# We used to only retrieve up to 500 locations from each state, but we don't limit now
# # Break out of this loop, assuming we have a minimum number of ballots with contests retrieved
# # If we don't achieve the minimum number of ballots_with_contests_retrieved, break out | |
import collections
from collections import defaultdict, OrderedDict
import dill
import json
from .abstract_test import load_test, read_pred_file
from .test_types import MFT, INV, DIR
from .viewer.suite_summarizer import SuiteSummarizer
class TestSuite:
def __init__(self, format_example_fn=None, print_fn=None):
self.tests = OrderedDict()
self.info = defaultdict(lambda: defaultdict(lambda: ''))
self.format_example_fn = format_example_fn
self.print_fn = print_fn
self.test_ranges = {}
@staticmethod
def from_file(path):
"""Loads suite from file
Parameters
----------
path : string
pickled (dill) file
Returns
-------
TestSuite
the suite
"""
return load_test(path)
def add(self, test, name=None, capability=None, description=None, format_example_fn=None, print_fn=None, overwrite=False):
    """Adds a test to suite
    Parameters
    ----------
    test : AbstractTest
        test
    name : string
        test name. If test has test.name, this is optional.
    capability : string
        test capability. If test has test.capability, this is optional.
    description : string
        test description. If test has test.capability, this is optional.
    format_example_fn : function
        If not None, use this to print a failed example within a test case
        Arguments: (x, pred, conf, label=None, meta=None)
    print_fn : function
        If not None, use this to print a failed test case.
        Arguments: (xs, preds, confs, expect_results, labels=None, meta=None)
    overwrite : bool
        If False, will raise exception if test with same name is already in suite.
    """
    if name is None and test.name is None:
        raise(Exception('If test does not have test.name, you must specify a name'))
    if capability is None and test.capability is None:
        raise(Exception('If test does not have test.capabiliy, you must specify a capability'))
    if name is None:
        name = test.name
    if capability is None:
        capability = test.capability
    if description is None:
        description = test.description
    if name in self.tests and not overwrite:
        raise(Exception('There is already a test named %s suite. Run with overwrite=True to overwrite' % name))
    # Drop stale metadata from a previous test of the same name.
    if name in self.info:
        del self.info[name]
    # Map the concrete test class to its short type label.
    type_map = {
        MFT: 'MFT',
        INV: 'INV',
        DIR: 'DIR',
    }
    typez = type_map[type(test)]
    self.tests[name] = test
    self.info[name]['capability'] = capability
    self.info[name]['type'] = typez
    if description:
        self.info[name]['description'] = description
    if format_example_fn:
        self.info[name]['format_example_fn'] = format_example_fn
    if print_fn:
        # BUG FIX: previously stored format_example_fn under 'print_fn',
        # so a custom print function passed here was silently ignored.
        self.info[name]['print_fn'] = print_fn
def remove(self, name):
"""Removes test from suite
Parameters
----------
name : string
test name
"""
if name not in self.tests:
raise(Exception('%s not in suite.' % name))
del self.tests[name]
del self.info[name]
def to_dict(self, example_to_dict_fn=None, n=None, seed=None, new_sample=False):
    """Flatten the whole suite into a columnar dict (e.g. to build a dataset).

    Parameters
    ----------
    example_to_dict_fn : function or None
        Maps one raw example to a dict of column -> value. If None, falls
        back to self.example_to_dict_fn (which must have been set elsewhere).
    n : int or None
        If not None, number of samples to draw per test.
    seed : int or None
        Seed used when n is not None.
    new_sample : bool
        If True, draw a fresh sample; otherwise reuse the previous one.

    Returns
    -------
    dict
        Column name -> list of values. Contains the example columns plus the
        bookkeeping columns 'test_name', 'test_case' and 'example_idx'.
    """
    if example_to_dict_fn is None:
        try:
            example_to_dict_fn = self.example_to_dict_fn
        except AttributeError:
            raise(Exception('suite does not have example_to_dict_fn, must pass function as argument.'))
    # Raw examples come back in the same order recorded in self.test_ranges.
    examples = self.get_raw_examples(format_fn=lambda x:x, n=n, seed=seed, new_sample=new_sample)
    # Column names are taken from the first example's mapping; all examples
    # are assumed to map to the same keys — TODO confirm with callers.
    data_keys = list(example_to_dict_fn(examples[0]).keys())
    keys = data_keys + ['test_name', 'test_case', 'example_idx']
    hf_dict = { k:[] for k in keys }
    for e in examples:
        m = example_to_dict_fn(e)
        for k,v in m.items():
            hf_dict[k].append(v)
    # Walk tests in the order their examples appear in the flattened list,
    # so the bookkeeping columns line up with the example columns.
    for test_name, r in sorted(self.test_ranges.items(), key=lambda x:x[1][0]):
        test = self.tests[test_name]
        size = r[1] - r[0]
        hf_dict['test_name'].extend([test_name for _ in range(size)])
        hf_dict['test_case'].extend(test.result_indexes)
        # 'example_idx' counts examples within a single test case: the k-th
        # occurrence of a test-case index gets example_idx == k.
        cnt = collections.defaultdict(lambda: 0)
        example_idx = []
        for i in test.result_indexes:
            example_idx.append(cnt[i])
            cnt[i] += 1
        hf_dict['example_idx'].extend(example_idx)
    return hf_dict
def get_raw_examples(self, file_format=None, format_fn=None, n=None, seed=None, new_sample=True):
if new_sample or len(self.test_ranges) == 0:
self.test_ranges = {}
all_examples = self.create_raw_example_list(file_format=file_format, format_fn=format_fn, n=n, seed=seed)
else:
all_examples = self.get_raw_example_list(file_format=file_format, format_fn=format_fn)
return all_examples
def get_raw_example_list(self, file_format=None, format_fn=None):
if not self.test_ranges:
raise(Exception('example list not created. please call create_raw_example_list, or to_raw_file first'))
examples = []
for test_name, r in sorted(self.test_ranges.items(), key=lambda x:x[1][0]):
test = self.tests[test_name]
test_examples = test.to_raw_examples(file_format=file_format, format_fn=format_fn,
n=None, seed=None, new_sample=False)
assert len(test_examples) == r[1] - r[0]
examples.extend(test_examples)
return examples
def create_raw_example_list(self, file_format, format_fn, n, seed):
self.test_ranges = {}
current_idx = 0
all_examples = []
for name, t in self.tests.items():
examples = t.to_raw_examples(file_format=file_format, format_fn=format_fn, n=n, seed=seed, new_sample=True)
self.test_ranges[name] = (current_idx, current_idx + len(examples))
current_idx += len(examples)
all_examples.extend(examples)
return all_examples
def to_raw_file(self, path, file_format=None, format_fn=None, header=None, n=None, seed=None, new_sample=True):
"""Flatten all tests into individual examples and print them to file.
Indices of example to test case will be stored in each test.
If n is not None, test.run_idxs will store the test case indexes.
The line ranges for each test will be saved in self.test_ranges.
Parameters
----------
path : string
File path
file_format : string, must be one of 'jsonl', 'squad', 'qqp_test', or None
None just calls str(x) for each example in self.data
squad assumes x has x['question'] and x['passage'], or that format_fn does this
format_fn : function or None
If not None, call this function to format each example in self.data
header : string
If not None, first line of file
n : int
If not None, number of samples to draw
seed : int
Seed to use if n is not None
new_sample: bool
If False, will rely on a previous sample and ignore the 'n' and 'seed' parameters
"""
ret = ''
all_examples = []
add_id = False
if file_format == 'qqp_test':
add_id = True
file_format = 'tsv'
header = 'id\tquestion1\tquestion2'
if header is not None:
ret += header.strip('\n') + '\n'
all_examples = self.get_raw_examples(file_format=file_format, format_fn=format_fn, n=n, seed=seed, new_sample=new_sample)
if add_id and file_format == 'tsv':
all_examples = ['%d\t%s' % (i, x) for i, x in enumerate(all_examples)]
if file_format == 'squad':
ret_map = {'version': 'fake',
'data': []}
for i, x in enumerate(all_examples):
r = {'title': '',
'paragraphs': [{
'context': x['passage'],
'qas': [{'question' : x['question'],
'id': str(i)
}]
}]
}
ret_map['data'].append(r)
ret = json.dumps(ret_map)
else:
ret += '\n'.join(all_examples)
f = open(path, 'w')
f.write(ret)
f.close()
def run_from_preds_confs(self, preds, confs, overwrite):
for n, t in self.tests.items():
p = preds[slice(*self.test_ranges[n])]
c = confs[slice(*self.test_ranges[n])]
t.run_from_preds_confs(p, c, overwrite=overwrite)
def run_from_file(self, path, file_format=None, format_fn=None, ignore_header=False, overwrite=False):
    """Run every test from a prediction file, updating each test's results.

    Parameters
    ----------
    path : string
        Path to the prediction file.
    file_format : string
        None, or one of 'pred_only', 'softmax', binary_conf', 'pred_and_conf',
        'pred_and_softmax', 'squad':
        pred_only: each line has a prediction
        softmax: each line has prediction probabilities separated by spaces
        binary_conf: each line has the prediction probability of class 1 (binary)
        pred_and_conf: each line has a prediction and a confidence value, separated by a space
        pred_and_softmax: each line has a prediction and all softmax probabilities, separated by a space
        squad: TODO
    format_fn : function
        If not None, reads one line of the file and returns a
        (prediction, confidence) tuple.
    ignore_header : bool
        If True, skip the first line of the file.
    overwrite : bool
        If False, raise an exception when results already exist.
    """
    # Parse the whole file once, then dispatch slices per test.
    preds, confs = read_pred_file(
        path, file_format=file_format, format_fn=format_fn, ignore_header=ignore_header)
    self.run_from_preds_confs(preds, confs, overwrite=overwrite)
def run(self, predict_and_confidence_fn, verbose=True, **kwargs):
"""Runs all tests in the suite
See run in abstract_test.py .
Parameters
----------
predict_and_confidence_fn : function
Takes as input a list of examples
Outputs a tuple (predictions, confidences)
overwrite : bool
If False, raise exception if results already exist
verbose : bool
If True, print extra information
n : int
If not None, number of samples to draw
seed : int
Seed to use if n is not None
"""
for n, t in self.tests.items():
if verbose:
print('Running', n)
t.run(predict_and_confidence_fn, verbose=verbose, **kwargs)
def summary(self, types=None, capabilities=None, **kwargs):
"""Print stats and example failures for each test.
See summary in abstract_test.py
Parameters
----------
types : list(string)
If not None, will only show tests of these test types.
Options are MFT, INV, and DIR
capabilities : list(string)
If not None, will only show tests with these capabilities.
**kwargs : type
Will be passed as arguments to each test.summary()
"""
vals = collections.defaultdict(lambda: 100, {'MFT': 0, 'INV': 1, 'DIR': 2})
tests = self.tests.keys()
capability_order = ['Vocabulary', 'Taxonomy', 'Robustness', 'NER', 'Fairness', 'Temporal', 'Negation', 'Coref', 'SRL', 'Logic']
cap_order = lambda x:capability_order.index(x) if x in capability_order else 100
caps = sorted(set([x['capability'] for x in self.info.values()]), key=cap_order)
for capability in caps:
if capabilities is not None and capability not in capabilities:
continue
print(capability)
print()
tests = [x for x in self.tests if self.info[x]['capability'] == capability]
for n in tests:
if types is not None and self.info[n]['type'] not in types:
continue
print(n)
if 'format_example_fn' not in kwargs:
kwargs['format_example_fn'] = self.info[n].get('format_example_fn', self.format_example_fn)
if 'print_fn' not in kwargs:
kwargs['print_fn'] = self.info[n].get('print_fn', self.print_fn)
self.tests[n].summary(**kwargs)
print()
print()
print()
print()
def visual_summary_by_test(self, testname):
"""Displays visual summary for a single test.
Parameters
----------
testname : string
name of the test
Returns
-------
test.visual_summary
summary
"""
if not testname in self.tests:
raise(Exception(f"There's no test | |
887617, 887629, 887633, 887641, 887651,
887657, 887659, 887669, 887671, 887681, 887693, 887701, 887707,
887717, 887743, 887749, 887759, 887819, 887827, 887837, 887839,
887849, 887867, 887903, 887911, 887921, 887923, 887941, 887947,
887987, 887989, 888001, 888011, 888047, 888059, 888061, 888077,
888091, 888103, 888109, 888133, 888143, 888157, 888161, 888163,
888179, 888203, 888211, 888247, 888257, 888263, 888271, 888287,
888313, 888319, 888323, 888359, 888361, 888373, 888389, 888397,
888409, 888413, 888427, 888431, 888443, 888451, 888457, 888469,
888479, 888493, 888499, 888533, 888541, 888557, 888623, 888631,
888637, 888653, 888659, 888661, 888683, 888689, 888691, 888721,
888737, 888751, 888761, 888773, 888779, 888781, 888793, 888799,
888809, 888827, 888857, 888869, 888871, 888887, 888917, 888919,
888931, 888959, 888961, 888967, 888983, 888989, 888997, 889001,
889027, 889037, 889039, 889043, 889051, 889069, 889081, 889087,
889123, 889139, 889171, 889177, 889211, 889237, 889247, 889261,
889271, 889279, 889289, 889309, 889313, 889327, 889337, 889349,
889351, 889363, 889367, 889373, 889391, 889411, 889429, 889439,
889453, 889481, 889489, 889501, 889519, 889579, 889589, 889597,
889631, 889639, 889657, 889673, 889687, 889697, 889699, 889703,
889727, 889747, 889769, 889783, 889829, 889871, 889873, 889877,
889879, 889891, 889901, 889907, 889909, 889921, 889937, 889951,
889957, 889963, 889997, 890003, 890011, 890027, 890053, 890063,
890083, 890107, 890111, 890117, 890119, 890129, 890147, 890159,
890161, 890177, 890221, 890231, 890237, 890287, 890291, 890303,
890317, 890333, 890371, 890377, 890419, 890429, 890437, 890441,
890459, 890467, 890501, 890531, 890543, 890551, 890563, 890597,
890609, 890653, 890657, 890671, 890683, 890707, 890711, 890717,
890737, 890761, 890789, 890797, 890803, 890809, 890821, 890833,
890843, 890861, 890863, 890867, 890881, 890887, 890893, 890927,
890933, 890941, 890957, 890963, 890969, 890993, 890999, 891001,
891017, 891047, 891049, 891061, 891067, 891091, 891101, 891103,
891133, 891151, 891161, 891173, 891179, 891223, 891239, 891251,
891277, 891287, 891311, 891323, 891329, 891349, 891377, 891379,
891389, 891391, 891409, 891421, 891427, 891439, 891481, 891487,
891491, 891493, 891509, 891521, 891523, 891551, 891557, 891559,
891563, 891571, 891577, 891587, 891593, 891601, 891617, 891629,
891643, 891647, 891659, 891661, 891677, 891679, 891707, 891743,
891749, 891763, 891767, 891797, 891799, 891809, 891817, 891823,
891827, 891829, 891851, 891859, 891887, 891889, 891893, 891899,
891907, 891923, 891929, 891967, 891983, 891991, 891997, 892019,
892027, 892049, 892057, 892079, 892091, 892093, 892097, 892103,
892123, 892141, 892153, 892159, 892169, 892189, 892219, 892237,
892249, 892253, 892261, 892267, 892271, 892291, 892321, 892351,
892357, 892387, 892391, 892421, 892433, 892439, 892457, 892471,
892481, 892513, 892523, 892531, 892547, 892553, 892559, 892579,
892597, 892603, 892609, 892627, 892643, 892657, 892663, 892667,
892709, 892733, 892747, 892757, 892763, 892777, 892781, 892783,
892817, 892841, 892849, 892861, 892877, 892901, 892919, 892933,
892951, 892973, 892987, 892999, 893003, 893023, 893029, 893033,
893041, 893051, 893059, 893093, 893099, 893107, 893111, 893117,
893119, 893131, 893147, 893149, 893161, 893183, 893213, 893219,
893227, 893237, 893257, 893261, 893281, 893317, 893339, 893341,
893351, 893359, 893363, 893381, 893383, 893407, 893413, 893419,
893429, 893441, 893449, 893479, 893489, 893509, 893521, 893549,
893567, 893591, 893603, 893609, 893653, 893657, 893671, 893681,
893701, 893719, 893723, 893743, 893777, 893797, 893821, 893839,
893857, 893863, 893873, 893881, 893897, 893903, 893917, 893929,
893933, 893939, 893989, 893999, 894011, 894037, 894059, 894067,
894073, 894097, 894109, 894119, 894137, 894139, 894151, 894161,
894167, 894181, 894191, 894193, 894203, 894209, 894211, 894221,
894227, 894233, 894239, 894247, 894259, 894277, 894281, 894287,
894301, 894329, 894343, 894371, 894391, 894403, 894407, 894409,
894419, 894427, 894431, 894449, 894451, 894503, 894511, 894521,
894527, 894541, 894547, 894559, 894581, 894589, 894611, 894613,
894637, 894643, 894667, 894689, 894709, 894713, 894721, 894731,
894749, 894763, 894779, 894791, 894793, 894811, 894869, 894871,
894893, 894917, 894923, 894947, 894973, 894997, 895003, 895007,
895009, 895039, 895049, 895051, 895079, 895087, 895127, 895133,
895151, 895157, 895159, 895171, 895189, 895211, 895231, 895241,
895243, 895247, 895253, 895277, 895283, 895291, 895309, 895313,
895319, 895333, 895343, 895351, 895357, 895361, 895387, 895393,
895421, 895423, 895457, 895463, 895469, 895471, 895507, 895529,
895553, 895571, 895579, 895591, 895613, 895627, 895633, 895649,
895651, 895667, 895669, 895673, 895681, 895691, 895703, 895709,
895721, 895729, 895757, 895771, 895777, 895787, 895789, 895799,
895801, 895813, 895823, 895841, 895861, 895879, 895889, 895901,
895903, 895913, 895927, 895933, 895957, 895987, 896003, 896009,
896047, 896069, 896101, 896107, 896111, 896113, 896123, 896143,
896167, 896191, 896201, 896263, 896281, 896293, 896297, 896299,
896323, 896327, 896341, 896347, 896353, 896369, 896381, 896417,
896443, 896447, 896449, 896453, 896479, 896491, 896509, 896521,
896531, 896537, 896543, 896549, 896557, 896561, 896573, 896587,
896617, 896633, 896647, 896669, 896677, 896681, 896717, 896719,
896723, 896771, 896783, 896803, 896837, 896867, 896879, 896897,
896921, 896927, 896947, 896953, 896963, 896983, 897007, 897011,
897019, 897049, 897053, 897059, 897067, 897077, 897101, 897103,
897119, 897133, 897137, 897157, 897163, 897191, 897223, 897229,
897241, 897251, 897263, 897269, 897271, 897301, 897307, 897317,
897319, 897329, 897349, 897359, 897373, 897401, 897433, 897443,
897461, 897467, 897469, 897473, 897497, 897499, 897517, 897527,
897553, 897557, 897563, 897571, 897577, 897581, 897593, 897601,
897607, 897629, 897647, 897649, 897671, 897691, 897703, 897707,
897709, 897727, 897751, 897779, 897781, 897817, 897829, 897847,
897877, 897881, 897887, 897899, 897907, 897931, 897947, 897971,
897983, 898013, 898019, 898033, 898063, 898067, 898069, 898091,
898097, 898109, 898129, 898133, 898147, 898153, 898171, 898181,
898189, 898199, 898211, 898213, 898223, 898231, 898241, 898243,
898253, 898259, 898279, 898283, 898291, 898307, 898319, 898327,
898361, 898369, 898409, 898421, 898423, 898427, 898439, 898459,
898477, 898481, 898483, 898493, 898519, 898523, 898543, 898549,
898553, 898561, 898607, 898613, 898621, 898661, 898663, 898669,
898673, 898691, 898717, 898727, 898753, 898763, 898769, 898787,
898813, 898819, 898823, 898853, 898867, 898873, 898889, 898897,
898921, 898927, 898951, 898981, 898987, 899009, 899051, 899057,
899069, 899123, 899149, 899153, 899159, 899161, 899177, 899179,
899183, 899189, 899209, 899221, 899233, 899237, 899263, 899273,
899291, 899309, 899321, 899387, 899401, 899413, 899429, 899447,
899467, 899473, 899477, 899491, 899519, 899531, 899537, 899611,
899617, 899659, 899671, 899681, 899687, 899693, 899711, 899719,
899749, 899753, 899761, 899779, 899791, 899807, 899831, 899849,
899851, 899863, 899881, 899891, 899893, 899903, 899917, 899939,
899971, 899981, 900001, 900007, 900019, 900037, 900061, 900089,
900091, 900103, 900121, 900139, 900143, 900149, 900157, 900161,
900169, 900187, 900217, 900233, 900241, 900253, 900259, 900283,
900287, 900293, 900307, 900329, 900331, 900349, 900397, 900409,
900443, 900461, 900481, 900491, 900511, 900539, 900551, 900553,
900563, 900569, 900577, 900583, 900587, 900589, 900593, 900607,
900623, 900649, 900659, 900671, 900673, 900689, 900701, 900719,
900737, 900743, 900751, 900761, 900763, 900773, 900797, 900803,
900817, 900821, 900863, 900869, 900917, 900929, 900931, 900937,
900959, 900971, 900973, 900997, 901007, 901009, 901013, 901063,
901067, 901079, 901093, 901097, 901111, 901133, 901141, 901169,
901171, 901177, 901183, 901193, 901207, 901211, 901213, 901247,
901249, 901253, 901273, 901279, 901309, 901333, 901339, 901367,
901399, 901403, 901423, 901427, 901429, 901441, 901447, 901451,
901457, 901471, 901489, 901499, 901501, 901513, 901517, 901529,
901547, 901567, 901591, 901613, 901643, 901657, 901679, 901687,
901709, 901717, 901739, 901741, 901751, 901781, 901787, 901811,
901819, 901841, 901861, 901891, 901907, 901909, 901919, 901931,
901937, 901963, 901973, 901993, 901997, 902009, 902017, 902029,
902039, 902047, 902053, 902087, 902089, 902119, 902137, 902141,
902179, 902191, 902201, 902227, 902261, 902263, 902281, 902299,
902303, 902311, 902333, 902347, 902351, 902357, 902389, 902401,
902413, 902437, 902449, 902471, 902477, 902483, 902501, 902507,
902521, 902563, 902569, 902579, 902591, 902597, 902599, 902611,
902639, 902653, 902659, 902669, 902677, 902687, 902719, 902723,
902753, 902761, 902767, 902771, 902777, 902789, 902807, 902821,
902827, 902849, 902873, 902903, 902933, 902953, 902963, 902971,
902977, 902981, 902987, 903017, 903029, 903037, 903073, 903079,
903103, 903109, 903143, 903151, 903163, 903179, 903197, 903211,
903223, 903251, 903257, 903269, 903311, 903323, 903337, 903347,
903359, 903367, 903389, 903391, 903403, 903407, 903421, 903443,
903449, 903451, 903457, 903479, 903493, 903527, 903541, 903547,
903563, 903569, 903607, 903613, 903641, 903649, 903673, 903677,
903691, 903701, 903709, 903751, 903757, 903761, 903781, 903803,
903827, 903841, 903871, 903883, 903899, 903913, 903919, 903949,
903967, 903979, 904019, 904027, 904049, 904067, 904069, 904073,
904087, 904093, 904097, 904103, 904117, 904121, 904147, 904157,
904181, 904193, 904201, 904207, 904217, 904219, 904261, 904283,
904289, 904297, 904303, 904357, 904361, 904369, 904399, 904441,
904459, 904483, 904489, 904499, 904511, 904513, 904517, 904523,
904531, 904559, 904573, 904577, 904601, 904619, 904627, 904633,
904637, 904643, 904661, 904663, 904667, 904679, 904681, 904693,
904697, 904721, 904727, 904733, 904759, 904769, 904777, 904781,
904789, 904793, 904801, 904811, 904823, 904847, 904861, 904867,
904873, 904879, 904901, 904903, 904907, 904919, 904931, 904933,
904987, 904997, 904999, 905011, 905053, 905059, 905071, 905083,
905087, 905111, 905123, 905137, 905143, 905147, 905161, 905167,
905171, 905189, 905197, 905207, 905209, 905213, 905227, 905249,
905269, 905291, 905297, | |
<filename>fabfile/data.py
#!/usr/bin/env python
"""
Commands that update or process the application data.
"""
import app_config
import csv
import json
import logging
import math
import os
import re
from time import sleep
import copytext
from fabric.api import execute, hide, local, task, settings, shell_env
from fabric.state import env
from models import models
import requests
import yaml
# Module-level logger configured from the app's shared settings.
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
# Census Reporter API endpoint (ACS 2012-2016 5-year estimates).
CENSUS_REPORTER_URL = 'https://api.censusreporter.org/1.0/data/show/acs2016_5yr'
# '05000US' + county FIPS code = Census Reporter geography id for a county.
FIPS_TEMPLATE = '05000US{0}'
# ACS tables requested per county — presumably population, race, hispanic
# origin, median income, and education; verify against extract_census_data.
CENSUS_TABLES = ['B01003', 'B02001', 'B03002', 'B19013', 'B15001']
@task
def bootstrap_db():
    """
    Build the database.

    Runs the full setup sequence: create the database, create its tables,
    do an initial results load, then seed the calls and race-metadata tables.
    """
    create_db()
    create_tables()
    load_results(initialize=True)
    create_calls()
    create_race_meta()
@task
def create_db():
    """
    Drop and re-create the application's Postgres database.

    When a deploy environment is active (env has 'settings'), the uwsgi and
    results-fetching services are stopped first so no connections hold the
    database open, and restarted afterwards.
    """
    with settings(warn_only=True), hide('output', 'running'):
        if env.get('settings'):
            execute('servers.stop_service', 'uwsgi')
            execute('servers.stop_service', 'fetch_and_publish_results')
        # Expose the PG* connection variables to the shell for dropdb/createdb.
        with shell_env(**app_config.database):
            local('dropdb --host={PGHOST} --port={PGPORT} --username={PGUSER} --if-exists {PGDATABASE}'.format(**app_config.database))
            local('createdb --host={PGHOST} --port={PGPORT} --username={PGUSER} {PGDATABASE}'.format(**app_config.database))
        if env.get('settings'):
            execute('servers.start_service', 'uwsgi')
            execute('servers.start_service', 'fetch_and_publish_results')
@task
def create_tables():
    """Create the result, call, and race-metadata tables (assumes the database exists)."""
    models.Result.create_table()
    models.Call.create_table()
    models.RaceMeta.create_table()
@task
def delete_results():
    """
    Delete results without droppping database.
    """
    # Empty WHERE clause: clears the entire result table.
    where_clause = ''
    with shell_env(**app_config.database), hide('output', 'running'):
        # Bypass the foreign-key constraint on deletion by using `session_replication_role`.
        # This is an opaque hack, and should be replaced with clearer,
        # more SQL-native database logic in the future
        local('psql {0} -c "set session_replication_role = replica; DELETE FROM result {1}; set session_replication_role = default;"'.format(
            app_config.database['PGURI'],
            where_clause
        ))
def get_valid_filename(s):
    """Sanitize *s* for use as a filename.

    Leading and trailing whitespace is removed, interior spaces become
    underscores, and any character that is not alphanumeric, a dash, an
    underscore, or a dot is dropped.

    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'

    Function sourced from Django 2.1
    https://github.com/django/django/blob/master/django/utils/text.py
    """
    underscored = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', underscored)
@task
def load_results(initialize=False):
    """
    Load AP results. Defaults to next election, or specify a date as a parameter.

    Parameters
    ----------
    initialize : bool
        If True, use the initialization flag sets instead of the regular
        incremental ones (inferred from the ELEX_INIT_FLAG_SETS name —
        TODO confirm against app_config).
    """
    if initialize is True:
        flag_sets = app_config.ELEX_INIT_FLAG_SETS
    else:
        flag_sets = app_config.ELEX_FLAG_SETS
    if not os.path.isdir(app_config.ELEX_OUTPUT_FOLDER):
        os.makedirs(app_config.ELEX_OUTPUT_FOLDER)
    RESULTS_FILENAME_PREFIX = 'results-'
    # Need separate filenames for the different possible elex flag sets,
    # so the simplest way is to use a hash of those flag-strings
    cmds = [
        'elex results {0} {1} > {2}'.format(
            flag_set,
            app_config.NEXT_ELECTION_DATE,
            os.path.join(
                app_config.ELEX_OUTPUT_FOLDER,
                RESULTS_FILENAME_PREFIX + get_valid_filename(flag_set) + '.csv'
            )
        )
        for flag_set in flag_sets
    ]
    with shell_env(**app_config.database):
        for cmd in cmds:
            # The `warn_only` option turns errors into warning messages
            # This allows us to handle errors on our own terms,
            # like the `64` code below
            with settings(warn_only=True), hide('output', 'running'):
                cmd_output = local(cmd, capture=True)
            # `elex` exit code `64` indicates that no new data was found,
            # and that the previous set of results will be re-used instead
            if not cmd_output.succeeded and cmd_output.return_code != 64:
                logger.critical("ERROR GETTING RESULTS")
                logger.critical(cmd_output.stderr)
                break
        else:
            # for/else: only reached when no command failed hard (no break),
            # so it is safe to wipe and re-load the result table.
            delete_results()
            results_filenames = [
                os.path.join(
                    app_config.ELEX_OUTPUT_FOLDER,
                    RESULTS_FILENAME_PREFIX + get_valid_filename(flag_set) + '.csv'
                )
                for flag_set in flag_sets
            ]
            with hide('output', 'running'):
                # Stack every per-flag-set CSV and bulk-load in a single COPY.
                local('csvstack {0} | psql {1} -c "COPY result FROM stdin DELIMITER \',\' CSV HEADER;"'.format(
                    ' '.join(results_filenames),
                    app_config.database['PGURI']
                ))
            # Implement candidate party overrides, in a way that's
            # transparent to all downstream parts of the data processing
            with hide('output', 'running'):
                for party, polids in app_config.PARTY_OVERRIDES.items():
                    local('''psql {} -c "
                        UPDATE result
                        SET party = '{}'
                        WHERE polID = ANY('{{ {} }}'::text[]);
                    "'''.format(
                        app_config.database['PGURI'],
                        party,
                        ','.join(polids)
                    ))
            logger.info('results loaded')
@task
def fetch_ftp_results():
    """Run the elex_ftp helper and capture its flat CSV output.

    Writes the gathered results to <ELEX_OUTPUT_FOLDER>/ftp_results.csv.
    """
    command = 'python elex_ftp {0} > {1}/ftp_results.csv'.format(
        app_config.ELEX_FTP_FLAGS,
        app_config.ELEX_OUTPUT_FOLDER)
    with hide('output', 'running'):
        local(command)
    logger.info('ftp results fetched')
@task
def create_calls():
    """
    Create database of race calls for all races in results data.
    """
    # Rebuild from scratch: drop all existing calls, then create one Call
    # row per state-level result.
    models.Call.delete().execute()
    results = models.Result.select().where(models.Result.level == 'state')
    for result in results:
        models.Call.create(call_id=result.id)
@task
def create_race_meta():
    """
    Rebuild the RaceMeta table by joining Result rows against the race
    calendar spreadsheet (poll times, seat/party info, ballot measures).
    """
    models.RaceMeta.delete().execute()
    calendar = copytext.Copy(app_config.CALENDAR_PATH)
    calendar_sheet = calendar['poll_times']
    senate_sheet = calendar['senate_seats']
    house_sheet = calendar['house_seats']
    governor_sheet = calendar['governorships']
    ballot_measure_sheet = calendar['ballot_measures']
    results = models.Result.select()
    for result in results:
        meta_obj = {
            'result_id': result.id
        }
        # County/township rows carry no race metadata of their own.
        if result.level == 'county' or result.level == 'township':
            continue
        # Poll-closing info applies to state/district rows, but not to the
        # national 'US' roll-up.
        if (result.level == 'state' or result.level == 'district') \
                and result.statepostal != 'US':
            calendar_row = list(filter(lambda x: x['key'] == result.statepostal, calendar_sheet))[0]
            meta_obj['poll_closing'] = calendar_row['time_est']
            meta_obj['first_results'] = calendar_row['first_results_est']
            meta_obj['full_poll_closing'] = calendar_row['time_all_est']
        # Ignore special House elections, to avoid mis-assigning metadata
        # These races should still get the poll metadata from above
        if result.level == 'state' and \
                result.officename == 'U.S. House' and \
                not result.is_special_election:
            seat = '{0}-{1}'.format(result.statepostal, result.seatnum)
            house_rows = list(filter(
                lambda x: x['seat'] == seat,
                house_sheet
            ))
            assert len(house_rows) == 1, "Could not properly match Result to House spreadsheet"
            house_row = house_rows[0]
            meta_obj['current_party'] = house_row['party']
            # Handle non-voting members that are tracked in our visuals,
            # such as DC's House representative
            meta_obj['voting_member'] = (house_row['voting_member'] == 'True')
            meta_obj['key_race'] = (house_row['key_race'] == 'True')
        if result.level == 'state' and result.officename == 'U.S. Senate':
            senate_rows = list(filter(
                # Make sure to assign special election metadata accurately
                # This doesn't need to happen for any other office type,
                # since no other office has special elections that matter
                # _and_ has multiple seats per state
                lambda x: x['state'] == result.statepostal and result.is_special_election == (x['special'] == 'True'),
                senate_sheet
            ))
            assert len(senate_rows) == 1, "Could not properly match Result to Senate spreadsheet"
            senate_row = senate_rows[0]
            meta_obj['current_party'] = senate_row['party']
        if result.level == 'state' and result.officename == 'Governor':
            governor_rows = list(filter(
                lambda x: x['state'] == result.statepostal,
                governor_sheet
            ))
            assert len(governor_rows) == 1, "Could not properly match Result to governor spreadsheet"
            governor_row = governor_rows[0]
            meta_obj['current_party'] = governor_row['party']
        if result.level == 'state' and result.is_ballot_measure:
            measure_rows = list(filter(
                lambda x: x['state'] == result.statepostal and x['raceid'] == result.raceid,
                ballot_measure_sheet
            ))
            assert len(measure_rows) == 1, "Could not properly match Result to ballot-measure spreadsheet"
            measure_row = measure_rows[0]
            meta_obj['ballot_measure_theme'] = measure_row['big_board_theme']
        models.RaceMeta.create(**meta_obj)
@task
def copy_data_for_graphics():
    """Copy every rendered JSON data file into the local graphics data folder."""
    assert os.path.isdir(app_config.GRAPHICS_DATA_OUTPUT_FOLDER), \
        "Make sure that the local data output directory exists: `{}`".format(app_config.GRAPHICS_DATA_OUTPUT_FOLDER)
    with hide('output', 'running'):
        local('cp -r {0}/*.json {1}'.format(
            app_config.DATA_OUTPUT_FOLDER,
            app_config.GRAPHICS_DATA_OUTPUT_FOLDER
        ))
@task
def build_current_congress():
    """
    Build data/house-seats.csv and data/senate-seats.csv from the
    legislators-current.yaml dataset, keeping only members whose current
    term ends in 2017.
    """
    # Map the dataset's party names onto our short labels.
    party_dict = {
        'Democrat': 'Dem',
        'Republican': 'GOP',
        'Independent': 'Ind'
    }
    house_fieldnames = ['first', 'last', 'party', 'state', 'seat']
    senate_fieldnames = ['first', 'last', 'party', 'state']
    with open('data/house-seats.csv', 'w') as h, open('data/senate-seats.csv', 'w') as s:
        house_writer = csv.DictWriter(h, fieldnames=house_fieldnames)
        house_writer.writeheader()
        senate_writer = csv.DictWriter(s, fieldnames=senate_fieldnames)
        senate_writer.writeheader()
        with open('etc/legislators-current.yaml') as f:
            # Fix: yaml.load without an explicit Loader is deprecated and can
            # construct arbitrary Python objects; this file is plain data,
            # so safe_load yields identical output without the risk.
            data = yaml.safe_load(f)
        for legislator in data:
            current_term = legislator['terms'][-1]
            if current_term['end'][:4] == '2017':
                obj = {
                    'first': legislator['name']['first'],
                    'last': legislator['name']['last'],
                    'state': current_term['state'],
                    'party': party_dict[current_term['party']]
                }
                # Only House members have a district; senators get no 'seat'.
                if current_term.get('district'):
                    obj['seat'] = '{0}-{1}'.format(current_term['state'], current_term['district'])
                if current_term['type'] == 'sen':
                    senate_writer.writerow(obj)
                elif current_term['type'] == 'rep':
                    house_writer.writerow(obj)
@task
def write_unemployment_csv(start_state='AA'):
    """
    Write county-level unemployment data to data/unemployment.csv.
    Will overwrite anything that was there.
    Assumes you have a document in data/unemployment.tsv
    that is similar to https://www.bls.gov/lau/laucnty17.txt
    which was found at https://www.bls.gov/lau/#cntyaa
    """
    # NOTE(review): not implemented yet; the expected input row format is
    # sketched below.
    pass
    # LAUS Code,State FIPS Code,County FIPS Code,County Name/State Abbreviation,Year,Labor Force,Employed,Unemployed,Unemployment Rate (%)
    # CN0100100000000,01,001,"Autauga County, AL",2015,"25,308 ","23,981 ","1,327 ",5.2
    # CN0100300000000,01,003,"Baldwin County, AL",2015,"87,316 ","82,525 ","4,791 ",5.5
@task
def get_census_data(start_state='AA'):
    """
    Fetch ACS census tables from Census Reporter for every county in the
    results data, one state at a time, writing each state's payload to
    data/census/<STATE>.json.

    Parameters
    ----------
    start_state : string
        Resume support: states sorting at or before this postal code are
        skipped (the default 'AA' skips nothing).
    """
    # SD 46102 manually entered from 2012-2016 American Community Survey 5-Year Estimates
    state_results = models.Result.select(models.Result.statepostal).distinct().order_by(models.Result.statepostal)
    for state_result in state_results:
        state = state_result.statepostal
        # Equivalent to the old sorted([start_state, state]) comparison:
        # skip states that sort at or before the resume point.
        if state <= start_state:
            # Fix: the old calls were logging.info('skipping', state) — the
            # extra arg is treated as a %-format argument for a message with
            # no placeholder, which logging reports as a formatting error.
            # Also use the module-level logger instead of the root logger.
            logger.info('skipping %s', state)
            continue
        logger.info('getting %s', state)
        output = {}
        fips_results = models.Result.select(models.Result.fipscode).distinct().where(models.Result.statepostal == state).order_by(models.Result.fipscode)
        count = 0
        total = len(fips_results)
        for result in fips_results:
            if not result.fipscode:
                continue
            count += 1
            if result.fipscode == '02000':
                # Statewide Alaska code: query the state geography instead.
                geo_id = '04000US02'
            elif result.fipscode == '46102':
                # Oglala Lakota County, SD — Census Reporter still indexes it
                # under the old Shannon County code (see note above).
                geo_id = FIPS_TEMPLATE.format('46113')
            else:
                geo_id = FIPS_TEMPLATE.format(result.fipscode)
            params = {
                'geo_ids': geo_id,
                'table_ids': ','.join(CENSUS_TABLES)
            }
            response = requests.get(CENSUS_REPORTER_URL, params=params)
            if response.status_code == 200:
                print('fipscode succeeded', result.fipscode, count, 'counties done, out of', total, 'in', state)
                output[result.fipscode] = response.json()
                # Throttle so we don't hammer the API.
                sleep(1)
            else:
                print('fipscode failed:', result.fipscode, response.status_code)
                sleep(10)
                continue
        with open('data/census/{0}.json'.format(state), 'w') as f:
            json.dump(output, f)
@task
def extract_census_data(fipscode, census_json):
    """Extract selected demographic measures for one county from a saved
    Census Reporter payload.

    Parameters
    ----------
    fipscode : string
        County FIPS code used as a key into census_json.
    census_json : dict
        Payload previously saved by get_census_data (fipscode -> response).

    Returns
    -------
    dict or None
        Population, race/ethnicity shares, median income and bachelor's
        degree share for the county, or None when the fipscode is absent.
    """
    county_payload = census_json.get(fipscode)
    if not county_payload:
        return None
    # The payload nests one entry per geography; figures come from the first.
    for _geo, tables in county_payload.get('data').items():
        pop_est = tables['B01003']['estimate']
        race_est = tables['B02001']['estimate']
        hispanic_est = tables['B03002']['estimate']
        education_est = tables['B15001']['estimate']
        education_err = tables['B15001']['error']
        income_est = tables['B19013']['estimate']
        total_population = pop_est['B01003001']
        share_black = race_est['B02001003'] / race_est['B02001001']
        hisp_denom = hispanic_est['B03002001']
        share_white = hispanic_est['B03002003'] / hisp_denom
        share_hispanic = hispanic_est['B03002012'] / hisp_denom
        median_income = income_est['B19013001']
        share_bachelors, error = calculate_percent_bachelors(education_est, education_err)
        print(fipscode, share_bachelors, error)
        return {
            'population': total_population,
            'percent_white': share_white,
            'percent_black': share_black,
            'percent_hispanic': share_hispanic,
            'median_income': median_income,
            'percent_bachelors': share_bachelors,
            'error': error
        }
def calculate_percent_bachelors(education, education_error):
ed_total_population = education['B15001001']
male_18_bachelors = education['B15001009']
male_18_grad = education['B15001010']
male_18 = male_18_bachelors + male_18_grad
male_25_bachelors = education['B15001017']
male_25_grad = education['B15001018']
male_25 = male_25_bachelors + male_25_grad
male_35_bachelors = education['B15001025']
male_35_grad = education['B15001026']
male_35 = male_35_bachelors + male_35_grad
male_45_bachelors = education['B15001033']
male_45_grad = education['B15001034']
male_45 = male_45_bachelors + male_45_grad
male_65_bachelors = education['B15001041']
male_65_grad = education['B15001042']
| |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-28 22:03
# -- with some slight modifications
# flake8: noqa
from django.conf import settings
from django.contrib.postgres import fields
from django.contrib.postgres.fields import hstore
from django.db import migrations, models
from django.db.models.deletion import CASCADE, PROTECT, SET_NULL
import main.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(help_text='Path to file data.', max_length=255, upload_to='%Y/%m/%d', verbose_name='File Path')),
('filename', models.CharField(help_text='Name of attachment file.', max_length=255, verbose_name='File Name')),
('description', models.TextField(blank=True, help_text='Description of attachment file contents.', verbose_name='Description')),
('mime_type', models.CharField(blank=True, help_text='MIME ContentType of the attachment.', max_length=255, null=True, verbose_name='MIME')),
('file_size', models.IntegerField(default=0, help_text='Total byte size of the attachment.', verbose_name='Size')),
],
options={
'db_table': 'attachment',
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField(help_text='Content of the comment.', verbose_name='Comment')),
],
options={
'db_table': 'comment',
},
),
migrations.CreateModel(
name='Datasource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='The source used for information on a measurement type.', max_length=255, verbose_name='Datasource')),
('url', models.CharField(blank=True, default='', help_text='URL of the source.', max_length=255, verbose_name='URL')),
('download_date', models.DateField(auto_now=True, help_text='Date when information was accessed and copied.', verbose_name='Download Date')),
],
),
migrations.CreateModel(
name='EDDObject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meta_store', hstore.HStoreField(blank=True, default=dict, help_text='Metadata dictionary.', verbose_name='Metadata')),
('name', models.CharField(help_text='Name of this object.', max_length=255, verbose_name='Name')),
('description', models.TextField(blank=True, help_text='Description of this object.', null=True, verbose_name='Description')),
('active', models.BooleanField(default=True, help_text='Flag showing if this object is active and displayed.', verbose_name='Active')),
('uuid', models.UUIDField(editable=False, help_text='Unique identifier for this object.', unique=True, verbose_name='UUID')),
],
options={
'db_table': 'edd_object',
},
bases=(models.Model, main.models.EDDSerialize),
),
migrations.CreateModel(
name='EveryonePermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permission_type', models.CharField(choices=[('N', 'None'), ('R', 'Read'), ('W', 'Write')], default='N', help_text='Type of permission.', max_length=8, verbose_name='Permission')),
],
options={
'db_table': 'study_public_permission',
},
),
migrations.CreateModel(
name='GroupPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permission_type', models.CharField(choices=[('N', 'None'), ('R', 'Read'), ('W', 'Write')], default='N', help_text='Type of permission.', max_length=8, verbose_name='Permission')),
('group', models.ForeignKey(help_text='Group this permission applies to.', on_delete=CASCADE, related_name='grouppermission_set', to='auth.Group', verbose_name='Group')),
],
options={
'db_table': 'study_group_permission',
},
),
migrations.CreateModel(
name='Measurement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meta_store', hstore.HStoreField(blank=True, default=dict, help_text='Metadata dictionary.', verbose_name='Metadata')),
('active', models.BooleanField(default=True, help_text='Flag indicating this Measurement is active and should be displayed.', verbose_name='Active')),
('compartment', models.CharField(choices=[('0', 'N/A'), ('1', 'Intracellular/Cytosol (Cy)'), ('2', 'Extracellular')], default='0', help_text='Compartment of the cell for this Measurement.', max_length=1, verbose_name='Compartment')),
('measurement_format', models.CharField(choices=[('0', 'scalar'), ('1', 'vector'), ('2', 'histogram'), ('3', 'sigma')], default='0', help_text='Enumeration of value formats for this Measurement.', max_length=2, verbose_name='Format')),
('experimenter', models.ForeignKey(blank=True, help_text='Test App User that set up the experimental conditions of this Measurement.', null=True, on_delete=PROTECT, related_name='measurement_experimenter_set', to=settings.AUTH_USER_MODEL, verbose_name='Experimenter')),
],
options={
'db_table': 'measurement',
},
bases=(models.Model, main.models.EDDSerialize),
),
migrations.CreateModel(
name='MeasurementType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_name', models.CharField(help_text='Name of this Measurement Type.', max_length=255, verbose_name='Measurement Type')),
('short_name', models.CharField(blank=True, help_text='Short name used as an ID for the Measurement Type in SBML output.', max_length=255, null=True, verbose_name='Short Name')),
('type_group', models.CharField(choices=[('_', 'Generic'), ('m', 'Metabolite'), ('g', 'Gene Identifier'), ('p', 'Protein Identifer'), ('h', 'Phosphor')], default='_', help_text='Class of data for this Measurement Type.', max_length=8, verbose_name='Type Group')),
('uuid', models.UUIDField(editable=False, help_text='Unique ID for this Measurement Type.', unique=True, verbose_name='UUID')),
],
options={
'db_table': 'measurement_type',
},
bases=(models.Model, main.models.EDDSerialize),
),
migrations.CreateModel(
name='MeasurementUnit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unit_name', models.CharField(help_text='Name for unit of measurement.', max_length=255, unique=True, verbose_name='Name')),
('display', models.BooleanField(default=True, help_text='Flag indicating the units should be displayed along with values.', verbose_name='Display')),
('alternate_names', models.CharField(blank=True, help_text='Alternative names for the unit.', max_length=255, null=True, verbose_name='Alternate Names')),
('type_group', models.CharField(choices=[('_', 'Generic'), ('m', 'Metabolite'), ('g', 'Gene Identifier'), ('p', 'Protein Identifer'), ('h', 'Phosphor')], default='_', help_text='Type of measurement for which this unit is used.', max_length=8, verbose_name='Group')),
],
options={
'db_table': 'measurement_unit',
},
),
migrations.CreateModel(
name='MeasurementValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('x', fields.ArrayField(base_field=models.DecimalField(decimal_places=5, max_digits=16), help_text='X-axis value(s) for this point.', size=None, verbose_name='X')),
('y', fields.ArrayField(base_field=models.DecimalField(decimal_places=5, max_digits=16), help_text='Y-axis value(s) for this point.', size=None, verbose_name='Y')),
('measurement', models.ForeignKey(help_text='The Measurement containing this point of data.', on_delete=CASCADE, to='main.Measurement', verbose_name='Measurement')),
],
options={
'db_table': 'measurement_value',
},
),
migrations.CreateModel(
name='MetaboliteExchange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reactant_name', main.models.fields.VarCharField(help_text='The reactant name used in for this exchange reaction.', verbose_name='Reactant Name')),
('exchange_name', main.models.fields.VarCharField(help_text='The exchange name used in the model.', verbose_name='Exchange Name')),
],
options={
'db_table': 'measurement_type_to_exchange',
},
),
migrations.CreateModel(
name='MetaboliteSpecies',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('species', main.models.fields.VarCharField(help_text='Species name used in the model for this metabolite.', verbose_name='Species')),
],
options={
'db_table': 'measurement_type_to_species',
},
),
migrations.CreateModel(
name='MetadataGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group_name', models.CharField(help_text='Name of the group/class of metadata.', max_length=255, unique=True, verbose_name='Group Name')),
],
options={
'db_table': 'metadata_group',
},
),
migrations.CreateModel(
name='MetadataType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type_name', models.CharField(help_text='Name for Metadata Type', max_length=255, verbose_name='Name')),
('type_i18n', models.CharField(blank=True, help_text='i18n key used for naming this Metadata Type.', max_length=255, null=True, verbose_name='i18n Key')),
('type_field', models.CharField(blank=True, default=None, help_text='Model field where metadata is stored; blank stores in metadata dictionary.', max_length=255, null=True, verbose_name='Field Name')),
('input_size', models.IntegerField(default=6, help_text='Size of input fields for values of this Metadata Type.', verbose_name='Input Size')),
('input_type', models.CharField(blank=True, help_text='Type of input fields for values of this Metadata Type.', max_length=255, null=True, verbose_name='Input Type')),
('default_value', models.CharField(blank=True, help_text='Default value for this Metadata Type.', max_length=255, verbose_name='Default Value')),
('prefix', models.CharField(blank=True, help_text='Prefix text appearing before values of this Metadata Type.', max_length=255, verbose_name='Prefix')),
('postfix', models.CharField(blank=True, help_text='Postfix text appearing after values of this Metadata Type.', max_length=255, verbose_name='Postfix')),
('for_context', models.CharField(choices=[('S', 'Study'), ('L', 'Line'), ('A', 'Assay')], help_text='Type of EDD Object this Metadata Type may be added to.', max_length=8, verbose_name='Context')),
('type_class', models.CharField(blank=True, help_text='Type of data saved for this Metadata Type; blank saves a string type.', max_length=255, null=True, verbose_name='Type Class')),
('uuid', models.UUIDField(editable=False, help_text='Unique identifier for this Metadata Type.', unique=True, verbose_name='UUID')),
('group', models.ForeignKey(blank=True, help_text='Group for this Metadata Type', null=True, on_delete=PROTECT, to='main.MetadataGroup', verbose_name='Group')),
],
options={
'db_table': 'metadata_type',
},
bases=(models.Model, main.models.EDDSerialize),
),
migrations.CreateModel(
name='Update',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mod_time', models.DateTimeField(auto_now_add=True, help_text='Timestamp of the update.', verbose_name='Modified')),
('path', models.TextField(blank=True, help_text='URL path used to trigger this update.', null=True, verbose_name='URL Path')),
('origin', models.TextField(blank=True, help_text='Host origin of the request triggering this update.', null=True, verbose_name='Origin Host')),
('mod_by', models.ForeignKey(editable=False, help_text='The user performing the update.', null=True, on_delete=PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'db_table': 'update_info',
},
bases=(models.Model, main.models.EDDSerialize),
),
migrations.CreateModel(
name='UserPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('permission_type', models.CharField(choices=[('N', 'None'), ('R', 'Read'), ('W', 'Write')], default='N', help_text='Type of permission.', max_length=8, verbose_name='Permission')),
('user', models.ForeignKey(help_text='User this permission applies to.', on_delete=CASCADE, related_name='userpermission_set', to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'db_table': 'study_user_permission',
},
),
migrations.CreateModel(
name='WorklistColumn',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('heading', models.CharField(blank=True, help_text='Column header text.', max_length=255, null=True, verbose_name='Heading')),
('default_value', models.CharField(blank=True, help_text='Default value for this column.', max_length=255, null=True, verbose_name='Default Value')),
('help_text', models.TextField(blank=True, help_text='UI text to display explaining how to modify this column.', null=True, verbose_name='Help Text')),
('ordering', models.IntegerField(blank=True, help_text='Order this column will appear in worklist export.', null=True, unique=True, verbose_name='Ordering')),
('meta_type', models.ForeignKey(blank=True, help_text='Type of Metadata in this column.', null=True, on_delete=PROTECT, to='main.MetadataType', verbose_name='Metadata Type')),
],
options={
'db_table': 'worklist_column',
},
),
migrations.CreateModel(
name='Assay',
fields=[
('object_ref', models.OneToOneField(on_delete=CASCADE, parent_link=True, primary_key=True, serialize=False, to='main.EDDObject')),
('experimenter', models.ForeignKey(blank=True, help_text='Test App User that set up the experimental conditions of this Assay.', null=True, on_delete=PROTECT, related_name='assay_experimenter_set', to=settings.AUTH_USER_MODEL, verbose_name='Experimenter')),
],
options={
'db_table': 'assay',
},
bases=('main.eddobject',),
),
migrations.CreateModel(
name='CarbonSource',
fields=[
('object_ref', models.OneToOneField(on_delete=CASCADE, parent_link=True, primary_key=True, serialize=False, to='main.EDDObject')),
('labeling', models.TextField(help_text='Description of labeling isotopes in this Carbon Source.', verbose_name='Labeling')),
('volume', models.DecimalField(decimal_places=5, help_text='Volume of solution added as a Carbon Source.', max_digits=16, verbose_name='Volume')),
],
options={
'db_table': 'carbon_source',
},
bases=('main.eddobject',),
),
migrations.CreateModel(
name='GeneIdentifier',
fields=[
('measurementtype_ptr', models.OneToOneField(auto_created=True, on_delete=CASCADE, parent_link=True, primary_key=True, serialize=False, to='main.MeasurementType')),
('location_in_genome', models.TextField(blank=True, help_text='Location of this Gene in the organism genome.', null=True, verbose_name='Location')),
('positive_strand', models.BooleanField(default=True, help_text='Flag indicating if transcript is positive (sense).', verbose_name='Positive')),
('location_start', models.IntegerField(blank=True, help_text='Offset location for gene start.', null=True, verbose_name='Start')),
('location_end', models.IntegerField(blank=True, help_text='Offset location for gene end.', null=True, verbose_name='End')),
('gene_length', models.IntegerField(blank=True, help_text='Length of the gene nucleotides.', null=True, verbose_name='Length')),
],
options={
'db_table': 'gene_identifier',
},
bases=('main.measurementtype',),
),
migrations.CreateModel(
name='Line',
fields=[
('control', models.BooleanField(default=False, help_text='Flag indicating whether the sample for this Line is a control.', verbose_name='Control')),
('object_ref', models.OneToOneField(on_delete=CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='main.EDDObject')),
('contact_extra', models.TextField(help_text='Additional field for contact information about this Line (e.g. contact is not a User of Test App).', verbose_name='Contact (extra)')),
('carbon_source', models.ManyToManyField(blank=True, db_table='line_carbon_source', help_text='Carbon source(s) used in this Line.', to='main.CarbonSource', verbose_name='Carbon Source(s)')),
('contact', models.ForeignKey(blank=True, help_text='TeselaGen User to contact about this Line.', null=True, on_delete=PROTECT, related_name='line_contact_set', to=settings.AUTH_USER_MODEL, verbose_name='Contact')),
('experimenter', models.ForeignKey(blank=True, help_text='Test App User that set up the experimental conditions of this Line.', null=True, on_delete=PROTECT, related_name='line_experimenter_set', to=settings.AUTH_USER_MODEL, verbose_name='Experimenter')),
],
options={
'db_table': 'line',
},
bases=('main.eddobject',),
),
migrations.CreateModel(
name='Metabolite',
fields=[
('measurementtype_ptr', models.OneToOneField(auto_created=True, on_delete=CASCADE, parent_link=True, primary_key=True, serialize=False, to='main.MeasurementType')),
('charge', models.IntegerField(help_text='The charge of this molecule.', verbose_name='Charge')),
('carbon_count', models.IntegerField(help_text='Count of carbons present in this molecule.', verbose_name='Carbon Count')),
| |
road in one of a set of positions'''
def __init__(self, game, positions, *, no_cost = False):
self.game = game
self.selected = 0
self.positions = positions
self.no_cost = no_cost
if self.can_cancel():
self.display_message = \
'[Arrows]: Select position [Enter]: Build road [Space]: Cancel'
else:
self.display_message = \
'[Arrows]: Select position [Enter]: Build road'
def player_input(self, ch):
n_positions = len(self.positions)
if ch == curses.KEY_LEFT or ch == curses.KEY_UP:
self.selected = (self.selected - 1) % n_positions
elif ch == curses.KEY_RIGHT or ch == curses.KEY_DOWN:
self.selected = (self.selected + 1) % n_positions
elif ch == ord('\n'):
self.select_position(*self.positions[self.selected])
return DIE
elif ch == ord(' ') and self.can_cancel():
return DIE
else:
return PASS
return CONSUME
    def select_position(self, pos, edge):
        # Commit the chosen road; no_cost passes through so free builds
        # (e.g. from BuildTwoRoads) skip the usual charge.
        self.game.place_road(pos, edge, no_cost = self.no_cost)
    def can_cancel(self):
        # Placement is mandatory during the setup phases; otherwise the
        # player may back out.
        return not self.game.phase.startswith('setup')
    def draw_state(self, y, x):
        # Highlight the currently selected road edge in the player's color.
        pos, edge = self.positions[self.selected]
        self.game.draw_road_at(y, x, pos, edge,
            curses.A_BOLD | curses.A_REVERSE |
            curses.color_pair(COLOR_PLAYER0 + self.game.self_player))
class BuildTwoRoads(State):
    '''Builds two free roads in sequence (Road Building effect).'''
    display_message = '[Arrows]: Select position [Enter]: Build road'
    def __init__(self, game):
        self.game = game
        self.roads_left = 2
        self.state = self.next_state()
    def player_input(self, ch):
        '''Forward input to the inner BuildRoad state and count placements.'''
        if self.state is None:
            # No legal placement remained; nothing more to do.
            return DIE
        if ch == ord(' '):
            # The free builds cannot be cancelled.
            return PASS
        res = self.state.player_input(ch)
        if res == DIE:
            # Inner state finished: one road was placed.
            self.roads_left -= 1
            self.state = self.next_state()
            return DIE if self.roads_left == 0 else CONSUME
        return res
    def next_state(self):
        '''Return a BuildRoad state for the next free road, or None when done
        or when no placement is possible.'''
        if self.roads_left:
            places = self.game.road_places()
            if not places:
                return None
            # BUGFIX: reuse the list computed above instead of querying
            # the game a second time.
            return BuildRoad(self.game, places, no_cost = True)
        return None
    def draw_state(self, y, x):
        return self.state.draw_state(y, x)
class HalveResources(State):
    '''Forces targeted users to discard some resources'''
    display_message = '[Arrows]: Select resources [Enter]: Discard'
    priority = PRI_HIGH
    def __init__(self, game, players):
        '''
        players is a dict: { player index: discards required }
        '''
        self.game = game
        self.required = players
        # Maps player index -> chosen resource counts, filled in as each
        # required player commits via set_discard.
        self.discards = {}
        # Only build the selection dialog if the local human player owes a
        # discard; other players' discards arrive through set_discard.
        if game.self_player in players:
            self.ui = SelectResourcesUI('You must discard {n} resources',
                players[game.self_player],
                game.cashan.players[game.self_player].resources)
    def accepts_input(self, player):
        selfp = self.game.self_player
        # If the human player is part of this, let them go first so that
        # AI-generated action messages do not interrupt.
        if selfp in self.required and selfp not in self.discards:
            return player == selfp
        return player in self.required and player not in self.discards
    def player_input(self, ch):
        # Arrow keys etc. are consumed by the resource-selection UI first.
        if self.ui.player_input(ch) == CONSUME:
            return CONSUME
        elif ch == ord('\n'):
            req = self.required[self.game.self_player]
            # Enter only commits once exactly the required number of
            # resources is selected; otherwise the key is swallowed.
            if sum(self.ui.resources.values()) == req:
                if self.set_discard(self.game.self_player, self.ui.resources):
                    return DIE
                return CONSUME
        else:
            return PASS
        return CONSUME
    def set_discard(self, player, resources):
        '''
        Sets the amounts of resources to be discarded for the given player.
        If this is the last player to set a discard set, it returns True
        and triggers discard and further action in the game state.
        '''
        req = self.required[player]
        dis = sum(resources.values())
        if req != dis:
            raise Exception('set_discard got wrong resource count: '
                'expected {}; got {}'.format(req, dis))
        self.discards[player] = resources
        self.game.player_set_discard(player, resources)
        return self.finished()
    def finished(self):
        # Once every required player has committed, apply all discards and
        # advance the robber phase.
        if len(self.required) == len(self.discards):
            for p, r in self.discards.items():
                self.game.player_discard(p, r)
            self.game.activate_robber(False)
            return True
        return False
    def draw_state(self, y, x):
        # Show the dialog only while the local player still owes a discard.
        self_p = self.game.self_player
        if self_p in self.required and self_p not in self.discards:
            self.ui.draw(self.game.stdscr, y, x)
class SelectResourcesUI:
    '''
    Shared widget for picking per-resource counts, bounded per resource by
    `bounds` and in total by `max`. Callers (HalveResources,
    SelectResourceCards) read the chosen counts from self.resources.
    '''
    def __init__(self, message, max, bounds):
        # `max` shadows the builtin, but renaming the parameter could
        # break keyword callers, so it is kept as-is.
        self.message = message      # format string shown with {n} = max
        self.max = max              # total number of cards to select
        self.bounds = bounds        # per-resource upper limits
        self.resources = resource_cards(0)
        self.selected = 0           # cursor index into RESOURCES
    def draw(self, win, y, x):
        '''Draw the dialog centered in a screen of size (y, x).'''
        w = 50
        h = 10
        sub = win.subwin(h, w, (y - h) // 2, (x - w) // 2)
        sub.clear()
        sub.border()
        sub.addstr(2, 3, self.message.format(n = self.max))
        for i, r in enumerate(RESOURCES):
            name, color = resource_name(r)
            sub.addstr(4, 5 + i * 9, name.strip(), curses.color_pair(color))
            sub.addstr(5, 6 + i * 9, '{:>2}'.format(self.resources[r]),
                curses.A_REVERSE if self.selected == i else 0)
    def get_resource(self, index):
        return RESOURCES[index]
    def player_input(self, ch):
        # CONSISTENCY FIX: the cursor wrap was hard-coded as % 5; use
        # len(RESOURCES) like SelectResource does, so the widget stays
        # correct if the resource list ever changes.
        n_resources = len(RESOURCES)
        if ch == curses.KEY_LEFT:
            self.selected = (self.selected - 1) % n_resources
        elif ch == curses.KEY_RIGHT:
            self.selected = (self.selected + 1) % n_resources
        elif ch == curses.KEY_UP:
            r = self.get_resource(self.selected)
            # Respect both the per-resource bound and the overall total.
            if self.bounds[r] != self.resources[r] and \
                    sum(self.resources.values()) < self.max:
                self.resources[r] += 1
        elif ch == curses.KEY_DOWN:
            r = self.get_resource(self.selected)
            if self.resources[r] > 0:
                self.resources[r] -= 1
        else:
            return PASS
        return CONSUME
class SelectCell(State):
    '''Selects a cell on the map and calls a callback with the result'''
    def __init__(self, game, action, callback, deny = None):
        self.game = game
        self.action = action
        self.callback = callback
        self.deny = deny
        # Start at the origin unless the origin is the forbidden cell.
        self.selected = (0, -1) if deny == (0, 0) else (0, 0)
        self.display_message = '[Arrows]: Select cell [Enter]: {}'.format(action)
    def player_input(self, ch):
        """Move the cursor one cell, or confirm the current cell on Enter."""
        deltas = {
            curses.KEY_LEFT: (-1, 0),
            curses.KEY_RIGHT: (1, 0),
            curses.KEY_UP: (0, -1),
            curses.KEY_DOWN: (0, 1),
        }
        if ch == ord('\n'):
            self.select_position(self.selected)
            return DIE
        if ch not in deltas:
            return PASS
        dx, dy = deltas[ch]
        candidate = (self.selected[0] + dx, self.selected[1] + dy)
        # Only move onto cells that exist and are not explicitly denied.
        if candidate != self.deny and self.game.cashan.cell_exists(candidate):
            self.selected = candidate
        return CONSUME
    def select_position(self, pos):
        # Hand the chosen coordinates to whoever requested the selection.
        self.callback(pos)
    def draw_state(self, y, x):
        chosen = self.game.cashan.grid[self.selected]
        self.game.draw_name_at(y, x, self.selected, chosen, curses.A_REVERSE)
class SelectResource(State):
    '''Menu for choosing a single resource type to take from all players.'''
    display_message = '[Arrows]: Select resource [Enter]: Take resources'
    priority = PRI_HIGH
    def __init__(self, game):
        self.game = game
        self.selected = 0
    def player_input(self, ch):
        n_choices = len(RESOURCES)
        if ch == curses.KEY_UP:
            self.selected = (self.selected - 1) % n_choices
            return CONSUME
        if ch == curses.KEY_DOWN:
            self.selected = (self.selected + 1) % n_choices
            return CONSUME
        if ch == ord('\n'):
            self.select_resource(RESOURCES[self.selected])
            return DIE
        return PASS
    def select_resource(self, resource):
        # Monopoly-style effect: take every card of the chosen type.
        self.game.take_all_resource(resource)
    def draw_state(self, y, x):
        width, height = 50, 15
        top = (y - height) // 2
        left = (x - width) // 2
        dialog = self.game.stdscr.subwin(height, width, top, left)
        dialog.clear()
        dialog.border()
        dialog.addstr(2, 5, 'Select a resource')
        # Marker next to the currently highlighted row.
        dialog.addstr(4 + self.selected, 3, '*')
        for idx, res in enumerate(RESOURCES):
            ScreenWriter(dialog, height, width, 4 + idx, 5).write_resource_name(res)
class SelectResourceCards(State):
    '''Lets the player take resource cards from the bank's supply.'''
    display_message = '[Arrows]: Select resources [Enter]: Acquire resources'
    priority = PRI_HIGH
    def __init__(self, game):
        self.game = game
        # Two cards normally; fewer when the bank is nearly empty.
        available = sum(game.cashan.resources.values())
        self.n_resources = min(2, available)
        self.ui = SelectResourcesUI('Select {n} resources',
            self.n_resources, game.cashan.resources)
    def player_input(self, ch):
        # The selection widget handles the arrow keys.
        if self.ui.player_input(ch) == CONSUME:
            return CONSUME
        if ch != ord('\n'):
            return PASS
        # Enter commits only when exactly the right number is selected.
        if sum(self.ui.resources.values()) == self.n_resources:
            self.select_resources(self.ui.resources)
            return DIE
        return CONSUME
    def select_resources(self, resources):
        self.game.acquire_resources(resources)
    def draw_state(self, y, x):
        self.ui.draw(self.game.stdscr, y, x)
class StartTurn(State):
    '''Represents the beginning of a normal turn'''
    def __init__(self, game):
        self.game = game
        self.rolled = False
        self.played = False
        # Development cards bought this turn; they cannot be played until
        # the next turn (see is_playable).
        self.bought = defaultdict(int)
    def player_input(self, ch):
        """Dispatch a turn-menu keypress to the matching action."""
        if ch == ord('r') and self.can_roll():
            self.roll()
            return CONSUME
        if ch == ord('b'):
            # Always available so players can review the costs.
            self.game.push_state(Buy(self.game, self))
            return CONSUME
        if ch == ord('t') and self.can_trade():
            self.game.push_state(Trade(self.game, self))
            return CONSUME
        if ch == ord('p') and self.can_play():
            self.game.push_state(Play(self.game, self))
            return CONSUME
        if ch == ord('v') and self.can_declare():
            self.game.declare_victory()
            return CONSUME
        if ch == ord('e') and self.can_end():
            return DIE
        return PASS
    def roll(self):
        # Rolling is a once-per-turn action.
        self.rolled = True
        self.game.player_roll()
    def can_buy(self):
        buyer = self.game.current_player
        return any(map(buyer.can_buy, purchasable()))
    def can_declare(self):
        points = self.game.cashan.count_victory_points(self.game.current_player)
        return points >= 10
    def can_end(self):
        return self.rolled
    def can_play(self):
        if self.played:
            return False
        hand = self.game.current_player.development_cards
        return any(self.is_playable(card) for card in hand)
    def is_playable(self, card):
        # Victory points are never "played"; cards bought this turn are
        # excluded from the playable count.
        hand = self.game.current_player.development_cards
        if card is VictoryPoint:
            return False
        return hand[card] - self.bought[card] > 0
    def can_roll(self):
        return not self.rolled
    def can_trade(self):
        return any(self.game.current_player.resources.values())
    def buy_item(self, item):
        """Purchase `item` and push the follow-up placement state."""
        if item is Road:
            follow_up = BuildRoad(self.game, self.game.road_places())
        elif item is Settlement:
            follow_up = BuildSettlement(self.game, self.game.settlement_places())
        elif item is City:
            follow_up = BuildCity(self.game, self.game.city_places())
        elif item is DevelopmentCard:
            self.bought[self.game.buy_development_card()] += 1
            return
        else:
            raise Exception('invalid item: {!r}'.format(item))
        self.game.push_state(follow_up)
    def play_card(self, card):
        # Only one development card may be played per turn.
        self.played = True
        self.game.play_card(card)
    def propose_trade(self, mode, n, resource):
        '''
        Proposes a trade of given mode ('offer' or 'request').
        '''
        return self.game.propose_trade(mode, n, resource)
    def perform_maritime_trade(self, give, recv):
        '''Trades give: (n, resource) for recv: (n, resource) with the "bank"'''
        self.game.trade_with_bank(give, recv)
    @property
    def display_message(self):
        parts = []
        if self.can_roll():
            parts.append('[R]oll dice')
        # 'Buy' is always listed so costs can be checked.
        parts.append('[B]uy' if self.can_buy() else '[B]: Check costs')
        if self.can_trade():
            parts.append('[T]rade')
        if self.can_play():
            parts.append('[P]lay')
        if self.can_declare():
            parts.append('[V]: Declare victory')
        if self.can_end():
            parts.append('[E]nd turn')
        return ' '.join(parts)
class TradeOffer(State):
priority = PRI_HIGH
    def __init__(self, game, mode, n, resource):
        # mode is 'offer' or 'request' (see StartTurn.propose_trade);
        # n and resource describe the quantity and type being traded.
        self.game = game
        self.mode = mode
        self.n = n
        self.resource = resource
        # [ (player, n, resource), ... ]
        self.offers = []
        # { player: ( 'offered' | 'rejected' ), ... }
        self.states = {}
        # The proposing player sees the owner UI; everyone else sees the
        # responder UI.
        owner = game.player_turn == game.self_player
        self.ui = TradeOwnerUI(self) if owner else TradeOtherUI(self)
    @property
    def display_message(self):
        # Delegate to whichever UI variant this player was given.
        return self.ui.display_message
    def accepts_input(self, player):
        # Every player may interact with an open trade offer.
        return True
    def player_input(self, ch):
        # Input handling lives entirely in the per-role UI object.
        return self.ui.player_input(ch)
def accept_offer(self, offer):
player, n, resource = offer
if self.mode == 'offer':
self.game.trade_with_player(player, | |
import os
def qeSubparser(subparsers):
# --------------------------------------------------------------------------
# Quantum ESPRESSO
# --------------------------------------------------------------------------
subparser = subparsers.add_parser("qe", help="using quantum espresso as calculator")
gp = subparser.add_argument_group(title="overall running control")
gp.add_argument("-r", "--runtype", type=int, default=0,
choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
help="choices of runtype. 0->static_run; 1->relax; 2->vc-relax; 3->cubic-cell; 4->hexagonal-cell; 5->tetragonal-cell; 6->neb; 7->dfpt; 8->phonopy; 9->pp.x; 10->abc; 11->converge")
gp.add_argument("--static", type=str, default="all",
choices=["all", "scf"],
help="in case of all(default), run scf, nscf, bands in a single run; in case of scf, run scf only")
gp.add_argument("-d", "--directory", type=str, default="matflow-running",
help="Directory for the running.")
# run option
gp.add_argument("--runopt", type=str, default="gen",
choices=["gen", "run", "genrun"],
help="Generate or run or both at the same time.")
gp.add_argument("--auto", type=int, default=3,
choices=[0, 1, 2, 3],
help="auto:0 nothing, 1: copying files to server, 2: copying and executing, 3: pymatflow run inserver with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|llhpc].conf")
# -----------------------------------------------------------------
# run params
# -----------------------------------------------------------------
gp.add_argument("--mpi", type=str, default="",
help="MPI command: like 'mpirun -np 4'")
gp.add_argument("--server", type=str, default="pbs",
choices=["pbs", "llhpc", "tianhe2", "cdcloud"],
help="type of remote server, can be pbs or llhpc, cdcloud")
gp.add_argument("--jobname", type=str, default="matflow-job",
help="jobname on the pbs server")
gp.add_argument("--nodes", type=int, default=1,
help="Nodes used in server")
gp.add_argument("--ppn", type=int, default=32,
help="ppn of the server")
gp.add_argument("--queue", type=str, default=None,
help="the queue to submit to job, default is not set")
# llhpc
gp.add_argument("--partition", type=str, default="free",
help="choose partition to submit job")
gp.add_argument("--ntask", type=int, default=24,
help="choose task number")
gp.add_argument("--stdout", type=str, default="slurm.out",
help="set standard out")
gp.add_argument("--stderr", type=str, default="slurm.err",
help="set standard err")
# structure file: either xyz or cif. they are exclusive
# actually this can be put in the main subparser, but it will make the command not like git sub-cmmand
# so we put them in every subsubparser
gp = subparser.add_mutually_exclusive_group(required=True) # at leaset one of cif and xyz is provided
# argparse will make sure only one of argument in structfile(xyz, cif) appear on command line
gp.add_argument("--xyz", type=str, default=None,
help="The xyz structure file with the second line specifying the cell parameter")
gp.add_argument("--cif", type=str, default=None,
help="The cif structure file")
gp.add_argument("--xsd", type=str, default=None,
help="The xsd structure file")
gp.add_argument("--xsf", type=str, default=None,
help="The xsf structure file")
gp.add_argument("--images", type=str, nargs="+",
help="the image stucture file(--images first.cif final.xsd), can only be cif, xsd, xsd, or xyz(second line is cell parameter) format")
# potential file
gp = subparser.add_argument_group(title="pseudopotential")
gp.add_argument("--pot", type=str, default="./",
help="specify the path to the dir containing all the needed pseudopotential, default behavior is find them in the current directory automatically. if you pass 'auto' to it, matflow will get the pots automatically(need simple configuration, see manual)")
gp.add_argument("--pot-type", type=str, default="sssp_efficiency",
choices=["paw_pbe", "PAW_PBE", "sssp_efficiency", "SSSP_EFFICIENCY", "sssp_precision", "SSSP_PRECISION"],
help="specify type of pseudo potentials to prepare, when --pot auto")
# -------------------------------------------------------------------
# scf related parameters
# -------------------------------------------------------------------
# &control
gp = subparser.add_argument_group(title="pw.x->control")
gp.add_argument("--etot-conv-thr", type=float, default=None, #1.0e-4,
help="convergence threshold of energy for geometric optimization")
gp.add_argument("--forc-conv-thr", type=float, default=None, #1.0e-3,
help="convergence threshold for force in optimization,(usually it is more important than energy)")
gp.add_argument("--tstress", type=str, default=".false.",
choices=[".true.", ".false."],
help="calculate stress. default=.false.")
# &system
gp = subparser.add_argument_group(title="pw.x->system")
gp.add_argument("--occupations", type=str, default="smearing",
choices=["smearing", "tetrahedra", "tetrahedra_lin", "tetrahedra_opt", "fixed", "from_input"],
help="Occupation method for the calculation.")
gp.add_argument("--smearing", type=str, default="gaussian",
choices=["gaussian", "methfessel-paxton", "marzari-vanderbilt", "fermi-dirac"],
help="Smearing type for occupations by smearing, default is gaussian in this script")
gp.add_argument("--degauss", type=float, default=0.001,
help="Value of the gaussian spreading (Ry) for brillouin-zone integration in metals.(defualt: 0.001 Ry)")
gp.add_argument("--nbnd", type=int, default=None,
help="Number of electronic states (bands) to be calculated")
gp.add_argument("--ecutwfc",
type=int, default=100)
gp.add_argument("--ecutrho", type=int, default=None,
help="Kinetic energy cutoff for charge density and potential in unit of Rydberg, default value: None")
gp.add_argument("--vdw-corr", help="vdw_corr = dft-d, dft-d3, ts-vdw, xdm", type=str, default="none")
gp.add_argument("--tot-charge", type=int, default=None,
help="Total charge of the system. Useful for simulations with charged cells. tot_charge=+1 means one electron missing from the system, tot_charge=-1 means one additional electron, and so on.")
gp.add_argument("--nosym", type=str, default=None,
choices=[".true.", ".false."],
help="Do not use this option unless you know exactly what you want and what you get. May be useful in the following cases: - in low-symmetry large cells, if you cannot afford a k-point grid with the correct symmetry - in MD simulations - in calculations for isolated atoms")
gp.add_argument("--nosym-evc", type=str, default=None,
choices=[".true.", ".false."],
help="if (.TRUE.) symmetry is not used, and k points are forced to have the symmetry of the Bravais lattice;")
gp.add_argument("--noinv", type=str, default=None,
choices=[".true.", ".false"],
help="if (.TRUE.) disable the usage of k => -k symmetry(time reversal) in k-point generation")
# magnetic related parameters
gp.add_argument("--nspin", type=int, default=None,
choices=[1, 2],
help="choose either 1 or 2, and 4 should not be used as suggested by pwscf official documentation.")
gp.add_argument("--starting-magnetization", type=float, nargs="+", default=None,
help="starting_magnetization(i), i=1,ntyp -> Starting spin polarization on atomic type i in a spin polarized calculation. Values range between -1 (all spins down for the valence electrons of atom type i) to 1 (all spins up).")
gp.add_argument("--noncolin", type=str, default=None,
choices=[".true.", ".false."],
help="if .true. the program will perform a noncollinear calculation.")
gp.add_argument("--lspinorb", type=str, default=None,
choices=[".true.", ".false."],
help="if .TRUE. the noncollinear code can use a pseudopotential with spin-orbit.")
# DFT+U
gp.add_argument("--lda-plus-u", type=str, default=None,
choices=[".true.", ".false."],
help="DFT+U (formerly known as LDA+U) currently works only for a few selected elements. Modify Modules/set_hubbard_l.f90 and PW/src/tabd.f90 if you plan to use DFT+U with an element that is not configured there.")
gp.add_argument("--lda-plus-u-kind", type=int, default=None,
choices=[0, 1, 2],
help="0 DFT+U simplified version of Cococcioni and de Gironcoli, using Hubbard_U; 1 DFT+U rotationally invariant scheme of Liechtenstein et al.,using Hubbard U and Hubbard J; 2 DFT+U+V simplified version of Campo Jr and Cococcioni, using Hubbard V")
gp.add_argument("--hubbard-u", type=float, nargs="+", default=None,
help="Hubbard_U(i): U parameter (eV) for species i, DFT+U calculation")
gp.add_argument("--hubbard-j0", type=float, nargs="+", default=None,
help="Hubbard_J0(i): J0 parameter (eV) for species i, DFT+U+J calculation")
gp.add_argument("--hubbard-alpha", type=float, default=None,
help="Hubbard_alpha(i) is the perturbation (on atom i, in eV) used to compute U with the linear-response method of Cococcioni and de Gironcoli(only for lda_plus_u_kind=0)")
gp.add_argument("--hubbard-beta", type=float, default=None,
help="Hubbard_beta(i) is the perturbation (on atom i, in eV) used to compute J0 with the linear-response method of Cococcioni and de Gironcoli(only for lda_plus_u_kind=0)")
gp.add_argument("--u-projection-type", type=str, default=None,
choices=["atomic", "ortho-atomic", "norm-atomic", "file", "pseudo"],
help="Only active when lda_plus_U is .true., specifies the type of projector on localized orbital to be used in the DFT+U scheme. default is atomic")
# Hybrid functional
gp.add_argument("--input-dft", type=str, default="pbe",
choices=["pbe", "pbe0", "b3lyp", "hse", "vdw-DF"],
help="Exchange-correlation functional: eg 'PBE', 'BLYP' etc")
gp.add_argument("--ace", type=str, default=None,
choices=[".true.", ".false."],
help="Use Adaptively Compressed Exchange operator as in <NAME>, J. Chem. Theory Comput. 2016, 12, 2242--2249, Set to false to use standard Exchange (much slower)")
gp.add_argument("--exx-fraction", type=float, default=None,
help="Fraction of EXX for hybrid functional calculations. In the case of input_dft='PBE0', the default value is 0.25, while for input_dft='B3LYP' the exx_fraction default value is 0.20.")
gp.add_argument("--screening-parameter", type=float, default=None,
help="screening_parameter for HSE like hybrid functionals., default is 0.106")
gp.add_argument("--exxdiv-treatment", type=str, default=None,
choices=["gygi-baldereschi", "vcut_spherical", "vcut_ws", "none"],
help="Specific for EXX. It selects the kind of approach to be used for treating the Coulomb potential divergencies at small q vectors.")
gp.add_argument("--x-gamma-extrapolation", type=str, default=None,
choices=[".true.", ".false."],
help="Specific for EXX. If .true., extrapolate the G=0 term of the potential ")
gp.add_argument("--ecutvcut", type=float, default=None,
help="Reciprocal space cutoff for correcting Coulomb potential divergencies at small q vectors.")
gp.add_argument("--nqx", type=float, nargs=3, default=[None, None, None],
help="Three-dimensional mesh for q (k1-k2) sampling of the Fock operator (EXX). Can be smaller than the number of k-points. Currently this defaults to the size of the k-point mesh used. In QE =< 5.0.2 it defaulted to nqx1=nqx2=nqx3=1.")
# &electrons
gp = subparser.add_argument_group(title="pw.x->electrons")
gp.add_argument("--electron-maxstep", type=int, default=None,
help="maximum number of iterations in a scf step")
gp.add_argument("--conv-thr", type=float, default=1.0e-6,
help="the conv_thr for scf, when doing geometric optimization better use a strict covnergec for scf")
gp.add_argument("--mixing-beta", type=float, default=None,
help="mixing factor for self-consistency, default is 0.7")
gp.add_argument("--mixing-ndim", type=float, default=None,
help="number of iterations used in mixing scheme. If you are tight with memory, you may reduce it to 4 or so.")
gp.add_argument("--diagonalization", type=str, default=None,
choices=["david", "cg", "ppcg", "paro"],
help="Available options are: david cg ppcg paro")
gp.add_argument("--scf-must-converge", type=str, default=None,
choices=[".true.", ".false."],
help="If .false. do not stop molecular dynamics or | |
def __str__(self):
return f"{self.name} ({self.id})"
@classmethod
def get_best_available(cls):
return (
cls.objects.filter(status=cls.STATUS_ONLINE).order_by("-priority").first()
)
    @property
    def sftp_storage(self) -> Union[SFTPStorage, None]:
        """Return a connected SFTPStorage for this resource, cached when enabled.

        Raises ComputeResourceDecommissioned when the resource has been
        decommissioned, or a plain Exception for any other non-online status.
        NOTE(review): despite the Union[..., None] annotation, every visible
        path either returns a storage instance or raises - confirm whether
        callers ever expect None.
        """
        @backoff.on_exception(
            backoff.expo, (socket.gaierror,), max_tries=3, jitter=backoff.full_jitter
        )
        def _connect(_storage_instance: SFTPStorage):
            """
            Attempt to connect to SFTP server, with retries.
            """
            # Accessing the .sftp property ensures the connection is still open, reopens it if it's not
            _ = _storage_instance.sftp
        if not self.available:
            msg = f"Cannot access storage for ComputeResource {self.id} - status='{self.status}'"
            if self.status == ComputeResource.STATUS_DECOMMISSIONED:
                raise ComputeResourceDecommissioned(msg)
            raise Exception(msg)
        if CACHE_SFTP_CONNECTIONS:
            # Reuse a previously created storage instance for this resource id,
            # but only if its underlying SSH transport is still alive.
            _storage_instance: SFTPStorage = CACHED_SFTP_STORAGE_CLASS_INSTANCES.get(
                self.id, None
            )
            if (
                _storage_instance is not None
                and _storage_instance._ssh.get_transport() is not None
            ):
                if _storage_instance._ssh.get_transport().is_active():
                    # This ensures connection is still open, reopens it if it's not
                    _connect(_storage_instance)
                    return _storage_instance
                else:
                    # Transport is dead: close it and fall through to build a
                    # fresh storage instance below.
                    _storage_instance.sftp.close()
        storage_class = get_storage_class(
            SCHEME_STORAGE_CLASS_MAPPING.get("laxy+sftp", None)
        )
        host = self.hostname
        port = self.port
        private_key = self.private_key
        username = self.extra.get("username")
        params = dict(
            port=port,
            username=username,
            pkey=RSAKey.from_private_key(StringIO(private_key)),
        )
        # storage = SFTPStorage(host=host, params=params)
        storage: SFTPStorage = storage_class(host=host, params=params)
        _connect(
            storage
        )  # Do this to ensure we can connect before caching the SFTPStorage class
        # NOTE(review): the instance is cached even when CACHE_SFTP_CONNECTIONS
        # is false - confirm this is intended.
        CACHED_SFTP_STORAGE_CLASS_INSTANCES[self.id] = storage
        return storage
def ssh_client(self, *args, **kwargs) -> paramiko.SSHClient:
"""
Return an SSHClient instance connected to the ComputeResource.
eg.
with compute_resource.ssh_client() as client:
stdin, stdout, stderr = client.exec_command('ls')
Be sure to use 'with' so the context manager can close the connection when
you are finished with the client.
:return: A paramiko SSHClient instance connected to the compute resource.
:rtype: paramiko.SSHClient
"""
remote_username = self.extra.get("username", "laxy")
# TODO: Cache connections to the same ComputeResource
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy)
# client.load_system_host_keys()
client.connect(
self.hostname,
port=self.port,
username=remote_username,
pkey=RSAKey.from_private_key(StringIO(self.private_key)),
**kwargs,
)
return client
def running_jobs(self):
"""
Returns a Django QuerySet of all the jobs currently running or
on this compute resource.
:return: The pending Jobs
:rtype: django.db.models.query.QuerySet
"""
return Job.objects.filter(compute_resource=self, status=Job.STATUS_RUNNING)
def pending_jobs(self):
"""
Returns a Django QuerySet of all the jobs currently running or
waiting to run on this compute resource.
:return: The pending Jobs
:rtype: django.db.models.query.QuerySet
"""
return Job.objects.filter(
compute_resource=self,
status__in=[Job.STATUS_CREATED, Job.STATUS_RUNNING, Job.STATUS_STARTING],
)
def dispose(self):
"""
Terminates the ComputeResource such that it can no longer be used.
eg, may permanently terminate the associated AWS instance.
Returns False if resource isn't disposable.
:return:
:rtype:
"""
if self.disposable:
orchestration.dispose_compute_resource.apply_async(
args=({"compute_resource_id": self.id},)
)
else:
return False
@property
def available(self):
return self.status == ComputeResource.STATUS_ONLINE
@property
def private_key(self):
return base64.b64decode(self.extra.get("private_key")).decode("ascii")
@property
def hostname(self):
return self.host.split(":")[0]
@property
def port(self) -> int:
"""
Return the port associated with the host, as a int.
:return: The port (eg 22)
:rtype: int
"""
if ":" in self.host:
return int(self.host.split(":").pop())
else:
return "22"
@property
def jobs_dir(self) -> str:
"""
Return the base path for job storage on the host, eg /scratch/jobs/
:return: The path where jobs are stored on the ComputeResource.
:rtype: str
"""
fallback_base_dir = getattr(settings, "DEFAULT_JOB_BASE_PATH", "/tmp")
return self.extra.get("base_dir", fallback_base_dir)
@property
def queue_type(self):
return self.extra.get("queue_type", None)
@queue_type.setter
def queue_type(self, queue_type: str):
self.extra["queue_type"] = queue_type
self.save()
@reversion.register()
class Job(Expires, Timestamped, UUIDModel):
    """
    Represents a processing job (typically a long running remote job managed
    by a Celery task queue).
    """
    # BUG FIX: this docstring was previously placed *after* ExtraMeta, where it
    # was a no-op string statement and Job.__doc__ remained unset.

    class ExtraMeta:
        # Fields that are allowed to be modified via partial (PATCH) updates.
        patchable_fields = ["params", "metadata"]

    STATUS_CREATED = "created"
    STATUS_HOLD = "hold"
    STATUS_STARTING = "starting"
    STATUS_RUNNING = "running"
    STATUS_FAILED = "failed"
    STATUS_CANCELLED = "cancelled"
    STATUS_COMPLETE = "complete"

    JOB_STATUS_CHOICES = (
        (STATUS_CREATED, "object_created"),
        (STATUS_HOLD, "hold"),
        (STATUS_STARTING, "starting"),
        (STATUS_RUNNING, "running"),
        (STATUS_FAILED, "failed"),
        (STATUS_CANCELLED, "cancelled"),
        (STATUS_COMPLETE, "complete"),
    )

    owner = ForeignKey(
        User, on_delete=models.CASCADE, blank=True, null=True, related_name="jobs"
    )
    status = CharField(
        max_length=64, choices=JOB_STATUS_CHOICES, default=STATUS_CREATED
    )
    exit_code = IntegerField(blank=True, null=True)
    # Identifier of the job on the remote scheduler/queue, if any.
    remote_id = CharField(max_length=64, blank=True, null=True)

    # jsonfield or native Postgres
    # params = JSONField(load_kwargs={'object_pairs_hook': OrderedDict})
    # django-jsonfield or native Postgres
    params = JSONField(default=OrderedDict)

    # Intended for non-parameter metadata about the job
    # - eg, post-run extracted results that might be used in the web frontend
    # (eg {"results": {"predicted-strandedness": "forward", "strand-bias": 0.98}} )
    metadata = JSONField(default=OrderedDict)

    # A JSON-serializable params class that may be specialized via
    # multiple-table inheritance
    # params = ForeignKey(JobParams,
    #                     blank=True,
    #                     null=True,
    #                     on_delete=models.SET_NULL)

    input_files = ForeignKey(
        "FileSet",
        null=True,
        blank=True,
        related_name="jobs_as_input",
        on_delete=models.SET_NULL,
    )
    output_files = ForeignKey(
        "FileSet",
        null=True,
        blank=True,
        related_name="jobs_as_output",
        on_delete=models.SET_NULL,
    )

    compute_resource = ForeignKey(
        ComputeResource,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
        related_name="jobs",
    )
    completed_time = DateTimeField(blank=True, null=True)

    @transaction.atomic()
    def _init_filesets(self, save=True):
        """Create this job's input/output FileSets if they don't exist yet."""
        if not self.input_files:
            self.input_files = FileSet(
                name=f"Input files for job: {self.id}", path="input", owner=self.owner
            )
            if save:
                self.input_files.save()
        if not self.output_files:
            self.output_files = FileSet(
                name=f"Output files for job: {self.id}", path="output", owner=self.owner
            )
            if save:
                self.output_files.save()

    def save(self, *args, **kwargs):
        super(Job, self).save(*args, **kwargs)
        # For a single-use ('disposable') ComputeResource associated with a
        # single job, we automatically name the resource based on the associated
        # job UUID.
        compute = self.compute_resource
        if compute and compute.disposable and not compute.name:
            compute.name = generate_cluster_stack_name(self)
            compute.save()

    def events(self):
        """Return all EventLog entries recorded against this job's id."""
        return EventLog.objects.filter(object_id=self.id)

    def log_event(self, event_type: str, message: str, extra=None) -> EventLog:
        """Create, save and return an EventLog entry attached to this job."""
        if extra is None:
            extra = {}
        eventlog = EventLog(
            event=event_type,
            message=message,
            extra=extra,
            user=self.owner,
            object_id=self.id,
            content_type=ContentType.objects.get_for_model(self),
        )
        eventlog.save()
        return eventlog

    def latest_event(self):
        """Most recent non-status-change event, or an empty queryset if none."""
        try:
            return (
                EventLog.objects.filter(object_id=self.id)
                .exclude(event__exact="JOB_STATUS_CHANGED")
                .latest()
            )
        except EventLog.DoesNotExist:
            return EventLog.objects.none()

    def get_files(self) -> models.query.QuerySet:
        # Combine input and output querysets into one.
        return self.input_files.get_files() | self.output_files.get_files()

    @property
    def abs_path_on_compute(self):
        """
        DEPRECATED: Use job_path_on_compute directly (or rename this property to
        something less confusing, given that Job.compute_resource is where the Job ran,
        not where the files currently reside)

        Returns the absolute path to the job directory on it's ComputeResource.
        SSH-centric, but might map to other methods of accessing storage.

        :return: An absolute path to the job directory.
        :rtype: str
        """
        return job_path_on_compute(self, self.compute_resource)

    @property
    def done(self):
        """
        Returns True if the job has finished, either successfully
        or unsuccessfully.

        :return: True if job is no longer running.
        :rtype: bool
        """
        return self.status in (
            Job.STATUS_COMPLETE,
            Job.STATUS_CANCELLED,
            Job.STATUS_FAILED,
        )

    @transaction.atomic()
    def add_files_from_tsv(self, tsv_table: Union[List[dict], str, bytes], save=True):
        """
        Register Files on this Job from a TSV table (or a pre-parsed list of
        dicts). Expected columns: ``filepath``, ``checksum``, ``type_tags``,
        ``metadata``, and optionally a leading ``location`` column, eg::

            filepath	checksum	type_tags	metadata
            input/some_dir/table.txt	md5:7d9960c77b363e2c2f41b77733cf57d4	text,csv,google-sheets	{}
            output/sample2/alignments/sample2.bam	md5:7c9f22c433ae679f0d82b12b9a71f5d3	bam,alignment,bam.sorted,jbrowse	{}

        Files whose path begins with ``input/`` or ``output/`` are added to the
        corresponding FileSet; anything else is logged and skipped. Existing
        files (matched by name and path) are updated rather than duplicated.

        :param tsv_table: TSV text/bytes, or a list of row dicts.
        :type tsv_table: Union[List[dict], str, bytes]
        :param save: Persist each File as it is registered.
        :type save: bool
        :return: The (input_files, output_files) File objects registered.
        :rtype: Tuple[List[File], List[File]]
        """
        from laxy_backend.serializers import FileBulkRegisterSerializer

        if isinstance(tsv_table, str):
            # BUG FIX: BytesIO requires bytes; passing a str raised TypeError
            # on Python 3. Encode before handing to the CSV importer.
            tsv_table = tsv_table.encode("utf-8")
        if isinstance(tsv_table, bytes):
            table = rows.import_from_csv(BytesIO(tsv_table), skip_header=False)
            table = json.loads(rows.export_to_json(table))
        elif isinstance(tsv_table, list):
            table = tsv_table
        else:
            raise ValueError("tsv_table must be str, bytes or a list of dicts")

        in_files = []
        out_files = []
        self._init_filesets()
        for row in table:
            f = FileBulkRegisterSerializer(data=row)
            if f.is_valid(raise_exception=True):
                # Check if file exists by path in input/output filesets already,
                # if so, update existing file
                fpath = f.validated_data["path"]
                fname = f.validated_data["name"]
                existing = self.get_files().filter(name=fname, path=fpath).first()
                if existing:
                    f = FileBulkRegisterSerializer(existing, data=row, partial=True)
                    f.is_valid(raise_exception=True)
                    f_obj = f.instance
                else:
                    f_obj = f.create(f.validated_data)
                f_obj.owner = self.owner
                if not f_obj.location and self.compute_resource is not None:
                    location = laxy_sftp_url(self, path=f"{f_obj.path}/{f_obj.name}")
                    f_obj.location = location
                if save:
                    f_obj = f.save()

                pathbits = Path(f.validated_data.get("path", "").strip("/")).parts
                if pathbits and pathbits[0] == "input":
                    self.input_files.add(f_obj)
                    in_files.append(f_obj)
                elif pathbits and pathbits[0] == "output":
                    self.output_files.add(f_obj)
                    out_files.append(f_obj)
                else:
                    logger.debug(
                        f"Not adding file {f_obj.full_path} ({f_obj.id}) "
                        f"- File paths for a Job must begin with input/ or output/"
                    )
                    # raise ValueError("File paths for a Job must begin with input/ or output/")
        return in_files, out_files
# Alternatives to a pre_save signal here might be using:
# https://github.com/kajic/django-model-changes which gives a more streamlined
# way of watching fields via pre/post_save signals
# or using properties on the model:
# https://www.stavros.io/posts/how-replace-django-model-field-property/
@receiver(pre_save, sender=Job)
def update_job_completed_time(sender, instance, raw, using, update_fields, **kwargs):
    """
    Takes actions every time a Job is saved, so changes to certain fields
    can have side effects (eg automatically setting completion time).
    """
    try:
        previous = sender.objects.get(pk=instance.pk)
    except sender.DoesNotExist:
        return
    # Stamp completion time only on the transition into a 'done' state.
    if instance.done and not previous.done and not previous.completed_time:
        instance.completed_time = timezone.now()
@receiver(pre_save, sender=Job)
def job_status_changed_event_log(sender, instance, raw, using, update_fields, **kwargs):
    """
    Creates an event log entry every time a Job is saved with a changed status.
    """
    try:
        previous = sender.objects.get(pk=instance.pk)
    except sender.DoesNotExist:
        return
    if instance.status == previous.status:
        return
    EventLog.log(
        "JOB_STATUS_CHANGED",
        message=f"Job status changed: {previous.status} → {instance.status}",
        user=instance.owner,
        obj=previous,
        extra=OrderedDict({"from": previous.status, "to": instance.status}),
    )
@receiver(pre_save, sender=Job)
def job_init_filesets(sender, instance: Job, raw, using, update_fields, **kwargs):
    """Ensure a Job always has its input/output FileSets before it is saved."""
    instance._init_filesets()
@receiver(post_save, sender=Job)
def new_job_event_log(sender, instance, created, raw, using, update_fields, **kwargs):
"""
Creates | |
level, already_processed, name_):
if self.length is not None and 'length' not in already_processed:
already_processed.append('length')
showIndent(outfile, level)
outfile.write('length = %d,\n' % (self.length,))
super(NetworkIntEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        # Write this edge's <meta> children as Python-literal constructor text.
        # NOTE: appears to be generateDS-style auto-generated code; edit with care.
        super(NetworkIntEdge, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('meta=[\n')
        level += 1
        for meta_ in self.meta:
            showIndent(outfile, level)
            outfile.write('model_.Meta(\n')
            meta_.exportLiteral(outfile, level, name_='Meta')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from an ElementTree node: attributes first,
        # then each child element, dispatched by its namespace-stripped tag.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Parse the integer 'length' attribute, then defer to the superclass.
        value = find_attr_value_('length', node)
        if value is not None and 'length' not in already_processed:
            already_processed.append('length')
            try:
                self.length = int(value)
            except ValueError, exp:  # Python 2 syntax: this module targets py2
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        super(NetworkIntEdge, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'meta':
            # Resolve the concrete Meta subclass from the xsi:type (or plain
            # 'type') attribute; any namespace prefix before ':' is discarded.
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    type_name_ = type_names_[1]
                # Look the class up in this module's globals and build it.
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <meta> element')
            self.meta.append(obj_)
# end class NetworkIntEdge
class NetworkFloatEdge(AbstractEdge):
    """A concrete network edge implementation, with float edge."""
    # NOTE: appears to be generateDS-style auto-generated code; edit with care.
    subclass = None
    superclass = AbstractEdge
    def __init__(self, about=None, meta=None, label=None, id=None, source=None, length=None, target=None, valueOf_=None):
        super(NetworkFloatEdge, self).__init__(about, meta, label, id, source, length, target, )
        # 'length' is coerced to float here (the Int variant coerces to int).
        self.length = _cast(float, length)
        if meta is None:
            self.meta = []
        else:
            self.meta = meta
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass override, if one is installed.
        if NetworkFloatEdge.subclass:
            return NetworkFloatEdge.subclass(*args_, **kwargs_)
        else:
            return NetworkFloatEdge(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessor methods.
    def get_meta(self): return self.meta
    def set_meta(self, meta): self.meta = meta
    def add_meta(self, value): self.meta.append(value)
    def insert_meta(self, index, value): self.meta[index] = value
    def get_length(self): return self.length
    def set_length(self, length): self.length = length
    def export(self, outfile, level, namespace_='', name_='NetworkFloatEdge', namespacedef_=''):
        # Serialize this element (attributes, then children) as XML.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkFloatEdge')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='NetworkFloatEdge'):
        super(NetworkFloatEdge, self).exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkFloatEdge')
        if self.length is not None and 'length' not in already_processed:
            already_processed.append('length')
            outfile.write(' length="%s"' % self.gds_format_double(self.length, input_name='length'))
    def exportChildren(self, outfile, level, namespace_='', name_='NetworkFloatEdge', fromsubclass_=False):
        for meta_ in self.get_meta():
            meta_.export(outfile, level, namespace_, name_='meta')
    def hasContent_(self):
        # True when there is anything to serialize between the element tags.
        if (
            self.meta or
            super(NetworkFloatEdge, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='NetworkFloatEdge'):
        # Emit this object as Python-literal constructor source.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.length is not None and 'length' not in already_processed:
            already_processed.append('length')
            showIndent(outfile, level)
            outfile.write('length = %e,\n' % (self.length,))
        super(NetworkFloatEdge, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(NetworkFloatEdge, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('meta=[\n')
        level += 1
        for meta_ in self.meta:
            showIndent(outfile, level)
            outfile.write('model_.Meta(\n')
            meta_.exportLiteral(outfile, level, name_='Meta')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('length', node)
        if value is not None and 'length' not in already_processed:
            already_processed.append('length')
            try:
                self.length = float(value)
            # NOTE(review): unlike NetworkIntEdge this raises a bare ValueError
            # rather than raise_parse_error(node, ...) - confirm if intended.
            except ValueError, exp:  # Python 2 syntax: this module targets py2
                raise ValueError('Bad float/double attribute (length): %s' % exp)
        super(NetworkFloatEdge, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'meta':
            # Resolve the concrete Meta subclass from the xsi:type (or plain
            # 'type') attribute; any namespace prefix before ':' is discarded.
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    type_name_ = type_names_[1]
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <meta> element')
            self.meta.append(obj_)
# end class NetworkFloatEdge
class AbstractNode(OptionalTaxonLinked):
    """The AbstractNode superclass is what concrete nodes inherit from by
    restriction. It represents a node element much like that of
    GraphML, i.e. an element that is connected into a tree by edge
    elements."""
    # NOTE: appears to be generateDS-style auto-generated code; edit with care.
    subclass = None
    superclass = OptionalTaxonLinked
    def __init__(self, about=None, meta=None, label=None, id=None, otu=None, root=False, valueOf_=None):
        super(AbstractNode, self).__init__(about, meta, label, id, otu, )
        # 'root' is coerced via _cast(bool, ...); presumably flags a tree root
        # node - TODO confirm against the schema.
        self.root = _cast(bool, root)
        pass
def factory(*args_, **kwargs_):
if AbstractNode.subclass:
return AbstractNode.subclass(*args_, **kwargs_)
else:
return AbstractNode(*args_, **kwargs_)
factory = staticmethod(factory)
def get_root(self): return self.root
def set_root(self, root): self.root = root
def export(self, outfile, level, namespace_='', name_='AbstractNode', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractNode')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
if 'xsi:type' not in already_processed:
outfile.write(' xsi:type="AbstractNode"')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='AbstractNode'):
super(AbstractNode, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AbstractNode')
if self.root is not None and 'root' not in already_processed:
already_processed.append('root')
outfile.write(' root="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.root)), input_name='root'))
def exportChildren(self, outfile, level, namespace_='', name_='AbstractNode', fromsubclass_=False):
super(AbstractNode, self).exportChildren(outfile, level, namespace_, name_, True)
def hasContent_(self):
if (
super(AbstractNode, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='AbstractNode'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.root is not None and 'root' not in already_processed:
already_processed.append('root')
showIndent(outfile, level)
outfile.write('root = %s,\n' % (self.root,))
super(AbstractNode, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(AbstractNode, self).exportLiteralChildren(outfile, level, name_)
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('root', node)
if value is not None and 'root' not in already_processed:
already_processed.append('root')
if value in ('true', '1'):
self.root = True
elif value in ('false', '0'):
self.root = False
else:
raise_parse_error(node, 'Bad boolean attribute')
super(AbstractNode, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(AbstractNode, self).buildChildren(child_, node, nodeName_, True)
pass
# end class AbstractNode
class TreeNode(AbstractNode):
    """A concrete node implementation."""
    subclass = None
    superclass = AbstractNode
    def __init__(self, about=None, meta=None, label=None, id=None, otu=None, root=False, valueOf_=None):
        super(TreeNode, self).__init__(about, meta, label, id, otu, root, )
        # own copy of the meta children list (defaults to empty, never None)
        if meta is None:
            self.meta = []
        else:
            self.meta = meta
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when present, else this class.
        if TreeNode.subclass:
            return TreeNode.subclass(*args_, **kwargs_)
        else:
            return TreeNode(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_meta(self): return self.meta
    def set_meta(self, meta): self.meta = meta
    def add_meta(self, value): self.meta.append(value)
    # NOTE(review): despite its name this replaces the element at `index`
    # rather than inserting before it (generateDS convention).
    def insert_meta(self, index, value): self.meta[index] = value
    def export(self, outfile, level, namespace_='', name_='TreeNode', namespacedef_=''):
        # Serialize as XML; unlike AbstractNode no xsi:type is forced here.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TreeNode')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # no children: self-closing element
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TreeNode'):
        super(TreeNode, self).exportAttributes(outfile, level, already_processed, namespace_, name_='TreeNode')
    def exportChildren(self, outfile, level, namespace_='', name_='TreeNode', fromsubclass_=False):
        for meta_ in self.get_meta():
            meta_.export(outfile, level, namespace_, name_='meta')
    def hasContent_(self):
        # Content exists if there are meta children or the superclass has any.
        if (
            self.meta or
            super(TreeNode, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='TreeNode'):
        # Emit this object as Python literal source (attributes then children).
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(TreeNode, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        # Literal form of children: 'meta=[ model_.Meta(...), ... ],'
        super(TreeNode, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('meta=[\n')
        level += 1
        for meta_ in self.meta:
            showIndent(outfile, level)
            outfile.write('model_.Meta(\n')
            meta_.exportLiteral(outfile, level, name_='Meta')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate from an ElementTree node: attributes, then children by tag.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(TreeNode, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Build <meta> children polymorphically via the xsi:type attribute
        # (namespace prefix stripped when present).
        if nodeName_ == 'meta':
            type_name_ = child_.attrib.get('{http://www.w3.org/2001/XMLSchema-instance}type')
            if type_name_ is None:
                type_name_ = child_.attrib.get('type')
            if type_name_ is not None:
                type_names_ = type_name_.split(':')
                if len(type_names_) == 1:
                    type_name_ = type_names_[0]
                else:
                    type_name_ = type_names_[1]
                # NOTE(review): unknown type names raise KeyError here
                class_ = globals()[type_name_]
                obj_ = class_.factory()
                obj_.build(child_)
            else:
                raise NotImplementedError(
                    'Class not implemented for <meta> element')
            self.meta.append(obj_)
# end class TreeNode
class Trees(TaxaLinked):
"""A concrete container for tree objects."""
subclass = None
superclass = TaxaLinked
def __init__(self, about=None, meta=None, label=None, id=None, otus=None, network=None, tree=None, set=None, valueOf_=None):
super(Trees, self).__init__(about, meta, label, id, otus, )
if network is None:
self.network = []
else:
self.network = network
if tree is None:
self.tree = []
else:
self.tree = tree
if set is None:
self.set = []
else:
self.set = set
def factory(*args_, **kwargs_):
if Trees.subclass:
return Trees.subclass(*args_, **kwargs_)
else:
return Trees(*args_, **kwargs_)
| |
"""
Constraint generator and surrounding utilities for marginal problem in block diagonal
form by SZ operator. antisymm_sz name comes from the fact that the alpha-alpha
and beta-beta blocks use antisymmetric basis functions as is common in the
2-RDM literature. This is the most efficient non-redundant form for the SZ
symmetry adpating available.
"""
import sys
import numpy as np
from itertools import product
from representability.dualbasis import DualBasisElement, DualBasis
def gen_trans_2rdm(gem_dim, bas_dim):
    """Build the unitary rotating the D2ab geminal basis into antisymmetric
    and symmetric components.

    Columns of the returned matrix are ordered antisymmetric-first, then
    symmetric. Also returns forward/reverse index maps for both pair bases
    (orbital pairs are 1-indexed).

    :param gem_dim: geminal dimension (bas_dim ** 2 for the full ab space)
    :param bas_dim: number of spatial basis functions
    :return: (trans_mat, D2ab_abas, D2ab_abas_rev, D2ab_sbas, D2ab_sbas_rev)
    """
    # flat (i, j) ordering of the full geminal basis -> row index
    full_pairs = list(product(range(1, bas_dim + 1), range(1, bas_dim + 1)))
    row_of = {pair: idx for idx, pair in enumerate(full_pairs[:gem_dim])}

    # strictly-lower pairs (i < j): antisymmetric combinations
    anti_pairs = [(i + 1, j + 1)
                  for i in range(bas_dim) for j in range(i + 1, bas_dim)]
    D2ab_abas = dict(enumerate(anti_pairs))
    D2ab_abas_rev = {pair: idx for idx, pair in enumerate(anti_pairs)}

    # pairs with i <= j: symmetric combinations
    sym_pairs = [(i + 1, j + 1)
                 for i in range(bas_dim) for j in range(i, bas_dim)]
    D2ab_sbas = dict(enumerate(sym_pairs))
    D2ab_sbas_rev = {pair: idx for idx, pair in enumerate(sym_pairs)}

    trans_mat = np.zeros((gem_dim, gem_dim))
    root_half = 1. / np.sqrt(2)
    col = 0
    for i, j in anti_pairs:
        trans_mat[row_of[(i, j)], col] = root_half
        trans_mat[row_of[(j, i)], col] = -root_half
        col += 1
    for i, j in sym_pairs:
        if i == j:
            # diagonal pair maps to itself with unit weight
            trans_mat[row_of[(i, j)], col] = 1.0
        else:
            trans_mat[row_of[(i, j)], col] = root_half
            trans_mat[row_of[(j, i)], col] = root_half
        col += 1
    return trans_mat, D2ab_abas, D2ab_abas_rev, D2ab_sbas, D2ab_sbas_rev
def _trace_map(tname, dim, normalization):
    """Generic trace constraint Tr[D2] = normalization over the i < j pair basis."""
    dbe = DualBasisElement()
    for a in range(dim):
        for b in range(a + 1, dim):
            dbe.add_element(tname, (a, b, a, b), 1.0)
    dbe.dual_scalar = normalization
    return dbe
def trace_d2_aa(dim, Na):
    """Trace constraint on the aa block: Tr[D2aa] = Na * (Na - 1).

    The factor of 2 accounts for the antisymmetric i < j pair basis.
    """
    dbe = DualBasisElement()
    for a in range(dim):
        for b in range(a + 1, dim):
            dbe.add_element('cckk_aa', (a, b, a, b), 2.0)
    dbe.dual_scalar = Na * (Na - 1)
    return dbe
def trace_d2_bb(dim, Nb):
    """Trace constraint on the bb block: Tr[D2bb] = Nb * (Nb - 1).

    The factor of 2 accounts for the antisymmetric i < j pair basis.
    """
    dbe = DualBasisElement()
    for a in range(dim):
        for b in range(a + 1, dim):
            dbe.add_element('cckk_bb', (a, b, a, b), 2.0)
    dbe.dual_scalar = Nb * (Nb - 1)
    return dbe
def trace_d2_ab(dim, Na, Nb):
    """Trace constraint on the ab block: Tr[D2ab] = Na * Nb (full pair basis)."""
    dbe = DualBasisElement()
    for a in range(dim):
        for b in range(dim):
            dbe.add_element('cckk_ab', (a, b, a, b), 1.0)
    dbe.dual_scalar = Na * Nb
    return dbe
def s_representability_d2ab(dim, N, M, S):
    """
    Constraint for S-representability
    PHYSICAL REVIEW A 72, 052505 2005

    Sums the exchange-type elements D2ab[i, j, j, i] and pins the total to
    N/2 + M^2 - S(S+1).

    :param dim: number of spatial basis functions
    :param N: Total number of electrons
    :param M: Sz expected value
    :param S: S(S + 1) is eigenvalue of S^{2}
    :return: a single DualBasisElement
    """
    dbe = DualBasisElement()
    for a in range(dim):
        for b in range(dim):
            dbe.add_element('cckk_ab', (a, b, b, a), 1.0)
    dbe.dual_scalar = N / 2.0 + M ** 2 - S * (S + 1)
    return dbe
def s_representability_d2ab_to_d2bb(dim):
    """
    Constrain the antisymmetric part of the alpha-beta 2-RDM to equal the
    beta-beta component, which holds for singlet states.

    The ab geminal space is rotated by the unitary from gen_trans_2rdm so its
    first dim*(dim-1)/2 columns span the antisymmetric component; each rotated
    element is then equated with the corresponding cckk_bb element.

    :param dim: number of spatial basis functions
    :return: DualBasis with one element per (r, s) pair (r <= s) of the
             antisymmetric block
    """
    # removed unused locals `sma`/`sms` (dim*(dim-1)//2 and dim*(dim+1)//2)
    uadapt, d2ab_abas, d2ab_abas_rev, d2ab_sbas, d2ab_sbas_rev = \
        gen_trans_2rdm(dim**2, dim)

    # index maps: full ab geminal basis (p, q) -> flat index, and the
    # antisymmetric p < q basis used by the aa/bb blocks
    d2ab_bas = {}
    d2aa_bas = {}
    cnt_ab = 0
    cnt_aa = 0
    for p, q in product(range(dim), repeat=2):
        d2ab_bas[(p, q)] = cnt_ab
        cnt_ab += 1
        if p < q:
            d2aa_bas[(p, q)] = cnt_aa
            cnt_aa += 1
    d2ab_rev = dict(zip(d2ab_bas.values(), d2ab_bas.keys()))
    d2aa_rev = dict(zip(d2aa_bas.values(), d2aa_bas.keys()))
    assert uadapt.shape == (int(dim)**2, int(dim)**2)

    dbe_list = []
    for r, s in product(range(dim * (dim - 1) // 2), repeat=2):
        if r < s:
            dbe = DualBasisElement()
            # lower triangle
            i, j = d2aa_rev[r]
            k, l = d2aa_rev[s]
            # bb element should equal the antisymmetric-rotated ab element
            dbe.add_element('cckk_bb', (i, j, k, l), -0.5)
            coeff_mat = uadapt[:, [r]] @ uadapt[:, [s]].T
            for p, q in product(range(coeff_mat.shape[0]), repeat=2):
                if not np.isclose(coeff_mat[p, q], 0):
                    ii, jj = d2ab_rev[p]
                    kk, ll = d2ab_rev[q]
                    dbe.add_element('cckk_ab', (ii, jj, kk, ll), 0.5 * coeff_mat[p, q])

            # upper triangle: Hermitian conjugate of the same constraint
            dbe.add_element('cckk_bb', (k, l, i, j), -0.5)
            coeff_mat = uadapt[:, [s]] @ uadapt[:, [r]].T
            for p, q in product(range(coeff_mat.shape[0]), repeat=2):
                if not np.isclose(coeff_mat[p, q], 0):
                    ii, jj = d2ab_rev[p]
                    kk, ll = d2ab_rev[q]
                    dbe.add_element('cckk_ab', (ii, jj, kk, ll),
                                    0.5 * coeff_mat[p, q])
            dbe.simplify()
            dbe_list.append(dbe)
        elif r == s:
            i, j = d2aa_rev[r]
            k, l = d2aa_rev[s]
            dbe = DualBasisElement()
            # diagonal: single element, no 0.5 symmetrization factor needed
            dbe.add_element('cckk_bb', (i, j, k, l), -1.0)
            coeff_mat = uadapt[:, [r]] @ uadapt[:, [s]].T
            for p, q in product(range(coeff_mat.shape[0]), repeat=2):
                if not np.isclose(coeff_mat[p, q], 0):
                    ii, jj = d2ab_rev[p]
                    kk, ll = d2ab_rev[q]
                    dbe.add_element('cckk_ab', (ii, jj, kk, ll), coeff_mat[p, q])
            dbe.simplify()
            dbe_list.append(dbe)

    return DualBasis(elements=dbe_list)
def s_representability_d2ab_to_d2aa(dim):
    """
    Constrain the antisymmetric part of the alpha-beta 2-RDM to equal the
    alpha-alpha component, which holds for singlet states.

    Mirror of s_representability_d2ab_to_d2bb but constraining 'cckk_aa'.

    :param dim: number of spatial basis functions
    :return: DualBasis with one element per (r, s) pair (r <= s) of the
             antisymmetric block
    """
    # removed unused locals `sma`/`sms` (dim*(dim-1)//2 and dim*(dim+1)//2)
    uadapt, d2ab_abas, d2ab_abas_rev, d2ab_sbas, d2ab_sbas_rev = \
        gen_trans_2rdm(dim**2, dim)

    # index maps: full ab geminal basis (p, q) -> flat index, and the
    # antisymmetric p < q basis used by the aa/bb blocks
    d2ab_bas = {}
    d2aa_bas = {}
    cnt_ab = 0
    cnt_aa = 0
    for p, q in product(range(dim), repeat=2):
        d2ab_bas[(p, q)] = cnt_ab
        cnt_ab += 1
        if p < q:
            d2aa_bas[(p, q)] = cnt_aa
            cnt_aa += 1
    d2ab_rev = dict(zip(d2ab_bas.values(), d2ab_bas.keys()))
    d2aa_rev = dict(zip(d2aa_bas.values(), d2aa_bas.keys()))
    assert uadapt.shape == (int(dim)**2, int(dim)**2)

    dbe_list = []
    for r, s in product(range(dim * (dim - 1) // 2), repeat=2):
        if r < s:
            dbe = DualBasisElement()
            # lower triangle
            i, j = d2aa_rev[r]
            k, l = d2aa_rev[s]
            # aa element should equal the antisymmetric-rotated ab element
            dbe.add_element('cckk_aa', (i, j, k, l), -0.5)
            coeff_mat = uadapt[:, [r]] @ uadapt[:, [s]].T
            for p, q in product(range(coeff_mat.shape[0]), repeat=2):
                if not np.isclose(coeff_mat[p, q], 0):
                    ii, jj = d2ab_rev[p]
                    kk, ll = d2ab_rev[q]
                    dbe.add_element('cckk_ab', (ii, jj, kk, ll), 0.5 * coeff_mat[p, q])

            # upper triangle: Hermitian conjugate of the same constraint
            dbe.add_element('cckk_aa', (k, l, i, j), -0.5)
            coeff_mat = uadapt[:, [s]] @ uadapt[:, [r]].T
            for p, q in product(range(coeff_mat.shape[0]), repeat=2):
                if not np.isclose(coeff_mat[p, q], 0):
                    ii, jj = d2ab_rev[p]
                    kk, ll = d2ab_rev[q]
                    dbe.add_element('cckk_ab', (ii, jj, kk, ll),
                                    0.5 * coeff_mat[p, q])
            dbe.simplify()
            dbe_list.append(dbe)
        elif r == s:
            i, j = d2aa_rev[r]
            k, l = d2aa_rev[s]
            dbe = DualBasisElement()
            # diagonal: single element, no 0.5 symmetrization factor needed
            dbe.add_element('cckk_aa', (i, j, k, l), -1.0)
            coeff_mat = uadapt[:, [r]] @ uadapt[:, [s]].T
            for p, q in product(range(coeff_mat.shape[0]), repeat=2):
                if not np.isclose(coeff_mat[p, q], 0):
                    ii, jj = d2ab_rev[p]
                    kk, ll = d2ab_rev[q]
                    dbe.add_element('cckk_ab', (ii, jj, kk, ll), coeff_mat[p, q])
            dbe.simplify()
            dbe_list.append(dbe)

    return DualBasis(elements=dbe_list)
def sz_representability(dim, M):
    """
    Constraint for S_z-representability
    Helgaker, Jorgensen, Olsen. Sz is one-body RDM constraint

    Pins (Tr[D1a] - Tr[D1b]) / 2 to the expected Sz value.

    :param dim: number of spatial basis functions
    :param M: Sz expected value
    :return: a single DualBasisElement
    """
    dbe = DualBasisElement()
    for p in range(dim):
        dbe.add_element('ck_a', (p, p), 0.5)
        dbe.add_element('ck_b', (p, p), -0.5)
    dbe.dual_scalar = M
    return dbe
def d2ab_d1a_mapping(dim, Nb):
    """
    Map the spin-adapted alpha-beta 2-RDM onto the alpha 1-RDM via
    `_contraction_base` (defined elsewhere in this module).

    :param dim: number of spatial basis functions
    :param Nb: number of beta electrons (contraction normalization)
    :return: presumably a DualBasis of contraction constraints -- matches the
             sibling d2*_d1* functions; confirm against _contraction_base
    """
    return _contraction_base('cckk_ab', 'ck_a', dim, Nb, 0)
def d2ab_d1b_mapping(dim, Na):
    """
    Map the spin-adapted alpha-beta 2-RDM onto the beta 1-RDM: contracting
    D2ab over the alpha index must give Na * D1b (symmetrized over (i, j)).

    :param dim: number of spatial basis functions
    :param Na: number of alpha electrons (contraction normalization)
    :return: DualBasis with one element per i <= j pair
    """
    db = DualBasis()
    for i in range(dim):
        for j in range(i, dim):
            dbe = DualBasisElement()
            for r in range(dim):
                # symmetrized contraction over the alpha (first) index
                dbe.add_element('cckk_ab', (r, i, r, j), 0.5)
                dbe.add_element('cckk_ab', (r, j, r, i), 0.5)
            dbe.add_element('ck_b', (i, j), -0.5 * Na)
            dbe.add_element('ck_b', (j, i), -0.5 * Na)
            dbe.dual_scalar = 0
            # dbe.simplify()
            db += dbe
    return db
def d2aa_d1a_mapping(dim, Na):
"""
Map the d2_spin-adapted 2-RDM to the D1 rdm
:param Nb: number of beta electrons
:param dim:
:return:
"""
db = DualBasis()
for i in range(dim):
for j in range(i, dim):
dbe = DualBasisElement()
for r in range(dim):
# Not in the basis because always zero
if i == r or j == r:
continue
else:
sir = 1 if i < r else -1
sjr = 1 if j < r else -1
ir_pair = (i, r) if i < r else (r, i)
jr_pair = (j, r) if j < r else (r, j)
if i == j:
dbe.add_element('cckk_aa', (ir_pair[0], ir_pair[1], jr_pair[0], jr_pair[1]), sir * sjr)
else:
dbe.add_element('cckk_aa', (ir_pair[0], | |
<gh_stars>10-100
from discord.ext import commands
import itertools
from views.poll import Poll
# ----------------------------------------------------------------------------------------------------------------
# Bot Cog
# ----------------------------------------------------------------------------------------------------------------
# Commands related to the current instance of MizaBOT
# ----------------------------------------------------------------------------------------------------------------
class Bot(commands.Cog):
"""MizaBot commands."""
    def __init__(self, bot):
        self.bot = bot
        self.color = 0xd12e57  # embed accent color shared by every command in this cog
    @commands.command(no_pm=True)
    @commands.cooldown(1, 500, commands.BucketType.user)
    async def invite(self, ctx):
        """Get the MizaBOT invite link"""
        if 'invite' not in self.bot.data.save:
            # invite settings were never configured on this instance
            msg = await ctx.reply(embed=self.bot.util.embed(title="Invite Error", description="Invitation settings aren't set, hence the bot can't be invited.\nIf you are the server owner, check the `setInvite` command", timestamp=self.bot.util.timestamp(), color=self.color))
            await self.bot.util.clean(ctx, msg, 45)
        elif self.bot.data.save['invite']['state'] == False or (len(self.bot.guilds) - len(self.bot.data.save['guilds']['pending'])) >= self.bot.data.save['invite']['limit']:
            # invitations disabled, or the accepted-server limit is reached
            # (pending guilds don't count toward the limit)
            msg = await ctx.reply(embed=self.bot.util.embed(title="Invite Error", description="Invitations are currently closed.", timestamp=self.bot.util.timestamp(), color=self.color))
            await self.bot.util.clean(ctx, msg, 45)
        else:
            # log the request in the debug channel, then DM the details to the requester
            await self.bot.send('debug', embed=self.bot.util.embed(title="Invite Request", description="{} ▫️ `{}`".format(ctx.author.name, ctx.author.id), thumbnail=ctx.author.display_avatar, timestamp=self.bot.util.timestamp(), color=self.color))
            await ctx.author.send(embed=self.bot.util.embed(title=ctx.guild.me.name, description="{}\nCurrently only servers of 30 members or more can be added.\nYou'll have to wait for my owner approval (Your server owner will be notified if accepted).\nMisuses of this link will result in a server-wide ban.".format(self.bot.data.config['strings']["invite()"]), thumbnail=ctx.guild.me.display_avatar, timestamp=self.bot.util.timestamp(), color=self.color))
    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['bug', 'report', 'bug_report'])
    @commands.cooldown(1, 10, commands.BucketType.guild)
    async def bugReport(self, ctx, *, terms : str):
        """Send a bug report (or your love confessions) to the author"""
        if len(terms) == 0:
            return
        # forward the report to the owner's debug channel with the reporter's identity
        await self.bot.send('debug', embed=self.bot.util.embed(title="Bug Report", description=terms, footer="{} ▫️ User ID: {}".format(ctx.author.name, ctx.author.id), thumbnail=ctx.author.display_avatar, color=self.color))
        #await self.bot.util.react(ctx.message, '✅') # white check mark
        final_msg = await ctx.reply(embed=self.bot.util.embed(title="Information", description="**Development is on hold for an undetermined amount of time following [discord.py death](https://gist.github.com/Rapptz/4a2f62751b9600a31a0d3c78100287f1)**, no new features will be added until an alternative is found.", footer="Your report has still been transmitted to my owner", color=self.color))
        await self.bot.util.clean(ctx, final_msg, 40) # TODO
    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['source'])
    @commands.cooldown(1, 20, commands.BucketType.guild)
    async def github(self, ctx):
        """Post the link to the bot code source"""
        # reply is auto-deleted after 25 seconds to keep channels clean
        final_msg = await ctx.reply(embed=self.bot.util.embed(title=self.bot.description.splitlines()[0], description="Code source [here](https://github.com/MizaGBF/MizaBOT)\nCommand list available [here](https://mizagbf.github.io/MizaBOT/)", thumbnail=ctx.guild.me.display_avatar, color=self.color))
        await self.bot.util.clean(ctx, final_msg, 25)
    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['mizabot'])
    @commands.cooldown(1, 10, commands.BucketType.guild)
    async def status(self, ctx):
        """Post the bot status"""
        # statusString() builds the uptime/statistics text; reply auto-deletes after 40s
        final_msg = await ctx.reply(embed=self.bot.util.embed(title="{} is Ready".format(self.bot.user.display_name), description=self.bot.util.statusString(), thumbnail=self.bot.user.display_avatar, timestamp=self.bot.util.timestamp(), color=self.color))
        await self.bot.util.clean(ctx, final_msg, 40)
@commands.command(no_pm=True, cooldown_after_parsing=True)
@commands.cooldown(1, 10, commands.BucketType.guild)
async def changelog(self, ctx):
"""Post the bot changelog"""
msg = ""
for c in self.bot.changelog:
msg += "▫️ {}\n".format(c)
if msg != "":
final_msg = await ctx.send(embed=self.bot.util.embed(title="{} ▫️ v{}".format(ctx.guild.me.display_name, self.bot.version), description="**Changelog**\n" + msg, thumbnail=ctx.guild.me.display_avatar, color=self.color))
await self.bot.util.clean(ctx, final_msg, 40)
    @commands.command(no_pm=True, cooldown_after_parsing=True, aliases=['survey'])
    @commands.cooldown(1, 200, commands.BucketType.guild)
    async def poll(self, ctx, duration : int, *, poll_str : str):
        """Make a poll
        duration is in seconds (min 60, max 500)
        poll_str format is: `title;choice1;choice2;...;choiceN`"""
        try:
            if poll_str == "": raise Exception('Please specify what to poll for\nFormat: `duration title;choice1;choice2;...;choiceN`')
            splitted = poll_str.split(';')
            if len(splitted) < 2: raise Exception('Specify at least a poll title and two choices\nFormat: `duration title;choice1;choice2;...;choiceN`')
            view = Poll(self.bot, ctx.author, self.color, splitted[0], splitted[1:])
            # clamp duration to the documented [60, 500] second range
            if duration < 60: duration = 60
            elif duration > 500: duration = 500
            msg_to_edit = await ctx.send(embed=self.bot.util.embed(author={'name':'{} started a poll'.format(ctx.author.display_name), 'icon_url':ctx.author.display_avatar}, title=splitted[0], description="{} seconds remaining to vote".format(duration), color=self.color))
            # the vote buttons live on a separate zero-width-space message,
            # deleted once run_poll() returns (i.e. when the poll ends)
            msg_view = await ctx.send('\u200b', view=view)
            await view.run_poll(duration, msg_to_edit, ctx.channel)
            await msg_view.delete()
        except Exception as e:
            # user-facing input errors are surfaced in an embed, auto-cleaned after 120s
            msg = await ctx.send(embed=self.bot.util.embed(title="Poll error", description="{}".format(e), color=self.color))
            await self.bot.util.clean(ctx, msg, 120)
"""get_category()
Retrieve a command category. Used for the help.
Parameters
----------
command: The command object
no_category: Default string if no category found
Returns
------
str: Category name and description or the content of no_category if no category
"""
def get_category(self, command, *, no_category=""):
cog = command.cog
return ('**' + cog.qualified_name + '** :white_small_square: ' + cog.description) if cog is not None else no_category
"""predicate()
Check if the command can run in the current context. Used for the help.
Parameters
----------
ctx: The command context
cmd: The command object
Returns
------
bool: True if it can runs, False if it can't
"""
async def predicate(self, ctx, cmd):
try:
return await cmd.can_run(ctx)
except Exception:
return False
"""filter_commands()
Smaller implementation of filter_commands() from discord.py help. Used for the help.
Only allowed commands in the current context can pass the filter.
Parameters
----------
ctx: The command context
cmds: List of commands
Returns
------
list: List of sorted and filtered commands
"""
async def filter_commands(self, ctx, cmds):
iterator = filter(lambda c: not c.hidden, cmds)
ret = []
for cmd in iterator:
valid = await self.predicate(ctx, cmd)
if valid:
ret.append(cmd)
ret.sort(key=self.get_category)
return ret
"""get_command_signature()
Implementation of get_command_signature() from discord.py help. Used for the help.
Parameters
----------
ctx: The command context
command: The command object
Returns
------
str: The command signature
"""
def get_command_signature(self, ctx, command):
parent = command.parent
entries = []
while parent is not None:
if not parent.signature or parent.invoke_without_command:
entries.append(parent.name)
else:
entries.append(parent.name + ' ' + parent.signature)
parent = parent.parent
parent_sig = ' '.join(reversed(entries))
if len(command.aliases) > 0:
aliases = '|'.join(command.aliases)
fmt = f'[{command.name}|{aliases}]'
if parent_sig:
fmt = parent_sig + ' ' + fmt
alias = fmt
else:
alias = command.name if not parent_sig else parent_sig + ' ' + command.name
return f'{ctx.clean_prefix}{alias} {command.signature}'
"""search_help()
Search the bot categories and help for a match. Used for the help.
Parameters
----------
ctx: The command context
terms: The search string
Returns
------
list: List of matches, a match being a list of length 2 containing an ID (0 for category, 1 for command) and the matched object (either a Cog or Command)
"""
async def search_help(self, ctx, terms):
flags = []
t = terms.lower()
# searching category match
for key in self.bot.cogs:
if t == self.bot.cogs[key].qualified_name.lower():
return [[0, self.bot.cogs[key]]]
elif t in self.bot.cogs[key].qualified_name.lower():
flags.append([0, self.bot.cogs[key]])
elif t in self.bot.cogs[key].description.lower():
flags.append([0, self.bot.cogs[key]])
# searching command match
for cmd in self.bot.commands:
if not await self.predicate(ctx, cmd):
continue
if t == cmd.name.lower():
return [[1, cmd]]
elif t in cmd.name.lower() or t in cmd.help.lower():
flags.append([1, cmd])
else:
for al in cmd.aliases:
if t == al.lower() or t in al.lower():
flags.append([1, cmd])
return flags
"""get_cog_help()
Send the cog detailed help to the user via DM. Used for the help.
Parameters
----------
ctx: The command context
cog: The cog object to output via DM
Returns
------
discord.Message: Error message or None if no errors
"""
async def get_cog_help(self, ctx, cog):
try:
await self.bot.util.react(ctx.message, '📬')
except:
return await ctx.reply(embed=self.bot.util.embed(title="Help Error", description="Unblock me to receive the Help"))
filtered = await self.filter_commands(ctx, cog.get_commands()) # sort
fields = []
for c in filtered:
if c.short_doc == "": fields.append({'name':"{} ▫ {}".format(c.name, self.get_command_signature(ctx, c)), 'value':"No description"})
else: fields.append({'name':"{} ▫ {}".format(c.name, self.get_command_signature(ctx, c)), 'value':c.short_doc})
if len(str(fields)) > 5800 or len(fields) > 24: # embeds have a 6000 and 25 fields characters limit, I send and make a new embed if needed
try:
await ctx.author.send(embed=self.bot.util.embed(title="{} **{}** Category".format(self.bot.emote.get('mark'), cog.qualified_name), description=cog.description, fields=fields, color=cog.color)) # author.send = dm
fields = []
except:
msg = await ctx.reply(embed=self.bot.util.embed(title="Help Error", description="I can't send you a direct message"))
await self.bot.util.unreact(ctx.message, '📬')
return msg
if len(fields) > 0:
try:
await ctx.author.send(embed=self.bot.util.embed(title="{} **{}** Category".format(self.bot.emote.get('mark'), cog.qualified_name), description=cog.description, fields=fields, color=cog.color)) # author.send = dm
except:
msg = await ctx.reply(embed=self.bot.util.embed(title="Help Error", description="I can't send you a direct message"))
await self.bot.util.unreact(ctx.message, '📬')
return msg
await self.bot.util.unreact(ctx.message, '📬')
return None
"""default_help()
Print the default help when no search terms is specifieed. Used for the help.
Parameters
----------
ctx: The command context
Returns
------
discord.Message
"""
async def default_help(self, ctx):
me = ctx.author.guild.me # bot own user infos
# get command categories
filtered = await self.filter_commands(ctx, self.bot.commands) # sort all category and commands
to_iterate = itertools.groupby(filtered, key=self.get_category)
# categories to string
cats = ""
for category, coms in to_iterate:
if category != "":
cats += "{}\n".format(category)
return await ctx.reply(embed=self.bot.util.embed(title=me.name + " Help", description=self.bot.description + "\n\nUse `{}help <command_name>` or `{}help <category_name>` to get more informations\n**Categories:**\n".format(ctx.message.content[0], ctx.message.content[0]) + cats, thumbnail=me.display_avatar, color=self.color))
"""category_help()
Print the detailed category help. Used for the help.
Wrapper for get_cog_help(), might change it later.
Parameters
----------
terms: The search string
ctx: The command context
cog: The cog object
Returns
------
discord.Message
"""
async def category_help(self, terms, ctx, cog):
me = ctx.author.guild.me # bot own user infos
msg = await self.get_cog_help(ctx, | |
"""
Author: <NAME>
Created: 3/11/2020 9:04 AM
"""
from Climate_Shocks.vcsn_pull import vcsn_pull_single_site
from Climate_Shocks.note_worthy_events.simple_soil_moisture_pet import calc_sma_smd_historical, calc_smd_monthly
from Climate_Shocks.get_past_record import get_restriction_record, get_vcsn_record
from Pasture_Growth_Modelling.initialisation_support.pasture_growth_deficit import calc_past_pasture_growth_anomaly
import ksl_env
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import itertools
import sys
event_def_dir = sys.argv[1]  # output directory for the generated event-definition CSVs
print(event_def_dir)
vcsn_version = sys.argv[2]  # which VCSN record to use: 'trended', 'detrended2'
print(vcsn_version)
if vcsn_version not in ['trended', 'detrended2']:
    raise ValueError('incorrect value for vcsn_version: {}'.format(vcsn_version, ))
if not os.path.exists(event_def_dir):
    os.makedirs(event_def_dir)
# past pasture-growth anomaly records, re-indexed by (month, year) so the
# event indices computed below can be joined straight onto them
irrigated_pga = calc_past_pasture_growth_anomaly('irrigated', site='eyrewell').reset_index()
irrigated_pga.loc[:, 'year'] = irrigated_pga.date.dt.year
irrigated_pga = irrigated_pga.set_index(['month', 'year'])
dryland_pga = calc_past_pasture_growth_anomaly('dryland').reset_index()
dryland_pga.loc[:, 'year'] = dryland_pga.date.dt.year
dryland_pga = dryland_pga.set_index(['month', 'year'])
def prob(x):
    """Fraction of truthy entries in ``x`` (NaNs are ignored in the sum)."""
    return np.nansum(x) / len(x)
def add_pga_from_idx(idx):
    """
    Describe the pasture-growth anomaly (pga_norm) per month for the given
    (month, year) index, for both the irrigated and dryland records.

    :param idx: pandas (month, year) index selecting event occurrences
    :return: DataFrame of per-month describe() stats, columns suffixed
             '_irr' / '_dry'
    """
    idx = idx.dropna()
    irr_temp = irrigated_pga.loc[idx].reset_index()
    irr_temp2 = irr_temp.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
    dry_temp = dryland_pga.loc[idx].reset_index()
    dry_temp2 = dry_temp.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
    temp3 = pd.merge(irr_temp2, dry_temp2, left_index=True, right_index=True, suffixes=('_irr', '_dry'))
    return pd.DataFrame(temp3)
def add_pga(grouped_data, sim_keys, outdata):
    """
    Join per-month pasture-growth-anomaly describe() statistics onto
    ``outdata`` for every event key, and collect the (month, year)
    combinations that triggered each event.

    :param grouped_data: monthly dataframe with 'month'/'year' columns and one
                         boolean column per key in sim_keys
    :param sim_keys: event-column names to process
    :param outdata: (key, statistic) multi-column dataframe to augment
    :return: (augmented outdata, dataframe of triggering (month, year)s per key)
    """
    grouped_data = grouped_data.set_index(['month', 'year'])
    years = {}
    for k in sim_keys:
        # boolean mask selects only rows where the event fired
        idx = grouped_data.loc[grouped_data.loc[:, k], k]
        assert idx.all()
        idx = idx.index
        years[k] = idx.values
        temp_irr = irrigated_pga.loc[idx].reset_index()
        temp_irr2 = temp_irr.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
        temp_dry = dryland_pga.loc[idx].reset_index()
        temp_dry2 = temp_dry.loc[:, ['month', 'pga_norm']].groupby('month').describe().loc[:, 'pga_norm']
        for k2 in temp_irr2:
            outdata.loc[:, (k, 'pga_irr_{}'.format(k2))] = temp_irr2.loc[:, k2]
            outdata.loc[:, (k, 'pga_dry_{}'.format(k2))] = temp_dry2.loc[:, k2]
    # presumably 48 years * 12 months (+1 slack) -- confirm against the record
    # length; shorter columns are padded with NaN
    mx_years = 48 * 12 + 1
    out_years = pd.DataFrame(index=range(mx_years), columns=sim_keys)
    for k in sim_keys:
        missing_len = mx_years - len(years[k])
        out_years.loc[:, k] = np.concatenate((years[k], np.zeros(missing_len) * np.nan))
    outdata = outdata.sort_index(axis=1, level=0, sort_remaining=False)
    return outdata, out_years
def calc_dry_recurance_monthly_smd():
    """
    Calculate dry-event recurrence probabilities from the monthly
    soil-moisture deficit (SMD) and anomaly (SMA), and write the raw,
    per-month, probability and triggering-year CSVs into event_def_dir.
    """
    data = get_vcsn_record(vcsn_version)
    t = calc_smd_monthly(rain=data.rain, pet=data.pet, dates=data.index)
    data.loc[:, 'smd'] = t
    # anomaly = daily SMD minus the day-of-year mean SMD
    t = data.loc[:, ['doy', 'smd']].groupby('doy').mean().to_dict()
    data.loc[:, 'sma'] = data.loc[:, 'smd'] - data.loc[:, 'doy'].replace(t['smd'])
    data.reset_index(inplace=True)
    data.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_raw.csv'))
    # candidate event definitions: day counts as 'dry' when both thresholds hit
    smd_thresholds = [0]
    sma_thresholds = [-5, -10, -12, -15, -17, -20]
    ndays = [5, 7, 10, 14]
    out_keys = []
    for smd_t, sma_t in itertools.product(smd_thresholds, sma_thresholds):
        k = 'd_smd{:03d}_sma{:02d}'.format(smd_t, sma_t)
        data.loc[:, k] = (data.loc[:, 'smd'] <= smd_t) & (data.loc[:, 'sma'] <= sma_t)
        out_keys.append(k)
    # sum dry days per (month, year)
    grouped_data = data.loc[:, ['month', 'year',
                                'smd', 'sma'] + out_keys].groupby(['month', 'year']).sum().reset_index()
    grouped_data.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_monthly_data.csv'))
    grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
                                                                                       'monthly_smd_dry_monthly_data_desc.csv'))
    # a month is an 'event' when it has at least nd dry days
    out_keys2 = []
    for nd in ndays:
        for k in out_keys:
            ok = '{:02d}d_{}'.format(nd, k)
            out_keys2.append(ok)
            grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
    out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
    # drop definitions that always or never fire (sum of 48 record years or 0)
    drop_keys = []
    for k in out_keys2:
        temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
        if temp:
            drop_keys.append(k)
    out = out.drop(columns=drop_keys)
    out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
    # express probability columns (everything but sum/count) as percentages
    t = pd.Series([' '.join(e) for e in out.columns])
    idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
    out.loc[:, out.columns[idx]] *= 100
    out.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_prob.csv'), float_format='%.1f%%')
    out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_prob_only_prob.csv'),
                                        float_format='%.1f%%')
    out_years.to_csv(os.path.join(event_def_dir, 'monthly_smd_dry_years.csv'))
def calc_dry_recurance():
    """Derive dry-event statistics from the historical (daily) SMD model.

    Same pipeline as :func:`calc_dry_recurance_monthly_smd` but the SMD/SMA
    come from ``calc_sma_smd_historical`` and the threshold pairs are zipped
    (paired element-wise) rather than taken as a product.  Outputs a family
    of ``dry_*`` CSVs into ``event_def_dir``.

    NOTE(review): depends on module-level ``get_vcsn_record``,
    ``vcsn_version``, ``calc_sma_smd_historical``, ``prob``, ``add_pga`` and
    ``event_def_dir``.
    """
    data = get_vcsn_record(vcsn_version).reset_index()
    # 150 and 1 are passed positionally -- presumably AWHC and a model flag;
    # TODO confirm against calc_sma_smd_historical's signature
    temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
    trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
    data.loc[:, trans_cols] = temp.loc[:, trans_cols]
    data.to_csv(os.path.join(event_def_dir, 'dry_raw.csv'))
    # zip() pairs the thresholds: (0,-20), (-110,0), (-110,-20)
    smd_thresholds = [0, -110, -110]
    sma_thresholds = [-20, 0, -20]
    ndays = [5, 7, 10, 14]
    out_keys = []
    for smd_t, sma_t in zip(smd_thresholds, sma_thresholds):
        k = 'd_smd{:03d}_sma{:02d}'.format(smd_t, sma_t)
        data.loc[:, k] = (data.loc[:, 'smd'] <= smd_t) & (data.loc[:, 'sma'] <= sma_t)
        out_keys.append(k)
    # count dry days per (month, year)
    grouped_data = data.loc[:, ['month', 'year',
                                'smd', 'sma'] + out_keys].groupby(['month', 'year']).sum().reset_index()
    grouped_data.to_csv(os.path.join(event_def_dir, 'dry_monthly_data.csv'))
    grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
                                                                                        'dry_monthly_data_desc.csv'))
    out_keys2 = []
    for nd in ndays:
        for k in out_keys:
            ok = '{:02d}d_{}'.format(nd, k)
            out_keys2.append(ok)
            grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
    out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
    # remove thresholds that are always or never exceeded
    # (48 presumably equals the number of record years -- TODO confirm)
    drop_keys = []
    for k in out_keys2:
        temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
        if temp:
            drop_keys.append(k)
    out = out.drop(columns=drop_keys)
    out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
    # convert probability columns to percent (skip 'sum'/'count')
    t = pd.Series([' '.join(e) for e in out.columns])
    idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
    out.loc[:, out.columns[idx]] *= 100
    out.to_csv(os.path.join(event_def_dir, 'dry_prob.csv'), float_format='%.1f%%')
    out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'dry_prob_only_prob.csv'), float_format='%.1f%%')
    out_years.to_csv(os.path.join(event_def_dir, 'dry_years.csv'))
def calc_wet_recurance():
    """Derive wet-event statistics from rain and the historical SMD model.

    A day is "wet" when rain >= a rain threshold AND smd >= an SMD threshold.
    Also computes monthly rain anomalies (vs. month mean and month median)
    and writes the ``smd_wet_*`` family of CSVs into ``event_def_dir``.

    NOTE(review): depends on module-level ``get_vcsn_record``,
    ``vcsn_version``, ``calc_sma_smd_historical``, ``prob``, ``add_pga``,
    ``plt`` and ``event_def_dir``.
    """
    data = get_vcsn_record(vcsn_version).reset_index()
    temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
    trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
    data.loc[:, trans_cols] = temp.loc[:, trans_cols]
    # dead debugging branch: flip to True to eyeball the input series
    temp = False
    if temp: # just to look at some plots
        fig, (ax, ax2, ax3) = plt.subplots(3, sharex=True)
        ax.plot(data.date, data.smd)
        ax2.plot(data.date, data.drain)
        ax3.plot(data.date, data.rain)
        plt.show()
    data.to_csv(os.path.join(event_def_dir, 'smd_wet_raw.csv'))
    # all rain/SMD threshold combinations are explored
    thresholds_rain = [5, 3, 1, 0]
    thresholds_smd = [0, -5, -10]
    ndays = [7, 10, 14]
    out_keys = []
    for t_r, t_smd in itertools.product(thresholds_rain, thresholds_smd):
        k = 'd_r{}_smd{}'.format(t_r, t_smd)
        data.loc[:, k] = (data.loc[:, 'rain'] >= t_r) & (data.loc[:, 'smd'] >= t_smd)
        out_keys.append(k)
    grouped_data = data.loc[:, ['month', 'year', 'rain'] + out_keys].groupby(['month', 'year']).sum().reset_index()
    # make montly restriction anaomaloy - mean
    # (monthly rain anomaly: month total minus that calendar month's mean)
    temp = grouped_data.groupby('month').mean().loc[:, 'rain'].to_dict()
    grouped_data.loc[:, 'rain_an_mean'] = grouped_data.loc[:, 'month']
    grouped_data = grouped_data.replace({'rain_an_mean': temp})
    grouped_data.loc[:, 'rain_an_mean'] = grouped_data.loc[:, 'rain'] - grouped_data.loc[:, 'rain_an_mean']
    # make montly restriction anaomaloy - median
    # (same as above but relative to the calendar month's median)
    temp = grouped_data.groupby('month').median().loc[:, 'rain'].to_dict()
    grouped_data.loc[:, 'rain_an_med'] = grouped_data.loc[:, 'month']
    grouped_data = grouped_data.replace({'rain_an_med': temp})
    grouped_data.loc[:, 'rain_an_med'] = grouped_data.loc[:, 'rain'] - grouped_data.loc[:, 'rain_an_med']
    grouped_data.to_csv(os.path.join(event_def_dir, 'smd_wet_monthly_data.csv'))
    grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
                                                                                        'smd_wet_monthly_data_desc.csv'))
    # number of n days
    out_keys2 = []
    for nd in ndays:
        for k in out_keys:
            ok = '{:02d}d_{}'.format(nd, k)
            out_keys2.append(ok)
            grouped_data.loc[:, ok] = grouped_data.loc[:, k] >= nd
    out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
    # drop always-true / never-true thresholds (48 presumably = record years)
    drop_keys = []
    for k in out_keys2:
        temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
        if temp:
            drop_keys.append(k)
    out = out.drop(columns=drop_keys)
    out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
    # convert probability columns to percent (skip 'sum'/'count')
    t = pd.Series([' '.join(e) for e in out.columns])
    idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
    out.loc[:, out.columns[idx]] *= 100
    out.to_csv(os.path.join(event_def_dir, 'smd_wet_prob.csv'), float_format='%.1f%%')
    out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'smd_wet_prob_only_prob.csv'),
                                        float_format='%.1f%%')
    out_years.to_csv(os.path.join(event_def_dir, 'smd_wet_years.csv'))
def calc_wet_recurance_ndays():
    """Derive wet-event statistics from per-month rain-day-count limits.

    A month qualifies as a wet event when its count of rain days
    (rain > 0.01) reaches that calendar month's limit from ``ndays``.
    Limits of 99 effectively disable the months outside May-September.
    Writes the ``ndays_wet_*`` family of CSVs into ``event_def_dir``.

    NOTE(review): depends on module-level ``get_vcsn_record``,
    ``vcsn_version``, ``calc_sma_smd_historical``, ``prob``, ``add_pga``
    and ``event_def_dir``.
    """
    # per-month rain-day limits (keyed by calendar month, May-Sep only)
    ndays = {
        'org': { # this is the best value!
            5: 14,
            6: 11,
            7: 11,
            8: 13,
            9: 13,
        }
    }
    # 99 rain days can never occur, so these months never trigger an event
    for v in ndays.values():
        v.update({
            1: 99,
            2: 99,
            3: 99,
            4: 99,
            10: 99,
            11: 99,
            12: 99,
        })
    data = get_vcsn_record(vcsn_version).reset_index()
    temp = calc_sma_smd_historical(data['rain'], data['pet'], data.date, 150, 1)
    trans_cols = ['mean_doy_smd', 'sma', 'smd', 'drain', 'aet_out']
    data.loc[:, trans_cols] = temp.loc[:, trans_cols]
    # a "rain day" is any day with measurable rain (> 0.01 mm)
    data.loc[:, 'ndays_rain'] = (data.loc[:, 'rain'] > 0.01).astype(float)
    data.to_csv(os.path.join(event_def_dir, 'ndays_wet_raw.csv'))
    grouped_data = data.loc[:, ['month', 'year', 'rain', 'ndays_rain']].groupby(['month', 'year']).sum().reset_index()
    grouped_data.to_csv(os.path.join(event_def_dir, 'ndays_wet_monthly_data.csv'))
    grouped_data.drop(columns=['year']).groupby('month').describe().to_csv(os.path.join(event_def_dir,
                                                                                        'ndays_wet_monthly_data_desc.csv'))
    # number of n days
    out_keys2 = []
    for k, val in ndays.items():
        ok = '{}'.format(k)
        out_keys2.append(ok)
        # map each row's month to its rain-day limit, then compare
        grouped_data.loc[:, 'limit'] = grouped_data.loc[:, 'month']
        grouped_data = grouped_data.replace({'limit': val})
        grouped_data.loc[:, ok] = grouped_data.loc[:, 'ndays_rain'] >= grouped_data.loc[:, 'limit']
    out = grouped_data.loc[:, ['month'] + out_keys2].groupby(['month']).aggregate(['sum', prob])
    # drop limit sets that are always or never met (48 presumably = record years)
    drop_keys = []
    for k in out_keys2:
        temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
        if temp:
            drop_keys.append(k)
    out = out.drop(columns=drop_keys)
    out, out_years = add_pga(grouped_data, set(out_keys2) - set(drop_keys), out)
    # convert probability columns to percent (skip 'sum'/'count')
    t = pd.Series([' '.join(e) for e in out.columns])
    idx = ~((t.str.contains('sum')) | (t.str.contains('count')))
    out.loc[:, out.columns[idx]] *= 100
    out.to_csv(os.path.join(event_def_dir, 'ndays_wet_prob.csv'), float_format='%.1f%%')
    out.loc[:, out.columns[idx]].to_csv(os.path.join(event_def_dir, 'ndays_wet_prob_only_prob.csv'),
                                        float_format='%.1f%%')
    out_years.to_csv(os.path.join(event_def_dir, 'ndays_wet_years.csv'))
def calc_dry_rolling():
bulk_ndays = [5, 10, 15, 20]
ndays = {}
for bnd in bulk_ndays:
ndays['ndays{}'.format(bnd)] = {k: bnd for k in range(1, 13)}
thresholds = { # this did not end up getting used
'first': {
4: 15,
5: 10,
8: 5,
9: 10,
},
'first-3': {
4: 15 - 3,
5: 10 - 3,
8: 5 - 3,
9: 10 - 3,
},
'first-5': {
4: 15 - 5,
5: 10 - 5,
8: 5 - 5,
9: 10 - 5,
},
'first-10': {
4: 15 - 10,
5: 10 - 10,
8: 5 - 10,
9: 10 - 10,
},
'zero': {
4: 0,
5: 0,
8: 0,
9: 0,
},
'one': {
4: 1,
5: 1,
8: 1,
9: 1,
},
'first-7': {
4: 15 - 7,
5: 10 - 7,
8: 5 - 7,
9: 10 - 7,
},
}
for v in thresholds.values():
v.update({
1: -1,
2: -1,
3: -1,
6: -1,
7: -1,
10: -1,
11: -1,
12: -1,
})
data = get_vcsn_record(vcsn_version).reset_index()
data.loc[:, 'roll_rain_10'] = data.loc[:, 'rain'].rolling(10).sum()
out_keys = []
outdata = pd.DataFrame(
index=pd.MultiIndex.from_product([range(1, 13), range(1972, 2020)], names=['month', 'year']))
for nd, thresh in itertools.product(ndays.keys(), thresholds.keys()):
temp_data = data.copy(deep=True)
ok = '{}_{}'.format(thresh, nd)
out_keys.append(ok)
for m in range(1, 13):
idx = data.month == m
temp_data.loc[idx, ok] = temp_data.loc[idx, 'roll_rain_10'] <= thresholds[thresh][m]
temp_data.loc[idx, 'ndays'] = ndays[nd][m]
temp_data = temp_data.groupby(['month', 'year']).agg({ok: 'sum', 'ndays': 'mean'})
outdata.loc[:, ok] = temp_data.loc[:, ok] >= temp_data.loc[:, 'ndays']
outdata.to_csv(os.path.join(event_def_dir, 'rolling_dry_monthly.csv'))
outdata = outdata.reset_index()
out = outdata.loc[:, ['month'] + out_keys].groupby(['month']).aggregate(['sum', prob])
drop_keys = []
for k in out_keys:
temp = (out.loc[:, k].loc[:, 'sum'] == 48).all() or (out.loc[:, k].loc[:, 'sum'] == 0).all()
if temp:
drop_keys.append(k)
out | |
<filename>feincms/models.py
"""
This is the core of FeinCMS
All models defined here are abstract, which means no tables are created in
the feincms_ namespace.
"""
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models import Q
from django.db.models.fields import FieldDoesNotExist
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
from feincms import settings, ensure_completely_loaded
from feincms.utils import get_object, copy_model_instance
try:
any
except NameError:
# For Python 2.4
from feincms.compat import c_any as any
class Region(object):
    """
    This class represents a region inside a template. Example regions might be
    'main' and 'sidebar'.

    ``inherited`` is True when the region should fall back to an ancestor's
    content if it has none of its own; it is requested by passing the string
    'inherited' as the first extra positional argument.
    """

    def __init__(self, key, title, *args):
        self.key = key
        self.title = title
        # FIX: previously this stored the raw empty tuple () when no extra
        # args were given (``args and ...`` short-circuits to ``args``).
        # Normalize to a real bool; truthiness is unchanged, so existing
        # ``if region.inherited:`` checks behave identically.
        self.inherited = bool(args and args[0] == 'inherited')
        # Content types registered for this region; populated externally
        # (see ``content_types`` below).
        self._content_types = []

    def __unicode__(self):
        return force_unicode(self.title)

    @property
    def content_types(self):
        """
        Returns a list of content types registered for this region as a list
        of (content type key, beautified content type name) tuples
        """
        return [(ct.__name__.lower(), ct._meta.verbose_name) for ct in self._content_types]
class Template(object):
    """
    A template is a standard Django template which is used to render a
    CMS object, most commonly a page.
    """

    def __init__(self, title, path, regions, key=None, preview_image=None):
        # ``key`` is what gets stored in the database; the template path
        # doubles as the key when none is given explicitly.
        self.key = key or path
        self.title = title
        self.path = path
        self.preview_image = preview_image

        # Accept ready-made Region instances as well as plain tuples that
        # the Region constructor understands.
        self.regions = [
            entry if isinstance(entry, Region) else Region(*entry)
            for entry in regions]
        self.regions_dict = dict((region.key, region) for region in self.regions)

    def __unicode__(self):
        return force_unicode(self.title)
class ContentProxy(object):
    """
    This proxy offers attribute-style access to the page contents of regions::

        >> page = Page.objects.all()[0]
        >> page.content.main
        [A list of all page contents which are assigned to the region with key 'main']
    """

    def __init__(self, item):
        self.item = item

    def __getattr__(self, attr):
        # Dunder lookups (__deepcopy__, __reduce__, ...) are probed by Python
        # machinery all the time; never treat them as region names.
        if attr.startswith('__'):
            raise AttributeError
        return self.get_content(self.__dict__['item'], attr)

    def get_content(self, item, attr):
        """Return the sorted content instances of ``item`` for region ``attr``.

        Unknown region keys yield an empty list.  For regions flagged as
        inherited, an empty region walks up the ancestor chain until content
        is found or no ancestors remain.
        """
        region = item.template.regions_dict.get(attr)
        if region is None:
            return []

        # go to parent if this model has a parent attribute
        # TODO: this should be abstracted into a property/method or something
        # The link which should be followed is not always '.parent'
        obj = item
        while True:
            contents = obj._content_for_region(region)
            if contents or not region.inherited:
                break
            if not (hasattr(obj, 'parent_id') and obj.parent_id):
                break
            obj = obj.parent

        contents.sort(key=lambda c: c.ordering)
        return contents
def create_base_model(inherit_from=models.Model):
class Base(inherit_from):
"""
This is the base class for your CMS models.
"""
content_proxy_class = ContentProxy
class Meta:
abstract = True
_cached_django_content_type = None
@classmethod
def register_regions(cls, *regions):
"""
Register a list of regions. Only use this if you do not want to use
multiple templates with this model (read: not use ``register_templates``)::
BlogEntry.register_regions(
('main', _('Main content area')),
)
"""
if hasattr(cls, 'template'):
import warnings
warnings.warn(
'Ignoring second call to register_regions.',
RuntimeWarning)
return
# implicitly creates a dummy template object -- the item editor
# depends on the presence of a template.
cls.template = Template('', '', regions)
cls._feincms_all_regions = cls.template.regions
@classmethod
def register_templates(cls, *templates):
"""
Register templates and add a ``template_key`` field to the model for
saving the selected template::
Page.register_templates({
'key': 'base',
'title': _('Standard template'),
'path': 'feincms_base.html',
'regions': (
('main', _('Main content area')),
('sidebar', _('Sidebar'), 'inherited'),
),
}, {
'key': '2col',
'title': _('Template with two columns'),
'path': 'feincms_2col.html',
'regions': (
('col1', _('Column one')),
('col2', _('Column two')),
('sidebar', _('Sidebar'), 'inherited'),
),
})
"""
if not hasattr(cls, '_feincms_templates'):
cls._feincms_templates = SortedDict()
cls.TEMPLATES_CHOICES = []
instances = getattr(cls, '_feincms_templates', SortedDict())
for template in templates:
if not isinstance(template, Template):
template = Template(**template)
instances[template.key] = template
try:
field = cls._meta.get_field_by_name('template_key')[0]
except (FieldDoesNotExist, IndexError):
cls.add_to_class('template_key', models.CharField(_('template'), max_length=255, choices=()))
field = cls._meta.get_field_by_name('template_key')[0]
def _template(self):
ensure_completely_loaded()
try:
return self._feincms_templates[self.template_key]
except KeyError:
# return first template as a fallback if the template
# has changed in-between
return self._feincms_templates[
self._feincms_templates.keys()[0]]
cls.template = property(_template)
cls.TEMPLATE_CHOICES = field._choices = [(template.key, template.title)
for template in cls._feincms_templates.values()]
field.default = field.choices[0][0]
# Build a set of all regions used anywhere
cls._feincms_all_regions = set()
for template in cls._feincms_templates.values():
cls._feincms_all_regions.update(template.regions)
@classmethod
def register_extension(cls, register_fn):
"""
Call the register function of an extension. You must override this
if you provide a custom ModelAdmin class and want your extensions to
be able to patch stuff in.
"""
register_fn(cls, None)
@classmethod
def register_extensions(cls, *extensions):
if not hasattr(cls, '_feincms_extensions'):
cls._feincms_extensions = set()
here = cls.__module__.split('.')[:-1]
here_path = '.'.join(here + ['extensions'])
common_path = '.'.join(here[:-1] + ['extensions'])
for ext in extensions:
if ext in cls._feincms_extensions:
continue
try:
if isinstance(ext, basestring):
try:
fn = get_object(ext + '.register')
except ImportError:
try:
fn = get_object('%s.%s.register' % ( here_path, ext ) )
except ImportError:
fn = get_object('%s.%s.register' % ( common_path, ext ) )
# Not a string, so take our chances and just try to access "register"
else:
fn = ext.register
cls.register_extension(fn)
cls._feincms_extensions.add(ext)
except Exception, e:
raise ImproperlyConfigured("%s.register_extensions('%s') raised an '%s' exception" %
(cls.__name__, ext, e.message))
@property
def content(self):
"""
Provide a simple interface for getting all content blocks for a region.
"""
if not hasattr(self, '_content_proxy'):
self._content_proxy = self.content_proxy_class(self)
return self._content_proxy
def _get_content_types_for_region(self, region):
# find all concrete content type tables which have at least one entry for
# the current CMS object and region
# This method is overridden by a more efficient implementation if
# the ct_tracker extension is active.
from django.core.cache import cache as django_cache
counts = None
ck = None
# ???: Should we move the cache_key() method to Base, so we can avoid
# the if-it-supports-it dance?
if settings.FEINCMS_USE_CACHE and getattr(self, 'cache_key', None):
ck = 'CNT-FOR-REGION-' + region.key + '-' + self.cache_key()
counts = django_cache.get(ck)
if counts is None:
sql = ' UNION '.join([
'SELECT %d AS ct_idx, COUNT(id) FROM %s WHERE parent_id=%s AND region=%%s' % (
idx,
cls._meta.db_table,
self.pk) for idx, cls in enumerate(self._feincms_content_types)])
sql = 'SELECT * FROM ( ' + sql + ' ) AS ct ORDER BY ct_idx'
from django.db import connection
cursor = connection.cursor()
cursor.execute(sql, [region.key] * len(self._feincms_content_types))
counts = [row[1] for row in cursor.fetchall()]
if ck:
django_cache.set(ck, counts)
return counts
def _content_for_region(self, region):
"""
This method is used primarily by the ContentProxy
"""
self._needs_content_types()
counts = self._get_content_types_for_region(region)
if not any(counts):
return []
contents = []
for idx, cnt in enumerate(counts):
if cnt:
# the queryset is evaluated right here, because the content objects
# of different type will have to be sorted into a list according
# to their 'ordering' attribute later
contents += list(
self._feincms_content_types[idx].get_queryset(
Q(parent=self) & Q(region=region.key)))
return contents
@classmethod
def _create_content_base(cls):
"""
This is purely an internal method. Here, we create a base class for the
concrete content types, which are built in ``create_content_type``.
The three fields added to build a concrete content type class/model are
``parent``, ``region`` and ``ordering``.
"""
# We need a template, because of the possibility of restricting content types
# to a subset of all available regions. Each region object carries a list of
# all allowed content types around. Content types created before a region is
# initialized would not be available in the respective region; we avoid this
# problem by raising an ImproperlyConfigured exception early.
cls._needs_templates()
class Meta:
abstract = True
app_label = cls._meta.app_label
ordering = ['ordering']
def __unicode__(self):
return u'%s on %s, ordering %s' % (self.region, self.parent, self.ordering)
def render(self, **kwargs):
"""
Default render implementation, tries to call a method named after the
region key before giving up.
You'll probably override the render method itself most of the time
instead of adding region-specific render methods.
"""
render_fn = getattr(self, 'render_%s' % self.region, None)
if render_fn:
return render_fn(**kwargs)
raise NotImplementedError
def fe_render(self, **kwargs):
"""
Frontend Editing enabled renderer
"""
if 'request' in kwargs:
request = kwargs['request']
if request.session | |
<reponame>scopatz/openmc<gh_stars>0
from collections import OrderedDict, Iterable
from numbers import Integral
from xml.etree import ElementTree as ET
import random
import sys
import warnings
import numpy as np
import openmc
import openmc.checkvalue as cv
if sys.version_info[0] >= 3:
basestring = str
# A dictionary for storing IDs of cell elements that have already been written,
# used to optimize the writing process
WRITTEN_IDS = {}
# A static variable for auto-generated Lattice (Universe) IDs
AUTO_UNIVERSE_ID = 10000
def reset_auto_universe_id():
    """Reset the auto-generated universe ID counter to its initial value."""
    global AUTO_UNIVERSE_ID
    AUTO_UNIVERSE_ID = 10000
class Universe(object):
"""A collection of cells that can be repeated.
Parameters
----------
universe_id : int, optional
Unique identifier of the universe. If not specified, an identifier will
automatically be assigned
name : str, optional
Name of the universe. If not specified, the name is the empty string.
cells : Iterable of openmc.Cell, optional
Cells to add to the universe. By default no cells are added.
Attributes
----------
id : int
Unique identifier of the universe
name : str
Name of the universe
cells : collections.OrderedDict
Dictionary whose keys are cell IDs and values are :class:`Cell`
instances
"""
    def __init__(self, universe_id=None, name='', cells=None):
        """Set up the universe; ``self.id = universe_id`` goes through the
        ``id`` property setter, which auto-assigns an ID when None."""
        # Initialize Cell class attributes
        self.id = universe_id
        self.name = name
        # Keys - Cell IDs
        # Values - Cells
        self._cells = OrderedDict()
        # Keys - Cell IDs
        # Values - Offsets
        self._cell_offsets = OrderedDict()
        if cells is not None:
            self.add_cells(cells)
def __eq__(self, other):
if not isinstance(other, Universe):
return False
elif self.id != other.id:
return False
elif self.name != other.name:
return False
elif self.cells != other.cells:
return False
else:
return True
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it explicitly.
        return not self == other
    def __hash__(self):
        # NOTE(review): hashing the repr means the hash changes whenever the
        # universe is renamed or cells are added/removed -- unsafe if
        # instances are mutated while held in sets or as dict keys.
        return hash(repr(self))
def __repr__(self):
string = 'Universe\n'
string += '{0: <16}{1}{2}\n'.format('\tID', '=\t', self._id)
string += '{0: <16}{1}{2}\n'.format('\tName', '=\t', self._name)
string += '{0: <16}{1}{2}\n'.format('\tCells', '=\t',
list(self._cells.keys()))
return string
    @property
    def id(self):
        """int: unique identifier of the universe (see setter for validation)."""
        return self._id

    @property
    def name(self):
        """str: name of the universe (empty string when unnamed)."""
        return self._name

    @property
    def cells(self):
        """collections.OrderedDict: mapping of cell ID to contained cell."""
        return self._cells
    @id.setter
    def id(self, universe_id):
        # None requests an auto-generated ID from the module-level counter.
        if universe_id is None:
            global AUTO_UNIVERSE_ID
            self._id = AUTO_UNIVERSE_ID
            AUTO_UNIVERSE_ID += 1
        else:
            # Explicit IDs must be non-negative integers.
            cv.check_type('universe ID', universe_id, Integral)
            cv.check_greater_than('universe ID', universe_id, 0, equality=True)
            self._id = universe_id
    @name.setter
    def name(self, name):
        # ``basestring`` is aliased to ``str`` on Python 3 at module top.
        if name is not None:
            cv.check_type('universe name', name, basestring)
            self._name = name
        else:
            # None normalizes to the empty string.
            self._name = ''
def find(self, point):
"""Find cells/universes/lattices which contain a given point
Parameters
----------
point : 3-tuple of float
Cartesian coordinates of the point
Returns
-------
list
Sequence of universes, cells, and lattices which are traversed to
find the given point
"""
p = np.asarray(point)
for cell in self._cells.values():
if p in cell:
if cell.fill_type in ('material', 'distribmat', 'void'):
return [self, cell]
elif cell.fill_type == 'universe':
if cell.translation is not None:
p -= cell.translation
if cell.rotation is not None:
p[:] = cell.rotation_matrix.dot(p)
return [self, cell] + cell.fill.find(p)
else:
return [self, cell] + cell.fill.find(p)
return []
def plot(self, center=(0., 0., 0.), width=(1., 1.), pixels=(200, 200),
basis='xy', color_by='cell', seed=None):
"""Display a slice plot of the universe.
Parameters
----------
center : Iterable of float
Coordinates at the center of the plot
width : Iterable of float
Width of the plot in each basis direction
pixels : Iterable of int
Number of pixels to use in each basis direction
basis : {'xy', 'xz', 'yz'}
The basis directions for the plot
color_by : {'cell', 'material'}
Indicate whether the plot should be colored by cell or by material
seed : hashable object or None
Hashable object which is used to seed the random number generator
used to select colors. If None, the generator is seeded from the
current time.
"""
import matplotlib.pyplot as plt
# Seed the random number generator
if seed is not None:
random.seed(seed)
if basis == 'xy':
x_min = center[0] - 0.5*width[0]
x_max = center[0] + 0.5*width[0]
y_min = center[1] - 0.5*width[1]
y_max = center[1] + 0.5*width[1]
elif basis == 'yz':
# The x-axis will correspond to physical y and the y-axis will correspond to physical z
x_min = center[1] - 0.5*width[0]
x_max = center[1] + 0.5*width[0]
y_min = center[2] - 0.5*width[1]
y_max = center[2] + 0.5*width[1]
elif basis == 'xz':
# The y-axis will correspond to physical z
x_min = center[0] - 0.5*width[0]
x_max = center[0] + 0.5*width[0]
y_min = center[2] - 0.5*width[1]
y_max = center[2] + 0.5*width[1]
# Determine locations to determine cells at
x_coords = np.linspace(x_min, x_max, pixels[0], endpoint=False) + \
0.5*(x_max - x_min)/pixels[0]
y_coords = np.linspace(y_max, y_min, pixels[1], endpoint=False) - \
0.5*(y_max - y_min)/pixels[1]
colors = {}
img = np.zeros(pixels + (4,)) # Use RGBA form
for i, x in enumerate(x_coords):
for j, y in enumerate(y_coords):
if basis == 'xy':
path = self.find((x, y, center[2]))
elif basis == 'yz':
path = self.find((center[0], x, y))
elif basis == 'xz':
path = self.find((x, center[1], y))
if len(path) > 0:
try:
if color_by == 'cell':
uid = path[-1].id
elif color_by == 'material':
if path[-1].fill_type == 'material':
uid = path[-1].fill.id
else:
continue
except AttributeError:
continue
if uid not in colors:
colors[uid] = (random.random(), random.random(),
random.random(), 1.0)
img[j,i,:] = colors[uid]
plt.imshow(img, extent=(x_min, x_max, y_min, y_max))
plt.show()
def add_cell(self, cell):
"""Add a cell to the universe.
Parameters
----------
cell : openmc.Cell
Cell to add
"""
if not isinstance(cell, openmc.Cell):
msg = 'Unable to add a Cell to Universe ID="{0}" since "{1}" is not ' \
'a Cell'.format(self._id, cell)
raise ValueError(msg)
cell_id = cell._id
if cell_id not in self._cells:
self._cells[cell_id] = cell
def add_cells(self, cells):
"""Add multiple cells to the universe.
Parameters
----------
cells : Iterable of openmc.Cell
Cells to add
"""
if not isinstance(cells, Iterable):
msg = 'Unable to add Cells to Universe ID="{0}" since "{1}" is not ' \
'iterable'.format(self._id, cells)
raise ValueError(msg)
for cell in cells:
self.add_cell(cell)
def remove_cell(self, cell):
"""Remove a cell from the universe.
Parameters
----------
cell : openmc.Cell
Cell to remove
"""
if not isinstance(cell, openmc.Cell):
msg = 'Unable to remove a Cell from Universe ID="{0}" since "{1}" is ' \
'not a Cell'.format(self._id, cell)
raise ValueError(msg)
# If the Cell is in the Universe's list of Cells, delete it
if cell.id in self._cells:
del self._cells[cell.id]
    def clear_cells(self):
        """Remove all cells from the universe."""
        # clear() keeps the same OrderedDict object, so external references
        # to self.cells remain valid (they just become empty).
        self._cells.clear()
    def get_cell_instance(self, path, distribcell_index):
        """Resolve a distribcell offset by walking an ID path string.

        NOTE(review): ``path`` appears to be a chain of IDs joined by a
        two-character separator starting with '-' (likely '->'); the
        ``next_index+2`` slices skip that separator -- confirm against the
        caller's path format.
        """
        # Pop off the root Universe ID from the path
        next_index = path.index('-')
        path = path[next_index+2:]
        # Extract the Cell ID from the path
        if '-' in path:
            next_index = path.index('-')
            cell_id = int(path[:next_index])
            path = path[next_index+2:]
        else:
            # No separator left: the whole remainder is the cell ID
            cell_id = int(path)
            path = ''
        # Make a recursive call to the Cell within this Universe
        offset = self.cells[cell_id].get_cell_instance(path, distribcell_index)
        # Return the offset computed at all nested Universe levels
        return offset
def get_all_nuclides(self):
"""Return all nuclides contained in the universe
Returns
-------
nuclides : collections.OrderedDict
Dictionary whose keys are nuclide names and values are 2-tuples of
(nuclide, density)
"""
nuclides = OrderedDict()
# Append all Nuclides in each Cell in the Universe to the dictionary
for cell_id, cell in self._cells.items():
nuclides.update(cell.get_all_nuclides())
return nuclides
def get_all_cells(self):
"""Return all cells that are contained within the universe
Returns
-------
cells : collections.OrderedDict
Dictionary whose keys are cell IDs and values are :class:`Cell`
instances
"""
cells = OrderedDict()
# Add this Universe's cells to the dictionary
cells.update(self._cells)
# Append all Cells in each Cell in the Universe to the dictionary
for cell_id, cell in self._cells.items():
cells.update(cell.get_all_cells())
return cells
def get_all_materials(self):
"""Return all materials that are contained within the universe
Returns
-------
materials : Collections.OrderedDict
Dictionary whose keys are material IDs and values are
:class:`Material` instances
"""
materials = OrderedDict()
# Append all Cells in each Cell in the Universe to the dictionary
cells = self.get_all_cells()
for cell_id, cell in cells.items():
materials.update(cell.get_all_materials())
return materials
def get_all_universes(self):
"""Return all universes that are contained within this one.
Returns
-------
universes : collections.OrderedDict
Dictionary whose keys are universe IDs and values are
:class:`Universe` instances
"""
# Get all Cells in this Universe
cells = self.get_all_cells()
universes = OrderedDict()
# Append all Universes containing each Cell to the dictionary
for cell_id, cell in cells.items():
universes.update(cell.get_all_universes())
return universes
def create_xml_subelement(self, xml_element):
# Iterate over all Cells
for cell_id, cell in self._cells.items():
# If the | |
<filename>sru/sru_functional.py
import os
import sys
import warnings
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
SRU_CPU_kernel = None
SRU_GPU_kernel = None
# load C++ implementation for CPU computation
def _lazy_load_cpu_kernel():
    """Compile and cache the C++ kernel used for SRU CPU inference.

    Returns
    -------
    The loaded extension module on success, or ``False`` when compilation /
    loading failed (callers then fall back to the pure-Python path).  The
    result is memoized in the module-level ``SRU_CPU_kernel``.
    """
    global SRU_CPU_kernel
    if SRU_CPU_kernel is not None:
        return SRU_CPU_kernel
    try:
        from torch.utils.cpp_extension import load
        cpu_source = os.path.join(os.path.dirname(__file__), "sru_cpu_impl.cpp")
        SRU_CPU_kernel = load(
            name="sru_cpu_impl",
            sources=[cpu_source],
            extra_cflags=['-O3'],
            verbose=False
        )
    # FIX: the previous bare ``except:`` also swallowed KeyboardInterrupt
    # and SystemExit; only genuine errors should trigger the fallback.
    except Exception:
        # use Python version instead
        SRU_CPU_kernel = False
    return SRU_CPU_kernel
# load C++ implementation for GPU computation
def _lazy_load_cuda_kernel():
    """Import and return the GPU SRU kernel class.

    Tries the package-relative import first and falls back to a top-level
    import when the module is run outside the package.
    """
    try:
        from .cuda_functional import SRU_Compute_GPU
    # FIX: narrow the previous bare ``except`` -- only a failed import
    # should trigger the fallback path.
    except ImportError:
        from cuda_functional import SRU_Compute_GPU
    return SRU_Compute_GPU
def SRU_CPU_class(activation_type,
                  d,
                  bidirectional=False,
                  has_skip_term=True,
                  scale_x=None,
                  mask_pad=None):
    """CPU version of the core SRU computation.

    Has the same interface as SRU_Compute_GPU() but is a regular Python function
    instead of a torch.autograd.Function because we don't implement backward()
    explicitly.

    Args:
        activation_type (int) : 0 (identity), 1 (tanh), 2 (ReLU) or 3 (SeLU).
        d (int) : the dimensionality of the hidden layer
            of the `SRUCell`. This is not named very well; it is
            nonetheless named as such to maintain consistency with
            the GPU compute-kernel constructor.
        bidirectional (bool) : whether or not to use bidirectional `SRUCell`s.
            Default: False.
        has_skip_term (bool) : whether or not to include `(1-r[t])*x[t]` skip-term in h[t]
        scale_x (float) : scaling constant on the highway connection
        mask_pad : optional padding mask; NOTE(review): presumably
            (length, batch) with 1 marking padded steps -- confirm at callers.

    NOTE(review): despite the docstring above, the Python fallback below
    raises ValueError for activation_type 3 (SeLU) -- only 0-2 are handled.
    """
    def sru_compute_cpu(u, x, weight_c, bias, init=None, mask_c=None):
        """
        An SRU is a recurrent neural network cell comprised of 5 equations, described
        in "Simple Recurrent Units for Highly Parallelizable Recurrence."

        The first 3 of these equations each require a matrix-multiply component,
        i.e. the input vector x_t dotted with a weight matrix W_i, where i is in
        {0, 1, 2}.

        As each weight matrix W is dotted with the same input x_t, we can fuse these
        computations into a single matrix-multiply, i.e. `x_t <dot> stack([W_0, W_1, W_2])`.
        We call the result of this computation `U`.

        sru_compute_cpu() accepts 'u' and 'x' (along with a tensor of biases,
        an initial memory cell `c0`, and an optional dropout mask) and computes
        equations (3) - (7). It returns a tensor containing all `t` hidden states
        (where `t` is the number of elements in our input sequence) and the final
        memory cell `c_T`.
        """
        bidir = 2 if bidirectional else 1
        # a 2-D input is treated as a single time step
        length = x.size(0) if x.dim() == 3 else 1
        batch = x.size(-2)
        # k: number of fused sub-matrices packed into u (3, or 4 with skip term)
        k = u.size(-1) // d // bidir

        # Fast path: hand the whole computation to the C++ kernel when it is
        # available and no gradients are required.
        sru_cpu_impl = _lazy_load_cpu_kernel()
        if (sru_cpu_impl is not None) and (sru_cpu_impl != False):
            if not torch.is_grad_enabled():
                assert mask_c is None
                cpu_forward = sru_cpu_impl.cpu_bi_forward if bidirectional else \
                    sru_cpu_impl.cpu_forward
                mask_pad_ = torch.FloatTensor() if mask_pad is None else mask_pad.float()
                return cpu_forward(
                    u,
                    x.contiguous(),
                    weight_c,
                    bias,
                    init,
                    mask_pad_,
                    length,
                    batch,
                    d,
                    k,
                    activation_type,
                    has_skip_term,
                    scale_x.item() if scale_x is not None else 1.0
                )
            else:
                warnings.warn("Running SRU on CPU with grad_enabled=True. Are you sure?")
        else:
            warnings.warn("C++ kernel for SRU CPU inference was not loaded. "
                          "Use Python version instead.")

        # ---- pure-Python fallback (autograd-friendly) ----
        mask_pad_ = mask_pad.view(length, batch, 1).float() if mask_pad is not None else mask_pad
        u = u.view(length, batch, bidir, d, k)
        # per-direction recurrent weights and biases for forget/reset gates
        forget_wc, reset_wc = weight_c.view(2, bidir, d)
        forget_bias, reset_bias = bias.view(2, bidir, d)
        # x_prime is the highway/skip input: the (scaled) raw input when k == 3,
        # otherwise the 4th fused sub-matrix of u
        if not has_skip_term:
            x_prime = None
        elif k == 3:
            x_prime = x.view(length, batch, bidir, d)
            x_prime = x_prime*scale_x if scale_x is not None else x_prime
        else:
            x_prime = u[..., 3]

        h = Variable(x.data.new(length, batch, bidir, d))
        if init is None:
            c_init = Variable(x.data.new(batch, bidir, d).zero_())
        else:
            c_init = init.view(batch, bidir, d)

        c_final = []
        for di in range(bidir):
            # forward direction walks time ascending, backward descending
            if di == 0:
                time_seq = range(length)
            else:
                time_seq = range(length - 1, -1, -1)

            mask_c_ = 1 if mask_c is None else mask_c.view(batch, bidir, d)[:, di, :]
            c_prev = c_init[:, di, :]
            fb, rb = forget_bias[di], reset_bias[di]
            fw, rw = forget_wc[di].expand(batch, d), reset_wc[di].expand(batch, d)
            # pre-split the fused projections per time step
            u0 = u[:, :, di, :, 0].chunk(length)
            u1 = (u[:, :, di, :, 1] + fb).chunk(length)
            u2 = (u[:, :, di, :, 2] + rb).chunk(length)
            if x_prime is not None:
                xp = x_prime[:, :, di, :].chunk(length)

            for t in time_seq:
                forget_t = (u1[t] + c_prev*fw).sigmoid()
                reset_t = (u2[t] + c_prev*rw).sigmoid()
                # c_t interpolates between the new candidate and the previous cell
                c_t = u0[t] + (c_prev - u0[t]) * forget_t
                if mask_pad_ is not None:
                    # padded steps carry the previous cell state through
                    c_t = c_t * (1-mask_pad_[t]) + c_prev * mask_pad_[t]
                c_prev = c_t

                if activation_type == 0:
                    g_c_t = c_t
                elif activation_type == 1:
                    g_c_t = c_t.tanh()
                elif activation_type == 2:
                    g_c_t = nn.functional.relu(c_t)
                else:
                    raise ValueError('Activation type must be 0, 1, or 2, not {}'.format(activation_type))

                if x_prime is not None:
                    # highway connection: blend activated cell with skip input
                    h_t = xp[t] + (g_c_t * mask_c_ - xp[t]) * reset_t
                else:
                    h_t = g_c_t * mask_c_ * reset_t
                if mask_pad_ is not None:
                    # padded steps emit zeros
                    h_t = h_t * (1-mask_pad_[t])
                h[t, :, di, :] = h_t
            c_final.append(c_t)

        return h.view(length, batch, -1), torch.stack(c_final, dim=1).view(batch, -1)

    return sru_compute_cpu
class SRUCell(nn.Module):
"""
An SRU cell, i.e. a single recurrent neural network cell,
as per `LSTMCell`, `GRUCell` and `RNNCell` in PyTorch.
Args:
input_size (int) : the number of dimensions in a single
input sequence element. For example, if the input sequence
is a sequence of word embeddings, `input_size` is the
dimensionality of a single word embedding, e.g. 300.
hidden_size (int) : the dimensionality of the hidden state
of this cell.
dropout (float) : a number between 0.0 and 1.0. The amount of dropout
applied to `g(c_t)` internally in this cell.
rnn_dropout (float) : the amount of dropout applied to the input of
this cell.
use_tanh (bool) : use tanh activation
is_input_normalized (bool) : whether the input is normalized (e.g. batch norm / layer norm)
bidirectional (bool) : whether or not to employ a bidirectional cell.
"""
def __init__(self,
input_size,
hidden_size,
dropout=0,
rnn_dropout=0,
bidirectional=False,
n_proj=0,
use_tanh=0,
#is_input_normalized=False,
highway_bias=0,
has_skip_term=True,
layer_norm=False,
rescale=True,
v1=False):
super(SRUCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size # hidden size per direction
self.output_size = hidden_size * 2 if bidirectional else hidden_size
self.rnn_dropout = rnn_dropout
self.dropout = dropout
self.bidirectional = bidirectional
#self.is_input_normalized = is_input_normalized
self.has_skip_term = has_skip_term
self.highway_bias = highway_bias
self.v1 = v1
self.rescale = rescale
self.activation_type = 0
self.activation = 'none'
if use_tanh:
self.activation_type = 1
self.activation = 'tanh'
# projection dimension
self.projection_size = 0
if n_proj > 0 and n_proj < input_size and n_proj < self.output_size:
self.projection_size = n_proj
# number of sub-matrices used in SRU
self.num_matrices = 3
if has_skip_term and self.input_size != self.output_size:
self.num_matrices = 4
# make parameters
if self.projection_size == 0:
self.weight = nn.Parameter(torch.Tensor(
self.input_size,
self.output_size * self.num_matrices
))
else:
self.weight_proj = nn.Parameter(torch.Tensor(self.input_size, self.projection_size))
self.weight = nn.Parameter(torch.Tensor(
self.projection_size,
self.output_size * self.num_matrices
))
self.weight_c = nn.Parameter(torch.Tensor(self.output_size * 2))
self.bias = nn.Parameter(torch.Tensor(self.output_size * 2))
# scaling constant used in highway connections when rescale=True
self.register_buffer('scale_x', torch.FloatTensor([0]))
if layer_norm:
self.layer_norm = nn.LayerNorm(input_size)
else:
self.layer_norm = None
self.reset_parameters()
    def reset_parameters(self):
        """
        Properly initialize the weights of SRU, following the same recipe as:
            Xavier init: http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
            Kaiming init: https://arxiv.org/abs/1502.01852

        All updates below are in-place on the parameter tensors. Note that
        ``w`` is a *view* of ``self.weight``, so every ``mul_`` on ``w``
        mutates ``self.weight`` directly.
        """
        # initialize weights such that E[w_ij]=0 and Var[w_ij]=1/d
        # (uniform on [-sqrt(3/d), sqrt(3/d)] has variance exactly 1/d)
        d = self.weight.size(0)
        val_range = (3.0 / d)**0.5
        self.weight.data.uniform_(-val_range, val_range)
        if self.projection_size > 0:
            val_range = (3.0 / self.weight_proj.size(0))**0.5
            self.weight_proj.data.uniform_(-val_range, val_range)
        # initialize bias: forget-gate biases (first half) stay zero, the
        # reset/highway biases (second half) get the configured highway bias
        self.bias.data.zero_()
        bias_val, output_size = self.highway_bias, self.output_size
        self.bias.data[output_size:].zero_().add_(bias_val)
        # projection matrix as a tensor of size:
        #    (input_size, bidirection, hidden_size, num_matrices)
        w = self.weight.data.view(d, -1, self.hidden_size, self.num_matrices)
        if not self.v1:
            # initialize weight_c such that E[w]=0 and Var[w]=1
            self.weight_c.data.uniform_(-3.0**0.5, 3.0**0.5)
            # rescale weight_c and the weight of sigmoid gates with a factor of sqrt(0.5)
            w[:, :, :, 1].mul_(0.5**0.5)
            w[:, :, :, 2].mul_(0.5**0.5)
            self.weight_c.data.mul_(0.5**0.5)
        else:
            # v1 compatibility mode: no learned per-dimension gate recurrence
            self.weight_c.data.zero_()
            self.weight_c.requires_grad = False
        # re-scale weights for dropout and normalized input for better gradient flow
        if self.dropout > 0:
            w[:, :, :, 0].mul_((1 - self.dropout)**0.5)
        if self.rnn_dropout > 0:
            w.mul_((1 - self.rnn_dropout)**0.5)
        # making weights smaller when layer norm is used. need more tests
        if self.layer_norm:
            w.mul_(0.1)
            #self.weight_c.data.mul_(0.25)
        self.scale_x.data[0] = 1
        if not (self.rescale and self.has_skip_term):
            return
        # scalar used to properly scale the highway output
        scale_val = (1 + math.exp(bias_val) * 2)**0.5
        self.scale_x.data[0] = scale_val
        if self.num_matrices == 4:
            w[:, :, :, 3].mul_(scale_val)
def forward(self, input, c0=None, mask_pad=None):
"""
This method computes `U`. In addition, it computes the remaining components
in `SRU_Compute_GPU` or `SRU_Compute_CPU` and return | |
# Repository: LeBlue/pyOCD
# pyOCD debugger
# Copyright (c) 2018-2020 Arm Limited
# Copyright (c) 2021 <NAME>
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import logging
import logging.config
import yaml
import os
from pathlib import Path
import weakref
from inspect import (getfullargspec, signature)
from typing import (Any, Callable, Generator, Sequence, Union, cast, Dict, List, Mapping, Optional, TYPE_CHECKING)
from . import exceptions
from .options_manager import OptionsManager
from ..utility.notification import Notifier
if TYPE_CHECKING:
from types import TracebackType
from .soc_target import SoCTarget
from ..probe.debug_probe import DebugProbe
from ..probe.tcp_probe_server import DebugProbeServer
from ..gdbserver.gdbserver import GDBServer
from ..board.board import Board
LOG = logging.getLogger(__name__)
## @brief Set of default config filenames to search for.
_CONFIG_FILE_NAMES = [
"pyocd.yaml",
"pyocd.yml",
".pyocd.yaml",
".pyocd.yml",
]
## @brief Set of default user script names to search for.
_USER_SCRIPT_NAMES = [
"pyocd_user.py",
".pyocd_user.py",
]
class Session(Notifier):
"""@brief Top-level object for a debug session.
This class represents a debug session with a single debug probe. It is the root of the object
graph, where it owns the debug probe and the board objects.
Another important function of this class is that it contains a dictionary of session-scope
options. These would normally be passed in from the command line. Options can also be loaded
from a config file.
Precedence for session options:
1. Keyword arguments to constructor.
2. _options_ parameter to constructor.
3. Probe-specific options from a config file.
4. General options from a config file.
5. _option_defaults_ parameter to constructor.
The session also tracks several other objects:
- @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances created for any cores.
- @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer".
- The user script proxy.
See the @ref pyocd.core.helpers.ConnectHelper "ConnectHelper" class for several methods that
make it easy to create new sessions, with or without user interaction in the case of multiple
available debug probes. A common pattern is to combine @ref
pyocd.core.helpers.ConnectHelper.session_with_chosen_probe()
"ConnectHelper.session_with_chosen_probe()" and a **with** block.
A Session instance can be used as a context manager. The session will, by default, be
automatically opened when the context is entered. And, of course, it will be closed when the
**with** block is exited (which is harmless if the session was never opened). If you wish to
disable automatic opening, set the `auto_open` parameter to the constructor to False. If an
exception is raised while opening a session inside a **with** statement, the session will be
closed for you to undo any partial initialisation.
"""
## @brief Weak reference to the most recently created session.
_current_session: Optional[weakref.ref] = None
@classmethod
def get_current(cls) -> "Session":
"""@brief Return the most recently created Session instance or a default Session.
By default this method will return the most recently created Session object that is
still alive. If no live session exists, a new default session will be created and returned.
That at least provides access to the user's config file(s).
Used primarily so code that doesn't have a session reference can access session options. This
method should only be used to access options that are unlikely to differ between sessions,
or for debug or other purposes.
"""
if cls._current_session is not None:
session = cls._current_session()
if session is not None:
return session
return Session(None)
def __init__(
self,
probe: Optional["DebugProbe"],
auto_open: bool = True,
options: Optional[Mapping[str, Any]] = None,
option_defaults: Optional[Mapping[str, Any]] = None,
**kwargs
) -> None:
"""@brief Session constructor.
Creates a new session using the provided debug probe. Session options are merged from the
_options_ parameter and any keyword arguments. Normally a board instance is created that can
either be a generic board or a board associated with the debug probe.
Note that the 'project_dir' and 'config' options must be set in either keyword arguments or
the _options_ parameter.
Passing in a _probe_ that is None is allowed. This is useful to create a session that operates
only as a container for session options. In this case, the board instance is not created, so the
#board attribute will be None. Such a Session cannot be opened.
@param self
@param probe The @ref pyocd.probe.debug_probe. "DebugProbe" instance. May be None.
@param auto_open Whether to automatically open the session when used as a context manager.
@param options Optional session options dictionary.
@param option_defaults Optional dictionary of session option values. This dictionary has the
lowest priority in determining final session option values, and is intended to set new
defaults for option if they are not set through any other method.
@param kwargs Session options passed as keyword arguments.
"""
# Importing Board here eases circular import issues, and it's only needed here anyway.
from ..board.board import Board
super().__init__()
Session._current_session = weakref.ref(self)
self._probe = probe
self._closed: bool = True
self._inited: bool = False
self._user_script_namespace: Dict[str, Any] = {}
self._user_script_proxy: Optional[UserScriptDelegateProxy] = None
self._user_script_print_proxy = PrintProxy()
self._delegate: Optional[Any] = None
self._auto_open = auto_open
self._options = OptionsManager()
self._gdbservers: Dict[int, "GDBServer"] = {}
self._probeserver: Optional["DebugProbeServer"] = None
# Set this session on the probe, if we were given a probe.
if probe is not None:
probe.session = self
# Update options.
self._options.add_front(kwargs)
self._options.add_back(options)
# Init project directory.
if self.options.get('project_dir') is None:
self._project_dir: str = os.environ.get('PYOCD_PROJECT_DIR') or os.getcwd()
else:
self._project_dir: str = os.path.abspath(os.path.expanduser(self.options.get('project_dir')))
LOG.debug("Project directory: %s", self.project_dir)
# Switch the working dir to the project dir.
os.chdir(self.project_dir)
# Load options from the config file.
config = self._get_config()
probes_config = config.pop('probes', None)
# Pick up any config file options for this probe. These have priority over global options.
if (probe is not None) and (probes_config is not None):
did_match_probe = False
for uid, settings in probes_config.items():
if str(uid).lower() in probe.unique_id.lower():
if did_match_probe:
LOG.warning("Multiple probe config options match probe ID %s", probe.unique_id)
break
LOG.info("Using config options for probe %s" % (probe.unique_id))
self._options.add_back(settings)
did_match_probe = True
# Add global config options.
self._options.add_back(config)
# Merge in lowest priority options.
self._options.add_back(option_defaults)
# Logging config.
self._configure_logging()
# Bail early if we weren't provided a probe.
if probe is None:
self._board = None
return
# Load the user script.
self._load_user_script()
# Ask the probe if it has an associated board, and if not then we create a generic one.
self._board = probe.create_associated_board() or Board(self)
def _get_config(self) -> Dict[str, Any]:
# Load config file if one was provided via options, and no_config option was not set.
if not self.options.get('no_config'):
configPath = self.find_user_file('config_file', _CONFIG_FILE_NAMES)
if configPath is not None:
try:
with open(configPath, 'r') as configFile:
LOG.debug("Loading config from: %s", configPath)
config = yaml.safe_load(configFile)
# Allow an empty config file.
if config is None:
return {}
# But fail if someone tries to put something other than a dict at the top.
elif not isinstance(config, dict):
raise exceptions.Error("configuration file %s does not contain a top-level dictionary"
% configPath)
return config
except IOError as err:
LOG.warning("Error attempting to access config file '%s': %s", configPath, err)
return {}
def find_user_file(self, option_name: Optional[str], filename_list: List[str]) -> Optional[str]:
"""@brief Search the project directory for a file.
@retval None No matching file was found.
@retval string An absolute path to the requested file.
"""
if option_name is not None:
filePath = self.options.get(option_name)
else:
filePath = None
# Look for default filenames if a path wasn't provided.
if filePath is None:
for filename in filename_list:
thisPath = os.path.expanduser(filename)
if not os.path.isabs(thisPath):
thisPath = os.path.join(self.project_dir, filename)
if os.path.isfile(thisPath):
filePath = thisPath
break
# Use the path passed in options, which may be absolute, relative to the
# home directory, or relative to the project directory.
else:
filePath = os.path.expanduser(filePath)
if not os.path.isabs(filePath):
filePath = os.path.join(self.project_dir, filePath)
return filePath
def _configure_logging(self) -> None:
"""@brief Load a logging config dict | |
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.hybrid_shape_interfaces.hybrid_shape_direction import HybridShapeDirection
from pycatia.in_interfaces.reference import Reference
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class HybridShapeWrapCurve(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| HybridShapeWrapCurve
|
| Represents the hybrid shape wrap curve surface object.
| Role: To access the data of the hybrid shape wrap curve surface
| object.
|
| This data includes:
|
| Two support surfaces, one at each limit of the wrap curve
| surface
| Two curves, one for each support surface
| The curve closing points
|
| Use the CATIAHybridShapeFactory to create a HybridShapeWrapCurve
| object.
|
| See also:
| HybridShapeFactory
"""
    def __init__(self, com_object):
        """Wrap the CATIA COM object and expose it as a HybridShapeWrapCurve."""
        super().__init__(com_object)
        # Keep a directly-named handle to the underlying COM object for the
        # property and method implementations below.
        self.hybrid_shape_wrap_curve = com_object
    @property
    def first_curves_constraint(self) -> int:
        """
        Constraint applied at the first curves of the WrapCurve.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
                | o Property FirstCurvesConstraint() As long
                |
                |     Returns or sets constraint at first curves of the
                |     WrapCurve.
                |     Legal values:
                |     1 = no constraint
                |     2 = Deformed surface will have the same tangency and the same curvature as the original surface
                |     at first curves.
                |
                |     Example:
                |         This example retrieves in FirstCurvesConstraint the constraint at first
                |         curves of the ShpWrapCurve hybrid shape WrapCurve feature.
                |
                |          Dim FirstCurvesConstraint As long
                |          Set FirstCurvesConstraint = ShpWrapCurve.FirstCurvesConstraint

        :return: int
        :rtype: int
        """
        return self.hybrid_shape_wrap_curve.FirstCurvesConstraint

    @first_curves_constraint.setter
    def first_curves_constraint(self, value: int):
        """
        Set the constraint at the first curves (1 = none, 2 = tangency + curvature).

        :param int value:
        """
        self.hybrid_shape_wrap_curve.FirstCurvesConstraint = value
    @property
    def last_curves_constraint(self) -> int:
        """
        Constraint applied at the last curves of the WrapCurve.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
                | o Property LastCurvesConstraint() As long
                |
                |     Returns or sets constraint at last curves of the
                |     WrapCurve.
                |     Legal values:
                |     1 = no constraint,
                |     2 = Deformed surface will have the same tangency and the same curvature as the original
                |     surface at last curves.
                |
                |     Example:
                |         This example retrieves in LastCurvesConstraint the constraint at last
                |         curves of the ShpWrapCurve hybrid shape WrapCurve feature.
                |
                |          Dim LastCurvesConstraint As long
                |          Set LastCurvesConstraint = ShpWrapCurve.LastCurvesConstraint

        :return: int
        :rtype: int
        """
        return self.hybrid_shape_wrap_curve.LastCurvesConstraint

    @last_curves_constraint.setter
    def last_curves_constraint(self, value: int):
        """
        Set the constraint at the last curves (1 = none, 2 = tangency + curvature).

        :param int value:
        """
        self.hybrid_shape_wrap_curve.LastCurvesConstraint = value
    @property
    def surface(self) -> Reference:
        """
        The surface to be deformed by the WrapCurve, wrapped in a Reference.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
                | o Property Surface() As Reference
                |
                |     Returns or sets the surface to deform of the WrapCurve.
                |     Sub-element(s) supported (see Boundary object): Face.
                |
                |     Example:
                |         This example retrieves in SurfaceToDeform the surface to deform of the
                |         ShpWrapCurve hybrid shape WrapCurve feature.
                |
                |          SurfaceToDeform = ShpWrapCurve.Surface

        :return: Reference
        :rtype: Reference
        """
        return Reference(self.hybrid_shape_wrap_curve.Surface)

    @surface.setter
    def surface(self, reference_surface: Reference):
        """
        Set the surface to deform; the underlying COM object is passed through.

        :param Reference reference_surface:
        """
        self.hybrid_shape_wrap_curve.Surface = reference_surface.com_object
    def get_curves(self, i_position: int, o_reference_curve: Reference, o_target_curve: Reference) -> None:
        """
        Retrieve a (reference curve, target curve) couple from the WrapCurve.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
                | o Sub GetCurves(long iPosition,
                | Reference oReferenceCurve,
                | Reference oTargetCurve)
                |
                |     Returns a curve from the WrapCurve.
                |
                |     Parameters:
                |
                |         iPosition
                |             The position of the curves in the list of curves.
                |         oReferenceCurve
                |             the reference curve.
                |         oTargetCurve
                |             the target curve.
                |             Legal values: can be equal to Nothing. In this case, the associated
                |             ref curve will be fixed.
                |
                |     Example:
                |         This example retrieves in WrapCurveRefCurve the first reference
                |         curve of the ShpWrapCurve hybrid shape WrapCurve feature and retrieves in
                |         WrapCurveTargCurve the first target curve of the ShpWrapCurve hybrid shape
                |         WrapCurve feature.
                |
                |          Dim WrapCurveRefCurve As Reference
                |          Dim WrapCurveTargCurve As Reference
                |          ShpWrapCurve.GetCurve(2)

        :param int i_position:
        :param Reference o_reference_curve:
        :param Reference o_target_curve:
        :return: None
        :rtype: None
        """
        # NOTE(review): the COM method fills the two output Reference arguments
        # in place; the Python return value is not meaningful -- confirm
        # whether a system-service call (see below) is needed in practice.
        return self.hybrid_shape_wrap_curve.GetCurves(i_position, o_reference_curve.com_object,
                                                      o_target_curve.com_object)

    # # # # Autogenerated comment:
    # # some methods require a system service call as the methods expects a vb array object
    # # passed to it and there is no way to do this directly with python. In those cases the following code
    # # should be uncommented and edited accordingly. Otherwise completely remove all this.
    # # vba_function_name = 'get_curves'
    # # vba_code = """
    # # Public Function get_curves(hybrid_shape_wrap_curve)
    # #     Dim iPosition (2)
    # #     hybrid_shape_wrap_curve.GetCurves iPosition
    # #     get_curves = iPosition
    # # End Function
    # # """
    # # system_service = self.application.system_service
    # # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
    def get_number_of_curves(self) -> int:
        """
        Return the number of couples of curves of the WrapCurve (positive or null).

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
                | o Func GetNumberOfCurves() As long
                |
                |     Returns the number of couples of curves of the WrapCurve.
                |
                |     Returns:
                |         The number of couples of curves
                |         Legal values: positive or null.
                |
                |     Example:
                |         This example retrieves in NumberOfCurves the number of couples of curves of
                |         the ShpWrapCurve hybrid shape WrapCurve feature.
                |
                |          NumberOfCurves = ShpWrapCurve.GetNumberOfCurves(2)

        :return: int
        :rtype: int
        """
        return self.hybrid_shape_wrap_curve.GetNumberOfCurves()
    def get_reference_direction(self, o_direction_type: int, o_direction: HybridShapeDirection) -> None:
        """
        Get the reference projection direction of the wrap curve feature.

        .. note::
            :class: toggle

            CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
                | o Sub GetReferenceDirection(long oDirectionType,
                | HybridShapeDirection oDirection)
                |
                |     Gets the reference direction projection of the wrap curve
                |     feature.
                |
                |     Parameters:
                |
                |         oDirectionType
                |             type of direction.
                |             Legal values: 1 = reference direction is computed, and 2 = user direction.
                |         oDirection
                |             curve to be added as a direction, if oDirectionType = 2.
                |     Example:
                |         This example retrieves in RefDirection the reference direction of
                |         the ShpWrapCurve hybrid shape WrapCurve feature and in RefDirectionType the
                |         reference direction of the ShpWrapCurve hybrid shape
                |         WrapCurve
                |
                |          Dim RefDirectionType As long
                |          Dim RefDirection As CATIAHybridShapeDirection
                |          ShpWrapCurve.SetReferenceDirection (RefDirectionType,
                |          RefDirection)

        :param int o_direction_type:
        :param HybridShapeDirection o_direction:
        :return: None
        :rtype: None
        """
        # NOTE(review): both arguments are COM *output* parameters; a system
        # service call (below) may be required to read them from Python.
        return self.hybrid_shape_wrap_curve.GetReferenceDirection(o_direction_type, o_direction.com_object)

    # # # # Autogenerated comment:
    # # some methods require a system service call as the methods expects a vb array object
    # # passed to it and there is no way to do this directly with python. In those cases the following code
    # # should be uncommented and edited accordingly. Otherwise completely remove all this.
    # # vba_function_name = 'get_reference_direction'
    # # vba_code = """
    # # Public Function get_reference_direction(hybrid_shape_wrap_curve)
    # #     Dim oDirectionType (2)
    # #     hybrid_shape_wrap_curve.GetReferenceDirection oDirectionType
    # #     get_reference_direction = oDirectionType
    # # End Function
    # # """
    # # system_service = self.application.system_service
    # # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_reference_spine(self, o_spine_type: int, o_spine: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetReferenceSpine(long oSpineType,
| Reference oSpine)
|
| Returns the reference spine of the wrap curve feature.
|
| Parameters:
|
| oSpineType
| type of spine.
| Legal values: 1 = Reference Spine is equal to the first reference curve, and
| 2 = user spine.
| oSpine
| curve to be added as a spine, if iSpineType = 2.
|
| Example:
| This example retrieves in RefSpine the reference spine of the
| ShpWrapCurve hybrid shape WrapCurve feature and in RefSpineType the reference
| spine type.
|
| Dim RefSpineType As long
| Dim RefSpine As Reference
| ShpWrapCurve.GetReferenceSpine (RefSpineType,
| RefSpine)
:param int o_spine_type:
:param Reference o_spine:
:return: None
:rtype: | |
38
F CAN 45 43
This still describes a binomial family, but in a more compact
format than specifying each individual user. We eventually
want to support this more compact format, but we do not
currently! In this context, it is important to check for
over-dispersion (see [GLM]), and I need to learn more first.
In the current implementation, we assume that there is no
over-dispersion, and that the number of users having the
same set of features is small.
Parameters
----------
X : pandas dataframe
Dataframe of features. The column names must correspond
to the names of features added to the model. X may have
extra columns corresponding to features not included in
the model; these are simply ignored. Where applicable,
the data should be "pre-transformation", since this code
will apply any transformations specified in .add_feature.
y : array
Response. Depending on the model family, the response
may need to be in a particular form (for example, for
a binomial family, the y's should be either 0 or 1),
but this is not checked anywhere!
covariate_class_sizes : array or None.
If observations are grouped into covariance classes, the
size of those classes should be listed in this input.
w : array
Weights applied to each observation. This is effectively
specifying the dispersion of each observation.
optimizer : string
We use the Alternating Direction Method of Multipliers
('admm') to fit the model. We may eventually support more
methods, but right now this option does nothing.
smoothing : float
Smoothing to apply to entire model, used in conjunction
with other regularization parameters. That is, whatever
regularization is used for the various features, is
scaled by this term, allowing the user to set the overall
smoothing by Cross Validation or whatever they like. This
allows the user to specify different regularization for
each feature, while still permitting a one-dimensional
family of models corresponding to different amounts of
regularization. Defaults to 1., leaving the regularization
as specified in .add_feature().
save_flag : boolean
Specifies whether to save intermediate results after each
iteration. Useful for complicated models with massive
data sets that take a while to fit. If the system crashes
during the fit, the analyst can pick up where they left
off instead of starting from scratch. Defaults to False.
verbose : boolean
Specifies whether to print mildly useful information to
the screen during the fit. Defaults to False.
plot_convergence : boolean
Specifies whether to plot the convergence graph at the
end. (I suspect only Convex Optimization nerds like me
want to see this.) Defaults to False.
max_its : integer
Maximum number of iterations. Defaults to 100.
Returns
-------
(nothing)
"""
if save_flag and self._name is None:
msg = 'Cannot save a GAM with no name.'
msg += ' Specify name when instantiating model.'
raise ValueError(msg)
if len(X) != len(y):
raise ValueError('Inconsistent number of observations in X and y.')
num_threads = 1
self._rho = 0.1
eps_abs = 1e-3
eps_rel = 1e-3
# Note that X may include columns that do not correspond to features in our model
# (for example, if the user is experimenting with leaving out features to assess
# importance). Thus, the real number of features is self._num_features, not
# num_features as in the next line.
self._num_obs, num_features = X.shape
self._y = y.flatten()
self._weights = weights
if covariate_class_sizes is not None:
self._has_covariate_classes = True
self._covariate_class_sizes = covariate_class_sizes
mean_response = float(np.sum(self._y)) / np.sum(self._covariate_class_sizes)
self._offset = self._eval_link(mean_response)
else:
self._has_covariate_classes = False
self._covariate_class_sizes = None
self._offset = self._eval_link(np.mean(self._y))
fj = {}
for name, feature in self._features.iteritems():
feature.initialize(X[name].values, smoothing=smoothing,
covariate_class_sizes=self._covariate_class_sizes,
save_flag=save_flag, save_prefix=self._name)
fj[name] = np.zeros(self._num_obs)
self.f_bar = np.full((self._num_obs,), self._offset / self._num_features)
self.z_bar = np.zeros(self._num_obs)
self.u = np.zeros(self._num_obs)
self.prim_res = []
self.dual_res = []
self.prim_tol = []
self.dual_tol = []
self.dev = []
z_new = np.zeros(self._num_obs)
if num_threads > 1:
p = mp.Pool(num_threads)
else:
p = None
for i in range(max_its):
if verbose:
print 'Iteration {0:d}'.format(i)
print 'Optimizing primal variables'
fpumz = self._num_features * (self.f_bar + self.u - self.z_bar)
fj_new = {}
f_new = np.full((self._num_obs,), self._offset)
if False: #num_threads > 1:
# Getting python to run a for loop in parallel
# might as well be impossible :-(
args = [(i, self._features[i], fpumz, self._rho) for i in self._features.keys()]
results = p.map(_feature_wrapper, args)
for i in results:
fj_new[i[0]] = i[1]
f_new += i[1]
else:
for name, feature in self._features.iteritems():
if verbose:
print 'Optimizing {0:s}'.format(name)
fj_new[name] = feature.optimize(fpumz, self._rho)
f_new += fj_new[name]
f_new /= self._num_features
if verbose:
print 'Optimizing dual variables'
z_new = self._optimize(self.u + f_new, self._num_features, p)
self.u += f_new - z_new
prim_res = np.sqrt(self._num_features) * linalg.norm(f_new - z_new)
dual_res = 0.0
norm_ax = 0.0
norm_bz = 0.0
norm_aty = 0.0
num_params = 0
for name, feature in self._features.iteritems():
dr = ((fj_new[name] - fj[name])
+ (z_new - self.z_bar)
- (f_new - self.f_bar))
dual_res += dr.dot(dr)
norm_ax += fj_new[name].dot(fj_new[name])
zik = fj_new[name] + z_new - f_new
norm_bz += zik.dot(zik)
norm_aty += feature.compute_dual_tol(self.u)
num_params += feature.num_params()
dual_res = self._rho * np.sqrt(dual_res)
norm_ax = np.sqrt(norm_ax)
norm_bz = np.sqrt(norm_bz)
norm_aty = np.sqrt(norm_aty)
self.f_bar = f_new
fj = fj_new
self.z_bar = z_new
if self._has_covariate_classes:
sccs = np.sum(self._covariate_class_sizes)
prim_tol = (np.sqrt(sccs * self._num_features) * eps_abs
+ eps_rel * np.max([norm_ax, norm_bz]))
else:
prim_tol = (np.sqrt(self._num_obs * self._num_features) * eps_abs
+ eps_rel * np.max([norm_ax, norm_bz]))
dual_tol = np.sqrt(num_params) * eps_abs + eps_rel * norm_aty
self.prim_res.append(prim_res)
self.dual_res.append(dual_res)
self.prim_tol.append(prim_tol)
self.dual_tol.append(dual_tol)
self.dev.append(self.deviance())
if prim_res < prim_tol and dual_res < dual_tol:
if verbose:
print 'Fit converged'
break
else:
if verbose:
print 'Fit did not converge'
if num_threads > 1:
p.close()
p.join()
self._fitted = True
if save_flag:
self._save()
if plot_convergence:
_plot_convergence(self.prim_res, self.prim_tol, self.dual_res,
self.dual_tol, self.dev)
    def _optimize(self, upf, N, p=None):
        r"""Optimize \bar{z}.

        (Raw docstring so the LaTeX backslashes, e.g. ``\bar``, are preserved
        literally instead of being interpreted as string escapes.)

        Solves the optimization problem:

            minimize L(N*z) + \rho/2 * \| N*z - N*u - N*\bar{f} \|_2^2

        where z is the variable, N is the number of features, u is the scaled
        dual variable, \bar{f} is the average feature response, and L is
        the likelihood function which is different depending on the
        family and link function. This is accomplished via a proximal
        operator, as discussed in [GAMADMM]:

            prox_\mu(v) := argmin_x L(x) + \mu/2 * \| x - v \|_2^2

        I strongly believe that paper contains a typo in this equation, so we
        return (1. / N) * prox_\mu (N * (u + \bar{f}) with \mu = \rho instead
        of \mu = \rho / N as in [GAMADMM]. When implemented as in the paper,
        convergence was much slower, but it did still converge.

        Certain combinations of family and link function result in proximal
        operators with closed form solutions, making this step *very* fast
        (e.g. 3 flops per observation).

        Parameters
        ----------
        upf : array
            Vector representing u + \bar{f}
        N : integer
            Number of features.
        p : Multiprocessing Pool (optional)
            If multiple threads are available, massive data sets may
            benefit from solving this optimization problem in parallel.
            It is up to the individual functions to decide whether to
            actually do this.

        Returns
        -------
        z : array
            Result of the above optimization problem.
        """
        # Select the proximal operator matching this model's family and link.
        # Canonical links get specialized (closed-form) operators; any other
        # link falls back to the family's generic operator.
        prox = None
        if self._family == 'normal':
            if self._link == 'identity':
                prox = po._prox_normal_identity
            else:
                prox = po._prox_normal
        elif self._family == 'binomial':
            if self._link == 'logistic':
                prox = po._prox_binomial_logit
            else:
                prox = po._prox_binomial
            # NOTE(review): covariate classes are only special-cased for the
            # binomial family; other families reach the generic call below even
            # when self._has_covariate_classes is True. Confirm this is
            # intentional (the binomial prox takes the class sizes positionally
            # and appears to have a different signature from the generic call).
            if self._has_covariate_classes:
                return (1. / N) * prox(N*upf, self._rho, self._y,
                                       self._covariate_class_sizes,
                                       self._weights, self._eval_inv_link, p=p)
        elif self._family == 'poisson':
            if self._link == 'log':
                prox = po._prox_poisson_log
            else:
                prox = po._prox_poisson
        elif self._family == 'gamma':
            if self._link == 'reciprocal':
                prox = po._prox_gamma_reciprocal
            else:
                prox = po._prox_gamma
        elif self._family == 'inverse_gaussian':
            if self._link == 'reciprocal_squared':
                prox = po._prox_inv_gaussian_reciprocal_squared
            else:
                prox = po._prox_inv_gaussian
        else:
            msg = 'Family {0:s} and Link Function {1:s} not (yet) supported.'
            raise ValueError(msg.format(self._family, self._link))
        # The 1/N rescaling implements the prox-with-\mu=\rho variant argued
        # for in the docstring above.
        return (1. / N) * prox(N*upf, self._rho, self._y, w=self._weights,
                               inv_link=self._eval_inv_link, p=p)
def predict(self, X):
"""Apply fitted model to features.
Parameters
----------
X : pandas dataframe
Data for which we wish to | |
"""
telemetrystation-15min.py
This Python program queries the HydroBase web services for
real-time (15 minute) station telemetry data.
The web service results are output to stdout or a file.
Run with -h to display the usage.
See the main program at the end of this file.
"""
import argparse
import dateutil.parser as p
import json
import pprint
import requests
import sys
def build_url(app_data: dict, param: str, page_index: int) -> str:
    """
    Build the URL for querying the HydroBase web services,
    for a single parameter.

    Args:
        app_data (dict): Dictionary of command line input.
        param (str): Single parameter to query.
        page_index (int): Page index, used for multi-page queries.

    Returns:
        URL to use for query.
    """
    # Get needed data, alphabetized.
    # dict.get() returns None for missing keys, matching get_app_data().
    api_key = app_data.get('API_KEY')
    end_date = app_data.get('END_DATE')
    output_format = app_data.get('OUTPUT_FORMAT')
    page_size = app_data.get('PAGE_SIZE')
    if output_format == 'json':
        # Automatically use pretty print for JSON
        output_format = 'jsonprettyprint'
    station_abbrev = app_data.get('STATION_ABBREV')
    start_date = app_data.get('START_DATE')
    # Base URL
    # - station ID is required so put first with the question mark
    url = 'https://dwr.state.co.us/Rest/GET/api/v2/telemetrystations/telemetrytimeseriesraw/?abbrev={}'.format(station_abbrev)
    # Append other optional query-string parts.
    if output_format != '':
        url = "{}&format={}".format(url, output_format)
    if page_size != '':
        url = "{}&pageSize={}".format(url, page_size)
    if page_index != '':
        url = "{}&pageIndex={}".format(url, page_index)
    if param != '':
        # BUG FIX: was "{}¶meter={}" — the '&para' of '&parameter' had been
        # collapsed into the pilcrow character (HTML-entity mojibake), which
        # sent an invalid query-string parameter to the web service.
        url = "{}&parameter={}".format(url, param)
    if start_date != '':
        url = "{}&startDate={}".format(url, start_date)
    if end_date != '':
        url = "{}&endDate={}".format(url, end_date)
    if api_key != '':
        # Escape slashes so the key survives URL parsing.
        api_key = api_key.replace('/', '%2F')
        url = "{}&apiKey={}".format(url, api_key)
    return url
def check_input(app_data: dict) -> None:
    """
    Check input parameters and exit with status 1 if not correct.

    May update app_data['OUTPUT_FILE'] to append the format extension.

    Args:
        app_data (dict): Application data from the command line.

    Returns:
        None
    """
    # Check the output format
    # - required argument so won't be blank
    output_format = get_app_data(app_data, 'OUTPUT_FORMAT')
    error_count = 0
    if (output_format != 'csv') and (output_format != 'json'):
        print_stderr('Output format ({}) is not valid, must be csv or json.'.format(output_format))
        error_count += 1
    # Append .csv or .json to the output file if not already at the end.
    output_file = get_app_data(app_data, 'OUTPUT_FILE')
    if not output_file.endswith(output_format) and output_file != 'stdout':
        output_file = output_file + '.' + output_format
        app_data['OUTPUT_FILE'] = output_file
    # Check the format for start and end dates
    # - optional so could be blank
    start_date = get_app_data(app_data, 'START_DATE')
    if start_date is not None and (start_date != ''):
        if not check_date(start_date):
            print_stderr('Start date ({}) is not valid, must be in format: mm/dd/yyyy'.format(start_date))
            error_count += 1
    end_date = get_app_data(app_data, 'END_DATE')
    # BUG FIX: previously tested start_date here, so an invalid end date was
    # never reported whenever the start date was blank.
    if end_date is not None and (end_date != ''):
        if not check_date(end_date):
            print_stderr('End date ({}) is not valid, must be in format: mm/dd/yyyy'.format(end_date))
            error_count += 1
    if error_count > 0:
        sys.exit(1)
def check_date(date_string: str) -> bool:
    """
    Determine if the date string is a valid mm/dd/yyyy date.

    Previously only the number of '/'-separated parts was checked, so
    input such as 'aa/bb/cccc' passed validation.  The parts are now also
    checked to be numeric and in plausible calendar ranges.

    Args:
        date_string (str): date string to check.

    Returns:
        bool: True if valid, False if invalid.
    """
    def _invalid() -> bool:
        # Report the problem on stderr and signal failure.
        print('Date input ({}) is invalid.'.format(date_string), file=sys.stderr)
        print('Dates must use format: mm/dd/yyyy', file=sys.stderr)
        return False

    parts = date_string.split('/')
    if len(parts) != 3:
        return _invalid()
    month, day, year = parts
    # Each component must be numeric; the year must be 4 digits.
    if not (month.isdigit() and day.isdigit() and year.isdigit() and len(year) == 4):
        return _invalid()
    # Basic calendar range check (day-of-month vs. month is not validated).
    if not (1 <= int(month) <= 12 and 1 <= int(day) <= 31):
        return _invalid()
    return True
def format_output(lines: list) -> list:
    """
    Convert verbose database date/time columns to ISO 8601 format.

    The csv returned by the web service has a header on line index 2 and
    data from line index 3 on.  The 'measDateTime' and 'modified' columns
    carry verbose timestamps; they are rewritten as 'YYYY-MM-DD HH:MM'
    (space separator, minute precision) so they import cleanly into Excel.

    Args:
        lines (list): csv response split into lines; the first three lines
            are metadata/header and are passed through unchanged.

    Returns:
        list: the lines with the date columns reformatted.
    """
    def _column_index(header: list, name: str) -> int:
        # Return the column position of `name`, or -1 when absent.
        # BUG FIX: the original called list.index() unconditionally, which
        # raised ValueError whenever the column was missing, and only
        # converted the columns when they sat at fixed positions 2 and 7.
        try:
            return header.index(name)
        except ValueError:
            return -1

    header = lines[2].split(',') if len(lines) > 2 else []
    meas_date_time_index = _column_index(header, 'measDateTime')
    modified_index = _column_index(header, 'modified')
    formatted = []
    for line_number, line in enumerate(lines):
        fields = []
        for column, field in enumerate(line.split(',')):
            # Only data rows (after the 3 metadata/header lines) in the two
            # date columns get reformatted; minute precision with a space
            # separator avoids Excel import problems.
            if line_number > 2 and column in (meas_date_time_index, modified_index):
                fields.append(p.parse(field).isoformat(sep=' ', timespec='minutes'))
            else:
                # Not a date column — keep the field as-is.
                fields.append(field)
        # Rejoin so the line looks exactly as before, bar the date format.
        formatted.append(','.join(fields))
    return formatted
def get_app_data(app_data: dict, key: str) -> object or None:
    """
    Look up a value in the application data dictionary.

    Args:
        app_data (dict): Application data from command line.
        key (str): Name of application data value.

    Returns:
        The value stored under key, or None when the key is not defined.
    """
    # dict.get() already returns None for a missing key, which matches the
    # documented contract without a try/except block.
    return app_data.get(key)
def parse_command_line() -> dict:
    """
    Parse the command line arguments into an application data dictionary.

    Prints help to stderr and exits with status 1 when no arguments are
    given; argparse itself handles missing required arguments.

    Returns:
        dict: the parsed values keyed by application data name.
    """
    arg_parser = argparse.ArgumentParser(description='Query the HydroBase web services for real-time 15-minute telemetry station data.')
    # Optional arguments.
    arg_parser.add_argument('--output', metavar='filename', default='stdout',
                            help='Write the output to the specified file instead of stdout.')
    arg_parser.add_argument('--startDate', metavar='start_date', default='',
                            help='The date to start the query in format: mm/dd/yyyy')
    arg_parser.add_argument('--endDate', metavar='end_date', default='',
                            help='The date to end the query in format: mm/dd/yyyy')
    arg_parser.add_argument('--pageSize', metavar='page_size', default='',
                            help='The page size for the response, used in testing.')
    arg_parser.add_argument('--apiKey', metavar='api_key', default='',
                            help='The API Key to increase response limit from web services.')
    # Required arguments.
    required_group = arg_parser.add_argument_group('required arguments')
    required_group.add_argument('--abbrev', metavar='abbrev', help='The station abbreviation (ABBREV) identifier.',
                                required=True)
    required_group.add_argument('--parameter', nargs='+', metavar='param_name',
                                help='The measured parameter name(s), separated by spaces if more than one.', required=True)
    required_group.add_argument('--format', metavar='output_format',
                                help='Format for output: csv or json', required=True)
    # With no arguments at all, show the usage and quit.
    if len(sys.argv) == 1:
        arg_parser.print_help(sys.stderr)
        sys.exit(1)
    # Parse the command line and map the namespace into the application
    # data dictionary returned to the calling code.
    parsed = arg_parser.parse_args()
    return {
        'STATION_ABBREV': parsed.abbrev,
        # The following is a list.
        'PARAMETERS': parsed.parameter,
        'OUTPUT_FORMAT': parsed.format,
        'OUTPUT_FILE': parsed.output,
        'START_DATE': parsed.startDate,
        'END_DATE': parsed.endDate,
        'API_KEY': parsed.apiKey,
        'PAGE_SIZE': parsed.pageSize,
    }
def print_remaining(app_data: dict, param: str, page_count: int) -> None:
    """
    Fetch and print result pages 2..page_count to stdout.

    The caller has already issued the first query and found that the page
    count is greater than one; this retrieves the remaining pages for both
    csv and json output (write_remaining is the file-output counterpart).

    Args:
        app_data (dict): Application data from the command line.
        param (str): Parameter being processed.
        page_count (int): Total number of result pages.

    Return:
        None
    """
    output_format = get_app_data(app_data, 'OUTPUT_FORMAT')
    for page in range(2, page_count + 1):
        print_stderr("Processing results page {} of {}.".format(page, page_count))
        request_url = build_url(app_data, param, page)
        print_stderr("Request: {}".format(request_url))
        reply = requests.get(request_url)
        if output_format == 'csv':
            # Skip the three metadata/header lines on continuation pages.
            print(*reply.text.split('\r\n')[3::], sep='\n')
        elif output_format == 'json':
            payload = json.loads(reply.text)
            pprint.PrettyPrinter(indent=4).pprint(payload["ResultList"])
def print_stderr(s: str) -> None:
    """
    Write a message line to stderr.

    Keeping messages on stderr lets the data output on stdout be
    redirected cleanly.

    Args:
        s (str): String to print.

    Returns:
        None
    """
    sys.stderr.write(s + '\n')
def process_csv(app_data: dict, param: str, first_page: bool, response: str) -> None:
"""
Append page of results to csv output format.
Args:
app_data (dict): Application input from command line.
param (str): Parameter being read.
first_page (bool): Whether the first page is being written.
response (str): Query response.
Returns:
None
"""
print_stderr('Creating csv output format.')
# response.text is a string, so can split on CRLF
lines = response.split('\r\n')
# Format the output of the response
lines = format_output(lines)
# Determine the amount of | |
from tkinter import *
import pickle as pickle
from tkinter.ttk import *
from helpers import *
from Anchors import Anchor
from Structures import Structure
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
def tv_sort(display, col, root):
    """Recursively sort a Treeview subtree by each item's 'repr' column.

    Sorts the direct children of `root` by their 'repr' value, moves them
    into place, then sorts each child's own subtree the same way.
    """
    children = sorted((display.set(child, 'repr'), child) for child in display.get_children(root))
    for position, (_, child) in enumerate(children):
        # Move the item into its sorted slot, then recurse into it.
        display.move(child, root, position)
        tv_sort(display, col, child)
###################################
# Concept list box
# handles selecting concepts
# adding new concepts
# deleting concepts
###################################
class ConceptListbox:
    """
    Treeview-backed concept list.

    Handles selecting concepts, adding new concepts, deleting concepts,
    and renaming a concept via a small popup window.
    """

    def __init__(self, parent, root, side=TOP, fill=BOTH):
        self.parent = parent
        self.treeview = Treeview(parent, columns=['conceptID'], displaycolumns=[])
        self.root = root
        self.treeview.pack(side=side, fill=fill)
        # Maps concept name -> treeview item id.
        self.concepts = {}
        #BINDINGS
        self.treeview.bind('<<TreeviewSelect>>', self.onConceptSelect)
        self.treeview.bind('-', self.deleteConcept)
        self.treeview.bind('r', self.initiateRenameConcept)
        # Grey = inserted but not yet activated; black = active.
        self.treeview.tag_configure('grey', foreground='gray')
        self.treeview.tag_configure('black', foreground='black')

    def onConceptSelect(self, event):
        """Display each concept selected in the treeview."""
        for p in event.widget.selection():
            conceptID = event.widget.item(p)['text']
            print("selecting", conceptID)
            self.root.displayConcept(conceptID)

    def insertConcept(self, name, iden):
        """Add a greyed-out concept row and remember its item id by name."""
        # BUG FIX: previously stored values=[id] — the builtin `id`
        # function, not the `iden` argument — so the conceptID column never
        # held the concept's identifier.
        self.concepts[name] = self.treeview.insert("", END, text=name, values=[iden], tags=['grey'])

    def activateConcept(self, name):
        """Mark a concept active by switching its row to black text."""
        self.treeview.item(self.concepts[name], tags=['black'])

    def deleteConcept(self, event):
        """Delete every selected concept from the backend and the view."""
        for p in event.widget.selection():
            conceptID = event.widget.item(p)['text']
            print("deleting", conceptID)
            self.root.backend.delete_concept(conceptID)
            self.treeview.delete(p)

    def initiateRenameConcept(self, event):
        """Open a rename window for every selected concept."""
        for p in event.widget.selection():
            conceptID = event.widget.item(p)['text']
            print("renaming", conceptID)
            self.renameConceptWindow(conceptID)

    def renameConceptWindow(self, oldConcept):
        """Pop up a small window prompting for the concept's new name."""
        display = Tk()
        display.title('RENAME')
        new_window = PanedWindow(display, orient=VERTICAL)
        new_window.pack(fill=BOTH, expand=1)
        label = Label(new_window, text="Enter the new name")
        new_window.add(label)
        l = Entry(new_window)

        def renameConcept(event):
            # Apply the rename in the backend, close the popup, then move
            # the treeview bookkeeping over to the new name.
            newname = event.widget.get().lower()
            self.root.backend.rename_concept(oldConcept, newname)
            event.widget.master.master.destroy()
            self.treeview.item(self.concepts[oldConcept], text=newname)
            self.concepts[newname] = self.concepts[oldConcept]
            self.concepts[oldConcept] = None

        l.bind("<Return>", renameConcept)
        new_window.add(l)
class StructuredAnchorDisplay:
    """
    Treeview of a pickled hierarchical Structure, from which a subtree can
    be selected ('+') and turned into a structured anchor for the active
    concept.
    """

    def __init__(self, parent, root, source):
        self.parent = parent
        self.source = source
        self.root = root
        # BUG FIX: file() is a Python 2 builtin (NameError on Python 3);
        # open the pickle in binary mode and close it deterministically.
        with open(source, 'rb') as pickle_file:
            self.structure = pickle.load(pickle_file)
        self.display = Treeview(parent, columns=['repr'], displaycolumns=[])
        self.display.pack(fill=BOTH, expand=1)
        # Maps structure identifier -> treeview item id ('' is the root).
        self.ref = {"": ""}
        for repr, disp, p in self.structure.getStructure():
            # BUG FIX: Treeview.insert takes 'values', not 'val'; the bad
            # keyword raised inside the old bare `except`, silently
            # dropping the column data.  The except is narrowed to the
            # expected failure (parent identifier not yet inserted).
            try:
                self.ref[repr] = self.display.insert(self.ref[p], END, text=disp, values=[repr])
            except KeyError:
                # Parent not present in self.ref; skip this node.
                pass
        root_item = self.display.get_children('')[0]
        tv_sort(self.display, 'repr', root_item)
        self.display.bind('+', self.selectAnchor)
        #self.display.bind('E', self.selectEvaluator)

    def selectAnchor(self, event):
        """Turn each selected subtree into an anchor on the active concept."""
        for p in event.widget.selection():
            anchor_name = event.widget.item(p)['values'][0]
            print('new structured anchor', anchor_name)
            nodes, names, edges = self.structure.getDescendents(anchor_name)
            new_anchor = Anchor(str(anchor_name), nodes, edges, names)
            self.root.backend.getActiveConcept().addAnchor(new_anchor)
            self.root.displayConcept()

    def selectEvaluator(self, event):
        """Turn each selected subtree into an evaluator (binding currently disabled)."""
        for p in event.widget.selection():
            anchor_name = event.widget.item(p)['values'][0]
            print('new structured evaluator', anchor_name)
            nodes, names, edges = self.structure.getDescendents(anchor_name)
            new_anchor = Anchor(str(anchor_name), nodes, edges, names)
            self.root.backend.getActiveConcept().addEvaluator(new_anchor)
            self.root.displayConcept()

    def open(self, iden):
        """Scroll to and select the item whose structure identifier is iden."""
        item = self.ref[iden]
        # The see/selection_set pair was duplicated verbatim in the
        # original; doing it once is sufficient.
        self.display.see(item)
        self.display.selection_set(item)
class AnchorDisplay:
    """
    Composite panel showing the active concept's anchors, tabbed anchor
    suggestions (flat suggestions plus one structured tab per data type
    with a hierarchy), an entry box for typing new anchors, and the Learn
    button.
    """

    def __init__(self, parent, root, side=TOP, fill=BOTH):
        self.parent = parent
        self.root = root
        self.Windows = PanedWindow(parent, orient=HORIZONTAL)
        self.anchorDisplay = Treeview(self.Windows, columns=['repr'], displaycolumns=[])
        self.anchorDisplay.pack(side=LEFT, fill=BOTH, expand=1)
        self.anchorDisplay.heading('#0', text='anchors')
        #self.evalAnchorDisplay = Treeview(self.Windows, columns=['repr'], displaycolumns=[])
        #self.evalAnchorDisplay.pack(side=LEFT, fill=BOTH, expand=1)
        #self.evalAnchorDisplay.heading('#0', text='evaluators')
        # Identifier of the currently selected anchor (0 = none yet).
        self.selection = 0
        self.anchorSuggestions = Notebook(self.Windows)
        self.anchorSuggestions.pack(side=RIGHT, fill=BOTH, expand=1)
        self.Windows.add(self.anchorDisplay)
        self.Windows.add(self.anchorSuggestions)
        #self.Windows.add(self.evalAnchorDisplay)
        self.frames = {}
        self.trees = {}
        settings = root.settings
        # Data type names and their token prefixes from the settings XML.
        self.dataTypes = [field.attrib['type'] for field in root.settings.findall('./dataTypes/datum')]
        self.prefixes = [field.attrib['prefix'] for field in root.settings.findall('./dataTypes/datum')]
        # Flat suggestion tab.
        f = self.frames['suggest'] = Frame(self.anchorSuggestions)
        self.anchorSuggestions.add(f, text='Suggestions')
        self.trees['suggest'] = t = Treeview(f, columns=['repr', 'weight'], displaycolumns=[])
        t.pack(fill=BOTH, expand=1)
        # One structured tab for each data type declaring a hierarchy file
        # (attribute name 'heirarchy' matches the settings XML spelling).
        frame_list = [d for d in settings.findall('dataTypes/datum') if not d.attrib['heirarchy'] == '']
        for frame in frame_list:
            f = self.frames[frame.attrib['type']] = Frame(self.anchorSuggestions)
            self.anchorSuggestions.add(f, text=frame.attrib['type'])
            self.trees[frame.attrib['type']] = StructuredAnchorDisplay(f, root, frame.attrib['heirarchy'])
        self.anchorEntry = Entry(parent)
        parent.add(self.Windows)
        parent.add(self.anchorEntry)
        buttonWindow = PanedWindow(parent, orient=HORIZONTAL)
        self.learnButton = Button(buttonWindow, text="Learn!", command=self.doLearning)
        buttonWindow.add(self.learnButton)
        self.learnButton.pack(side=LEFT)
        #self.evaluateButton = Button(buttonWindow, text="showEval!", command=self.showEval)
        #buttonWindow.add(self.evaluateButton)
        #self.evaluateButton.pack(side=LEFT)
        #self.learnButton = Button(buttonWindow, text="Evaluate", command=self.doEvaluation)
        #buttonWindow.add(self.learnButton)
        #self.learnButton.pack(side=LEFT)
        self.parent.add(buttonWindow)
        #BINDINGS
        self.anchorEntry.bind("<Return>", self.anchorAdded)
        self.anchorDisplay.bind("-", self.anchorRemoved)
        self.anchorDisplay.tag_configure("red", foreground="red")
        #self.evalAnchorDisplay.bind("-", self.evalRemoved)
        self.anchorDisplay.bind("<<TreeviewSelect>>", self.updateSelection)
        self.trees['suggest'].bind('+', self.suggestionAccepted)
        self.trees['suggest'].bind('E', self.evaluatorAccepted)
        for datatype in self.dataTypes:
            self.trees['suggest'].tag_bind(datatype, "g", self.gotoCode)

    def updateSelection(self, event):
        """Track the selected anchor; refresh patient list in 'select' mode."""
        for p in event.widget.selection():
            self.selection = event.widget.item(p)['values'][0]
        if self.root.displayMode.get() == 'select':
            self.root.patientListDisplay.displayPatients()
            self.root.patientDetailDisplay.clear()

    def gotoCode(self, event):
        """From a tagged suggestion row, open the matching structured tab."""
        for p in event.widget.selection():
            tags = event.widget.item(p)['tags']
            datatype = ""
            val = ""
            # A row's tags carry both the data type name and the prefixed
            # code value; pick each out by matching against settings.
            for t in tags:
                t = str(t)
                for d in self.dataTypes:
                    if t == d:
                        datatype = d
                for p in self.prefixes:
                    if len(p) and p in t:
                        val = t
            self.showSuggestion(datatype, val)

    def showAnchors(self, conceptID):
        """Rebuild the anchor tree and suggestion list for conceptID."""
        concept = self.root.backend.concepts[conceptID]
        for c in self.anchorDisplay.get_children():
            self.anchorDisplay.delete(c)
        #for c in self.evalAnchorDisplay.get_children():
        #    self.evalAnchorDisplay.delete(c)
        ref = {"": ""}
        for anch in concept.anchors:
            for a, name, p in anch.getStructure():
                print('adding anchor', a, name, p)
                tags = []
                # A leading '!' marks a negated anchor; show it in red.
                if name[0] == '!':
                    tags.append('red')
                ref[a] = self.anchorDisplay.insert(ref[p], END, text=name, values=[a], tags=tags)
                if a == self.selection:
                    self.anchorDisplay.selection_set(ref[a])
        #for anch in concept.evaluators:
        #    for a, name, p in anch.getStructure():
        #        ref[a] = self.evalAnchorDisplay.insert(ref[p], END, text=name, values=[a])
        #        if a == self.selection:
        #            self.evalAnchorDisplay.selection_set(ref[a])
        for c in self.trees['suggest'].get_children():
            self.trees['suggest'].delete(c)
        for word, weight in concept.getSuggestions():
            # Skip words containing quote characters — presumably because
            # they break the Tcl value round-trip; confirm before removing.
            if '\'' in word or '\"' in word:
                continue
            txt, tags = self.getDisplayVersion(word)
            prefix = ""
            if '_' in word:
                prefix = word.split('_')[0] + '_'
            self.trees['suggest'].insert('', END, text=prefix + txt, values=(word, weight), tags=tags)

    def doLearning(self):
        """Run one learning step on the active concept and refresh."""
        self.root.backend.getActiveConcept().doLearning()
        self.root.displayConcept()

    def showEval(self):
        """Plot the active concept's evaluation history with matplotlib."""
        evaluations = self.root.backend.getActiveConcept().evaluations
        recall = self.root.backend.getActiveConcept().recall
        plt.plot(evaluations, '*-')
        plt.xlabel('steps')
        plt.ylabel('prec@' + str(recall))
        plt.show()

    def getDisplayVersion(self, word):
        """Return (display_text, tags) for word, searching every dictionary.

        BUG FIX: the original returned from the first iteration's `else`
        branch, so only the first dictionary was ever consulted; the
        fallback now runs only after all dictionaries have been checked.
        """
        for datatype, dct in self.root.dictionaries:
            if word in dct:
                return dct[word], [word, datatype]
        return word, []

    def anchorAdded(self, event):
        """Create an anchor from the entry box text (lowercased)."""
        new_anchor = Anchor(event.widget.get().lower())
        self.anchorEntry.delete(0, END)
        self.root.backend.getActiveConcept().addAnchor(new_anchor)
        self.root.displayConcept(self.root.currentConcept)

    def suggestionAccepted(self, event):
        """Promote each selected suggestion to an anchor."""
        for p in event.widget.selection():
            repr = (str(event.widget.item(p)['values'][0]))
            disp = (str(event.widget.item(p)['text']))
            new_anchor = Anchor(repr, members=[repr], display_names=[disp])
            self.root.backend.getActiveConcept().addAnchor(new_anchor)
            self.root.displayConcept(self.root.currentConcept)

    def evaluatorAccepted(self, event):
        """Promote each selected suggestion to an evaluator."""
        for p in event.widget.selection():
            repr = (str(event.widget.item(p)['values'][0]))
            disp = (str(event.widget.item(p)['text']))
            new_anchor = Anchor(repr, members=[repr], display_names=[disp])
            self.root.backend.getActiveConcept().addEvaluator(new_anchor)
            self.root.displayConcept(self.root.currentConcept)

    def anchorRemoved(self, event):
        """Remove each selected anchor from the active concept."""
        for p in event.widget.selection():
            anchor = event.widget.item(p)['values'][0]
            self.root.backend.getActiveConcept().removeAnchor(anchor)
            self.root.displayConcept(self.root.currentConcept)

    def evalRemoved(self, event):
        """Remove each selected evaluator from the active concept."""
        for p in event.widget.selection():
            anchor = event.widget.item(p)['values'][0]
            self.root.backend.getActiveConcept().removeEvaluator(anchor)
            self.root.displayConcept(self.root.currentConcept)

    def showSuggestion(self, datatype, val):
        """Switch to the structured tab for datatype and select val."""
        tab = self.frames[datatype]
        self.anchorSuggestions.select(tab)
        self.trees[datatype].open(val)
class PatientDetailDisplay:
    """
    Text widget showing one patient's full record, with anchored terms
    highlighted in red, negated terms in purple, and coded tokens rendered
    as clickable links that jump to their suggestion tab.
    """

    def __init__(self, parent, root, side=TOP, fill=BOTH):
        #middle listbox -- patient representation
        self.parent = parent
        self.root = root
        self.patientDetails = Text(parent, wrap=WORD)
        self.patientDetails.pack(side=TOP, fill=X, expand=0)
        self.settings = root.settings
        # Display fields, data type names, and token prefixes from XML.
        self.displayFields = [field.attrib['name'] for field in root.settings.findall('./displaySettings/detailedDisplay/displayFields/field')]
        self.dataTypes = [field.attrib['type'] for field in root.settings.findall('./dataTypes/datum')]
        self.prefixes = [field.attrib['prefix'] for field in root.settings.findall('./dataTypes/datum')]
        parent.add(self.patientDetails)

    def clear(self):
        """Remove all text from the detail view."""
        self.patientDetails.delete(1.0, END)

    def gotoCode(self, event):
        """On click, recover the token's data type and code from its text
        tags and open the matching suggestion tab."""
        x, y = event.x, event.y
        tags = event.widget.tag_names("@%d,%d" % (x, y))
        datatype = ""
        val = ""
        for t in tags:
            t = str(t)
            # A tag equal to a data type name identifies the type; a tag
            # containing a non-empty prefix carries the code value itself.
            for d in self.dataTypes:
                if t == d:
                    datatype = d
            for p in self.prefixes:
                if len(p) and p in t:
                    val = t
        self.root.anchorDisplay.anchorSuggestions.focus_set()
        self.root.anchorDisplay.showSuggestion(datatype, val)

    def displayPatient(self, id):
        """Render the full record for patient `id` into the text widget."""
        currentConcept = self.root.currentConcept
        self.clear()
        self.patientDetails.tag_config("red", foreground="red")
        self.patientDetails.tag_config("blue", foreground="blue")
        self.patientDetails.tag_config("purple", foreground="purple")
        for datatype in self.dataTypes:
            # Make coded tokens look and behave like links.
            self.patientDetails.tag_config(datatype, underline=1)
            self.patientDetails.tag_bind(datatype, "<Enter>", show_hand_cursor)
            self.patientDetails.tag_bind(datatype, "<Leave>", show_arrow_cursor)
            self.patientDetails.tag_bind(datatype, "<Button-1>", self.gotoCode)
        pat = self.root.backend.patients[id]
        for field in self.displayFields:
            self.patientDetails.insert(END, field + ': ')
            try:
                txt = pat[field + '_parsed']
            except Exception as e:
                # Best effort: a missing parsed field skips that section.
                print('error?', e)
                continue
            for w in txt:
                tags = []
                # Red when the token matches any anchor of the concept.
                if len(set(w['repr']) & union(a.getMembers() for a in self.root.backend.concepts[currentConcept].anchors)):
                    tags.append('red')
                spacer = ' '
                for prefix, datatype in zip(self.prefixes, self.dataTypes):
                    if prefix == "":
                        continue
                    if any([prefix in r for r in w['repr']]):
                        # Tag with the data type (link styling) and with
                        # each code so gotoCode can recover the value.
                        tags.append(datatype)
                        for r in w['repr']:
                            tags.append(r)
                        spacer = '\n'
                for r in w['repr']:
                    # 'negxxx' marks negated mentions; show in purple.
                    if 'negxxx' in r:
                        tags.append('purple')
                self.patientDetails.insert(END, w['disp'], tuple(tags))
                self.patientDetails.insert(END, spacer)
                #for r in w['repr']:
                #    self.patientDetails.insert(END, r + ' ', ('blue', ))
            self.patientDetails.insert(END, '\n' + '-' * 50 + '\n')
        if id in self.root.backend.validate_patient_set:
            self.patientDetails.insert(END, 'VALIDATE PATIENT\n' + '-' * 50 + '\n')
        else:
            self.patientDetails.insert(END, 'TRAIN PATIENT\n' + '-' * 50 + '\n')
        if id in self.root.backend.getActiveConcept().flagged_patients:
            note = self.root.backend.getActiveConcept().flagged_patients[id]
            # BUG FIX: was '\n-'*50, which printed "\n-" fifty times instead
            # of the dashed separator used everywhere else in this method.
            self.patientDetails.insert(END, '\nNote:' + note + '\n' + '-' * 50 + '\n')
class PatientListDisplay:
def __init__(self, parent, root, side=TOP, fill=BOTH):
    """Build the patient list treeview with scrollbar, color tags, and key bindings."""
    #bottom listbox -- patient representation
    self.parent = parent
    self.root = root
    self.patientList = Treeview(parent, columns=['pid'], displaycolumns=[])
    self.patientList.pack(side=TOP, fill=BOTH, expand=1)
    # NOTE(review): the scrollbar is parented to the treeview itself rather
    # than to `parent` — confirm this is intentional.
    scrollbar = Scrollbar(self.patientList)
    scrollbar.pack(side=RIGHT,fill=Y)
    self.patientList.configure(yscroll=scrollbar.set)
    scrollbar.config(command=self.patientList.yview)
    parent.add(self.patientList)
    # Fields used for each patient's one-line summary, from the settings XML.
    self.summaryFields = [field.attrib['name'] for field in root.settings.findall('./displaySettings/patientSummary/displayFields/field')]
    #tags
    self.patientList.tag_configure("red", foreground="red")
    self.patientList.tag_configure("blue", foreground="blue")
    self.patientList.tag_configure("green", foreground="green")
    #bindings
    # Handlers (posTagPatient, negTagPatient, unTagPatient, flagPatient) are
    # defined later in this class.
    self.patientList.bind('<<TreeviewSelect>>', self.onPatientSelect)
    self.patientList.bind('+', self.posTagPatient)
    self.patientList.bind('-', self.negTagPatient)
    self.patientList.bind('0', self.unTagPatient)
    self.patientList.bind('f', self.flagPatient)
def onPatientSelect(self, event):
    """Show the detail view for every patient selected in the list."""
    tree = event.widget
    for selected in tree.selection():
        patient_id = tree.item(selected)['values'][0]
        self.root.patientDetailDisplay.displayPatient(patient_id)
def addPatientToDisplay(self, pat, showPrediction=True):
    """
    Append one patient row to the list display.

    Args:
        pat: patient record (dict-like), or None for an empty placeholder row.
        showPrediction (bool): prefix the row with the active concept's
            ranking score when one is available.
    """
    # Idiom fix: compare to None with 'is', not '=='.
    if pat is None:
        print('adding empty patients')
        # BUG FIX: values=("") is just the string "", not a tuple; pass an
        # explicit one-element tuple so the pid column exists.
        self.patientList.insert("", END, text="", values=("",))
        return
    # Summary text plus the patient's other anchored concepts.
    pat_description = " ".join(
        [ET.fromstring(pat[field]).text.strip() for field in self.summaryFields]
    ) + ' : ' + ",".join(pat['anchors'] - set([self.root.currentConcept]))
    try:
        if showPrediction:
            pat_description = "{:.3f}".format(
                self.root.backend.getActiveConcept().ranking[pat['index']]) + ': ' + pat_description
    except Exception:
        # Best effort: no ranking available for this patient yet.
        pass
    tags = []
    try:
        # Color by human label: green = positive, red = negative.
        labels = self.root.backend.getActiveConcept().human_labels
        if labels[pat['index']] > 0:
            tags = ['green']
        if labels[pat['index']] < 0:
            tags = ['red']
    except Exception:
        # Narrowed from a bare except: labels may not exist yet.
        pass
    # BUG FIX: values=(pat['index']) was not a tuple; make it explicit.
    self.patientList.insert("", END, text=pat_description, values=(pat['index'],), tags=tags)
def displayPatients(self):
listbox = self.patientList
for c in listbox.get_children():
listbox.delete(c)
print("displaying patients")
ranking = self.root.backend.getActiveConcept().ranking
train_patients = set(self.root.backend.train_patient_ids)
validate_patients = self.root.backend.validate_patient_set
#if ranking:
# assert set(ranking.keys()) == all_patients
displayMode = self.root.displayMode.get()
if displayMode == 'recent':
target_patients = self.root.backend.getActiveConcept().recentPatients & validate_patients
elif displayMode == 'select':
s = self.root.anchorDisplay.selection
target_patients = set(self.root.backend.getActiveConcept().anchoredPatients[s]) & validate_patients
else:
anchors = self.root.backend.getActiveConcept().anchors
print('anchors')
for a in anchors:
print(a, a.id)
anchored_patients = union(self.root.backend.getActiveConcept().anchoredPatients[a.id] for a in anchors) & validate_patients - union(self.root.backend.getActiveConcept().anchoredPatients[a.id] for a in anchors if a.id[0] == '!')
if displayMode == 'filter':
target_patients = anchored_patients
elif displayMode == 'sort':
target_patients = validate_patients - anchored_patients - union(self.root.backend.getActiveConcept().anchoredPatients[a.id] for a | |
it's anything but 'or'
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = queriedForms.filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK)#CONTAINS
elif term['QCODE'] == '1': newQuery = queriedForms.filter(formrecordattributevalue__record_value__icontains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK)#ICONTAINS
elif term['QCODE'] == '2': newQuery = queriedForms.filter(formrecordattributevalue__record_value__exact=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK)#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = queriedForms.exclude(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK)#EXCLUDES
elif term['QCODE'] == '4': newQuery = queriedForms.filter(formrecordattributevalue__record_value__isnull=True, formrecordattributevalue__record_attribute_type__pk=rtypePK)#IS_NULL
#save stats and query
term['count'] = newQuery.count()
termStats.append(term)
queriedForms = newQuery
else:#Otherwise it's an OR statement
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = (queryFormtype.form_set.all().filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK))#CONTAINS
elif term['QCODE'] == '1': newQuery = (queryFormtype.form_set.all().filter(formrecordattributevalue__record_value__icontains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK))#ICONTAINS
elif term['QCODE'] == '2': newQuery = (queryFormtype.form_set.all().filter(formrecordattributevalue__record_value__exact=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK))#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = (queryFormtype.form_set.all().exclude(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=rtypePK))#EXCLUDES
elif term['QCODE'] == '4': newQuery = (queryFormtype.form_set.all().filter(formrecordattributevalue__record_value__isnull=True, formrecordattributevalue__record_attribute_type__pk=rtypePK))#IS_NULL
#***RECYCLING BIN*** Make sure our NEW query is always filtered by recycling bin flags--All OR statements will need this filter
newQuery = newQuery.filter(flagged_for_deletion=False)
#save stats and query
term['count'] = newQuery.count()
termStats.append(term)
queriedForms = (newQuery | queriedForms)
logging.info("TimerB"+ " : " + str(time.clock()))
logging.info("TimerC"+ " : " + str(time.clock()))
#########################################$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#########################################$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#########################################$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# (FRRT) FormRecordReferenceType Lookups
# This is where things can get complicated. I've added a 'deep' search -- or the ability to search fields from a related model
# --Right now, this just looks at the form IDs of the related field and looks for matches--it will still need to do that, but
# --it also needs to be able to look up FRAT or FRRTs in the same field--that will essentially double the code for this blocks
# --to do all of this, and will also cause the time of the query to significantly increase because we are doing another JOIN in the
# --SQL lookup to span this relationship. This won't affect the list of queried forms directly--they will be limited by what the
# --query finds obviously--but the user will only see the column for the related FRRT that had a match--not specifically the field that matched
# ----It WILL affect the counts for the graphs etc.
#########################################&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&#########################################$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$#########################################$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
elif rtype == 'FRRT':
#store the record type in a new rtype list if unique
if uniqueQuery: queryRTYPElist.append((rtypeCounter,'frrt',rtypePK,term['LABEL']))
rtypeCounter += 1
tCounter = 0;
#store stats
singleQueryStats['rtype_name'] = term['LABEL']
singleQueryStats['rtype_pk'] = rtypePK
singleQueryStats['rtype'] = rtype
termStats = []
singleQueryStats['all_terms'] = termStats
#get the deep values
deepPK, deepRTYPE = term['RTYPE-DEEP'].split('__')
print >>sys.stderr, deepPK + " : " + deepRTYPE + " <!-------------------------------------------"
#==========================================================================================================================================================================================
# IF WE ARE JUST LOOKING UP THE RTYPE FORM ID
#==========================================================================================================================================================================================
#TODO: This also needs to check external reference values if no match is found
if deepRTYPE == 'FORMID':
print >> sys.stderr, "WTF"
#Now begin modifying the SQL query which each term of each individual query
#skip the term if the field was left blank
if term['TVAL'] != "" or term['QCODE'] == '4':
newQuery = None
if term['ANDOR'] != 'or':#We can assume it is an AND like addition if it's anything but 'or'
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = queriedForms.filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK) #CONTAINS
elif term['QCODE'] == '1': newQuery = queriedForms.filter(ref_to_parent_form__record_reference__form_name__icontains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK) #ICONTAINS
elif term['QCODE'] == '2': newQuery = queriedForms.filter(ref_to_parent_form__record_reference__form_name__exact=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK)#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = queriedForms.exclude(ref_to_parent_form__record_reference__form_name__contains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK)#EXCLUDES
elif term['QCODE'] == '4': newQuery = queriedForms.filter(ref_to_parent_form__record_reference__isnull=True, ref_to_parent_form__record_reference_type__pk=rtypePK) #IS_NULL
#save stats and query
term['count'] = newQuery.count()
termStats.append(term)
queriedForms = newQuery
else:#Otherwise it's an OR statement
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0': newQuery = (queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK))#CONTAINS
elif term['QCODE'] == '1': newQuery = (queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__form_name__icontains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK))#ICONTAINS
elif term['QCODE'] == '2': newQuery = (queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__form_name__exact=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK))#MATCHES EXACT
elif term['QCODE'] == '3': newQuery = (queryFormtype.form_set.all().exclude(ref_to_parent_form__record_reference__form_name__contains=term['TVAL'], ref_to_parent_form__record_reference_type__pk=rtypePK))#EXCLUDES
elif term['QCODE'] == '4': newQuery = (queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__isnull=True, ref_to_parent_form__record_reference_type__pk=rtypePK))#IS_NULL
#***RECYCLING BIN*** Make sure our NEW query is always filtered by recycling bin flags--All OR statements will need this filter
newQuery = newQuery.filter(flagged_for_deletion=False)
#save stats and query
term['count'] = newQuery.count()
termStats.append(term)
queriedForms = (newQuery | queriedForms)
#==========================================================================================================================================================================================
# IF WE ARE LOOKING UP THE RELATIONS FRAT
#==========================================================================================================================================================================================
elif deepRTYPE == 'FRAT':
print >>sys.stderr, "We should be here"
#grab the formtype in question
deepFormType = FormType.objects.filter(pk=FormRecordAttributeType.objects.get(pk=deepPK).form_type.pk)
#***RECYCLING BIN*** Make sure our this Deep query FormType is always filtered by recycling bin flags
deepFormType = deepFormType.filter(flagged_for_deletion=False)
deepFormType = deepFormType[0]
#Now begin modifying the SQL query which each term of each individual query
#skip the term if the field was left blank
if term['TVAL'] != "" or term['QCODE'] == '4':
newQuery = None
#----------------------------------------------------------
# AND STATEMENT FOR A --TERM--
if term['ANDOR'] != 'or':#We can assume it is an AND like addition if it's anything but 'or'
#Now let's figure out the QCODE, e.g. contains, match exact etc.
#First we Get a flattened list of form pk values from the deepFormType
#Then we filter our current formtype queryset's frrt manytomany pks by the pk value list just created
if term['QCODE'] == '0':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '1':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__icontains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '2':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '3':
flattenedSet = list(deepFormType.form_set.all().exclude(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '4':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__isnull=True, formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
#save stats and query
term['count'] = newQuery.count()
termStats.append(term)
queriedForms = newQuery
#--------------------------------------------------------
# OR STATEMENT FOR a --TERM--
else:
#Now let's figure out the QCODE, e.g. contains, match exact etc.
if term['QCODE'] == '0':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '1':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__icontains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '2':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '3':
flattenedSet = list(deepFormType.form_set.all().exclude(formrecordattributevalue__record_value__contains=term['TVAL'], formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '4':
flattenedSet = list(deepFormType.form_set.all().filter(formrecordattributevalue__record_value__isnull=True, formrecordattributevalue__record_attribute_type__pk=deepPK).values_list('pk', flat=True)) #CONTAINS
newQuery = queryFormtype.form_set.all().filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
#***RECYCLING BIN*** Make sure our NEW query is always filtered by recycling bin flags--All OR statements will need this filter
newQuery = newQuery.filter(flagged_for_deletion=False)
#save stats and query
term['count'] = newQuery.count()
termStats.append(term)
queriedForms = (newQuery | queriedForms)
#==========================================================================================================================================================================================
# IF WE ARE LOOKING UP THE RELATION'S FRRT(Only form ID allowed)
#==========================================================================================================================================================================================
elif deepRTYPE == 'FRRT':
print >>sys.stderr, "We should be here 3"
#grab the formtype in question
deepFormType = FormType.objects.get(pk=FormRecordReferenceType.objects.get(pk=deepPK).form_type_parent.pk)
#***RECYCLING BIN*** Make sure our this Deep query FormType is always filtered by recycling bin flags
deepFormType = deepFormType.filter(flagged_for_deletion=False)
#Now begin modifying the SQL query which each term of each individual query
#skip the term if the field was left blank
if term['TVAL'] != "" or term['QCODE'] == '4':
newQuery = None
#----------------------------------------------------------
# AND STATEMENT FOR A --TERM--
if term['T-ANDOR'] != 'or':#We can assume it is an AND like addition if it's anything but 'or'
#Now let's figure out the QCODE, e.g. contains, match exact etc.
#First we Get a flattened list of form pk values from the deepFormType
#Then we filter our current formtype queryset's frrt manytomany pks by the pk value list just created
if term['QCODE'] == '0':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL']).values_list('pk', flat=True)) #CONTAINS
print >>sys.stderr, "LOOK HERE ROBERT"
print >>sys.stderr, flattenedSet
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '1':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL']).values_list('pk', flat=True)) #ICONTAINS
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '2':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__contains=term['TVAL']).values_list('pk', flat=True)) #EXACT MATCH
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '3':
flattenedSet = list(deepFormType.form_set.all().exclude(ref_to_parent_form__record_reference__form_name__contains=term['TVAL']).values_list('pk', flat=True)) #EXCLUDES
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
elif term['QCODE'] == '4':
flattenedSet = list(deepFormType.form_set.all().filter(ref_to_parent_form__record_reference__form_name__isnull=True).values_list('pk', flat=True)) #IS NULL
newQuery = queriedForms.filter(ref_to_parent_form__record_reference__pk__in=flattenedSet)
#save stats and query
term['count'] = newQuery.count()
termStats.append(term)
queriedForms = newQuery
#--------------------------------------------------------
# OR STATEMENT FOR a | |
# (repository metadata artifact: gh_stars = 0)
"""
player.py
PlayerModel code.
"""
import copy
import utils
import engagement
import perception
import decision
from packable import pack, unpack
from diffable import diff
class PlayerModel:
"""
Models a player as having a DecisionMethod, a PriorityMethod, and a
collection of modes of engagement, each of which includes multiple goals at
different priorities. The player may have a strict ranking of these different
modes, or may have per-mode priority modifiers, or may even have per-goal
priority modifiers or overrides. At any point a player-specific combined mode
of engagement can be constructed which assigns global priorities to all
player goals.
Note that priorities for specific goals are endemic to a player model and
cannot be compared between player models because of arbitrary numbering
concerns. Priority differences likewise cannot be compared, but priority
*orderings* can be.
For the purposes of soft goal priorities, mode ranking stacks goal priorities
of a lower-priority mode immediately after priorities from a higher-priority
mode. For best results, avoid combining mode ranking with soft goal
priorities, and use mode adjustments instead to achieve a precise goal
priority setup.
Also note that if modes of engagement define goals with negative priorities
(they shouldn't) these may break the strict mode-of-engagement hierarchy
defined here.
"""
def __init__(
    self,
    name,
    decision_method,
    modes_of_engagement,
    priority_method=engagement.PriorityMethod.softpriority,
    mode_ranking=None,
    mode_adjustments=None,
    goal_adjustments=None,
    goal_overrides=None
):
    """
    Note: Invalid mode/goal names in the following arguments will be pruned
    and generate warnings.

    name:
      The name for this player model.
    decision_method:
      A DecisionMethod object (see decision.py) representing how this player
      makes decisions. Can be updated with set_decision_method.
    modes_of_engagement:
      A dictionary mapping names to ModeOfEngagement objects (see
      engagement.py) that this player will use.
    priority_method:
      A PriorityMethod object (see engagement.py) representing how this
      player prioritizes goals. Can be updated using set_priority_method.
    mode_ranking:
      A strict ranking allowing some modes absolute precedence over others.
      Lower numbers represent higher priorities. This should be a dictionary
      mapping mode names to priority numbers. Priorities not given default
      to engagement.DEFAULT_PRIORITY.
    mode_adjustments:
      A mapping from mode names to priority adjustments (positive or
      negative integers) to be applied to all of the goals from a given mode
      of engagement. This allows controlling how modes interact to some
      degree without necessarily establishing a strict hierarchy. Of course
      only modes given equal ranks will have goal priorities that interact.
      Modes not specified in the adjustments mapping will be assigned the
      default adjustment of 0.
    goal_adjustments:
      A mapping from goal names to priority adjustments for individual
      goals. These are applied within-mode before between-mode effects like
      mode ranking happen. Goals with no specified adjustment will be
      assigned an adjustment of 0.
    goal_overrides:
      A mapping from goal names to new priority values for individual
      goals. These are absolute, and will set a goal's priority independent
      of goal priorities derived from modes of engagement. The goal that
      they refer to must still be included in a constituent mode of
      engagement, though, and mode rankings take priority over goal
      overrides. So if you have a goal with override priority 1, it will
      still be lower priority than all goals from modes of engagement which
      don't include it that are ranked higher in the mode ranking.

    Raises TypeError if decision_method, modes_of_engagement, or
    priority_method have the wrong types.
    """
    self.name = name
    # Validate the decision method up front so a bad model fails fast.
    self.decision_method = decision_method
    if not isinstance(self.decision_method, decision.DecisionMethod):
        raise TypeError("decision_method must be a DecisionMethod.")
    if not isinstance(modes_of_engagement, dict):
        raise TypeError(
            "modes_of_engagement must be dictionary of ModeOfEngagement objects."
        )
    # Shallow-copy the dict so later caller mutation of the mapping itself
    # (adding/removing modes) doesn't affect this model.
    self.modes = dict(modes_of_engagement)
    if not all(
        isinstance(m, engagement.ModeOfEngagement) for m in self.modes.values()
    ):
        raise TypeError(
            "modes_of_engagement must be dictionary of ModeOfEngagement objects."
        )
    self.priority_method = priority_method
    if not isinstance(self.priority_method, engagement.PriorityMethod):
        raise TypeError("priority_method must be a PriorityMethod.")
    all_mode_names = set(self.modes.keys())
    # NOTE: because of `or`, an explicitly-passed *empty* dict is replaced
    # by the default mapping, same as passing None.
    self.mode_ranking = copy.deepcopy(mode_ranking) or {
        m: engagement.DEFAULT_PRIORITY
        for m in all_mode_names
    }
    # conform_keys prunes entries whose key names no known mode (warning
    # per entry) and fills in missing modes with the given default.
    utils.conform_keys(
        all_mode_names,
        self.mode_ranking,
        engagement.DEFAULT_PRIORITY,
        "Ranking for mode '{}' discarded (no matching mode)."
    )
    self.mode_adjustments = copy.deepcopy(mode_adjustments) or {
        m: 0 for m in all_mode_names
    }
    utils.conform_keys(
        all_mode_names,
        self.mode_adjustments,
        0,
        "Adjustment for mode '{}' discarded (no matching mode)."
    )
    # Collect every goal name appearing in any constituent mode; goal-level
    # settings are validated against this union.
    all_goal_names = set()
    for k in all_mode_names:
        all_goal_names.update(self.modes[k].goals.keys())
    self.goal_adjustments = copy.deepcopy(goal_adjustments) or {
        g: 0 for g in all_goal_names
    }
    utils.conform_keys(
        all_goal_names,
        self.goal_adjustments,
        0,
        "Adjustment for goal '{}' discarded (no matching goal in any mode)."
    )
    self.goal_overrides = copy.deepcopy(goal_overrides) or {}
    # NoDefault: overrides are only pruned, never filled in for goals
    # that lack one.
    utils.conform_keys(
        all_goal_names,
        self.goal_overrides,
        utils.NoDefault,
        "Override for goal '{}' discarded (no matching goal in any mode)."
    )
    # Build the combined (synthesized) mode of engagement from the pieces
    # configured above.
    self._synthesize_moe()
def __str__(self):
    """
    Returns a short identifying string for this model (name only; see
    _pack_ for a complete description).
    """
    template = "PlayerModel('{}')"
    return template.format(self.name)
def __eq__(self, other):
    """
    Structural equality: another PlayerModel with identical name, methods,
    modes, rankings, adjustments, and overrides compares equal.
    """
    if type(other) != PlayerModel:
        return False
    comparable = (
        "name",
        "decision_method",
        "modes",
        "priority_method",
        "mode_ranking",
        "mode_adjustments",
        "goal_adjustments",
        "goal_overrides",
    )
    return all(
        getattr(self, attr) == getattr(other, attr)
        for attr in comparable
    )
def __hash__(self):
    """
    Hash combining every field that __eq__ compares. The distinct magic
    offsets keep the several name→number mappings from cancelling each
    other out when XOR-folded together.
    """
    h = hash(self.name) + hash(self.decision_method)
    for mode_name in self.modes:
        h ^= hash(mode_name)
    h += hash(self.priority_method)
    for mode_name, rank in self.mode_ranking.items():
        h ^= (rank + 17) * hash(mode_name)
    for mode_name, adjustment in self.mode_adjustments.items():
        h ^= (adjustment + 4039493) * hash(mode_name)
    for goal_name, adjustment in self.goal_adjustments.items():
        h ^= (adjustment + 6578946) * hash(goal_name)
    for goal_name, override in self.goal_overrides.items():
        h ^= (override + 795416) * hash(goal_name)
    return h
def _diff_(self, other):
    """
    Reports differences between this model and another as a list of
    human-readable strings (see diffable.py). An empty list means the two
    models match.
    """
    result = []
    if self.name != other.name:
        result.append("names: '{}' != '{}'".format(self.name, other.name))
    # Each remaining field is delegated to diff() and labelled uniformly.
    field_pairs = (
        ("decision_methods", self.decision_method, other.decision_method),
        ("modes", self.modes, other.modes),
        ("priority_methods", self.priority_method, other.priority_method),
        ("mode_rankings", self.mode_ranking, other.mode_ranking),
        ("mode_adjustments", self.mode_adjustments, other.mode_adjustments),
        ("goal_adjustments", self.goal_adjustments, other.goal_adjustments),
        ("goal_overrides", self.goal_overrides, other.goal_overrides),
    )
    for label, mine, theirs in field_pairs:
        result.extend(
            "{}: {}".format(label, d) for d in diff(mine, theirs)
        )
    return result
def _pack_(self):
    """
    Returns a simple representation of this model suitable for direct
    conversion to JSON (see packable.py); the inverse of `_unpack_`.

    Only non-default settings are serialized: the priority method is
    omitted when it is the default soft-priority method; mode rankings
    equal to engagement.DEFAULT_PRIORITY, zero mode/goal adjustments, and
    empty override mappings are all dropped.

    The result always contains "name", "decision_method" (packed), and
    "modes" (a mapping of packed ModeOfEngagement objects), plus whichever
    of "priority_method", "mode_ranking", "mode_adjustments",
    "goal_adjustments", and "goal_overrides" are non-default.
    """
    def nondefault(mapping, default):
        # Keep only entries whose value differs from the given default.
        return {k: v for (k, v) in mapping.items() if v != default}

    result = {
        "name": self.name,
        "decision_method": pack(self.decision_method),
        "modes": {key: pack(val) for (key, val) in self.modes.items()},
    }
    if self.priority_method != engagement.PriorityMethod.softpriority:
        result["priority_method"] = pack(self.priority_method)
    rankings = nondefault(self.mode_ranking, engagement.DEFAULT_PRIORITY)
    if rankings:
        result["mode_ranking"] = rankings
    mode_adjustments = nondefault(self.mode_adjustments, 0)
    if mode_adjustments:
        result["mode_adjustments"] = mode_adjustments
    goal_adjustments = nondefault(self.goal_adjustments, 0)
    if goal_adjustments:
        result["goal_adjustments"] = goal_adjustments
    if self.goal_overrides:
        result["goal_overrides"] = self.goal_overrides
    return result
def _unpack_(obj):
    """
    The inverse of `_pack_`; constructs a PlayerModel from a simple object
    (e.g., one produced by json.loads). Optional keys that are absent fall
    back to the same defaults the constructor applies.
    """
    if "priority_method" in obj:
        priority_method = unpack(
            obj["priority_method"],
            engagement.PriorityMethod
        )
    else:
        priority_method = engagement.PriorityMethod.softpriority
    modes = {
        key: unpack(val, engagement.ModeOfEngagement)
        for (key, val) in obj["modes"].items()
    }
    # dict.get returns None for missing keys, which the constructor treats
    # as "use defaults" — identical to the packed-form omission.
    return PlayerModel(
        obj["name"],
        unpack(obj["decision_method"], decision.DecisionMethod),
        modes,
        priority_method,
        mode_ranking=obj.get("mode_ranking"),
        mode_adjustments=obj.get("mode_adjustments"),
        goal_adjustments=obj.get("goal_adjustments"),
        goal_overrides=obj.get("goal_overrides")
    )
def set_decision_method(self, dm):
    """
    Updates this player's decision method.

    dm:
      A DecisionMethod object (see decision.py). A TypeError is raised for
      other values, matching the validation done by the constructor.
    """
    # Mirror the isinstance check performed in __init__ so an invalid
    # method can't be installed after construction.
    if not isinstance(dm, decision.DecisionMethod):
        raise TypeError("decision_method must be a DecisionMethod.")
    self.decision_method = dm
def set_priority_method(self, pm):
"""
Updates this player's priority method.
"""
self.priority_method = | |
# file: lib/usfm/__init__.py
'''
The USFM parser module provides the default stylesheet for USFM and
USFM-specific texttype parsers to the palaso.sfm module. These guide the
palaso.sfm parser so it can correctly parse USFM document structure.
'''
__version__ = '20101011'
__date__ = '11 October 2010'
__author__ = '<NAME> <<EMAIL>>'
__history__ = '''
20081210 - djd - Seperated SFM definitions from the module
to allow for parsing other kinds of SFM models
Also changed the name to parse_sfm.py as the
module is more generalized now
20091026 - tse - renamed and refactored generatoion of markers
dict to module import time as part of import into palaso
package.
20101026 - tse - rewrote to enable the parser to use the stylesheets to
direct how to parse structure and USFM specific semantics.
20101109 - tse - Ensure cached usfm.sty is upto date after package code
changes.
'''
from . import sfm, style
from itertools import chain
from functools import reduce
from . sfm import ErrorLevel
import bz2
import contextlib
import operator
import os
import pickle
import re
import site
# Per-user data/cache directory for palaso SFM support files (rooted at
# site.getuserbase(), e.g. ~/.local on Linux).
_PALASO_DATA = os.path.join(
    site.getuserbase(),
    'palaso-python', 'sfm')
# Directory holding this package's bundled resources (notably usfm.sty).
_package_dir = os.path.dirname(__file__)
def _check_paths(pred, paths):
    """
    Return the first normalised path from *paths* that satisfies *pred*,
    or None when no path does.
    """
    for raw in paths:
        candidate = os.path.normpath(raw)
        if pred(candidate):
            return candidate
    return None
def _source_path(path):
    """
    Locate *path*, preferring the per-user data directory over the
    package directory; returns None if neither copy exists.
    """
    candidates = [
        os.path.join(_PALASO_DATA, path),
        os.path.join(_package_dir, path),
    ]
    return _check_paths(os.path.exists, candidates)
def _newer(cache, benchmark):
    """
    True when *cache* was modified at least as recently as *benchmark*
    (i.e. the cache is not older than the file it was built from).
    """
    cache_mtime = os.path.getmtime(cache)
    benchmark_mtime = os.path.getmtime(benchmark)
    return benchmark_mtime <= cache_mtime
def _is_fresh(cached_path, benchmarks):
    """
    True if *cached_path* is at least as recently modified as every file
    in *benchmarks*.

    Uses all() for logical conjunction with short-circuiting; the previous
    reduce(operator.and_, ...) form raised TypeError on an empty benchmark
    sequence and always evaluated every comparison. An empty sequence now
    vacuously yields True.
    """
    return all(_newer(cached_path, b) for b in benchmarks)
def _cached_stylesheet(path):
    """
    Return the path to an up-to-date compiled cache (pickled, optimized,
    bz2-compressed) of the stylesheet at *path*, rebuilding the cache when
    it is missing or stale.
    """
    # Cache lives in the per-user data dir with an extra '.cz' extension.
    cached_path = os.path.normpath(os.path.join(
        _PALASO_DATA,
        path+os.extsep+'cz'))
    source_path = _source_path(path)
    if os.path.exists(cached_path):
        import glob
        # Stale if the stylesheet source OR any .py file in this package is
        # newer than the cache (code changes can invalidate pickles; see
        # the 20101109 note in __history__).
        if _is_fresh(cached_path, [source_path]
                     + glob.glob(os.path.join(_package_dir, '*.py'))):
            return cached_path
    else:
        # First use: create the per-user cache directory if needed.
        path = os.path.dirname(cached_path)
        if not os.path.exists(path):
            os.makedirs(path)
    # (Re)build: parse the source stylesheet, pickle it, shrink the pickle
    # with pickletools.optimize, and compress with bz2.
    import pickletools
    with contextlib.closing(bz2.BZ2File(cached_path, 'wb')) as zf:
        zf.write(pickletools.optimize(
            pickle.dumps(style.parse(open(source_path, 'r')))))
    return cached_path
def _load_cached_stylesheet(path):
    """
    Load the stylesheet named *path*, preferring the compiled per-user
    cache. A corrupt cache is deleted and rebuilt once; if caching fails
    entirely the stylesheet source is parsed directly.
    """
    try:
        if not site.getuserbase():
            # No per-user base directory means nowhere to cache; fall
            # through to parsing the source directly.
            raise FileNotFoundError
        cached_path = _cached_stylesheet(path)
        try:
            try:
                with contextlib.closing(bz2.BZ2File(cached_path, 'rb')) as sf:
                    return pickle.load(sf)
            except (OSError, EOFError, pickle.UnpicklingError):
                # Corrupt/unreadable cache: delete it, rebuild it once, and
                # retry the load.
                os.unlink(cached_path)
                cached_path = _cached_stylesheet(path)
                with contextlib.closing(bz2.BZ2File(cached_path, 'rb')) as sf:
                    return pickle.load(sf)
        except (OSError, pickle.UnpicklingError):
            # Second failure: remove the bad cache and give up on caching.
            os.unlink(cached_path)
            raise
    except OSError:
        # Caching unavailable or broken: parse the stylesheet source.
        return style.parse(open(_source_path(path), 'r'))
# The USFM stylesheet shipped with this package, loaded (via the per-user
# cache when possible) at import time.
default_stylesheet = _load_cached_stylesheet('usfm.sty')

# Fallback definition for markers missing from the stylesheet: treat them
# as milestones that may occur anywhere and take no end marker.
_default_meta = style.Marker(
    TextType=style.CaselessStr('Milestone'),
    OccursUnder={None},
    Endmarker=None,
    StyleType=None
)
class parser(sfm.parser):
'''
>>> import warnings
Tests for inline markers
>>> list(parser([r'\\test'], parser.extend_stylesheet('test')))
[Element('test')]
>>> list(parser([r'\\test text'], parser.extend_stylesheet('test')))
[Element('test'), Text(' text')]
>>> list(parser([r'\\id JHN\\ior text\\ior*']))
[Element('id', content=[Text('JHN'), Element('ior', content=[Text('text')])])]
>>> list(parser([r'\\id MAT\\mt Text \\f + \\fk deep\\fk*\\f*more text.']))
[Element('id', content=[Text('MAT'), Element('mt', content=[Text('Text '), Element('f', args=['+'], content=[Element('fk', content=[Text('deep')])]), Text('more text.')])])]
>>> list(parser([r'\\id MAT\\mt Text \\f + \\fk deep \\+qt A quote \\+qt*more\\fk*\\f*more text.']))
[Element('id', content=[Text('MAT'), Element('mt', content=[Text('Text '), Element('f', args=['+'], content=[Element('fk', content=[Text('deep '), Element('qt', content=[Text('A quote ')]), Text('more')])]), Text('more text.')])])]
Test end marker recognition when it's a prefix
>>> with warnings.catch_warnings():
... warnings.simplefilter("error")
... list(parser([r'\\id TEST\\mt \\f + text\\f*suffixed text']))
... list(parser([r'\\id TEST\\mt '
... r'\\f + \\fr ref \\ft text\\f*suffixed text']))
[Element('id', content=[Text('TEST'), Element('mt', content=[Element('f', args=['+'], content=[Text('text')]), Text('suffixed text')])])]
[Element('id', content=[Text('TEST'), Element('mt', content=[Element('f', args=['+'], content=[Element('fr', content=[Text('ref ')]), Text('text')]), Text('suffixed text')])])]
Test footnote canonicalisation flag
>>> with warnings.catch_warnings():
... warnings.simplefilter("error")
... list(parser([r'\\id TEST\\mt \\f + text\\f*suffixed text'],
... canonicalise_footnotes=False))
... list(parser([r'\\id TEST\\mt '
... r'\\f + \\fr ref \\ft text\\f*suffixed text'],
... canonicalise_footnotes=False))
[Element('id', content=[Text('TEST'), Element('mt', content=[Element('f', args=['+'], content=[Text('text')]), Text('suffixed text')])])]
[Element('id', content=[Text('TEST'), Element('mt', content=[Element('f', args=['+'], content=[Element('fr', content=[Text('ref ')]), Element('ft', content=[Text('text')])]), Text('suffixed text')])])]
Test marker parameters, particularly chapter and verse markers
>>> list(parser([r'\\id TEST' r'\\c 1']))
[Element('id', content=[Text('TEST'), Element('c', args=['1'])])]
>>> list(parser([r'\\id TEST' r'\\c 2 \\s text']))
[Element('id', content=[Text('TEST'), Element('c', args=['2'], content=[Element('s', content=[Text('text')])])])]
>>> list(parser([r'\\id TEST\\c 0\\p' r'\\v 1']))
[Element('id', content=[Text('TEST'), Element('c', args=['0'], content=[Element('p', content=[Element('v', args=['1'])])])])]
>>> list(parser([r'\\id TEST\\c 0\\p' r'\\v 1-3']))
[Element('id', content=[Text('TEST'), Element('c', args=['0'], content=[Element('p', content=[Element('v', args=['1-3'])])])])]
>>> list(parser([r'\\id TEST\\c 0\\p' r'\\v 2 text']))
[Element('id', content=[Text('TEST'), Element('c', args=['0'], content=[Element('p', content=[Element('v', args=['2']), Text('text')])])])]
>>> list(parser([r'\\id TEST' r'\\c 2 \\p \\v 3 text\\v 4 verse']))
[Element('id', content=[Text('TEST'), Element('c', args=['2'], content=[Element('p', content=[Element('v', args=['3']), Text('text'), Element('v', args=['4']), Text('verse')])])])]
Test for error detection and reporting for structure
>>> list(parser([r'\\id TEST\\mt text\\f*']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,17: orphan end marker \\f*: no matching opening marker \\f
>>> list(parser([r'\\id TEST \\p 1 text']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,14: orphan marker \\p: may only occur under \\c
>>> list(parser([r'\\id TEST\\mt \\f + text\\fe*']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,22: orphan end marker \\fe*: no matching opening marker \\fe
>>> list(parser([r'\\id TEST\\mt \\f + text'], ))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,1: invalid end marker end-of-file: \\f (line 1,13) can only be closed with \\f*
Test for error detection and reporting for USFM specific parses
Chapter numbers
>>> list(parser(['\\id TEST\\c\\p \\v 1 text']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,9: missing chapter number after \\c
>>> list(parser(['\\id TEST\\c A\\p \\v 1 text']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,9: missing chapter number after \\c
>>> list(parser([r'\\id TEST\\c 1 text\\p \\v 1 text']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,14: text cannot follow chapter marker '\\c 1'
>>> list(parser([r'\\id TEST\\c 1text\\p \\v 1 text']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,13: missing space after chapter number '1'
Verse numbers
>>> list(parser([r'\\id TEST\\c 1\\p \\v \\p text']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,16: missing verse number after \\v
>>> list(parser([r'\\id TEST\\c 1\\p \\v text']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,16: missing verse number after \\v
>>> list(parser([r'\\id TEST\\c 1\\p \\v 1text']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,21: missing space after verse number '1t'
Note text parsing
>>> list(parser([r'\\id TEST\\mt \\f \\fk key\\fk* text.\\f*']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,13: missing caller parameter after \\f
>>> list(parser([r'\\id TEST\\mt \\f +text \\fk key\\fk* text.\\f*']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,17: missing space after caller parameter '+'
Test warnable condition detection and reporting
>>> with warnings.catch_warnings():
... warnings.simplefilter("error", SyntaxWarning)
... list(parser([r'\\id TEST\\mt \\whoops']))
Traceback (most recent call last):
...
SyntaxWarning: <string>: line 1,14: unknown marker \whoops: not in stylesheet
>>> with warnings.catch_warnings():
... warnings.simplefilter("error", SyntaxWarning)
... list(parser([r'\\id TEST\\mt \\whoops'],
... error_level=sfm.ErrorLevel.Marker))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,14: unknown marker \whoops: not in stylesheet
>>> with warnings.catch_warnings():
... warnings.simplefilter("error", SyntaxWarning)
... list(parser([r'\\id TEST\\mt \\zwhoops'],
... error_level=sfm.ErrorLevel.Note))
Traceback (most recent call last):
...
SyntaxWarning: <string>: line 1,14: unknown private marker \zwhoops: not it stylesheet using default marker definition
>>> with warnings.catch_warnings():
... warnings.simplefilter("error", SyntaxWarning)
... list(parser([r'\\id TEST\\c 1\\p a \\png b \\+w c \\+nd d \\png e \\png*']))
... # doctest: +NORMALIZE_WHITESPACE
[Element('id',
content=[Text('TEST'),
Element('c', args=['1'],
content=[Element('p',
content=[Text('a '),
Element('png',
content=[Text('b '),
Element('w',
content=[Text('c '),
Element('nd',
content=[Text('d ')])])]),
Element('png',
content=[Text('e ')])])])])]
>>> with warnings.catch_warnings():
... warnings.simplefilter("error", SyntaxWarning)
... list(parser([r'\\id TEST\\c 1\\p a \\f + \\fr 1:1 \\ft a \\png b\\png*']))
Traceback (most recent call last):
...
SyntaxError: <string>: line 1,1: invalid end marker end-of-file: \\f (line 1,18) can only be closed with \\f*
''' # noqa: E501
# Fallback marker definition (an end-markerless milestone allowed anywhere)
# used for markers that do not appear in the stylesheet.
default_meta = _default_meta
# Chapter numbers: an integer, optionally a range joined by '-',
# U+2010 HYPHEN, or U+2011 NON-BREAKING HYPHEN.
# Fixed two typos from the original pattern: '(:?' (a capturing group
# starting with an *optional literal colon*) is now the intended
# non-capturing '(?:', and the escape '\2011' (octal \201 == U+0081
# followed by a literal '1') is now '\u2011', matching the correctly
# written '\u200B-\u2011' class in verse_re below.
numeric_re = re.compile(r'\s*(\d+(?:[-\u2010\u2011]\d+)?)', re.UNICODE)
# Verse numbers: allow a part-letter suffix (e.g. "6a") and ranges/lists
# joined by ',', '-', or zero-width-space through non-breaking hyphen.
# Same '(:?' -> '(?:' fix as numeric_re.
verse_re = re.compile(r'\s*(\d+\w?(?:[-,\u200B-\u2011]+\d+\w?)?)',
                      re.UNICODE)
# A note caller is the first non-space, non-backslash character.
caller_re = re.compile(r'\s*([^\s\\])', re.UNICODE)
# Whitespace or end-of-string: what must follow a chapter/verse number.
sep_re = re.compile(r'\s|$', re.UNICODE)
# Maps (TextType, is-end-markerless-paragraph) to the stylesheet marker
# whose definition a private 'z' marker should inherit.
__unspecified_metas = {
    ('Section', True): 's',
    ('Title', True): 't',
    ('VerseText', True): 'p',
    ('VerseText', False): 'nd',
    ('Other', True): 'p',
    ('Other', False): 'nd',
    ('Unspecified', True): 'p',
    ('Unspecified', False): 'nd'
}
@classmethod
def extend_stylesheet(cls, *names, **kwds):
    """Extend a stylesheet with default definitions for markers `names`.

    The sheet to extend may be supplied via the ``stylesheet`` keyword;
    the module-level USFM default stylesheet is used otherwise.
    """
    base_sheet = kwds.get('stylesheet', default_stylesheet)
    return super().extend_stylesheet(base_sheet, *names)
def __init__(self, source,
             stylesheet=default_stylesheet,
             default_meta=_default_meta,
             canonicalise_footnotes=True,
             *args, **kwds):
    """Create a USFM parser over `source`.

    source -- token/line source handed to the base parser.
    stylesheet -- marker definitions; private 'z'-prefixed markers get
        synthesised metas (see __synthesise_private_meta).
    default_meta -- fallback meta for markers absent from the stylesheet.
    canonicalise_footnotes -- when False, footnote canonicalisation is
        disabled by replacing the hook with an identity function.
    """
    if not canonicalise_footnotes:
        # Identity function: leave footnote content untouched.
        self._canonicalise_footnote = lambda x: x

    stylesheet = self.__synthesise_private_meta(stylesheet, default_meta)
    # Give every Milestone-styled marker a synthetic '*' end-marker.
    # NOTE(review): presumably so the base parser closes them like
    # character-style spans -- confirm against the base sfm parser.
    for m in stylesheet.values():
        if m['StyleType'] == 'Milestone':
            m.update(Endmarker='*')
    super().__init__(source,
                     stylesheet,
                     default_meta,
                     private_prefix='z',
                     *args, **kwds)
@classmethod
def __synthesise_private_meta(cls, sty, default_meta):
    """Build metas for private ('z'-prefixed) markers from standard ones.

    For each private marker, a template marker is looked up in
    __unspecified_metas keyed by (TextType, is-implicit-paragraph); the
    template's meta (or `default_meta` when no template applies) is
    copied and then overlaid with the private marker's own fields.
    Returns `sty` updated with the synthesised entries.
    """
    # All stylesheet entries whose marker name starts with 'z'.
    private_metas = dict(r for r in sty.items() if r[0].startswith('z'))
    metas = {
        n: sty.get(
            cls.__unspecified_metas.get(
                # Second key component: no explicit end-marker AND
                # paragraph style => treated as an implicit paragraph.
                (m['TextType'], m['Endmarker'] is None
                 and m.get('StyleType', None) == 'Paragraph'),
                None),
            default_meta).copy()
        for n, m in private_metas.items()
    }
    # Overlay the private markers' own fields on the copied templates,
    # then merge the result back into the full sheet.
    return style.update_sheet(sty,
                              style.update_sheet(metas, private_metas))
def _force_close(self, parent, tok):
    """Handle an element still open when a new token (or EOF) arrives.

    Note-text and character-style elements may close implicitly with
    only a Note-level diagnostic; every other case is delegated to the
    base class.
    """
    meta = parent.meta
    implicit_ok = ('NoteText' in meta.get('TextType', [])
                   or meta.get('StyleType', None) == 'Character')
    if tok is sfm.parser._eos or not implicit_ok:
        super()._force_close(parent, tok)
        return
    self._error(ErrorLevel.Note,
                'implicit end marker before {token}: \\{0.name} '
                '(line {0.pos.line},{0.pos.col}) '
                'should be closed with \\{1}', tok, parent,
                meta['Endmarker'])
def _ChapterNumber_(self, chapter_marker):
tok = next(self._tokens)
chapter = self.numeric_re.match(tok)
if not chapter:
self._error(ErrorLevel.Content,
'missing chapter number after \\c',
chapter_marker)
chapter_marker.args = ['\uFFFD']
else:
chapter_marker.args = [str(tok[chapter.start(1):chapter.end(1)])]
tok = tok[chapter.end():]
if tok and not self.sep_re.match(tok):
self._error(ErrorLevel.Content,
'missing | |
# Repository: udif/oscope-scpi -- file: oscope_scpi/oscilloscope.py (1-10 stars)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018,2019,2020,2021, <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#-------------------------------------------------------------------------------
# Control of Oscilloscopes with PyVISA and SCPI command set. This started as
# specific code for the HP/Agilent/Keysight MSO-X/DSO-X 3000A Oscilloscope and
# has been made more generic to be used with Agilent UXR and MXR Oscilloscopes.
# The hope is that these commands in this package are generic enough to be
# used with other brands but may need to make this an Agilent specific
# package in the future if find that not to be true.
#-------------------------------------------------------------------------------
# For future Python3 compatibility:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
try:
from .scpi import SCPI
except Exception:
sys.path.append(os.getcwd())
from scpi import SCPI
from quantiphy import Quantity
import numpy as np
import csv
class Oscilloscope(SCPI):
"""Base class for controlling and accessing an Oscilloscope with PyVISA and SCPI commands"""
def __init__(self, resource, maxChannel=1, wait=0,
             cmd_prefix = ':',
             read_strip = '\n',
             read_termination = '',
             write_termination = '\n'):
    """Initialize the oscilloscope from its VISA resource string.

    resource          - resource string or VISA descriptor, like TCPIP0::172.16.2.13::INSTR
    maxChannel        - number of analog channels on this instrument
    wait              - default seconds to wait after sending each command
    cmd_prefix        - optional command prefix (ie. some instruments require a ':' prefix)
    read_strip        - termination characters to strip from read data
    read_termination  - read_termination parameter passed to open_resource()
    write_termination - write_termination parameter passed to open_resource()
    """
    # The parent (SCPI) exposes maxChannel as self._max_chan.
    super().__init__(resource, max_chan=maxChannel, wait=wait,
                     cmd_prefix=cmd_prefix,
                     read_strip=read_strip,
                     read_termination=read_termination,
                     write_termination=write_termination
                     )

    # Valid analog channel name strings: '1' .. str(max_chan).
    self._chanAnaValidList = [str(ch) for ch in range(1, self._max_chan + 1)]

    # ALL valid channel strings: the analog channels plus the digital
    # pods (POD1 = digital 0-7, POD2 = digital 8-15).
    self._chanAllValidList = list(self._chanAnaValidList)
    self._chanAllValidList.extend(['POD1', 'POD2'])

    # Series name; children override (e.g. MXR/UXR/DSOX).
    self._series = 'GENERIC'
@property
def chanAnaValidList(self):
    """List of valid analog channel name strings ('1' .. str(max_chan))."""
    return self._chanAnaValidList
@property
def chanAllValidList(self):
    """List of ALL valid channel strings (analog channels plus POD1/POD2)."""
    return self._chanAllValidList
@property
def series(self):
    """Oscilloscope series name (e.g. 'GENERIC', set by child classes)."""
    # Use this so can branch activities based on oscilloscope series name
    return self._series
def getBestClass(self):
    """Open the connection and based on ID strings, create an object that
    is the most appropriate child class for this
    oscilloscope. Returns the new object.

    Each branch imports its model module lazily; if the relative
    package import fails (e.g. when run as a loose script), the current
    working directory is appended to sys.path and a flat import is
    retried.
    """
    ## Make sure calling SCPI open which gets the ID String and parses it and then close
    superduper = super()
    superduper.open()
    superduper.close()

    # Default is to return myself as no child class that fits better than this
    newobj = self
    if (self._IDNmanu.upper().startswith('KEYSIGHT') or
        self._IDNmanu.upper().startswith('AGILENT')):
        # An Agilent/Keysight scope so check model
        if (self._IDNmodel.upper().startswith('MXR')):
            try:
                from .mxr import MXR, MXRxx8A, MXRxx4A
            except Exception:
                sys.path.append(os.getcwd())
                from mxr import MXR, MXRxx8A, MXRxx4A
            # One of the MXR Oscilloscopes
            if (self._IDNmodel.upper().endswith('8A')):
                # 8 channel MXR
                newobj = MXRxx8A(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().endswith('4A')):
                # 4 channel MXR
                newobj = MXRxx4A(self._resource, wait=self._wait)
            else:
                # Generic MXR
                newobj = MXR(self._resource, wait=self._wait)
        elif (self._IDNmodel.upper().startswith('EXR')):
            try:
                from .exr import EXR, EXRxx8A, EXRxx4A
            except Exception:
                sys.path.append(os.getcwd())
                from exr import EXR, EXRxx8A, EXRxx4A
            # One of the EXR Oscilloscopes
            if (self._IDNmodel.upper().endswith('8A')):
                # 8 channel EXR
                newobj = EXRxx8A(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().endswith('4A')):
                # 4 channel EXR
                newobj = EXRxx4A(self._resource, wait=self._wait)
            else:
                # Generic EXR
                newobj = EXR(self._resource, wait=self._wait)
        elif (self._IDNmodel.upper().startswith('UXR')):
            try:
                from .uxr import UXR, UXRxxx4A, UXRxxx2A
            except Exception:
                sys.path.append(os.getcwd())
                from uxr import UXR, UXRxxx4A, UXRxxx2A
            # One of the UXR Oscilloscopes
            # (the 'AP' variants are checked separately because
            #  endswith('4A') does not match a '...4AP' model string)
            if (self._IDNmodel.upper().endswith('4A') or
                self._IDNmodel.upper().endswith('4AP')):
                # 4 channel UXR
                newobj = UXRxxx4A(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().endswith('2A') or
                  self._IDNmodel.upper().endswith('2AP')):
                # 2 channel UXR
                newobj = UXRxxx2A(self._resource, wait=self._wait)
            else:
                # Generic UXR
                newobj = UXR(self._resource, wait=self._wait)
        elif (self._IDNmodel.upper().startswith('DSO-X')):
            try:
                from .dso import DSOX, DSOX3xx4A, DSOX3xx2A, DSOX3xx4T, DSOX3xx2T
            except Exception:
                sys.path.append(os.getcwd())
                from dso import DSOX, DSOX3xx4A, DSOX3xx2A, DSOX3xx4T, DSOX3xx2T
            # One of the DSOX Oscilloscopes
            if (self._IDNmodel.upper().startswith('DSO-X 3') and
                self._IDNmodel.upper().endswith('4A')):
                # 4 channel DSOX3xxx model
                newobj = DSOX3xx4A(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().startswith('DSO-X 3') and
                  self._IDNmodel.upper().endswith('2A')):
                # 2 channel DSOX3xxx model
                newobj = DSOX3xx2A(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().startswith('DSO-X 3') and
                  self._IDNmodel.upper().endswith('4T')):
                # 4 channel DSOX3xxx model but newer T suffix
                newobj = DSOX3xx4T(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().startswith('DSO-X 3') and
                  self._IDNmodel.upper().endswith('2T')):
                # 2 channel DSOX3xxx model but newer T suffix
                newobj = DSOX3xx2T(self._resource, wait=self._wait)
            else:
                # Generic DSOX
                newobj = DSOX(self._resource, wait=self._wait)
        elif (self._IDNmodel.upper().startswith('MSO-X')):
            try:
                from .dso import MSOX, MSOX3xx4A, MSOX3xx2A, MSOX3xx4T, MSOX3xx2T
            except Exception:
                sys.path.append(os.getcwd())
                from dso import MSOX, MSOX3xx4A, MSOX3xx2A, MSOX3xx4T, MSOX3xx2T
            # One of the MSOX Oscilloscopes
            if (self._IDNmodel.upper().startswith('MSO-X 3') and
                self._IDNmodel.upper().endswith('4A')):
                # 4 channel MSOX3xxx model
                newobj = MSOX3xx4A(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().startswith('MSO-X 3') and
                  self._IDNmodel.upper().endswith('2A')):
                # 2 channel MSOX3xxx model
                newobj = MSOX3xx2A(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().startswith('MSO-X 3') and
                  self._IDNmodel.upper().endswith('4T')):
                # 4 channel MSOX3xxx model but newer T suffix
                newobj = MSOX3xx4T(self._resource, wait=self._wait)
            elif (self._IDNmodel.upper().startswith('MSO-X 3') and
                  self._IDNmodel.upper().endswith('2T')):
                # 2 channel MSOX3xxx model but newer T suffix
                newobj = MSOX3xx2T(self._resource, wait=self._wait)
            else:
                # Generic MSOX
                newobj = MSOX(self._resource, wait=self._wait)
        else:
            try:
                from .keysight import Keysight
            except Exception:
                sys.path.append(os.getcwd())
                from keysight import Keysight
            # Generic Keysight Oscilloscope
            newobj = Keysight(self._resource, wait=self._wait)

    return newobj
# =========================================================
# Based on the save oscilloscope setup example from the MSO-X 3000 Programming
# Guide and modified to work within this class ...
# =========================================================
def setupSave(self, filename):
    """Fetch the oscilloscope setup and save to a file with given filename.

    The setup is queried via ``SYSTem:SETup?`` as an IEEE 488.2 binary
    block and written verbatim as binary data.

    filename - file to write the setup bytes to

    Returns the number of bytes saved to the file.
    """
    oscopeSetup = self._instQueryIEEEBlock("SYSTem:SETup?")

    # Save setup to file.  The context manager guarantees the file is
    # closed even if write() raises (the original open()/close() pair
    # leaked the handle on error).
    with open(filename, "wb") as f:
        f.write(oscopeSetup)

    #print('Oscilloscope Setup bytes saved: {} to "{}"'.format(len(oscopeSetup),filename))

    # Return number of bytes saved to file
    return len(oscopeSetup)
# =========================================================
# Based on the loading a previous setup example from the MSO-X 3000 Programming
# Guide and modified to work within this class ...
# =========================================================
def setupLoad(self, filename):
    """Restore the oscilloscope setup from file with given filename.

    Reads the binary setup image and sends it back with
    ``SYSTem:SETup`` as an IEEE 488.2 binary block.

    filename - file containing setup bytes previously written by setupSave()

    Returns the number of bytes loaded from the file.
    """
    # Load setup from file.  Context manager closes the file even if
    # read() raises (the original open()/close() pair leaked on error).
    with open(filename, "rb") as f:
        oscopeSetup = f.read()

    #print('Oscilloscope Setup bytes loaded: {} from "{}"'.format(len(oscopeSetup),filename))

    self._instWriteIEEEBlock("SYSTem:SETup ", oscopeSetup)

    # Return number of bytes saved to file
    return len(oscopeSetup)
def autoscale(self):
    """ Autoscale Oscilloscope"""
    # Sends the SCPI AUToscale command; the instrument chooses its own
    # vertical/horizontal/trigger settings.
    self._instWrite("AUToscale")
def waveform(self, filename, channel=None, points=None):
"""Download waveform data of a selected channel into a csv file.
NOTE: This is a LEGACY function to prevent breaking API but it
is deprecated so use above waveform functions instead.
NOTE: Now that newer oscilloscopes have very large data
downloads, csv file format is not a good format for storing
because the files are so large that the convenience of csv
files has diminishing returns. They are too large for Excel to
load and are only useful from a scripting system like Python
or MATLAB or Root. See waveformSaveNPZ() for a better option.
filename - base filename to store the data
channel - channel, as string, to be measured - set to None to use the default channel
points - number of points to capture - if None, captures all available points
for newer devices, the | |
# GitHub stars: 100-1000
"""
module for accessing a USB HID YubiKey
"""
# Copyright (c) 2010, 2011, 2012 Yubico AB
# See the file COPYING for licence statement.
# Public API of this module.
__all__ = [
    # constants
    # functions
    # classes
    'YubiKeyUSBHID',
    'YubiKeyUSBHIDError',
    'YubiKeyUSBHIDStatus',
]
from .yubico_version import __version__
from . import yubico_util
from . import yubico_exception
from . import yubikey_frame
from . import yubikey_config
from . import yubikey_defs
from . import yubikey_base
from .yubikey_defs import SLOT, YUBICO_VID, PID
from .yubikey_base import YubiKey
import struct
import time
import sys
import usb
# Various USB/HID parameters
# Control-transfer bmRequestType components and HID report requests used
# by _read()/_raw_write() below.
_USB_TYPE_CLASS = (0x01 << 5)
_USB_RECIP_INTERFACE = 0x01
_USB_ENDPOINT_IN = 0x80
_USB_ENDPOINT_OUT = 0x00

_HID_GET_REPORT = 0x01
_HID_SET_REPORT = 0x09

_USB_TIMEOUT_MS = 2000

# from ykcore_backend.h
_FEATURE_RPT_SIZE = 8
_REPORT_TYPE_FEATURE = 0x03
# dict used to select command for mode+slot in _challenge_response
_CMD_CHALLENGE = {'HMAC': {1: SLOT.CHAL_HMAC1, 2: SLOT.CHAL_HMAC2},
                  'OTP': {1: SLOT.CHAL_OTP1, 2: SLOT.CHAL_OTP2},
                  }
class YubiKeyUSBHIDError(yubico_exception.YubicoError):
    """ Exception raised for errors with the USB HID communication. """
    # Raised on device-discovery failures, short reads/writes and failed
    # configuration programming in this module.
class YubiKeyUSBHIDCapabilities(yubikey_base.YubiKeyCapabilities):
    """
    Capture the capabilities of the various versions of YubiKeys.

    Only the queries that are true for some firmware version are
    overridden here; everything else falls through to the base class's
    default_answer.
    """

    def __init__(self, model, version, default_answer):
        super(YubiKeyUSBHIDCapabilities, self).__init__(
            model=model,
            version=version,
            default_answer=default_answer)

    def _fw_at_least(self, major, minor, patch=0):
        # Firmware version comparison helper (self.version is a tuple).
        return self.version >= (major, minor, patch)

    def have_yubico_OTP(self):
        """ Yubico OTP support has always been available in the standard YubiKey. """
        return True

    def have_OATH(self, mode):
        """ OATH support ('HOTP' only).

        NOTE(review): the original docstring said OATH HOTP arrived in
        YubiKey 2.2 while the code checks >= 2.1.0 -- the 2.1.0 check is
        preserved here; confirm which is correct before relying on it.
        """
        if mode not in ['HOTP']:
            return False
        return self._fw_at_least(2, 1)

    def have_challenge_response(self, mode):
        """ Challenge-response ('HMAC'/'OTP') was introduced in YubiKey 2.2. """
        if mode not in ['HMAC', 'OTP']:
            return False
        return self._fw_at_least(2, 2)

    def have_serial_number(self):
        """ Reading serial number was introduced in YubiKey 2.2, but depends on extflags set too. """
        return self._fw_at_least(2, 2)

    def have_ticket_flag(self, flag):
        return flag.is_compatible(model=self.model, version=self.version)

    def have_config_flag(self, flag):
        return flag.is_compatible(model=self.model, version=self.version)

    def have_extended_flag(self, flag):
        return flag.is_compatible(model=self.model, version=self.version)

    def have_extended_scan_code_mode(self):
        return self._fw_at_least(2, 0)

    def have_shifted_1_mode(self):
        return self._fw_at_least(2, 0)

    def have_configuration_slot(self, slot):
        return slot in (1, 2)
class YubiKeyHIDDevice(object):
"""
High-level wrapper for low-level HID commands for a HID based YubiKey.
"""
def __init__(self, debug=False, skip=0):
    """
    Find and connect to a YubiKey (USB HID).

    Attributes :
        skip  -- number of YubiKeys to skip
        debug -- True or False; enables hexdump logging of all I/O

    Raises YubiKeyUSBHIDError if no device can be opened.
    """
    self.debug = debug
    self._usb_handle = None
    if not self._open(skip):
        raise YubiKeyUSBHIDError('YubiKey USB HID initialization failed')
    # Prime self._status with an initial status read.
    self.status()
def status(self):
    """
    Poll YubiKey for status.

    The parsed status record is cached on self._status and returned.
    """
    self._status = YubiKeyUSBHIDStatus(self._read())
    return self._status
def __del__(self):
    # Best-effort cleanup at finalization: release the USB interface if
    # it is still claimed.  IOError/AttributeError are swallowed because
    # the object may be partially constructed or the device already gone.
    try:
        if self._usb_handle:
            self._close()
    except (IOError, AttributeError):
        pass
def _write_config(self, cfg, slot):
    """ Write configuration to YubiKey.

    After writing, success is verified through the programming sequence
    counter (pgm_seq): it must have incremented by exactly one, or --
    when no valid configs remain -- have been reset to zero.
    Raises YubiKeyUSBHIDError otherwise.
    """
    old_pgm_seq = self._status.pgm_seq
    frame = cfg.to_frame(slot=slot)
    self._debug("Writing %s frame :\n%s\n" % \
                    (yubikey_config.command2str(frame.command), cfg))
    self._write(frame)
    self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
    # make sure we have a fresh pgm_seq value
    self.status()
    self._debug("Programmed slot %i, sequence %i -> %i\n" % (slot, old_pgm_seq, self._status.pgm_seq))
    cfgs = self._status.valid_configs()
    # Everything erased: no valid configs and the counter reset to 0.
    if not cfgs and self._status.pgm_seq == 0:
        return
    # Normal success: counter advanced by exactly one.
    if self._status.pgm_seq == old_pgm_seq + 1:
        return
    raise YubiKeyUSBHIDError('YubiKey programming failed (seq %i not increased (%i))' % \
                                 (old_pgm_seq, self._status.pgm_seq))
def _read_response(self, may_block=False):
    """ Wait for a response to become available, and read it.

    Each 8-byte feature report carries 7 payload bytes plus a flags
    byte; reports are concatenated while RESP_PENDING is set.  The low
    five flag bits are a sequence number -- a wrap back to 0 (once some
    data has been collected) marks the end of the response.
    """
    # wait for response to become available
    res = self._waitfor_set(yubikey_defs.RESP_PENDING_FLAG, may_block)[:7]
    # continue reading while response pending is set
    while True:
        this = self._read()
        flags = yubico_util.ord_byte(this[7])
        if flags & yubikey_defs.RESP_PENDING_FLAG:
            seq = flags & 0b00011111
            if res and (seq == 0):
                break
            res += this[:7]
        else:
            break
    # Dummy write switches the device out of read mode.
    self._write_reset()
    return res
def _read(self):
    """ Read a USB HID feature report from the YubiKey.

    Returns exactly _FEATURE_RPT_SIZE (8) bytes as a bytes object;
    raises YubiKeyUSBHIDError on a short read.
    """
    request_type = _USB_TYPE_CLASS | _USB_RECIP_INTERFACE | _USB_ENDPOINT_IN
    value = _REPORT_TYPE_FEATURE << 8	# apparently required for YubiKey 1.3.2, but not 2.2.x
    recv = self._usb_handle.controlMsg(request_type,
                                       _HID_GET_REPORT,
                                       _FEATURE_RPT_SIZE,
                                       value = value,
                                       timeout = _USB_TIMEOUT_MS)
    if len(recv) != _FEATURE_RPT_SIZE:
        # BUGFIX: the original interpolated the received list itself into
        # a %i slot ("% (_FEATURE_RPT_SIZE, recv)"), which raises
        # TypeError instead of reporting the short read -- use its length.
        self._debug("Failed reading %i bytes (got %i) from USB HID YubiKey.\n"
                    % (_FEATURE_RPT_SIZE, len(recv)))
        raise YubiKeyUSBHIDError('Failed reading from USB HID YubiKey')
    data = b''.join(yubico_util.chr_byte(c) for c in recv)
    self._debug("READ : %s" % (yubico_util.hexdump(data, colorize=True)))
    return data
def _write(self, frame):
    """
    Write a YubiKeyFrame to the USB HID.

    Includes polling for YubiKey readiness before each write.
    """
    for data in frame.to_feature_reports(debug=self.debug):
        debug_str = None
        if self.debug:
            # In debug mode, to_feature_reports() yields (data, text) pairs.
            (data, debug_str) = data
        # first, we ensure the YubiKey will accept a write
        self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
        self._raw_write(data, debug_str)
    return True
def _write_reset(self):
    """
    Reset read mode by issuing a dummy write.
    """
    # Seven zero payload bytes plus 0x8f in the flags-byte position.
    # NOTE(review): presumably 0x8f marks this as a dummy/reset report --
    # confirm against ykcore.
    data = b'\x00\x00\x00\x00\x00\x00\x00\x8f'
    self._raw_write(data)
    self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
    return True
def _raw_write(self, data, debug_str = None):
    """
    Write one USB HID feature report of data to the YubiKey.

    Returns the number of bytes written; raises YubiKeyUSBHIDError on a
    short write.
    """
    if self.debug:
        if not debug_str:
            debug_str = ''
        hexdump = yubico_util.hexdump(data, colorize=True)[:-1] # strip LF
        self._debug("WRITE : %s %s\n" % (hexdump, debug_str))

    request_type = _USB_TYPE_CLASS | _USB_RECIP_INTERFACE | _USB_ENDPOINT_OUT
    value = _REPORT_TYPE_FEATURE << 8	# apparently required for YubiKey 1.3.2, but not 2.2.x
    sent = self._usb_handle.controlMsg(request_type,
                                       _HID_SET_REPORT,
                                       data,
                                       value = value,
                                       timeout = _USB_TIMEOUT_MS)
    if sent != _FEATURE_RPT_SIZE:
        # BUGFIX: this called self.debug(...), but self.debug is the
        # boolean flag set in __init__ -- calling it raised TypeError and
        # masked the short-write error.  Use the _debug() helper like
        # every other path in this class.
        self._debug("Failed writing %i bytes (wrote %i) to USB HID YubiKey.\n"
                    % (_FEATURE_RPT_SIZE, sent))
        raise YubiKeyUSBHIDError('Failed talking to USB HID YubiKey')
    return sent
def _waitfor_clear(self, mask, may_block=False):
    """
    Wait for the YubiKey to turn OFF the bits in 'mask' in status responses.

    Returns the 8 bytes last read.
    """
    return self._waitfor('nand', mask, may_block)
def _waitfor_set(self, mask, may_block=False):
    """
    Wait for the YubiKey to turn ON the bits in 'mask' in status responses.

    Returns the 8 bytes last read.
    """
    return self._waitfor('and', mask, may_block)
def _waitfor(self, mode, mask, may_block, timeout=2):
    """
    Wait for the YubiKey to either turn ON or OFF certain bits in the status byte.

    mode is either 'and' (wait for all `mask` bits set) or 'nand'
    (wait for them all cleared)
    timeout is a number of seconds (precision about ~0.5 seconds)

    Polls with exponential backoff (0.01 s doubling, capped at 0.5 s);
    returns the last 8-byte report read, or raises YubiKeyTimeout.
    """
    finished = False
    sleep = 0.01
    # After six sleeps, we've slept 0.64 seconds.
    wait_num = (timeout * 2) - 1 + 6
    resp_timeout = False	# YubiKey hasn't indicated RESP_TIMEOUT (yet)
    while not finished:
        time.sleep(sleep)
        this = self._read()
        flags = yubico_util.ord_byte(this[7])

        if flags & yubikey_defs.RESP_TIMEOUT_WAIT_FLAG:
            if not resp_timeout:
                resp_timeout = True
                seconds_left = flags & yubikey_defs.RESP_TIMEOUT_WAIT_MASK
                self._debug("Device indicates RESP_TIMEOUT (%i seconds left)\n" \
                                % (seconds_left))
                if may_block:
                    # calculate new wait_num - never more than 20 seconds
                    seconds_left = min(20, seconds_left)
                    wait_num = (seconds_left * 2) - 1 + 6

        if mode == 'nand':
            if not flags & mask == mask:
                finished = True
            else:
                self._debug("Status %s (0x%x) has not cleared bits %s (0x%x)\n"
                            % (bin(flags), flags, bin(mask), mask))
        elif mode == 'and':
            if flags & mask == mask:
                finished = True
            else:
                self._debug("Status %s (0x%x) has not set bits %s (0x%x)\n"
                            % (bin(flags), flags, bin(mask), mask))
        else:
            # BUGFIX: this was a bare `assert()` -- that asserts the
            # truthiness of an empty tuple, raising a message-less
            # AssertionError, and is stripped under `python -O`, silently
            # falling through.  Fail explicitly instead.
            raise YubiKeyUSBHIDError("Unknown _waitfor mode %r" % (mode,))

        if not finished:
            wait_num -= 1
            if wait_num == 0:
                if mode == 'nand':
                    reason = 'Timed out waiting for YubiKey to clear status 0x%x' % mask
                else:
                    reason = 'Timed out waiting for YubiKey to set status 0x%x' % mask
                raise yubikey_base.YubiKeyTimeout(reason)
            sleep = min(sleep + sleep, 0.5)
        else:
            return this
def _open(self, skip=0):
    """ Perform HID initialization

    Locates the device, detaches any in-kernel HID driver (best effort),
    selects configuration 1 and claims the first interface of the first
    configuration.  Returns True; raises YubiKeyUSBHIDError if no
    YubiKey is found.
    """
    usb_device = self._get_usb_device(skip)

    if usb_device:
        usb_conf = usb_device.configurations[0]
        self._usb_int = usb_conf.interfaces[0][0]
    else:
        raise YubiKeyUSBHIDError('No USB YubiKey found')

    try:
        self._usb_handle = usb_device.open()
        self._usb_handle.detachKernelDriver(0)
    except Exception as error:
        # Detach failures are non-fatal: the driver may already be
        # detached, or the backend may not support detaching at all.
        if 'could not detach kernel driver from interface' in str(error):
            self._debug('The in-kernel-HID driver has already been detached\n')
        else:
            self._debug("detachKernelDriver not supported!\n")

    try:
        self._usb_handle.setConfiguration(1)
    except usb.USBError:
        self._debug("Unable to set configuration, ignoring...\n")
    self._usb_handle.claimInterface(self._usb_int)
    return True
def _close(self):
    """
    Release the USB interface again.

    Also tries to re-attach the kernel driver (PyUSB >= 1.0 only) on a
    best-effort basis.  Always returns True.
    """
    self._usb_handle.releaseInterface()
    try:
        # If we're using PyUSB >= 1.0 we can re-attach the kernel driver here.
        self._usb_handle.dev.attach_kernel_driver(0)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; re-attach stays best-effort (PyUSB
        # < 1.0 has no .dev attribute, and the kernel may refuse).
        pass

    self._usb_int = None
    self._usb_handle = None
    return True
def _get_usb_device(self, skip=0):
    """
    Get YubiKey USB device.

    Optionally allows you to skip n devices, to support multiple attached YubiKeys.

    Returns the matching device, or falls through (None) when fewer than
    skip+1 YubiKeys are attached.
    """
    try:
        # PyUSB >= 1.0, this is a workaround for a problem with libusbx
        # on Windows.
        import usb.core
        import usb.legacy
        devices = [usb.legacy.Device(d) for d in usb.core.find(
            find_all=True, idVendor=YUBICO_VID)]
    except ImportError:
        # Using PyUsb < 1.0.
        import usb
        devices = [d for bus in usb.busses() for d in bus.devices]
    # Match on Yubico's vendor id plus any OTP-capable product id.
    for device in devices:
        if device.idVendor == YUBICO_VID:
            if device.idProduct in PID.all(otp=True):
                if skip == 0:
                    return device
                skip -= 1
| |
from .._compat import basestring
from ..adapters.mssql import (
MSSQL,
MSSQLN,
MSSQL3,
MSSQL4,
MSSQL3N,
MSSQL4N,
Vertica,
Sybase,
)
from ..helpers.methods import varquote_aux
from ..objects import Expression
from .base import SQLDialect
from . import dialects, sqltype_for
@dialects.register_for(MSSQL)
class MSSQLDialect(SQLDialect):
    """SQL dialect for Microsoft SQL Server (T-SQL)."""

    # T-SQL has no TRUE/FALSE literals: BIT values and tautologies instead.
    true = 1
    false = 0
    true_exp = "1=1"
    false_exp = "1=0"
    # Separator between date and time in datetime literals (ISO-8601 'T').
    dt_sep = "T"

    @sqltype_for("boolean")
    def type_boolean(self):
        return "BIT"

    @sqltype_for("blob")
    def type_blob(self):
        return "IMAGE"

    @sqltype_for("integer")
    def type_integer(self):
        return "INT"

    @sqltype_for("bigint")
    def type_bigint(self):
        return "BIGINT"

    @sqltype_for("double")
    def type_double(self):
        return "FLOAT"

    @sqltype_for("date")
    def type_date(self):
        return "DATE"

    @sqltype_for("time")
    def type_time(self):
        # Stored as fixed-width text ("HH:MM:SS"); MSSQL3 overrides this
        # with the native TIME type.
        return "CHAR(8)"

    @sqltype_for("datetime")
    def type_datetime(self):
        return "DATETIME"

    @sqltype_for("id")
    def type_id(self):
        return "INT IDENTITY PRIMARY KEY"

    @sqltype_for("reference")
    def type_reference(self):
        return (
            "INT%(null)s%(unique)s, CONSTRAINT %(constraint_name)s "
            + "FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON "
            + "DELETE %(on_delete_action)s"
        )

    @sqltype_for("big-id")
    def type_big_id(self):
        return "BIGINT IDENTITY PRIMARY KEY"

    @sqltype_for("big-reference")
    def type_big_reference(self):
        return (
            "BIGINT%(null)s%(unique)s, CONSTRAINT %(constraint_name)s"
            + " FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s "
            + "ON DELETE %(on_delete_action)s"
        )

    @sqltype_for("reference FK")
    def type_reference_fk(self):
        return (
            ", CONSTRAINT FK_%(constraint_name)s FOREIGN KEY "
            + "(%(field_name)s) REFERENCES %(foreign_key)s ON DELETE "
            + "%(on_delete_action)s"
        )

    @sqltype_for("reference TFK")
    def type_reference_tfk(self):
        return (
            " CONSTRAINT FK_%(constraint_name)s_PK FOREIGN KEY "
            + "(%(field_name)s) REFERENCES %(foreign_table)s "
            + "(%(foreign_key)s) ON DELETE %(on_delete_action)s"
        )

    @sqltype_for("geometry")
    def type_geometry(self):
        return "geometry"

    @sqltype_for("geography")
    def type_geography(self):
        return "geography"

    def varquote(self, val):
        # T-SQL quotes identifiers with square brackets.
        return varquote_aux(val, "[%s]")

    def update(self, table, values, where=None):
        # T-SQL UPDATE ... FROM form: the short alias is updated, the
        # (possibly aliased) table name goes in the FROM clause.
        tablename = self.writing_alias(table)
        whr = ""
        if where:
            whr = " %s" % self.where(where)
        return "UPDATE %s SET %s FROM %s%s;" % (
            table.sql_shortref,
            values,
            tablename,
            whr,
        )

    def delete(self, table, where=None):
        # Same alias/FROM split as update() above.
        tablename = self.writing_alias(table)
        whr = ""
        if where:
            whr = " %s" % self.where(where)
        return "DELETE %s FROM %s%s;" % (table.sql_shortref, tablename, whr)

    def select(
        self,
        fields,
        tables,
        where=None,
        groupby=None,
        having=None,
        orderby=None,
        limitby=None,
        distinct=False,
        for_update=False,
    ):
        # Pagination uses TOP only, so the lower bound (lmin) of limitby
        # is ignored at this level -- the MSSQL3/MSSQL4 subclasses
        # implement real offsets.
        dst, whr, grp, order, limit, upd = "", "", "", "", "", ""
        if distinct is True:
            dst = " DISTINCT"
        elif distinct:
            dst = " DISTINCT ON (%s)" % distinct
        if where:
            whr = " %s" % self.where(where)
        if groupby:
            grp = " GROUP BY %s" % groupby
            if having:
                grp += " HAVING %s" % having
        if orderby:
            order = " ORDER BY %s" % orderby
        if limitby:
            (lmin, lmax) = limitby
            limit = " TOP %i" % lmax
        if for_update:
            upd = " FOR UPDATE"
        return "SELECT%s%s %s FROM %s%s%s%s%s;" % (
            dst,
            limit,
            fields,
            tables,
            whr,
            grp,
            order,
            upd,
        )

    def left_join(self, val, query_env={}):
        # Left join must always have an ON clause
        if not isinstance(val, basestring):
            val = self.expand(val, query_env=query_env)
        return "LEFT OUTER JOIN %s" % val

    def random(self):
        return "NEWID()"

    def cast(self, first, second, query_env={}):
        # apparently no cast necessary in MSSQL
        return first

    def _mssql_like_normalizer(self, term):
        # '[' opens a character class in T-SQL LIKE patterns; escape it.
        term = term.replace("[", "[[]")
        return term

    def _like_escaper_default(self, term):
        if isinstance(term, Expression):
            return term
        return self._mssql_like_normalizer(
            super(MSSQLDialect, self)._like_escaper_default(term)
        )

    def concat(self, *items, **kwargs):
        # T-SQL concatenates strings with '+'.
        query_env = kwargs.get("query_env", {})
        tmp = (self.expand(x, "string", query_env=query_env) for x in items)
        return "(%s)" % " + ".join(tmp)

    def regexp(self, first, second, query_env={}):
        # T-SQL has no REGEXP; approximate via LIKE by translating
        # '*' -> '%' and '.' -> '_' and escaping literal wildcards.
        second = self.expand(second, "string", query_env=query_env)
        second = second.replace("\\", "\\\\")
        second = second.replace(r"%", r"\%").replace("*", "%").replace(".", "_")
        return "(%s LIKE %s ESCAPE '\\')" % (
            self.expand(first, query_env=query_env),
            second,
        )

    def extract(self, first, what, query_env={}):
        return "DATEPART(%s,%s)" % (what, self.expand(first, query_env=query_env))

    def epoch(self, val, query_env={}):
        # Seconds since the Unix epoch.
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(
            val, query_env=query_env
        )

    def length(self, val, query_env={}):
        return "LEN(%s)" % self.expand(val, query_env=query_env)

    def aggregate(self, first, what, query_env={}):
        # T-SQL spells LENGTH as LEN.
        if what == "LENGTH":
            what = "LEN"
        return super(MSSQLDialect, self).aggregate(first, what, query_env)

    @property
    def allow_null(self):
        # DDL fragment appended for nullable columns.
        return " NULL"

    def substring(self, field, parameters, query_env={}):
        return "SUBSTRING(%s,%s,%s)" % (
            self.expand(field, query_env=query_env),
            parameters[0],
            parameters[1],
        )

    def primary_key(self, key):
        return "PRIMARY KEY CLUSTERED (%s)" % key

    def concat_add(self, tablename):
        return "; ALTER TABLE %s ADD " % tablename

    def drop_index(self, name, table):
        return "DROP INDEX %s ON %s;" % (self.quote(name), table._rname)

    # Spatial predicates map onto SQL Server geometry/geography instance
    # methods; the trailing '=1' turns the BIT result into a predicate.
    def st_astext(self, first, query_env={}):
        return "%s.STAsText()" % self.expand(first, query_env=query_env)

    def st_contains(self, first, second, query_env={}):
        return "%s.STContains(%s)=1" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def st_distance(self, first, second, query_env={}):
        return "%s.STDistance(%s)" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def st_equals(self, first, second, query_env={}):
        return "%s.STEquals(%s)=1" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def st_intersects(self, first, second, query_env={}):
        return "%s.STIntersects(%s)=1" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def st_overlaps(self, first, second, query_env={}):
        return "%s.STOverlaps(%s)=1" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def st_touches(self, first, second, query_env={}):
        return "%s.STTouches(%s)=1" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )

    def st_within(self, first, second, query_env={}):
        return "%s.STWithin(%s)=1" % (
            self.expand(first, query_env=query_env),
            self.expand(second, first.type, query_env=query_env),
        )
@dialects.register_for(MSSQLN)
class MSSQLNDialect(MSSQLDialect):
    """MSSQL dialect variant using national (Unicode) character types."""

    @sqltype_for("string")
    def type_string(self):
        return "NVARCHAR(%(length)s)"

    @sqltype_for("text")
    def type_text(self):
        return "NTEXT"

    def ilike(self, first, second, escape=None, query_env={}):
        """Case-insensitive LIKE: the pattern is lowercased (unless it is
        already an Expression) and compared against LOWER(first)."""
        if isinstance(second, Expression):
            second = self.expand(second, "string", query_env=query_env)
        else:
            second = self.expand(second, "string", query_env=query_env).lower()
            if escape is None:
                escape = "\\"
                second = second.replace(escape, escape * 2)
            # Preserve the N'...' national-string prefix casing after the
            # lower() above.
            if second.startswith("n'"):
                second = "N'" + second[2:]
        return "(%s LIKE %s ESCAPE '%s')" % (
            self.lower(first, query_env),
            second,
            escape,
        )
@dialects.register_for(MSSQL3)
class MSSQL3Dialect(MSSQLDialect):
    """Dialect for MSSQL >= 2005: ROW_NUMBER()-based pagination and
    VARCHAR(MAX)/TIME types."""

    @sqltype_for("text")
    def type_text(self):
        return "VARCHAR(MAX)"

    @sqltype_for("time")
    def type_time(self):
        return "TIME(7)"

    def _rebuild_select_for_limit(
        self, fields, tables, dst, whr, grp, order, lmin, lmax
    ):
        """Wrap the query so rows lmin..lmax can be selected via
        ROW_NUMBER() OVER (ORDER BY ...): inner fields are aliased
        f_0..f_n and the outer query filters on the synthesized w_row.

        NOTE(review): f_outer splits on "," while f_inner splits on
        ", " -- the lists can diverge when the field list is not
        comma-space separated; confirm upstream before changing.
        """
        f_outer = ["f_%s" % i for i in range(len(fields.split(",")))]
        f_inner = [field for field in fields.split(", ")]
        f_iproxy = ", ".join([self._as(o, n) for (o, n) in zip(f_inner, f_outer)])
        f_oproxy = ", ".join(f_outer)
        interp = (
            "SELECT%s %s FROM ("
            + "SELECT%s ROW_NUMBER() OVER (%s) AS w_row, %s FROM %s%s%s)"
            + " TMP WHERE w_row BETWEEN %i and %i;"
        )
        return interp % (
            dst,
            f_oproxy,
            dst,
            order,
            f_iproxy,
            tables,
            whr,
            grp,
            lmin,
            lmax,
        )

    def select(
        self,
        fields,
        tables,
        where=None,
        groupby=None,
        having=None,
        orderby=None,
        limitby=None,
        distinct=False,
        for_update=False,
    ):
        # limitby with a zero lower bound still uses TOP; a non-zero
        # offset delegates to the ROW_NUMBER() rewrite above.
        dst, whr, grp, order, limit, offset, upd = "", "", "", "", "", "", ""
        if distinct is True:
            dst = " DISTINCT"
        elif distinct:
            dst = " DISTINCT ON (%s)" % distinct
        if where:
            whr = " %s" % self.where(where)
        if groupby:
            grp = " GROUP BY %s" % groupby
            if having:
                grp += " HAVING %s" % having
        if orderby:
            order = " ORDER BY %s" % orderby
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                dst += " TOP %i" % lmax
            else:
                return self._rebuild_select_for_limit(
                    fields, tables, dst, whr, grp, order, lmin, lmax
                )
        if for_update:
            upd = " FOR UPDATE"
        return "SELECT%s %s FROM %s%s%s%s%s%s%s;" % (
            dst,
            fields,
            tables,
            whr,
            grp,
            order,
            limit,
            offset,
            upd,
        )
@dialects.register_for(MSSQL4)
class MSSQL4Dialect(MSSQL3Dialect):
    """Dialect for MSSQL >= 2012: OFFSET ... FETCH based pagination."""

    def select(
        self,
        fields,
        tables,
        where=None,
        groupby=None,
        having=None,
        orderby=None,
        limitby=None,
        distinct=False,
        for_update=False,
    ):
        """Build a SELECT statement.

        limitby=(0, n) becomes TOP n; a non-zero lower bound uses
        OFFSET ... ROWS FETCH NEXT ... ROWS ONLY, which T-SQL only
        permits after an ORDER BY, so a random ordering is synthesized
        when the caller supplied none.
        """
        dst, whr, grp, order, limit, offset, upd = "", "", "", "", "", "", ""
        if distinct is True:
            dst = " DISTINCT"
        elif distinct:
            dst = " DISTINCT ON (%s)" % distinct
        if where:
            whr = " %s" % self.where(where)
        if groupby:
            grp = " GROUP BY %s" % groupby
            if having:
                grp += " HAVING %s" % having
        if orderby:
            order = " ORDER BY %s" % orderby
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                dst += " TOP %i" % lmax
            else:
                if not order:
                    # BUGFIX: `random` is a plain method on MSSQLDialect
                    # (it returns "NEWID()"); the original interpolated
                    # the bound method object itself ("%s" % self.random),
                    # emitting "ORDER BY <bound method ...>" -- call it.
                    order = " ORDER BY %s" % self.random()
                offset = " OFFSET %i ROWS FETCH NEXT %i ROWS ONLY" % (
                    lmin,
                    (lmax - lmin),
                )
        if for_update:
            upd = " FOR UPDATE"
        return "SELECT%s %s FROM %s%s%s%s%s%s%s;" % (
            dst,
            fields,
            tables,
            whr,
            grp,
            order,
            limit,
            offset,
            upd,
        )
@dialects.register_for(MSSQL3N)
class MSSQL3NDialect(MSSQLNDialect, MSSQL3Dialect):
    """National-character (Unicode) variant of the MSSQL3 dialect."""
    @sqltype_for("text")
    def type_text(self):
        # Unicode text columns map to NVARCHAR(MAX) instead of VARCHAR/TEXT.
        return "NVARCHAR(MAX)"
@dialects.register_for(MSSQL4N)
class MSSQL4NDialect(MSSQLNDialect, MSSQL4Dialect):
    """National-character (Unicode) variant of the MSSQL4 dialect."""
    @sqltype_for("text")
    def type_text(self):
        # Unicode text columns map to NVARCHAR(MAX) instead of VARCHAR/TEXT.
        return "NVARCHAR(MAX)"
@dialects.register_for(Vertica)
class VerticaDialect(MSSQLDialect):
dt_sep = " "
@sqltype_for("boolean")
def type_boolean(self):
return "BOOLEAN"
@sqltype_for("text")
def type_text(self):
return "BYTEA"
@sqltype_for("json")
def type_json(self):
return self.types["string"]
@sqltype_for("blob")
def type_blob(self):
return "BYTEA"
@sqltype_for("double")
def type_double(self):
return "DOUBLE PRECISION"
@sqltype_for("time")
def type_time(self):
return "TIME"
@sqltype_for("id")
def type_id(self):
return "IDENTITY"
@sqltype_for("reference")
def type_reference(self):
return "INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s"
@sqltype_for("big-reference")
def type_big_reference(self):
return "BIGINT REFERENCES %(foreign_key)s ON DELETE" + " %(on_delete_action)s"
def extract(self, first, what, query_env={}):
return "DATE_PART('%s', TIMESTAMP %s)" % (
what,
self.expand(first, query_env=query_env),
)
def truncate(self, table, mode=""):
if mode:
mode = " %s" % mode
return | |
self._boxes = None
self._masks = None
# Allows the user to pass an Image object as the first parameter and have a perfect copy,
# only overriding additional metdata passed in. If this pattern is compelling, we can generalize.
if isinstance(data_or_path, Image):
self._initialize_from_wbimage(data_or_path)
elif isinstance(data_or_path, six.string_types):
self._initialize_from_path(data_or_path)
else:
self._initialize_from_data(data_or_path, mode)
self._set_initialization_meta(grouping, caption, classes, boxes, masks)
def _set_initialization_meta(
self,
grouping = None,
caption = None,
classes = None,
boxes = None,
masks = None,
):
if grouping is not None:
self._grouping = grouping
if caption is not None:
self._caption = caption
if classes is not None:
if not isinstance(classes, Classes):
self._classes = Classes(classes)
else:
self._classes = classes
if boxes:
if not isinstance(boxes, dict):
raise ValueError('Images "boxes" argument must be a dictionary')
boxes_final = {}
for key in boxes:
box_item = boxes[key]
if isinstance(box_item, BoundingBoxes2D):
boxes_final[key] = box_item
elif isinstance(box_item, dict):
boxes_final[key] = BoundingBoxes2D(box_item, key)
self._boxes = boxes_final
if masks:
if not isinstance(masks, dict):
raise ValueError('Images "masks" argument must be a dictionary')
masks_final = {}
for key in masks:
mask_item = masks[key]
if isinstance(mask_item, ImageMask):
masks_final[key] = mask_item
elif isinstance(mask_item, dict):
masks_final[key] = ImageMask(mask_item, key)
self._masks = masks_final
self._width, self._height = self._image.size # type: ignore
def _initialize_from_wbimage(self, wbimage):
self._grouping = wbimage._grouping
self._caption = wbimage._caption
self._width = wbimage._width
self._height = wbimage._height
self._image = wbimage._image
self._classes = wbimage._classes
self._path = wbimage._path
self._is_tmp = wbimage._is_tmp
self._extension = wbimage._extension
self._sha256 = wbimage._sha256
self._size = wbimage._size
self.format = wbimage.format
self._artifact_source = wbimage._artifact_source
self._artifact_target = wbimage._artifact_target
# We do not want to implicitly copy boxes or masks, just the image-related data.
# self._boxes = wbimage._boxes
# self._masks = wbimage._masks
def _initialize_from_path(self, path):
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._set_file(path, is_tmp=False)
self._image = pil_image.open(path)
self._image.load()
ext = os.path.splitext(path)[1][1:]
self.format = ext
    def _initialize_from_data(self, data, mode = None,):
        """Build the underlying PIL image from in-memory data.

        ``data`` may be a matplotlib figure, a PIL image, a torch tensor, or
        an array-like object (numpy array / TF eager tensor).  ``mode``
        optionally forces the PIL image mode instead of guessing it from the
        array shape.  The result is saved as a temporary PNG file.
        """
        pil_image = util.get_module(
            "PIL.Image",
            required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
        )
        if util.is_matplotlib_typename(util.get_full_typename(data)):
            # Render the matplotlib figure into an in-memory buffer first.
            buf = six.BytesIO()
            util.ensure_matplotlib_figure(data).savefig(buf)
            self._image = pil_image.open(buf)
        elif isinstance(data, pil_image.Image):
            self._image = data
        elif util.is_pytorch_tensor_typename(util.get_full_typename(data)):
            vis_util = util.get_module(
                "torchvision.utils", "torchvision is required to render images"
            )
            # Detach from the autograd graph before converting to pixels.
            if hasattr(data, "requires_grad") and data.requires_grad:
                data = data.detach()
            data = vis_util.make_grid(data, normalize=True)
            # CHW float tensor -> HWC uint8 numpy array for PIL.
            self._image = pil_image.fromarray(
                data.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
            )
        else:
            if hasattr(data, "numpy"):  # TF data eager tensors
                data = data.numpy()
            if data.ndim > 2:
                data = data.squeeze()  # get rid of trivial dimensions as a convenience
            self._image = pil_image.fromarray(
                self.to_uint8(data), mode=mode or self.guess_mode(data)
            )
        tmp_path = os.path.join(_MEDIA_TMP.name, util.generate_id() + ".png")
        self.format = "png"
        # NOTE(review): transparency=None appears intended to suppress palette
        # transparency in the saved PNG -- confirm against PIL save semantics.
        self._image.save(tmp_path, transparency=None)
        self._set_file(tmp_path, is_tmp=True)
@classmethod
def from_json(
cls, json_obj, source_artifact
):
classes = None
if json_obj.get("classes") is not None:
classes = source_artifact.get(json_obj["classes"]["path"])
masks = json_obj.get("masks")
_masks = None
if masks:
_masks = {}
for key in masks:
_masks[key] = ImageMask.from_json(masks[key], source_artifact)
_masks[key]._set_artifact_source(source_artifact)
_masks[key]._key = key
boxes = json_obj.get("boxes")
_boxes = None
if boxes:
_boxes = {}
for key in boxes:
_boxes[key] = BoundingBoxes2D.from_json(boxes[key], source_artifact)
_boxes[key]._key = key
return cls(
source_artifact.get_path(json_obj["path"]).download(),
caption=json_obj.get("caption"),
grouping=json_obj.get("grouping"),
classes=classes,
boxes=_boxes,
masks=_masks,
)
@classmethod
def get_media_subdir(cls):
return os.path.join("media", "images")
    def bind_to_run(
        self,
        run,
        key,
        step,
        id_ = None,
    ):
        """Attach this image, and any attached boxes/masks, to a run for logging."""
        super(Image, self).bind_to_run(run, key, step, id_)
        if self._boxes is not None:
            for i, k in enumerate(self._boxes):
                # NOTE(review): id_ is rebound on every iteration, so index
                # suffixes accumulate across boxes (and the mutated id_ then
                # feeds the masks loop below) -- confirm this compounding of
                # ids is intended rather than "{original_id}{i}" per item.
                id_ = "{}{}".format(id_, i) if id_ is not None else None
                self._boxes[k].bind_to_run(run, key, step, id_)
        if self._masks is not None:
            for i, k in enumerate(self._masks):
                id_ = "{}{}".format(id_, i) if id_ is not None else None
                self._masks[k].bind_to_run(run, key, step, id_)
    def to_json(self, run_or_artifact):
        """Serialize this image (plus classes/boxes/masks) for a run or artifact.

        Raises ValueError when masks/boxes are present without classes while
        adding to an artifact, or when the argument is neither a Run nor an
        Artifact.
        """
        json_dict = super(Image, self).to_json(run_or_artifact)
        json_dict["_type"] = Image._log_type
        json_dict["format"] = self.format
        if self._width is not None:
            json_dict["width"] = self._width
        if self._height is not None:
            json_dict["height"] = self._height
        if self._grouping:
            json_dict["grouping"] = self._grouping
        if self._caption:
            json_dict["caption"] = self._caption
        run_class, artifact_class = _safe_sdk_import()
        if isinstance(run_or_artifact, artifact_class):
            artifact = run_or_artifact
            if (
                self._masks is not None or self._boxes is not None
            ) and self._classes is None:
                raise ValueError(
                    "classes must be passed to wandb.Image which have masks or bounding boxes when adding to artifacts"
                )
            if self._classes is not None:
                # Here, rather than give each class definition its own name (and entry), we
                # purposely are giving a non-unique class name of /media/cls.classes.json.
                # This may create user confusion if multiple different class definitions
                # are expected in a single artifact. However, we want to catch this user pattern
                # if it exists and dive deeper. The alternative code is provided below.
                #
                class_name = os.path.join("media", "cls")
                #
                # class_name = os.path.join(
                #     "media", "classes", os.path.basename(self._path) + "_cls"
                # )
                #
                classes_entry = artifact.add(self._classes, class_name)
                json_dict["classes"] = {
                    "type": "classes-file",
                    "path": classes_entry.path,
                    "digest": classes_entry.digest,
                }
        elif not isinstance(run_or_artifact, run_class):
            raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
        if self._boxes:
            json_dict["boxes"] = {
                k: box.to_json(run_or_artifact) for (k, box) in self._boxes.items()
            }
        if self._masks:
            json_dict["masks"] = {
                k: mask.to_json(run_or_artifact) for (k, mask) in self._masks.items()
            }
        return json_dict
def guess_mode(self, data):
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)
)
@classmethod
def to_uint8(cls, data):
"""
Converts floating point image on the range [0,1] and integer images
on the range [0,255] to uint8, clipping if necessary.
"""
np = util.get_module(
"numpy",
required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy",
)
# I think it's better to check the image range vs the data type, since many
# image libraries will return floats between 0 and 255
# some images have range -1...1 or 0-1
dmin = np.min(data)
if dmin < 0:
data = (data - np.min(data)) / np.ptp(data)
if np.max(data) <= 1.0:
data = (data * 255).astype(np.int32)
# assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
return data.clip(0, 255).astype(np.uint8)
@classmethod
def seq_to_json(
cls,
seq,
run,
key,
step,
):
"""
Combines a list of images into a meta dictionary object describing the child images.
"""
if wandb.TYPE_CHECKING and TYPE_CHECKING:
seq = cast(Sequence["Image"], seq)
jsons = [obj.to_json(run) for obj in seq]
media_dir = cls.get_media_subdir()
for obj in jsons:
expected = util.to_forward_slash_path(media_dir)
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Image's must be in the {} directory, not {}".format(
cls.get_media_subdir(), obj["path"]
)
)
num_images_to_log = len(seq)
width, height = seq[0]._image.size # type: ignore
format = jsons[0]["format"]
def size_equals_image(image):
img_width, img_height = image._image.size # type: ignore
return img_width == width and img_height == height # type: ignore
sizes_match = all(size_equals_image(img) for img in seq)
if not sizes_match:
logging.warning(
"Images sizes do not match. This will causes images to be display incorrectly in the UI."
)
meta = {
"_type": "images/separated",
"width": width,
"height": height,
"format": format,
"count": num_images_to_log,
}
captions = Image.all_captions(seq)
if captions:
meta["captions"] = captions
all_masks = Image.all_masks(seq, run, key, step)
if all_masks:
meta["all_masks"] = all_masks
all_boxes = Image.all_boxes(seq, run, key, step)
if all_boxes:
meta["all_boxes"] = all_boxes
return meta
@classmethod
def all_masks(
cls,
images,
run,
run_key,
step,
):
all_mask_groups = []
for image in images:
if image._masks:
mask_group = {}
for k in image._masks:
mask = image._masks[k]
mask_group[k] = mask.to_json(run)
all_mask_groups.append(mask_group)
else:
all_mask_groups.append(None)
if all_mask_groups and not all(x is None for x in all_mask_groups):
return all_mask_groups
else:
return False
@classmethod
def all_boxes(
cls,
images,
run,
run_key,
step,
):
all_box_groups = []
for image in images:
if image._boxes:
box_group = {}
for k in image._boxes:
box = image._boxes[k]
box_group[k] = box.to_json(run)
all_box_groups.append(box_group)
else:
all_box_groups.append(None)
if all_box_groups and not all(x is None for x in all_box_groups):
return all_box_groups
else:
return False
@classmethod
def all_captions(
cls, images
):
return cls.captions(images)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
if not isinstance(other, Image):
return False
else:
return (
self._grouping == other._grouping
and self._caption == other._caption
and self._width == other._width
and self._height == other._height
and self._image == other._image
and self._classes == other._classes
)
def to_data_array(self):
| |
# -*- coding: utf-8 -*-
"""
Mission plan parser for DEFCON28 submissions
"""
import re
import pandas as pd
import datetime as dt
import pytz
def parse_mission_plan(filepath,oppWindow):
"""
Parse text at given filepath
Input Parameters
----------
filepath : str
- Filepath for file to be parsed
oppWindow: str
- Opportunity window allowed to capture target image
- Times must be in UTC in the form '<start>-to-<end>' where start and end are written as yr:mo:ddThh:mm:ssZ
- Ex: oppWindow = '2020-03-19T21:11:15Z-to-2020-03-19T21:12:15Z'
Returns
-------
t_image: time to capture target image
q_cmd: commanded quaternion
oppWindowFlag: flag that checks whether t_image is within the opportunity window
slewCmdFlag: flag that checks is slew to commanded quaternion exists in submission
cam_exp: camera exposure time in micro-seconds
actualCmdOrder: command order in mission plan submission
"""
### Parse through opportunity window
opp = re.split('[-to=T:Z\n]',oppWindow)
# Remove empty cells in list
while ("" in opp):
opp.remove("")
while ('' in opp):
opp.remove('')
# Set epoch (this just serves as a starting reference to check the starting and ending windows) and convert
# start/end time to the same format as the epoch
epoch = dt.datetime(1970,1,1,0,0,0)
startOpp = dt.datetime(int(opp[0]), int(opp[1]), int(opp[2]), int(opp[3]), int(opp[4]), int(opp[5]))
endOpp = dt.datetime(int(opp[6]), int(opp[7]), int(opp[8]), int(opp[9]), int(opp[10]), int(opp[11]))
# Convert opportunity endpoints to seconds
startOpp = (startOpp - epoch).total_seconds()
endOpp = (endOpp - epoch).total_seconds()
### Parse and check mission plan
#Initialize variables required by the evaluation script (i.e. time to capture image, quaternion command and opportunity window flag)
t_image = []
q_cmd = []
cam_exp = []
slewCmdFlag = 0
oppWindowFlag = 0
tBtwnCmds = 0
cmdExecTimeFlag = 0
unknownCmdFlag = 0
# Initialize missing command parameters
actualCmdOrder = ['']*6 # Max 6 commands are expected
cnt = 0;
# Initialize error hints list
errHintsTmp = ['']*40
errHints = ['']*40
hintsCnt = 0
# Remove empty lines/spaces from the original submissions and convert all text to lower case.
# Write results to a new text file that will be parsed below
new_filepath = rewriteMissionPlan(filepath)
# Create error list
errorFile = open("submissionErrors.txt","w")
#Parse through re-written mission plan (content in new file should be identical to initial submission)
with open(new_filepath, encoding="utf-8") as fp:
# Read current line in text file
line = fp.readline()
while line:
### Check if ACS init command has been provided
reg_match = _RegExLib(line)
if reg_match.initACS:
# Store command in actualCmdOrder list. Will be checked against expected command list later
cmdText = "Initialize ACS"
actualCmdOrder[cnt] = cmdText
# Read current line in text file
line = fp.readline()
# Check if command execution time has been provided for init ACS command
reg_match = _RegExLib(line)
if reg_match.cmdExecTime:
[validTime, value] = parseTime(line)
if validTime:
# Initialize timing check between commands
tmpCheck = dt.datetime(int(value[0]), int(value[1]), int(value[2]), int(value[3]), int(value[4]), int(value[5]))
tBtwnCmds = (tmpCheck - epoch).total_seconds()
# Proceed to next line
line = fp.readline()
else:
# Save error hint
hintTxt = 'Errors exist in the command execution time format for the initialize ACS command.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Bad command execution time format for initialize ACS command. \n'
errorFile.writelines(errTxt)
line = fp.readline()
else:
# Save error hint
hintTxt = 'Initialize ACS command arguments are missing.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Command execution time for initialize ACS command was not provided. \n'
errorFile.writelines(errTxt)
### Check if slew to quaternion command has been provided
elif reg_match.slew2CmdQuat:
cmdText = 'Slew to Commanded Quaternion'
actualCmdOrder[cnt] = cmdText
# Read current line in text file
line = fp.readline()
# Check if command execution time has been provided for slew to quaternion command
reg_match = _RegExLib(line)
if reg_match.cmdExecTime:
[validTime, value] = parseTime(line)
if validTime:
tmpCheck = dt.datetime(int(value[0]), int(value[1]), int(value[2]), int(value[3]), int(value[4]), int(value[5]))
tmpCheck = (tmpCheck - epoch).total_seconds()
# If initialize ACS command was provided, check if time between init ACS an slew to quat command is at 35 min (2100 s)
if tBtwnCmds > 1:
if actualCmdOrder[cnt] == 'Initialize ACS':
if ((tmpCheck - tBtwnCmds) < 2100):
# Save error hint
hintTxt = 'Errors exist in the timing between initialize ACS and slew to quaternion command.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Warning: Time between initialize ACS and slew to quaternion command is less than the required 2100 sec. \n'
errorFile.writelines(errTxt)
else:
if ((tmpCheck - tBtwnCmds) < 5):
# Save error hint
hintTxt = 'Errors exist in the timing between the slew to quaternion command and the previous command.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Warning: Time between the slew to quaternion command and the previous command is less than the required 5 sec. \n'
errorFile.writelines(errTxt)
else:
tBtwnCmds = tmpCheck
tBtwnCmds = tmpCheck
cmdExecTimeFlag = 1
line = fp.readline()
else:
# Save error hint
hintTxt = 'Errors exist in the command execution time format for the slew to quaternion command.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Bad command execution time format for slew to quaternion command. \n'
errorFile.writelines(errTxt)
line = fp.readline()
else:
# Save error hint
hintTxt = 'Slew to quaternion command has missing arguments.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Command execution time for slew to quaternion command was not provided. \n'
errorFile.writelines(errTxt)
# Check if ECI to Body quaternion has been provided for slew to quaternion command
argName = re.split("=",line)
dum = re.sub("-"," ", argName[0])
line = dum + argName[1]
reg_match = _RegExLib(line)
if reg_match.eci2BodyQuat:
value = re.sub("[0-9a-z]+\.\s+|[\[\]]|\n","",line)
value = re.split('[a-z]+|.[eci to body quaternion=]|,', value)
# Replace all non-ascii characters with a hyphen.
for ii in range(len(value)):
value[ii] = re.sub(r'[^\x00-\x7F]+','-',value[ii])
while("" in value) :
value.remove("")
while ('' in value):
value.remove('')
try:
q_cmd = [float(value[0]), float(value[1]), float(value[2]), float(value[3])]
line = fp.readline()
slewCmdFlag = 1
except:
slewCmdFlag = 0
hintTxt = 'Error in ECI-to-Body quaternion. \n'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Error in ECI-to-Body quaternion. \n'
errorFile.writelines(errTxt)
line = fp.readline()
else:
# Save error hint
hintTxt = 'Slew to quaternion command has missing arguments.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'ECI-to-Body quaternion for slew to quaternion command was not provided. \n'
errorFile.writelines(errTxt)
# Check if slew completion time has been provided for slew to quaternion command
reg_match = _RegExLib(line)
if reg_match.slewCompTime:
value = re.sub("[0-9a-z]+\.\s+","",line)
value = re.split('[-slew completion time.=t:z\n]', value)
while("" in value) :
value.remove("")
try :
if cmdExecTimeFlag:
tmpCheck = dt.datetime(int(value[0]), int(value[1]), int(value[2]), int(value[3]), int(value[4]), int(value[5]))
tmpCheck = (tmpCheck - epoch).total_seconds()
if ((tmpCheck - tBtwnCmds) < 180):
# Save error hint
hintTxt = 'Errors exist in the timing between command execution time and slew completion time in the slew to quaternion command.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Warning: Time between command execution time and slew completion time in the slew to quaternion command is less than the required 180 sec. \n'
errorFile.writelines(errTxt)
line = fp.readline()
except:
# Save error hint
hintTxt = 'Errors exist in the slew completion time format for the slew to quaternion command.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Bad slew completion time format for slew to quaternion command. \n'
errorFile.writelines(errTxt)
line = fp.readline()
else:
# Save error hint
hintTxt = 'Slew to quaternion command has missing arguments.'
errHintsTmp[hintsCnt] = hintTxt
hintsCnt += 1
# Write error text to file
errTxt = 'Slew completion | |
<filename>src/htsql/core/adapter.py
#
# Copyright (c) 2006-2013, Prometheus Research, LLC
#
"""
:mod:`htsql.core.adapter`
=========================
This module provides a mechanism for pluggable extensions.
"""
from .util import listof, aresubclasses, toposort
from .context import context
import sys
import types
class Component(object):
    """
    A unit of extension in the HTSQL component architecture.
    *HTSQL component architecture* allows you to:
    - declare *interfaces* that provide various services;
    - define *components* implementing the interfaces;
    - given an interface and a *dispatch key*, produce a component which
      implements the interface for the given key.
    Three types of interfaces are supported: *utilities*, *adapters* and
    *protocols*; see :class:`Utility`, :class:`Adapter`, :class:`Protocol`
    respectively.
    NOTE: this class is Python 2 only -- it relies on `__metaclass__`,
    `function.func_code` and Python 2 `except` syntax.
    """
    # Augment method names with prefix `<name>.` to make the adapter
    # name visible in tracebacks.
    class __metaclass__(type):
        def __new__(mcls, name, bases, content):
            # Iterate over all values in the class namespace.
            for value in content.values():
                # Ignore non-function attributes.
                if not isinstance(value, types.FunctionType):
                    continue
                # Update the code name and regenerate the code object.
                code = value.func_code
                code_name = code.co_name
                if '.' in code_name:
                    continue
                code_name = '%s.%s' % (name, code_name)
                code = types.CodeType(code.co_argcount, code.co_nlocals,
                                      code.co_stacksize, code.co_flags,
                                      code.co_code, code.co_consts,
                                      code.co_names, code.co_varnames,
                                      code.co_filename, code_name,
                                      code.co_firstlineno, code.co_lnotab,
                                      code.co_freevars, code.co_cellvars)
                # Patch the function object.
                value.func_code = code
            # Create the class.
            return type.__new__(mcls, name, bases, content)
        def __repr__(cls):
            # Overriden to make a printable representation.
            return "%s.%s" % (cls.__module__, cls.__name__)
    @staticmethod
    def __components__():
        """
        Produce a list of all components of the active application.
        """
        # Get the component registry of the active application.
        registry = context.app.component_registry
        # A shortcut: return cached components.
        if registry.components is not None:
            return registry.components
        # A list of `Component` subclasses defined in modules exported by addons.
        # Breadth-first walk over the subclass tree rooted at `Component`.
        components = [Component]
        idx = 0
        while idx < len(components):
            for subclass in components[idx].__subclasses__():
                # Skip realizations.
                if issubclass(subclass, Realization):
                    continue
                # Check if the component belongs to the current application.
                if subclass.__enabled__():
                    components.append(subclass)
            idx += 1
        # Cache and return the components.
        registry.components = components
        return components
    @classmethod
    def __implementations__(interface):
        """
        Produces a list of all components implementing the interface.
        """
        # Get the component registry of the active application.
        registry = context.app.component_registry
        # A shortcut: return cached implementations.
        try:
            return registry.implementations[interface]
        except KeyError:
            pass
        # Get all active components.
        components = interface.__components__()
        # Leave only components implementing the interface.
        implementations = [component
                           for component in components
                           if component.__implements__(interface)]
        # Cache and return the implementations.
        registry.implementations[interface] = implementations
        return implementations
    @classmethod
    def __realize__(interface, dispatch_key):
        """
        Produces a realization of the interface for the given dispatch key.
        """
        # Get the component registry of the active application.
        registry = context.app.component_registry
        # A shortcut: if the realization for the given interface and the
        # dispatch key is already built, return it.
        try:
            return registry.realizations[interface, dispatch_key]
        except KeyError:
            pass
        # Get the implementations of the interface.
        implementations = interface.__implementations__()
        # Leave only implementations matching the dispatch key.
        implementations = [implementation
                           for implementation in implementations
                           if implementation.__matches__(dispatch_key)]
        # Note: commented out since we force the interface component
        # to match any dispatch keys.
        ## Check that we have at least one matching implementation.
        #if not implementations:
        #    raise RuntimeError("when realizing interface %s for key %r,"
        #                       " unable to find matching implementations"
        #                       % (interface.__module__, dispatch_key))
        # Generate a function:
        #   order(implementation) -> [dominated implementations].
        order_graph = dict((implementation, [])
                           for implementation in implementations)
        for implementation in implementations:
            for challenger in implementations:
                if implementation is challenger:
                    continue
                if implementation.__dominates__(challenger):
                    order_graph[implementation].append(challenger)
                elif implementation.__follows__(challenger):
                    order_graph[challenger].append(implementation)
        order = (lambda implementation: order_graph[implementation])
        # Now we need to order the implementations unambiguously.
        try:
            implementations = toposort(implementations, order, is_total=True)
        # Python 2 `except` syntax (Python 3 would use `as exc`).
        except RuntimeError, exc:
            # We intercept exceptions to provide a nicer error message.
            # `message` is an explanation we discard; `conflict` is a list
            # of implementations which either form a domination loop or
            # have no ordering relation between them.
            message, conflict = exc
            interface_name = str(interface)
            component_names = ", ".join(str(component)
                                        for component in conflict)
            if conflict[0] is conflict[-1]:
                problem = "an ordering loop"
            else:
                problem = "ambiguous ordering"
            # Report a problem.
            raise RuntimeError("when realizing interface %s for key %r,"
                               " detected %s in components: %s"
                               % (interface_name, dispatch_key,
                                  problem, component_names))
        # We want the most specific implementations first.
        implementations.reverse()
        # Force the interface component to the list of implementations.
        if interface not in implementations:
            implementations.append(interface)
        # Generate the name of the realization of the form:
        #   interface[implementation1,implementation2,...]
        module = interface.__module__
        name = "%s[%s]" % (interface.__name__,
                           ",".join(str(component)
                                    for component in implementations
                                    if component is not interface))
        # Get the list of bases for the realization.
        bases = tuple([Realization] + implementations)
        # Class attributes for the realization.
        attributes = {
            '__module__': module,
            '__interface__': interface,
            '__dispatch_key__': dispatch_key,
        }
        # Generate the realization.
        realization = type(name, bases, attributes)
        # Cache and return the realization.
        registry.realizations[interface, dispatch_key] = realization
        return realization
    @classmethod
    def __enabled__(component):
        """
        Tests if the component is a part of the current application.
        """
        registry = context.app.component_registry
        return (component.__module__ in registry.modules)
    @classmethod
    def __implements__(component, interface):
        """
        Tests if the component implements the interface.
        """
        return issubclass(component, interface)
    @classmethod
    def __dominates__(component, other):
        """
        Tests if the component dominates another component.
        """
        # Refine in subclasses.
        return issubclass(component, other)
    @classmethod
    def __follows__(component, other):
        """
        Tests if the component is dominated by another component.
        """
        # Refine in subclasses.
        return False
    @classmethod
    def __matches__(component, dispatch_key):
        """
        Tests if the component matches a dispatch key.
        """
        # Override in subclasses.
        return False
    @classmethod
    def __dispatch__(interface, *args, **kwds):
        """
        Extracts the dispatch key from the constructor arguments.
        """
        # Override in subclasses.
        return None
    @classmethod
    def __prepare__(interface, *args, **kwds):
        """
        Instantiates the interface to the given arguments.
        """
        # Extract polymorphic parameters.
        dispatch_key = interface.__dispatch__(*args, **kwds)
        # Realize the interface.
        realization = interface.__realize__(dispatch_key)
        # Instantiate and return the realization.
        return realization(*args, **kwds)
    @classmethod
    def __invoke__(interface, *args, **kwds):
        """
        Realizes and applies the interface to the given arguments.
        Use ``__prepare__()()`` instead when traversing a deeply nested tree.
        """
        # Extract polymorphic parameters.
        dispatch_key = interface.__dispatch__(*args, **kwds)
        # Realize the interface.
        realization = interface.__realize__(dispatch_key)
        # Instantiate and call the realization.
        instance = realization(*args, **kwds)
        return instance()
    def __new__(interface, *args, **kwds):
        # Only realizations are permitted to instantiate.
        assert False
    @classmethod
    def __call__(self):
        """
        Executes the implementation.
        """
        raise NotImplementedError("interface %s is not implemented for: %r"
                                  % (self.__interface__.__name__,
                                     self.__dispatch_key__))
class Realization(Component):
    """
    A realization of an interface for some dispatch key.
    Realization subclasses are generated on the fly by
    ``Component.__realize__`` and are the only components that may be
    instantiated.
    """
    # Filled in on the generated realization class by `__realize__`.
    __interface__ = None
    __dispatch_key__ = None
    def __new__(cls, *args, **kwds):
        # Unlike plain components, realizations may be instantiated.
        instance = object.__new__(cls)
        return instance
class Utility(Component):
    """
    Provides utility interfaces.
    An utility is an interface with a single realization.
    This is an abstract class; to declare an utility interface, create
    a subclass of :class:`Utility`.  To add an implementation of the
    interface, create a subclass of the interface class.
    Class attributes:
    `__rank__` (a number)
        The relative weight of the component relative to the other
        components implementing the same utility.
    Example -- declare an interface ``SayHello`` and provide an
    implementation ``PrintHello``::
        class SayHello(Utility):
            def __call__(self):
                raise NotImplementedError("interface is not implemented")
        class PrintHello(SayHello):
            def __call__(self):
                print "Hello, World!"
        hello = SayHello.__invoke__
    >>> hello()
    Hello, World!
    """
    __rank__ = 0.0
    @classmethod
    def __dominates__(component, other):
        # Subclasses dominate their bases; between unrelated components the
        # higher `__rank__` wins.
        return issubclass(component, other) or component.__rank__ > other.__rank__
    @classmethod
    def __matches__(component, dispatch_key):
        # For an utility, the dispatch key is always a 0-tuple.
        assert dispatch_key == ()
        return True
    @classmethod
    def __dispatch__(interface, *args, **kwds):
        # Utilities dispatch on nothing: the key is always a 0-tuple.
        return ()
def rank(value):
    """Set ``__rank__`` inside the class body currently being executed.

    Intended to be called from within a class statement; it injects
    ``__rank__`` into the caller's local namespace via the frame object.
    """
    assert isinstance(value, (int, float))
    caller_frame = sys._getframe(1)
    caller_frame.f_locals['__rank__'] = value
class Adapter(Component):
"""
Provides adapter interfaces.
An adapter interface provides mechanism for polymorphic dispatch
based on the types of the arguments.
This is an abstract class; to declare an adapter interface, create
a subclass of :class:`Adapter` and indicate the most generic type
signature of the polymorphic arguments using function :func:`adapt`.
To add an implementation of an adapter interface, create a subclass
of the interface class and indicate | |
export_list = self.csv_db.keys()
self.export_sub_set(p_export_file_name, export_list, p_export_delimiter)
pass
# end export
def export_sub_set(
self, p_export_file_name, p_sub_set_list, p_export_delimiter='|'
):
'''Description'''
export_file = open(p_export_file_name, 'w+', encoding='utf-8', errors='ignore')
export_str = p_export_delimiter.join(map(str, self.header)) + '\n'
export_file.write(export_str)
list_len = len(p_sub_set_list)
export_qty = 0
msg = beetools.msg_display(
'Writing subset {} ({})'.format(
os.path.split(p_export_file_name)[1], list_len
),
p_len=self.msg_width,
)
dfx = displayfx.DisplayFx(
self.logger_name,
list_len,
p_msg=msg,
p_verbose=self.silent,
p_bar_len=self.bar_len,
)
# This loop was not tested for a tuple and have to be corrected if found to be not working
if isinstance(self.csv_db, (list, tuple)):
field_pos = self.header.index(self.key1)
for dfx_cntr, sub_set_id in enumerate(p_sub_set_list):
# Traverse the entire list becbeetools.e there might be more that fulfill the criteria
# in a list of tuple scenario.
for csv_db_row in self.csv_db:
if sub_set_id == csv_db_row[field_pos]:
export_str = (
p_export_delimiter.join(map(str, csv_db_row)) + '\n'
)
export_file.write(export_str)
export_qty += 1
dfx.update(dfx_cntr)
elif isinstance(self.csv_db, dict):
for dfx_cntr, sub_set_id in enumerate(p_sub_set_list):
export_tow = []
if sub_set_id in self.csv_db:
for field in self.header:
export_tow.append(self.csv_db[sub_set_id][field])
export_file.write(
p_export_delimiter.join(map(str, export_tow)) + '\n'
)
export_qty += 1
dfx.update(dfx_cntr)
export_file.close()
return export_qty
# end export_sub_set
def read_csv_corr_str_file(self):
'''Parameters'''
if self.csv_corr_str_file_name:
corr_file = open(
self.csv_corr_str_file_name, 'r', encoding='utf-8', errors='ignore'
)
raw_corr_data = corr_file.readlines()
corr_file.close()
for row in raw_corr_data:
self.corr_str_list.append(row[:-1].split('~'))
# end read_csv_corr_str_file
def read_one_key_csv(self):
'''Description'''
def fix_row(p_row):
'''Description'''
def adjust_delimiters(p_row):
'''Split the row in fields'''
row = p_row
row = row.split(self.data_delimiter)
return row
# end adjust_delimiters
def replace_contents(p_row):
'''Replace the line target string with correct details'''
row = p_row
for corr in self.corr_str_list:
row = p_row.replace(corr[0], corr[1])
return row
# end replace_contents
row = replace_contents(p_row)
row = adjust_delimiters(row)
if not self.t_tow:
self.combined_field = ''
new_row = []
else:
new_row = self.t_tow
self.combined_field = self.combined_field[:-1]
for i, field in enumerate(row):
if self.combined_field:
field = self.combined_field + self.data_delimiter + field
self.combined_field = ''
if field[:1] == '"':
if field[-1:] == '"' and len(field) > 1:
new_row.append(field[1:-1])
else:
self.combined_field = field
else:
new_row.append(field)
if self.convert_none:
for i, field in enumerate(new_row):
if field in ['NULL', 'None']:
new_row[i] = None
if self.combined_field:
self.t_tow = new_row
else:
self.t_tow = ''
return new_row
# end fix_row
def get_delimiter():
'''Description'''
if self.data_delimiter == '':
delimiter_cntr = 0
delimiter_pos = 0
for i, delimiter in enumerate(self.delimiter_list):
if (
str(raw_csv_file_data[0]).count(self.delimiter_list[i])
> delimiter_cntr
):
delimiter_cntr = str(raw_csv_file_data[0]).count(
self.delimiter_list[i]
)
delimiter_pos = i
self.data_delimiter = self.delimiter_list[delimiter_pos]
if not self.header_delimiter:
self.header_delimiter = self.data_delimiter
pass
# end getDelimeter
def append_row_to_list(p_row):
'''Append the corrected row to a list structure'''
if key1_index is not None:
if p_row[key1_index].isnumeric():
if not self.subset_range or (
int(p_row[key1_index]) >= self.subset_range[0]
and int(p_row[key1_index]) <= self.subset_range[1]
):
self.csv_db.append(p_row)
else:
self.csv_db.append(p_row)
else:
self.csv_db.append(p_row)
pass
# end append_row_to_list
def append_row_to_db(p_row):
'''Append the corrected row to a db (tuple) structure'''
csv_row = tuple(p_row)
if key1_index is not None:
if csv_row[key1_index].isnumeric():
if not self.subset_range or (
int(csv_row[key1_index]) >= self.subset_range[0]
and int(csv_row[key1_index]) <= self.subset_range[1]
):
self.csv_db.append(csv_row)
else:
self.csv_db.append(csv_row)
else:
self.csv_db.append(csv_row)
pass
# end append_row_to_db
def append_row_to_dict(p_row):
'''Append the corrected row to a dictionary structure'''
csv_row = {}
for j, field in enumerate(self.header):
csv_row[field] = p_row[j]
if csv_row[self.key1].isnumeric():
if not self.subset_range or (
int(csv_row[self.key1]) >= self.subset_range[0]
and int(csv_row[self.key1]) <= self.subset_range[1]
):
self.csv_db[csv_row[self.key1]] = csv_row
else:
self.csv_db[csv_row[self.key1]] = csv_row
pass
# end append_row_to_dict
def del_header():
'''Delete the header according to parameter switch'''
if self.del_head:
if self.struc_type == [] or self.struc_type == ():
del self.csv_db[0]
elif self.struc_type == {}:
del self.csv_db[self.key1]
pass
# end del_header
def replace_header():
'''Replace the header according to parameter switch'''
if not self.header:
self.header = (
raw_csv_file_data[0]
.rstrip('\n')
.replace('"', '')
.split(self.data_delimiter)
)
if self.replace_header:
if self.struc_type == []:
self.csv_db.append(self.header)
elif self.struc_type == ():
self.csv_db.append(tuple(self.header))
elif self.struc_type == {}:
self.csv_db[self.key1] = dict(
zip(
self.header,
[
x.strip('\n"')
for x in raw_csv_file_data[0].split(
self.header_delimiter
)
],
)
)
del raw_csv_file_data[0]
self.nr_of_rows -= 1
pass
# end replace_header
if os.path.isfile(self.csv_file_fame):
csv_file = open(self.csv_file_fame, 'r', encoding='utf-8', errors='ignore')
raw_csv_file_data = csv_file.readlines()
csv_file.close()
self.nr_of_rows = len(raw_csv_file_data)
if self.nr_of_rows:
get_delimiter()
# str_end = 0
replace_header()
header_len = len(self.header)
if self.key1:
key1_index = self.header.index(self.key1)
else:
key1_index = None
msg = beetools.msg_display(
'Reading {} ({})'.format(
os.path.split(self.csv_file_fame)[1], self.nr_of_rows
),
p_len=self.msg_width,
)
dfx = displayfx.DisplayFx(
self.logger_name,
self.nr_of_rows,
p_msg=msg,
p_verbose=self.silent,
p_bar_len=self.bar_len,
)
for row_cntr, row in enumerate(raw_csv_file_data):
fixed_row = fix_row(row.rstrip('\n'))
if not self.t_tow:
if (
self.match_nr_of_fields and header_len == len(fixed_row)
) or not self.match_nr_of_fields:
if self.struc_type == []:
append_row_to_list(fixed_row)
elif self.struc_type == ():
append_row_to_db(fixed_row)
else:
append_row_to_dict(fixed_row)
elif self.match_nr_of_fields:
log_str = '{};{};Unequal fields. Removed rec #{} from {};"{}";"{}";"{}"'.format(
header_len,
len(fixed_row),
row_cntr,
self.csv_file_fame,
','.join(self.header),
row,
','.join(fixed_row),
)
self.logger.warning(log_str)
dfx.update(row_cntr)
self.success = True
else:
if not self.silent:
log_str = 'File does not have data - {}'.format(self.csv_file_fame)
self.logger.warning(log_str)
del_header()
else:
if not self.silent:
log_str = 'File does not exist: {}\n'.format(self.csv_file_fame)
self.logger.warning(log_str)
return self.csv_db
# end read_one_key_csv
def read_two_key_csv(self):
'''Description'''
log_str = self.read_one_key_csv()
self.csv_db = {}
msg = beetools.msg_display('Build two key structure', p_len=self.msg_width)
dfx = displayfx.DisplayFx(
self.logger_name,
len(log_str),
p_msg=msg,
p_verbose=self.silent,
p_bar_len=self.bar_len,
)
for row_cntr, row in enumerate(log_str):
row_dict = {}
for field in self.header:
row_dict[field] = row[self.header.index(field)]
if row_dict[self.key1] not in self.csv_db:
self.csv_db[row_dict[self.key1]] = {row_dict[self.key2]: row_dict}
else:
# Cater for duplicate keys. Asume the last entry is the incorrect one.
if not row_dict[self.key2] in self.csv_db[row_dict[self.key1]]:
self.csv_db[row_dict[self.key1]][row_dict[self.key2]] = row_dict
else:
# Log duplicates. Insert wrting errors to file here if necessary.
error_str = 'read_two_key_csv Duplicate entry: %s %s' % (
row_dict[self.key1],
row_dict[self.key2],
)
print(error_str)
dfx.update(row_cntr)
# end read_two_key_csv
# end CsvWrpr
def do_tests(p_app_path='', p_cls=True):
'''This definition drives the testing and is also called from the PackageIt
module during the PIP process to establish correct functioning before
packaging it.
'''
def basic_test():
'''Basic and mandatory scenario tests for certification of the class'''
success = True
test_folder = Path(__file__).absolute().parents[3] / _name / 'Data'
test_dict01 = {
'PID': {
'PID': 'PID',
'FIDE_PlayerCode': 'FIDE_PlayerCode',
'PlayerName': 'PlayerName',
'FIDE_Federation': 'FIDE_Federation',
'Gender': 'Gender',
'BirthYear': 'BirthYear',
'FIDE_Title': 'FIDE_Title',
},
'61854': {
'PID': '61854',
'FIDE_PlayerCode': '4406176',
'PlayerName': '<NAME>',
'FIDE_Federation': 'COL',
'Gender': 'F',
'BirthYear': '1999',
'FIDE_Title': 'WCM',
},
'62277': {
'PID': '62277',
'FIDE_PlayerCode': '119296',
'PlayerName': '<NAME>',
'FIDE_Federation': 'ARG',
'Gender': 'F',
'BirthYear': '1999',
'FIDE_Title': 'WFM',
},
'116355': {
'PID': '116355',
'FIDE_PlayerCode': '3003191',
'PlayerName': '<NAME>',
'FIDE_Federation': 'URU',
'Gender': 'F',
'BirthYear': '1998',
'FIDE_Title': None,
},
'130161': {
'PID': '130161',
'FIDE_PlayerCode': '3611906',
'PlayerName': '<NAME>',
'FIDE_Federation': 'ECU',
'Gender': 'F',
'BirthYear': '1999',
'FIDE_Title': None,
},
'847094': {
'PID': '847094',
'FIDE_PlayerCode': '5239109',
'PlayerName': ', <NAME>',
'FIDE_Federation': 'PHI',
'Gender': 'M',
'BirthYear': '2002',
'FIDE_Title': '',
},
}
test_dict02 = {
'PID': {
'PID': 'PID',
'FIDE_PlayerCode': 'FIDE_PlayerCode',
'PlayerName': 'PlayerName',
'FIDE_Federation': 'FIDE_Federation',
'Gender': 'Gender',
'BirthYear': 'BirthYear',
'FIDE_Title': 'FIDE_Title',
},
'62277': {
'PID': '62277',
'FIDE_PlayerCode': '119296',
'PlayerName': '<NAME>',
'FIDE_Federation': 'ARG',
'Gender': 'F',
'BirthYear': '1999',
'FIDE_Title': 'WFM',
},
'116355': {
'PID': '116355',
'FIDE_PlayerCode': '3003191',
'PlayerName': '<NAME>',
'FIDE_Federation': 'URU',
'Gender': 'F',
'BirthYear': '1998',
'FIDE_Title': None,
},
'130161': {
'PID': '130161',
'FIDE_PlayerCode': '3611906',
'PlayerName': '<NAME>',
'FIDE_Federation': 'ECU',
'Gender': 'F',
'BirthYear': '1999',
'FIDE_Title': None,
},
}
test_dict03 = {
'PID': {
'PID': 'PID',
'FIDE_PlayerCode': 'FIDE_PlayerCode',
'PlayerName': 'PlayerName',
'Country': 'FIDE_Federation',
'Gender': 'Gender',
'BirthYear': 'BirthYear',
'FIDE_Title': 'FIDE_Title',
},
'61854': {
'PID': '61854',
'FIDE_PlayerCode': '4406176',
'PlayerName': '<NAME>',
'Country': 'COL',
'Gender': 'F',
'BirthYear': '1999',
'FIDE_Title': 'WCM',
},
'62277': {
'PID': '62277',
'FIDE_PlayerCode': '119296',
'PlayerName': '<NAME>',
'Country': 'ARG',
'Gender': 'F',
'BirthYear': '1999',
'FIDE_Title': 'WFM',
},
'116355': {
'PID': '116355',
'FIDE_PlayerCode': '3003191',
'PlayerName': '<NAME>',
'Country': 'URU',
'Gender': 'F',
'BirthYear': '1998',
'FIDE_Title': None,
},
'130161': {
'PID': '130161',
'FIDE_PlayerCode': '3611906',
'PlayerName': '<NAME>',
'Country': 'ECU',
'Gender': 'F',
'BirthYear': '1999',
'FIDE_Title': None,
},
'847094': {
'PID': '847094',
'FIDE_PlayerCode': '5239109',
'PlayerName': ', <NAME>',
'Country': 'PHI',
'Gender': 'M',
'BirthYear': '2002',
'FIDE_Title': '',
},
}
test_dict04 = {
'EventID': {
'GameID': {
'EventID': 'EventID',
'GameID': 'GameID',
'WhitePlayerID': 'WhitePlayerID',
'BlackPlayerID': 'BlackPlayerID',
'WhiteScore': 'WhiteScore',
'DayID': 'DayID',
'M60': 'M60',
}
},
'10103447': {
'63905487': {
'EventID': '10103447',
'GameID': '63905487',
'WhitePlayerID': '213929',
'BlackPlayerID': '275294',
'WhiteScore': '1',
'DayID': '3259',
'M60': '120',
},
'63905515': {
'EventID': '10103447',
'GameID': '63905515',
'WhitePlayerID': '165389',
'BlackPlayerID': '213929',
'WhiteScore': '0.5',
'DayID': '3261',
'M60': '120',
},
'63905554': {
'EventID': '10103447',
'GameID': '63905554',
'WhitePlayerID': '213929',
'BlackPlayerID': '142290',
'WhiteScore': '0.5',
'DayID': '3263',
'M60': '120',
},
},
'10113973': {
'57140921': {
'EventID': '10113973',
'GameID': '57140921',
'WhitePlayerID': '426349',
'BlackPlayerID': '47414',
'WhiteScore': '0.5',
'DayID': '3404',
'M60': '120',
},
'57140922': {
'EventID': '10113973',
'GameID': | |
# Program by <NAME>
# Based on the the 1981 board game InnerCircle, (Milton Bradley company)
import random
import sys
import os
from colored import fg, bg, attr
from board1 import B1
from board2 import B2
from board3 import B3
from board4 import B4
# Toggle ANSI colouring of the rendered board (uses the `colored` package).
COLORIZE_BOARD = True
# Hard mode: piece dot-counts and covered-hole sub-dots are masked with '?'.
HARD_MODE = False
def flatten(l): return [item for sublist in l for item in sublist]
RESET = fg(15)+bg(0)+attr(0)
def colorize(text, foreground, background, attribute):
    """Wrap *text* in the given colour/attribute codes, ending with RESET."""
    prefix = fg(foreground) + bg(background) + attr(attribute)
    return prefix + text + RESET
class HiddenPrints:
    """Context manager that silences ``print`` by redirecting stdout to devnull."""
    def __enter__(self):
        # Remember the real stream so it can be restored on exit.
        self._saved_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the devnull handle, then put the original stream back.
        sys.stdout.close()
        sys.stdout = self._saved_stdout
class Display(object):
    # Renders a board (list of row lists of space dicts) as formatted text.
    def __init__(self, board, *args):
        """
        Constructor for Display class.
        Parameters:
            board: board to be displayed.
            *args: attributes in board data to be displayed.
        """
        self.board = board
        self.board_attributes = args
    def board_output(self):
        """Formats board data into readable output.

        With no attributes requested, renders the default two-column view
        ((name, has_piece, dots) plus sub_dots) with HARD_MODE masking and
        optional colouring; otherwise renders one column per attribute.
        """
        output = ""
        if len(self.board_attributes) == 0:
            output += "{:^120}{:^60}\n".format("(name, has_piece, dots)", "sub_dots")
            for row in self.board:
                new_line = str([(space["name"], space["has_piece"], space["dots"]) for space in row])
                new_line = new_line.replace("False,", "--,") # False is default for has_piece
                new_line = new_line.replace("'", "")
                new_line = new_line.replace("), (", ") (")
                # In hard mode a piece sitting on a hole hides the sub-dots.
                sub_dots = str(["?" if HARD_MODE and space["has_piece"] and space["is_hole"] else space["sub_dots"] for space in row])
                sub_dots = sub_dots.replace("False", "-")
                sub_dots = sub_dots.replace("'", "")
                output += "{:^120}{:^60}\n".format(new_line, sub_dots)
            if HARD_MODE:
                # Mask every piece's dot count ("P3, 2" -> "P3, ?").
                for player in range(1, 19):
                    for dots in range(1, 5):
                        output = output.replace("P{}, {}".format(player, dots), "P{}, ?".format(player))
            if COLORIZE_BOARD:
                # NOTE(review): "P1," (with comma) presumably avoids matching
                # "P1" inside longer tokens -- confirm against board data.
                output = output.replace("P1,", colorize("P1", 1, 0, 4) + ",") # Red
                output = output.replace("P2", colorize("P2", 2, 0, 4)) # Green
                output = output.replace("P3", colorize("P3", 4, 0, 4)) # Blue
                output = output.replace("P4", colorize("P4", 3, 0, 4)) # Yellow
                output = output.replace("P5", colorize("P5", 124, 0, 4)) # Red
                output = output.replace("P6", colorize("P6", 114, 0, 4)) # Green
                output = output.replace("P7", colorize("P7", 104, 0, 4)) # Blue
                output = output.replace("P8", colorize("P8", 94, 0, 4)) # Yellow
                output = output.replace("C", colorize("C", 0, 7, 1)) # White bg
                output = output.replace("H", colorize("H", 0, 5, 1)) # Purple bg
        else:
            # Custom view: one 60-wide column per requested attribute.
            for attribute in self.board_attributes:
                output += "{:^60}".format(attribute)
            output += "\n"
            for row in self.board:
                for attribute in self.board_attributes:
                    output += "{:^60}".format(str([space[attribute] for space in row]))
                output += "\n"
        return output
    def __str__(self):
        """Print board data."""
        return self.board_output()
    def retrieve_attr_data(self):
        """Retrieves data for board arguments in initialization.

        Returns a tuple with one nested row-list grid per attribute named
        at construction time.
        """
        return (*[[[space[attribute] for space in row] for row in self.board] for attribute in self.board_attributes],)
class Actions(object):
    # Board operations: rotation, lookup by space name, legal-move
    # generation, and executing a turn (random or interactive).
    def __init__(self, board):
        """Constructor for Actions class."""
        self.board = board
    def rotate(self, r):
        """Rotate a board r rotations counterclockwise.

        The board's three concentric rings hold 6, 12 and 18 spaces, so
        one rotation step shifts them by 1, 2 and 3 positions
        respectively, keeping the rings aligned.  The centre space is
        unchanged.  Returns the rotated board (a new nested list).
        """
        old_board = self.board
        # The rings listed in a fixed traversal order, inner to outer.
        ring_1 = [old_board[2][2], old_board[2][3], old_board[3][4], old_board[4][3], old_board[4][2], old_board[3][2]]
        ring_2 = [old_board[1][1], old_board[1][2], old_board[1][3], old_board[2][4], old_board[3][5], old_board[4][4],
                  old_board[5][3], old_board[5][2], old_board[5][1], old_board[4][1], old_board[3][1], old_board[2][1]]
        ring_3 = [old_board[0][0], old_board[0][1], old_board[0][2], old_board[0][3], old_board[1][4], old_board[2][5],
                  old_board[3][6], old_board[4][5], old_board[5][4], old_board[6][3], old_board[6][2], old_board[6][1],
                  old_board[6][0], old_board[5][0], old_board[4][0], old_board[3][0], old_board[2][0], old_board[1][0]]
        # Rotate each ring
        inner_ring = ring_1[-r:] + ring_1[:-r]
        middle_ring = ring_2[-2*r:] + ring_2[:-2*r]
        outer_ring = ring_3[-3*r:] + ring_3[:-3*r]
        # Empty 7-row board shell; rotated rings are unpacked back into it.
        new_board = [[0]*4, [0]*5, [0]*6, [0]*7, [0]*6, [0]*5, [0]*4]
        new_board[2][2], new_board[2][3], new_board[3][4], new_board[4][3], new_board[4][2], new_board[3][2] = inner_ring
        (new_board[1][1], new_board[1][2], new_board[1][3], new_board[2][4], new_board[3][5], new_board[4][4],
         new_board[5][3], new_board[5][2], new_board[5][1], new_board[4][1], new_board[3][1], new_board[2][1]) = middle_ring
        (new_board[0][0], new_board[0][1], new_board[0][2], new_board[0][3], new_board[1][4], new_board[2][5],
         new_board[3][6], new_board[4][5], new_board[5][4], new_board[6][3], new_board[6][2], new_board[6][1],
         new_board[6][0], new_board[5][0], new_board[4][0], new_board[3][0], new_board[2][0], new_board[1][0]) = outer_ring
        # The centre space does not move.
        new_board[3][3] = old_board[3][3]
        return new_board
    def find_correct_space(self, piece):
        """Determines correct space based on name of piece, required when board is rotated and index doesn't match name.

        Returns the current index string 'ixy' of the space whose "name"
        equals `piece`.
        """
        for x in range(len(self.board)):
            for y in range(len(self.board[x])):
                if self.board[x][y]["name"] == piece:
                    return "i{}{}".format(x, y)
    def legal_moves(self, piece):
        """Determines legal moves for a given piece.

        `piece` is an index string 'ixy'; returns the names from that
        space's "moves_to" list whose spaces are currently unoccupied.
        """
        x = int(piece[1]) # Row
        y = int(piece[2]) # Collumn
        legal_spaces = self.board[x][y]["moves_to"]
        legal_spaces_without_pieces = []
        for space in legal_spaces:
            # Resolve name -> current index before checking occupancy.
            piece_index = self.find_correct_space(space)
            if not self.board[int(piece_index[1])][int(piece_index[2])]["has_piece"]:
                legal_spaces_without_pieces.append(space)
        return legal_spaces_without_pieces
    def take_turn_random(self, CP_pieces, CP_name):
        """Execute turn for player through random choice.

        A piece in the centre ("i33") must move; otherwise a random
        unblocked piece and a random legal destination are chosen.
        Returns False when every piece is blocked; otherwise mutates the
        board in place.
        """
        center_name = "i33"
        if center_name in CP_pieces:
            piece = center_name
            piece_index = center_name
            print(CP_name, "has a piece in the center...")
        else:
            # Keep only pieces that have at least one legal destination.
            unblocked_pieces = [a_piece for a_piece in CP_pieces if len(self.legal_moves(self.find_correct_space(a_piece))) > 0]
            print("Available pieces:", CP_pieces)
            print("Unblocked_pieces:", unblocked_pieces)
            if len(unblocked_pieces) == 0:
                print(CP_name, "has no available pieces. All pieces are blocked")
                return False
            piece = random.choice(unblocked_pieces)
            piece_index = self.find_correct_space(piece)
        dots = self.board[int(piece_index[1])][int(piece_index[2])]["dots"]
        legal_spaces = self.legal_moves(piece_index)
        selected_move = random.choice(legal_spaces)
        selected_move_index = self.find_correct_space(selected_move)
        print("Selected piece from list:", piece)
        print("Selected piece has index:", piece_index)
        print("Piece at index {} moves {} spaces".format(piece_index, dots))
        print("Legal spaces:", legal_spaces)
        print("Selected move from list:", selected_move)
        print("Selected move has index:", selected_move_index)
        x0 = int(piece_index[1])
        y0 = int(piece_index[2])
        x1 = int(selected_move_index[1])
        y1 = int(selected_move_index[2])
        # Move the piece: clear the source, mark the destination owner.
        self.board[x0][y0]["has_piece"] = False
        self.board[x1][y1]["has_piece"] = CP_name
    def take_turn(self, CP_pieces, CP_name):
        """Execute turn for player through user's choice.

        Prompts until an unblocked piece and a legal destination are
        chosen.  A piece in the centre ("i33") must move.  Returns False
        when no piece can move (in HARD_MODE, picking a single blocked
        piece already forfeits the turn); otherwise mutates the board.
        """
        center_name = "i33"
        legal_spaces = []
        available_pieces = CP_pieces.copy()
        if center_name in CP_pieces:
            piece = center_name
            piece_index = center_name
            dots = self.board[int(piece_index[1])][int(piece_index[2])]["dots"] # "C"
            legal_spaces = self.legal_moves(piece_index)
            print(CP_name, "has a piece in the center...")
        else:
            # Loop until the chosen piece has at least one legal move.
            while len(legal_spaces) == 0:
                print("Available pieces:", available_pieces)
                selected_piece = input("These are the available pieces for {}... {}:\n==> ".format(CP_name, available_pieces))
                # Re-prompt until the input is a valid list position.
                while not selected_piece.isdigit() or int(selected_piece) >= len(available_pieces):
                    selected_piece = input("These are the available pieces for {}... {}:\n==> ".format(CP_name, available_pieces))
                piece = available_pieces[int(selected_piece)]
                piece_index = self.find_correct_space(piece)
                dots = self.board[int(piece_index[1])][int(piece_index[2])]["dots"]
                legal_spaces = self.legal_moves(piece_index)
                print("Piece at index {} moves {} spaces".format(piece_index, dots))
                print("Legal spaces:", legal_spaces)
                if len(legal_spaces) == 0:
                    print("Selected piece is blocked")
                    if HARD_MODE:
                        # Hard mode: a blocked choice forfeits the turn.
                        return False
                    available_pieces.remove(piece)
                    if len(available_pieces) == 0:
                        print(CP_name, "has no available pieces; all pieces are blocked")
                        return False
        selected_legal_space = input("These are the available moves for piece {}... {}:\n==> ".format(piece, legal_spaces))
        # Re-prompt until the input is a valid list position.
        while not selected_legal_space.isdigit() or int(selected_legal_space) >= len(legal_spaces):
            selected_legal_space = input("These are the available moves for piece {}... {}:\n==> ".format(piece, legal_spaces))
        selected_move = legal_spaces[int(selected_legal_space)]
        selected_move_index = self.find_correct_space(selected_move)
        print("Selected piece from list:", piece)
        print("Selected piece has index:", piece_index)
        print("Piece at index {} moves {} spaces".format(piece_index, dots))
        print("Legal spaces:", legal_spaces)
        print("Selected move from list:", selected_move)
        print("Selected move has index:", selected_move_index)
        x0 = int(piece_index[1])
        y0 = int(piece_index[2])
        x1 = int(selected_move_index[1])
        y1 = int(selected_move_index[2])
        # Move the piece: clear the source, mark the destination owner.
        self.board[x0][y0]["has_piece"] = False
        self.board[x1][y1]["has_piece"] = CP_name
class Players(object):
    # Tracks each player's pieces and whether they are still in the game.
    def __init__(self, player_count):
        """
        Constructor for Player class.
        Parameters:
            player_count: the number of players playing the game int in range(2, 19).
        """
        self.player_count = player_count
        self.players = {}
        for number in range(1, player_count + 1):
            self.players["P{}".format(number)] = {"pieces": [], "is_active": True}
    def get_active_players(self):
        """Update active players."""
        active = []
        for name in self.players:
            if self.players[name]["is_active"]:
                active.append(name)
        return active
    def update_players(self, board):
        """Update player object."""
        for name in self.get_active_players():
            pieces = []
            for space in flatten(board):
                if space["has_piece"] == name and not space["is_hole"]:
                    pieces.append(space["name"])
            self.players[name]["pieces"] = pieces
    def update_player_status(self, board):
        """Update player object when moving down boards, removing eliminated players."""
        for name in self.get_active_players():
            pieces = []
            for space in flatten(board):
                if space["has_piece"] == name:
                    pieces.append(space["name"])
            self.players[name]["pieces"] = pieces
            if not pieces:
                self.players[name]["is_active"] = False
    def remove_inactive_players(self, starting_spaces_length):
        """Remove players when the number of players is greater than the number of starting spaces."""
        if self.player_count > starting_spaces_length:
            for name in self.players:
                if int(name[1:]) > starting_spaces_length:
                    self.players[name]["is_active"] = False
class NewGame(object):
def __init__(self, top_board, player_count, random_gameplay):
"""
Constructor for NewGame class.
Parameters:
board: the top board for gameplay (B4, B3, B2, B1).
player_count: the number of players playing the game int in range(2, 19).
random_gameplay: will gameplay will be executed through random choice or user input.
"""
self.board = top_board
self.board_array = [B4, B3, B2, B1][[B4, B3, B2, B1].index(self.board):] # List of boards being used
self.player_count = player_count
self.players = Players(self.player_count)
self.random_gameplay = random_gameplay
self.turn = 1
self.winner = False
def configure_boards(self):
"""Rotate boards in board_array, then add sub_dots."""
for i in range(1, len(self.board_array)):
r = random.randint(0, 5)
self.board_array[i] = Actions(self.board_array[i]).rotate(r)
upper_board = self.board_array[i-1]
lower_board = self.board_array[i]
for x in range(len(upper_board)):
for y in range(len(upper_board[x])):
if upper_board[x][y]["is_hole"]:
upper_board[x][y]["sub_dots"] = lower_board[x][y]["dots"]
def get_starting_spaces(self):
"""Get starting spaces and determine pieces per player."""
if self.board == B4:
starting_spaces = [space["name"] for space in flatten(self.board) if space["starting_space"]]
random.shuffle(starting_spaces)
equal_spaces = (18//self.player_count)*self.player_count
starting_spaces = starting_spaces[:equal_spaces]
else:
r = random.randint(0, 5)
upper_board = B2 if self.board == B1 else (B3 if self.board == B2 else B4)
flat_upper_board = flatten(Actions(upper_board).rotate(r))
flat_starting_board = flatten(self.board)
starting_spaces = [starting_board["name"] for starting_board,upper_board in zip(flat_starting_board, flat_upper_board) if upper_board["is_hole"]]
random.shuffle(starting_spaces)
self.starting_spaces_length | |
obj[3]<=3:
# {"feature": "Bar", "instances": 438, "metric_value": 0.9956, "depth": 6}
if obj[5]>-1.0:
# {"feature": "Restaurant20to50", "instances": 435, "metric_value": 0.9963, "depth": 7}
if obj[6]>-1.0:
# {"feature": "Occupation", "instances": 432, "metric_value": 0.997, "depth": 8}
if obj[4]<=7.599537037037037:
# {"feature": "Direction_same", "instances": 275, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[4]>7.599537037037037:
# {"feature": "Direction_same", "instances": 157, "metric_value": 0.9786, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[6]<=-1.0:
return 'False'
else: return 'False'
elif obj[5]<=-1.0:
return 'False'
else: return 'False'
elif obj[3]>3:
# {"feature": "Bar", "instances": 42, "metric_value": 0.8926, "depth": 6}
if obj[5]<=2.0:
# {"feature": "Occupation", "instances": 35, "metric_value": 0.7755, "depth": 7}
if obj[4]<=11:
# {"feature": "Restaurant20to50", "instances": 29, "metric_value": 0.8498, "depth": 8}
if obj[6]<=2.0:
# {"feature": "Direction_same", "instances": 26, "metric_value": 0.8905, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[6]>2.0:
return 'True'
else: return 'True'
elif obj[4]>11:
return 'True'
else: return 'True'
elif obj[5]>2.0:
# {"feature": "Occupation", "instances": 7, "metric_value": 0.8631, "depth": 7}
if obj[4]<=1:
# {"feature": "Restaurant20to50", "instances": 6, "metric_value": 0.65, "depth": 8}
if obj[6]<=0.0:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[6]>0.0:
return 'False'
else: return 'False'
elif obj[4]>1:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Education", "instances": 81, "metric_value": 0.7412, "depth": 5}
if obj[3]<=2:
# {"feature": "Occupation", "instances": 57, "metric_value": 0.8564, "depth": 6}
if obj[4]<=19:
# {"feature": "Restaurant20to50", "instances": 52, "metric_value": 0.8905, "depth": 7}
if obj[6]>0.0:
# {"feature": "Bar", "instances": 37, "metric_value": 0.9569, "depth": 8}
if obj[5]>0.0:
# {"feature": "Direction_same", "instances": 26, "metric_value": 0.8404, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[5]<=0.0:
# {"feature": "Direction_same", "instances": 11, "metric_value": 0.9457, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Bar", "instances": 15, "metric_value": 0.5665, "depth": 8}
if obj[5]<=3.0:
# {"feature": "Direction_same", "instances": 14, "metric_value": 0.3712, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[5]>3.0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[4]>19:
return 'False'
else: return 'False'
elif obj[3]>2:
# {"feature": "Occupation", "instances": 24, "metric_value": 0.2499, "depth": 6}
if obj[4]<=16:
return 'False'
elif obj[4]>16:
# {"feature": "Bar", "instances": 3, "metric_value": 0.9183, "depth": 7}
if obj[5]<=0.0:
# {"feature": "Restaurant20to50", "instances": 2, "metric_value": 1.0, "depth": 8}
if obj[6]<=0.0:
return 'False'
elif obj[6]>0.0:
return 'True'
else: return 'True'
elif obj[5]>0.0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[0]<=0:
# {"feature": "Bar", "instances": 20, "metric_value": 0.6098, "depth": 4}
if obj[5]>0.0:
# {"feature": "Occupation", "instances": 12, "metric_value": 0.8113, "depth": 5}
if obj[4]>1:
# {"feature": "Education", "instances": 9, "metric_value": 0.5033, "depth": 6}
if obj[3]<=0:
# {"feature": "Restaurant20to50", "instances": 5, "metric_value": 0.7219, "depth": 7}
if obj[6]<=1.0:
return 'True'
elif obj[6]>1.0:
# {"feature": "Time", "instances": 2, "metric_value": 1.0, "depth": 8}
if obj[1]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>0:
return 'True'
else: return 'True'
elif obj[4]<=1:
# {"feature": "Education", "instances": 3, "metric_value": 0.9183, "depth": 6}
if obj[3]>0:
return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[5]<=0.0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[2]<=1:
# {"feature": "Bar", "instances": 2258, "metric_value": 0.9865, "depth": 2}
if obj[5]>0.0:
# {"feature": "Time", "instances": 1296, "metric_value": 0.9972, "depth": 3}
if obj[1]>0:
# {"feature": "Passanger", "instances": 933, "metric_value": 0.9999, "depth": 4}
if obj[0]<=2:
# {"feature": "Restaurant20to50", "instances": 743, "metric_value": 0.9941, "depth": 5}
if obj[6]<=1.0:
# {"feature": "Occupation", "instances": 457, "metric_value": 0.9772, "depth": 6}
if obj[4]<=13.909237135911898:
# {"feature": "Education", "instances": 377, "metric_value": 0.9915, "depth": 7}
if obj[3]>1:
# {"feature": "Distance", "instances": 211, "metric_value": 0.9698, "depth": 8}
if obj[8]>1:
# {"feature": "Direction_same", "instances": 138, "metric_value": 0.9903, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[8]<=1:
# {"feature": "Direction_same", "instances": 73, "metric_value": 0.8989, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]<=1:
# {"feature": "Distance", "instances": 166, "metric_value": 0.9999, "depth": 8}
if obj[8]<=2:
# {"feature": "Direction_same", "instances": 121, "metric_value": 0.996, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[8]>2:
# {"feature": "Direction_same", "instances": 45, "metric_value": 0.9565, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[4]>13.909237135911898:
# {"feature": "Distance", "instances": 80, "metric_value": 0.8113, "depth": 7}
if obj[8]<=2:
# {"feature": "Education", "instances": 60, "metric_value": 0.6873, "depth": 8}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 56, "metric_value": 0.7147, "depth": 9}
if obj[7]<=0:
return 'False'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[3]>3:
return 'False'
else: return 'False'
elif obj[8]>2:
# {"feature": "Education", "instances": 20, "metric_value": 0.9928, "depth": 8}
if obj[3]>0:
# {"feature": "Direction_same", "instances": 12, "metric_value": 0.9799, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[3]<=0:
# {"feature": "Direction_same", "instances": 8, "metric_value": 1.0, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
elif obj[6]>1.0:
# {"feature": "Education", "instances": 286, "metric_value": 0.9983, "depth": 6}
if obj[3]<=3:
# {"feature": "Occupation", "instances": 261, "metric_value": 0.9995, "depth": 7}
if obj[4]>0:
# {"feature": "Direction_same", "instances": 260, "metric_value": 0.9996, "depth": 8}
if obj[7]<=0:
# {"feature": "Distance", "instances": 225, "metric_value": 1.0, "depth": 9}
if obj[8]>1:
return 'False'
elif obj[8]<=1:
return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Distance", "instances": 35, "metric_value": 0.971, "depth": 9}
if obj[8]>1:
return 'True'
elif obj[8]<=1:
return 'False'
else: return 'False'
else: return 'True'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[3]>3:
# {"feature": "Direction_same", "instances": 25, "metric_value": 0.9427, "depth": 7}
if obj[7]<=0:
# {"feature": "Occupation", "instances": 20, "metric_value": 0.8813, "depth": 8}
if obj[4]>1:
# {"feature": "Distance", "instances": 11, "metric_value": 0.994, "depth": 9}
if obj[8]<=2:
return 'True'
elif obj[8]>2:
return 'False'
else: return 'False'
elif obj[4]<=1:
# {"feature": "Distance", "instances": 9, "metric_value": 0.5033, "depth": 9}
if obj[8]>1:
return 'True'
elif obj[8]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[7]>0:
# {"feature": "Distance", "instances": 5, "metric_value": 0.971, "depth": 8}
if obj[8]>1:
# {"feature": "Occupation", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[4]>1:
return 'True'
elif obj[4]<=1:
return 'True'
else: return 'True'
elif obj[8]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[0]>2:
# {"feature": "Occupation", "instances": 190, "metric_value": 0.9364, "depth": 5}
if obj[4]<=19.70715618872958:
# {"feature": "Restaurant20to50", "instances": 179, "metric_value": 0.9539, "depth": 6}
if obj[6]>0.0:
# {"feature": "Education", "instances": 160, "metric_value": 0.9395, "depth": 7}
if obj[3]<=4:
# {"feature": "Distance", "instances": 158, "metric_value": 0.9433, "depth": 8}
if obj[8]>1:
# {"feature": "Direction_same", "instances": 134, "metric_value": 0.953, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[8]<=1:
# {"feature": "Direction_same", "instances": 24, "metric_value": 0.8709, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>4:
return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Education", "instances": 19, "metric_value": 0.998, "depth": 7}
if obj[3]<=2:
# {"feature": "Distance", "instances": 14, "metric_value": 0.9403, "depth": 8}
if obj[8]>1:
# {"feature": "Direction_same", "instances": 13, "metric_value": 0.9612, "depth": 9}
if obj[7]<=0:
return 'False'
else: return 'False'
elif obj[8]<=1:
return 'False'
else: return 'False'
elif obj[3]>2:
# {"feature": "Distance", "instances": 5, "metric_value": 0.7219, "depth": 8}
if obj[8]<=1:
# {"feature": "Direction_same", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[8]>1:
return 'True'
else: return 'True'
else: return 'True'
else: return 'False'
elif obj[4]>19.70715618872958:
return 'True'
else: return 'True'
else: return 'True'
elif obj[1]<=0:
# {"feature": "Passanger", "instances": 363, "metric_value": 0.9542, "depth": 4}
if obj[0]<=1:
# {"feature": "Distance", "instances": 333, "metric_value": 0.9298, "depth": 5}
if obj[8]<=1:
# {"feature": "Restaurant20to50", "instances": 170, "metric_value": 0.8338, "depth": 6}
if obj[6]<=1.0:
# {"feature": "Education", "instances": 102, "metric_value": 0.9183, "depth": 7}
if obj[3]<=3:
# {"feature": "Occupation", "instances": 98, "metric_value": 0.9313, "depth": 8}
if obj[4]>2.1406480683313776:
# {"feature": "Direction_same", "instances": 78, "metric_value": 0.952, "depth": 9}
if obj[7]<=1:
return 'True'
else: return 'True'
elif obj[4]<=2.1406480683313776:
# {"feature": "Direction_same", "instances": 20, "metric_value": 0.8113, "depth": 9}
if obj[7]<=1:
return 'True'
else: return 'True'
else: return 'True'
elif obj[3]>3:
return 'True'
else: return 'True'
elif obj[6]>1.0:
# {"feature": "Education", "instances": 68, "metric_value": 0.6385, "depth": 7}
if obj[3]>1:
# {"feature": "Occupation", "instances": 40, "metric_value": 0.469, "depth": 8}
if obj[4]>8:
# {"feature": "Direction_same", "instances": 22, "metric_value": 0.684, "depth": 9}
if obj[7]<=1:
return 'True'
else: return 'True'
elif obj[4]<=8:
return 'True'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Occupation", "instances": 28, "metric_value": 0.8113, "depth": 8}
if obj[4]>5:
# {"feature": "Direction_same", "instances": 21, "metric_value": 0.4537, "depth": 9}
if obj[7]<=1:
return 'True'
else: return 'True'
elif obj[4]<=5:
# {"feature": "Direction_same", "instances": 7, "metric_value": 0.8631, "depth": 9}
if obj[7]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
else: return 'True'
elif obj[8]>1:
# {"feature": "Restaurant20to50", "instances": 163, "metric_value": 0.9856, "depth": 6}
if obj[6]>0.0:
# {"feature": "Education", "instances": 140, "metric_value": 0.9666, "depth": 7}
if obj[3]<=3:
# {"feature": "Direction_same", "instances": 130, "metric_value": 0.9792, "depth": 8}
if obj[7]<=0:
# {"feature": "Occupation", "instances": 128, "metric_value": 0.9745, "depth": 9}
if obj[4]<=13.844008971972023:
return 'True'
elif obj[4]>13.844008971972023:
return 'True'
else: return 'True'
elif obj[7]>0:
return 'False'
else: return 'False'
elif obj[3]>3:
# {"feature": "Occupation", "instances": 10, "metric_value": 0.469, "depth": 8}
if obj[4]<=2:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.7219, "depth": 9}
if obj[7]<=0:
return 'True'
else: return 'True'
elif obj[4]>2:
return 'True'
else: return 'True'
else: return 'True'
elif obj[6]<=0.0:
# {"feature": "Occupation", "instances": 23, "metric_value": 0.9321, "depth": 7}
if obj[4]<=21:
# {"feature": "Direction_same", "instances": 22, "metric_value": 0.9024, "depth": 8}
if obj[7]<=0:
# {"feature": "Education", "instances": 21, "metric_value": 0.8631, "depth": 9}
if obj[3]<=3:
return 'False'
elif obj[3]>3:
return 'True'
else: return 'True'
elif obj[7]>0:
return 'True'
else: return 'True'
elif obj[4]>21:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[0]>1:
# {"feature": "Occupation", "instances": 30, "metric_value": 0.8813, "depth": 5}
if obj[4]<=14:
# {"feature": "Restaurant20to50", "instances": 23, "metric_value": 0.9656, "depth": 6}
if obj[6]>0.0:
# {"feature": "Education", "instances": 21, "metric_value": 0.9183, "depth": 7}
if obj[3]<=2:
# {"feature": "Direction_same", "instances": 16, "metric_value": 0.9887, "depth": 8}
if obj[7]<=0:
# {"feature": "Distance", "instances": 13, "metric_value": 0.9957, "depth": 9}
if obj[8]>1:
return 'False'
elif obj[8]<=1:
return 'False'
else: return 'False'
elif obj[7]>0:
# {"feature": "Distance", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[8]<=2:
return 'False'
else: return 'False'
else: return 'False'
elif obj[3]>2:
return 'False'
else: return 'False'
elif obj[6]<=0.0:
return 'True'
else: return 'True'
elif obj[4]>14:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[5]<=0.0:
# {"feature": "Restaurant20to50", "instances": 962, "metric_value": 0.8792, "depth": 3}
if obj[6]<=2.0:
# {"feature": "Distance", "instances": 914, "metric_value": 0.8629, "depth": 4}
if obj[8]<=2:
# {"feature": "Time", "instances": 746, "metric_value": 0.8912, "depth": 5}
if obj[1]<=2:
# {"feature": "Education", "instances": 406, "metric_value": 0.9294, "depth": 6}
if obj[3]<=4:
# {"feature": | |
<filename>tests/visualization_tests/test_pareto_front.py
import datetime
import itertools
from textwrap import dedent
from typing import Callable
from typing import List
from typing import Optional
from typing import Sequence
import numpy as np
import pytest
import optuna
from optuna.distributions import FloatDistribution
from optuna.testing.visualization import prepare_study_with_trials
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.visualization import plot_pareto_front
from optuna.visualization._pareto_front import _make_hovertext
from optuna.visualization._plotly_imports import go
from optuna.visualization._utils import COLOR_SCALE
def _check_data(figure: "go.Figure", axis: str, expected: Sequence[int]) -> None:
"""Compare `figure` against `expected`.
Concatenate `data` in `figure` in reverse order, pick the desired `axis`, and compare with
the `expected` result.
Args:
figure: A figure.
axis: The axis to be checked.
expected: The expected result.
"""
n_data = len(figure.data)
actual = tuple(
itertools.chain(*list(map(lambda i: figure.data[i][axis], reversed(range(n_data)))))
)
assert actual == expected
@pytest.mark.parametrize("include_dominated_trials", [False, True])
@pytest.mark.parametrize("use_constraints_func", [False, True])
@pytest.mark.parametrize("axis_order", [None, [0, 1], [1, 0]])
@pytest.mark.parametrize("targets", [None, lambda t: (t.values[0], t.values[1])])
def test_plot_pareto_front_2d(
    include_dominated_trials: bool,
    use_constraints_func: bool,
    axis_order: Optional[List[int]],
    targets: Optional[Callable[[FrozenTrial], Sequence[float]]],
) -> None:
    """Exercise ``plot_pareto_front`` on a two-objective study.

    Checks the empty-study figure, the plotted coordinates for four enqueued
    trials (optionally with a ``constraints_func`` that marks the trial with
    (x, y) == (1, 0) infeasible), and validation of the ``target_names``
    argument length.
    """
    # `axis_order` and `targets` are mutually exclusive options of
    # `plot_pareto_front`, so this combination is not tested here.
    if axis_order is not None and targets is not None:
        pytest.skip("skip using both axis_order and targets")
    # Test with no trial.
    study = optuna.create_study(directions=["minimize", "minimize"])
    figure = plot_pareto_front(
        study=study,
        include_dominated_trials=include_dominated_trials,
        axis_order=axis_order,
    )
    assert len(figure.data) == 2
    assert (figure.data[1]["x"] + figure.data[0]["x"]) == ()
    assert (figure.data[1]["y"] + figure.data[0]["y"]) == ()
    # Test with four trials.
    study.enqueue_trial({"x": 1, "y": 2})
    study.enqueue_trial({"x": 1, "y": 1})
    study.enqueue_trial({"x": 0, "y": 2})
    study.enqueue_trial({"x": 1, "y": 0})
    study.optimize(lambda t: [t.suggest_int("x", 0, 2), t.suggest_int("y", 0, 2)], n_trials=4)
    # Optional constraints function exercised by this parametrization.
    constraints_func: Optional[Callable[[FrozenTrial], Sequence[float]]]
    if use_constraints_func:
        # (x, y) = (1, 0) is infeasible; others are feasible.
        def constraints_func(t: FrozenTrial) -> Sequence[float]:
            return [1.0] if t.params["x"] == 1 and t.params["y"] == 0 else [-1.0]
    else:
        constraints_func = None
    figure = plot_pareto_front(
        study=study,
        include_dominated_trials=include_dominated_trials,
        axis_order=axis_order,
        constraints_func=constraints_func,
        targets=targets,
    )
    actual_axis_order = axis_order or [0, 1]
    # `data[i]` below is the expected coordinate tuple for objective i, in the
    # order `_check_data` reads the traces (last trace first).
    if use_constraints_func:
        assert len(figure.data) == 3
        if include_dominated_trials:
            # The enqueue order of trial is: infeasible, feasible non-best, then feasible best.
            data = [(1, 0, 1, 1), (1, 2, 2, 0)]  # type: ignore
        else:
            # The enqueue order of trial is: infeasible, feasible.
            data = [(1, 0, 1), (1, 2, 0)]  # type: ignore
    else:
        assert len(figure.data) == 2
        if include_dominated_trials:
            # The last elements come from dominated trial that is enqueued firstly.
            data = [(0, 1, 1, 1), (2, 0, 2, 1)]  # type: ignore
        else:
            data = [(0, 1), (2, 0)]  # type: ignore
    _check_data(figure, "x", data[actual_axis_order[0]])
    _check_data(figure, "y", data[actual_axis_order[1]])
    titles = ["Objective {}".format(i) for i in range(2)]
    assert figure.layout.xaxis.title.text == titles[actual_axis_order[0]]
    assert figure.layout.yaxis.title.text == titles[actual_axis_order[1]]
    # Test with `target_names` argument.
    error_message = "The length of `target_names` is supposed to be 2."
    with pytest.raises(ValueError, match=error_message):
        plot_pareto_front(
            study=study,
            target_names=[],
            include_dominated_trials=include_dominated_trials,
            targets=targets,
        )
    with pytest.raises(ValueError, match=error_message):
        plot_pareto_front(
            study=study,
            target_names=["Foo"],
            include_dominated_trials=include_dominated_trials,
            targets=targets,
        )
    with pytest.raises(ValueError, match=error_message):
        plot_pareto_front(
            study=study,
            target_names=["Foo", "Bar", "Baz"],
            include_dominated_trials=include_dominated_trials,
            axis_order=axis_order,
            targets=targets,
        )
    # A correctly sized `target_names` list should become the axis titles.
    target_names = ["Foo", "Bar"]
    figure = plot_pareto_front(
        study=study,
        target_names=target_names,
        include_dominated_trials=include_dominated_trials,
        axis_order=axis_order,
        constraints_func=constraints_func,
        targets=targets,
    )
    assert figure.layout.xaxis.title.text == target_names[actual_axis_order[0]]
    assert figure.layout.yaxis.title.text == target_names[actual_axis_order[1]]
@pytest.mark.parametrize("include_dominated_trials", [False, True])
@pytest.mark.parametrize("use_constraints_func", [False, True])
@pytest.mark.parametrize(
    "axis_order", [None] + list(itertools.permutations(range(3), 3))  # type: ignore
)
@pytest.mark.parametrize("targets", [None, lambda t: (t.values[0], t.values[1], t.values[2])])
def test_plot_pareto_front_3d(
    include_dominated_trials: bool,
    use_constraints_func: bool,
    axis_order: Optional[List[int]],
    targets: Optional[Callable[[FrozenTrial], Sequence[float]]],
) -> None:
    """Exercise ``plot_pareto_front`` on a three-objective study.

    Mirrors the 2-D test: empty study, four enqueued trials (optionally with a
    ``constraints_func`` that marks (x, y, z) == (1, 1, 0) infeasible), and
    validation of the ``target_names`` argument length.
    """
    # `axis_order` and `targets` are mutually exclusive options of
    # `plot_pareto_front`, so this combination is not tested here.
    if axis_order is not None and targets is not None:
        pytest.skip("skip using both axis_order and targets")
    # Test with no trial.
    study = optuna.create_study(directions=["minimize", "minimize", "minimize"])
    figure = plot_pareto_front(
        study=study,
        include_dominated_trials=include_dominated_trials,
        axis_order=axis_order,
    )
    assert len(figure.data) == 2
    assert (figure.data[1]["x"] + figure.data[0]["x"]) == ()
    assert (figure.data[1]["y"] + figure.data[0]["y"]) == ()
    assert (figure.data[1]["z"] + figure.data[0]["z"]) == ()
    # Test with four trials.
    study.enqueue_trial({"x": 1, "y": 1, "z": 2})
    study.enqueue_trial({"x": 1, "y": 1, "z": 1})
    study.enqueue_trial({"x": 1, "y": 0, "z": 2})
    study.enqueue_trial({"x": 1, "y": 1, "z": 0})
    study.optimize(
        lambda t: [t.suggest_int("x", 0, 1), t.suggest_int("y", 0, 2), t.suggest_int("z", 0, 2)],
        n_trials=4,
    )
    # Optional constraints function exercised by this parametrization.
    constraints_func: Optional[Callable[[FrozenTrial], Sequence[float]]]
    if use_constraints_func:
        # (x, y, z) = (1, 1, 0) is infeasible; others are feasible.
        def constraints_func(t: FrozenTrial) -> Sequence[float]:
            return (
                [1.0]
                if t.params["x"] == 1 and t.params["y"] == 1 and t.params["z"] == 0
                else [-1.0]
            )
    else:
        constraints_func = None
    figure = plot_pareto_front(
        study=study,
        include_dominated_trials=include_dominated_trials,
        axis_order=axis_order,
        constraints_func=constraints_func,
        targets=targets,
    )
    actual_axis_order = axis_order or [0, 1, 2]
    # `data[i]` below is the expected coordinate tuple for objective i, in the
    # order `_check_data` reads the traces (last trace first).
    if use_constraints_func:
        assert len(figure.data) == 3
        if include_dominated_trials:
            # The enqueue order of trial is: infeasible, feasible non-best, then feasible best.
            data = [(1, 1, 1, 1), (1, 0, 1, 1), (1, 2, 2, 0)]  # type: ignore
        else:
            # The enqueue order of trial is: infeasible, feasible.
            data = [(1, 1, 1), (1, 0, 1), (1, 2, 0)]  # type: ignore
    else:
        assert len(figure.data) == 2
        if include_dominated_trials:
            # The last elements come from dominated trial that is enqueued firstly.
            data = [(1, 1, 1, 1), (0, 1, 1, 1), (2, 0, 2, 1)]  # type: ignore
        else:
            data = [(1, 1), (0, 1), (2, 0)]  # type: ignore
    _check_data(figure, "x", data[actual_axis_order[0]])
    _check_data(figure, "y", data[actual_axis_order[1]])
    _check_data(figure, "z", data[actual_axis_order[2]])
    titles = ["Objective {}".format(i) for i in range(3)]
    assert figure.layout.scene.xaxis.title.text == titles[actual_axis_order[0]]
    assert figure.layout.scene.yaxis.title.text == titles[actual_axis_order[1]]
    assert figure.layout.scene.zaxis.title.text == titles[actual_axis_order[2]]
    # Test with `target_names` argument.
    error_message = "The length of `target_names` is supposed to be 3."
    with pytest.raises(ValueError, match=error_message):
        plot_pareto_front(
            study=study,
            target_names=[],
            include_dominated_trials=include_dominated_trials,
            axis_order=axis_order,
            targets=targets,
        )
    with pytest.raises(ValueError, match=error_message):
        plot_pareto_front(
            study=study,
            target_names=["Foo"],
            include_dominated_trials=include_dominated_trials,
            axis_order=axis_order,
            targets=targets,
        )
    with pytest.raises(ValueError, match=error_message):
        plot_pareto_front(
            study=study,
            target_names=["Foo", "Bar"],
            include_dominated_trials=include_dominated_trials,
            axis_order=axis_order,
            targets=targets,
        )
    with pytest.raises(ValueError, match=error_message):
        plot_pareto_front(
            study=study,
            target_names=["Foo", "Bar", "Baz", "Qux"],
            include_dominated_trials=include_dominated_trials,
            axis_order=axis_order,
            targets=targets,
        )
    # A correctly sized `target_names` list should become the axis titles.
    target_names = ["Foo", "Bar", "Baz"]
    figure = plot_pareto_front(study=study, target_names=target_names, axis_order=axis_order)
    assert figure.layout.scene.xaxis.title.text == target_names[actual_axis_order[0]]
    assert figure.layout.scene.yaxis.title.text == target_names[actual_axis_order[1]]
    assert figure.layout.scene.zaxis.title.text == target_names[actual_axis_order[2]]
@pytest.mark.parametrize("include_dominated_trials", [False, True])
@pytest.mark.parametrize("use_constraints_func", [False, True])
def test_plot_pareto_front_unsupported_dimensions(
    include_dominated_trials: bool, use_constraints_func: bool
) -> None:
    """Studies with 1 or 4 objectives must be rejected when `targets` is None."""
    constraints_func = (lambda _: [-1.0]) if use_constraints_func else None
    expected_msg = (
        "`plot_pareto_front` function only supports 2 or 3 objective"
        " studies when using `targets` is `None`. Please use `targets`"
        " if your objective studies have more than 3 objectives."
    )

    def _call(target_study) -> None:
        # Single call site for the function under test, with the
        # parametrized options applied uniformly.
        plot_pareto_front(
            study=target_study,
            include_dominated_trials=include_dominated_trials,
            constraints_func=constraints_func,
        )

    # Unsupported: n_objectives == 1 (multi-objective API).
    with pytest.raises(ValueError, match=expected_msg):
        study = optuna.create_study(directions=["minimize"])
        study.optimize(lambda _: [0], n_trials=1)
        _call(study)
    # Unsupported: n_objectives == 1 (single-objective API).
    with pytest.raises(ValueError, match=expected_msg):
        study = optuna.create_study(direction="minimize")
        study.optimize(lambda _: [0], n_trials=1)
        _call(study)
    # Unsupported: n_objectives == 4.
    with pytest.raises(ValueError, match=expected_msg):
        study = optuna.create_study(directions=["minimize", "minimize", "minimize", "minimize"])
        study.optimize(lambda _: [0, 0, 0, 0], n_trials=1)
        _call(study)
@pytest.mark.parametrize("dimension", [2, 3])
@pytest.mark.parametrize("include_dominated_trials", [False, True])
@pytest.mark.parametrize("use_constraints_func", [False, True])
def test_plot_pareto_front_invalid_axis_order(
    dimension: int, include_dominated_trials: bool, use_constraints_func: bool
) -> None:
    """Every malformed `axis_order` value must raise ValueError."""
    study = optuna.create_study(directions=["minimize"] * dimension)
    constraints_func = (lambda _: [-1.0]) if use_constraints_func else None

    def _plot(bad_order) -> None:
        # Single call site for the function under test; reads the current
        # binding of `study` at call time.
        plot_pareto_front(
            study=study,
            include_dominated_trials=include_dominated_trials,
            axis_order=bad_order,
            constraints_func=constraints_func,
        )

    # Invalid: len(axis_order) != dimension
    with pytest.raises(ValueError):
        bad_order = list(range(dimension + 1))
        assert len(bad_order) != dimension
        _plot(bad_order)
    # Invalid: np.unique(axis_order).size != dimension
    with pytest.raises(ValueError):
        bad_order = list(range(dimension))
        bad_order[1] = bad_order[0]
        assert np.unique(bad_order).size != dimension
        _plot(bad_order)
    # Invalid: max(axis_order) > (dimension - 1)
    with pytest.raises(ValueError):
        bad_order = list(range(dimension))
        bad_order[-1] += 1
        assert max(bad_order) > (dimension - 1)
        _plot(bad_order)
    # Invalid: min(axis_order) < 0
    with pytest.raises(ValueError):
        study = optuna.create_study(directions=["minimize", "minimize"])
        bad_order = list(range(dimension))
        bad_order[0] -= 1
        assert min(bad_order) < 0
        _plot(bad_order)
def test_plot_pareto_front_targets_without_target_names() -> None:
    """Supplying `targets` for an empty study without `target_names` is an error."""
    study = optuna.create_study(directions=["minimize", "minimize", "minimize"])
    expected = "If `targets` is specified for empty studies, `target_names` must be specified."
    with pytest.raises(ValueError, match=expected):
        plot_pareto_front(
            study=study,
            target_names=None,
            targets=lambda t: (t.values[0], t.values[1], t.values[2]),
        )
def test_plot_pareto_front_invalid_target_values() -> None:
    """A `targets` callable returning a scalar (not a sequence) is rejected."""
    study = optuna.create_study(directions=["minimize", "minimize", "minimize", "minimize"])
    study.optimize(lambda _: [0, 0, 0, 0], n_trials=3)
    expected = (
        "targets` should return a sequence of target values. your `targets`"
        " returns <class 'float'>"
    )
    with pytest.raises(ValueError, match=expected):
        plot_pareto_front(
            study=study,
            targets=lambda t: t.values[0],
        )
@pytest.mark.parametrize(
    "targets",
    [
        lambda t: (t.values[0],),
        lambda t: (t.values[0], t.values[1], t.values[2], t.values[3]),
    ],
)
def test_plot_pareto_front_n_targets_unsupported(
    targets: Callable[[FrozenTrial], Sequence[float]]
) -> None:
    """A `targets` callable yielding 1 or 4 values must raise ValueError."""
    study = optuna.create_study(directions=["minimize", "minimize", "minimize", "minimize"])
    study.optimize(lambda _: [0, 0, 0, 0], n_trials=3)
    # Derive the target count the same way the plot function would see it.
    n_targets = len(targets(study.best_trials[0]))
    expected = (
        "`plot_pareto_front` function only supports 2 or 3 targets."
        " you used {} targets now.".format(n_targets)
    )
    with pytest.raises(ValueError, match=expected):
        plot_pareto_front(
            study=study,
            targets=targets,
        )
def test_plot_pareto_front_using_axis_order_and_targets() -> None:
    """Passing both `axis_order` and `targets` at once is rejected."""
    study = optuna.create_study(directions=["minimize", "minimize", "minimize"])
    expected = (
        "Using both `targets` and `axis_order` is not supported."
        " Use either `targets` or `axis_order`."
    )
    with pytest.raises(ValueError, match=expected):
        plot_pareto_front(
            study=study,
            axis_order=[0, 1, 2],
            targets=lambda t: (t.values[0], t.values[1], t.values[2]),
        )
def test_make_hovertext() -> None:
trial_no_user_attrs = FrozenTrial(
number=0,
trial_id=0,
state=TrialState.COMPLETE,
value=0.2,
datetime_start=datetime.datetime.now(),
datetime_complete=datetime.datetime.now(),
params={"x": 10},
distributions={"x": |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.