#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of fastboot_utils.py
"""
# pylint: disable=protected-access,unused-argument
import io
import logging
import unittest
from devil import devil_env
from devil.android import device_errors
from devil.android import device_utils
from devil.android import fastboot_utils
from devil.android.sdk import fastboot
from devil.utils import mock_calls
with devil_env.SysPath(devil_env.config.LocalPath('pymock')):
import mock # pylint: disable=import-error
_BOARD = 'board_type'
_SERIAL = '0123456789abcdef'
_PARTITIONS = ['cache', 'userdata', 'system', 'bootloader', 'radio']
_IMAGES = {
'cache': 'cache.img',
'userdata': 'userdata.img',
'system': 'system.img',
'bootloader': 'bootloader.img',
'radio': 'radio.img',
}
_VALID_FILES = [_BOARD + '.zip', 'android-info.txt']
_INVALID_FILES = ['test.zip', 'android-info.txt']
class MockFile(object):
def __init__(self, name='/tmp/some/file'):
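# Note: 'file' below is the Python 2 built-in file type, used only as the mock spec.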
self.file = mock.MagicMock(spec=file)
self.file.name = name
def __enter__(self):
return self.file
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def name(self):
return self.file.name
def _FastbootWrapperMock(test_serial):
fastbooter = mock.Mock(spec=fastboot.Fastboot)
fastbooter.__str__ = mock.Mock(return_value=test_serial)
fastbooter.Devices.return_value = [test_serial]
return fastbooter
def _DeviceUtilsMock(test_serial):
device = mock.Mock(spec=device_utils.DeviceUtils)
device.__str__ = mock.Mock(return_value=test_serial)
device.product_board = mock.Mock(return_value=_BOARD)
device.adb = mock.Mock()
return device
class FastbootUtilsTest(mock_calls.TestCase):
def setUp(self):
self.device_utils_mock = _DeviceUtilsMock(_SERIAL)
self.fastboot_wrapper = _FastbootWrapperMock(_SERIAL)
self.fastboot = fastboot_utils.FastbootUtils(
self.device_utils_mock, fastbooter=self.fastboot_wrapper,
default_timeout=2, default_retries=0)
self.fastboot._board = _BOARD
class FastbootUtilsInitTest(FastbootUtilsTest):
def testInitWithDeviceUtil(self):
f = fastboot_utils.FastbootUtils(self.device_utils_mock)
self.assertEqual(str(self.device_utils_mock), str(f._device))
def testInitWithMissing_fails(self):
with self.assertRaises(AttributeError):
fastboot_utils.FastbootUtils(None)
with self.assertRaises(AttributeError):
fastboot_utils.FastbootUtils('')
class FastbootUtilsWaitForFastbootMode(FastbootUtilsTest):
# If WaitForFastbootMode hangs, this test fails by timing out instead of blocking forever.
@mock.patch('time.sleep', mock.Mock())
def testWaitForFastbootMode(self):
self.fastboot.WaitForFastbootMode()
class FastbootUtilsEnableFastbootMode(FastbootUtilsTest):
def testEnableFastbootMode(self):
with self.assertCalls(
self.call.fastboot._device.EnableRoot(),
self.call.fastboot._device.adb.Reboot(to_bootloader=True),
self.call.fastboot.WaitForFastbootMode()):
self.fastboot.EnableFastbootMode()
class FastbootUtilsReboot(FastbootUtilsTest):
def testReboot_bootloader(self):
with self.assertCalls(
self.call.fastboot.fastboot.RebootBootloader(),
self.call.fastboot.WaitForFastbootMode()):
self.fastboot.Reboot(bootloader=True)
def testReboot_normal(self):
with self.assertCalls(
self.call.fastboot.fastboot.Reboot(),
self.call.fastboot._device.WaitUntilFullyBooted(timeout=mock.ANY)):
self.fastboot.Reboot()
class FastbootUtilsFlashPartitions(FastbootUtilsTest):
def testFlashPartitions_wipe(self):
with self.assertCalls(
(self.call.fastboot._VerifyBoard('test'), True),
(self.call.fastboot._FindAndVerifyPartitionsAndImages(
_PARTITIONS, 'test'), _IMAGES),
(self.call.fastboot.fastboot.Flash('cache', 'cache.img')),
(self.call.fastboot.fastboot.Flash('userdata', 'userdata.img')),
(self.call.fastboot.fastboot.Flash('system', 'system.img')),
(self.call.fastboot.fastboot.Flash('bootloader', 'bootloader.img')),
(self.call.fastboot.Reboot(bootloader=True)),
(self.call.fastboot.fastboot.Flash('radio', 'radio.img')),
(self.call.fastboot.Reboot(bootloader=True))):
self.fastboot._FlashPartitions(_PARTITIONS, 'test', wipe=True)
def testFlashPartitions_noWipe(self):
with self.assertCalls(
(self.call.fastboot._VerifyBoard('test'), True),
(self.call.fastboot._FindAndVerifyPartitionsAndImages(
_PARTITIONS, 'test'), _IMAGES),
(self.call.fastboot.fastboot.Flash('system', 'system.img')),
(self.call.fastboot.fastboot.Flash('bootloader', 'bootloader.img')),
(self.call.fastboot.Reboot(bootloader=True)),
(self.call.fastboot.fastboot.Flash('radio', 'radio.img')),
(self.call.fastboot.Reboot(bootloader=True))):
self.fastboot._FlashPartitions(_PARTITIONS, 'test')
class FastbootUtilsFastbootMode(FastbootUtilsTest):
def testFastbootMode_good(self):
with self.assertCalls(
self.call.fastboot.EnableFastbootMode(),
self.call.fastboot.fastboot.SetOemOffModeCharge(False),
self.call.fastboot.fastboot.SetOemOffModeCharge(True),
self.call.fastboot.Reboot()):
with self.fastboot.FastbootMode() as fbm:
self.assertEqual(self.fastboot, fbm)
def testFastbootMode_exception(self):
with self.assertCalls(
self.call.fastboot.EnableFastbootMode(),
self.call.fastboot.fastboot.SetOemOffModeCharge(False),
self.call.fastboot.fastboot.SetOemOffModeCharge(True),
self.call.fastboot.Reboot()):
with self.assertRaises(NotImplementedError):
with self.fastboot.FastbootMode() as fbm:
self.assertEqual(self.fastboot, fbm)
raise NotImplementedError
def testFastbootMode_exceptionInEnableFastboot(self):
self.fastboot.EnableFastbootMode = mock.Mock()
self.fastboot.EnableFastbootMode.side_effect = NotImplementedError
with self.assertRaises(NotImplementedError):
with self.fastboot.FastbootMode():
pass
class FastbootUtilsVerifyBoard(FastbootUtilsTest):
def testVerifyBoard_bothValid(self):
mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
with mock.patch('__builtin__.open', return_value=mock_file, create=True):
with mock.patch('os.listdir', return_value=_VALID_FILES):
self.assertTrue(self.fastboot._VerifyBoard('test'))
def testVerifyBoard_BothNotValid(self):
mock_file = io.StringIO(u'abc')
with mock.patch('__builtin__.open', return_value=mock_file, create=True):
with mock.patch('os.listdir', return_value=_INVALID_FILES):
self.assertFalse(self.fastboot._VerifyBoard('test'))
def testVerifyBoard_FileNotFoundZipValid(self):
with mock.patch('os.listdir', return_value=[_BOARD + '.zip']):
self.assertTrue(self.fastboot._VerifyBoard('test'))
def testVerifyBoard_ZipNotFoundFileValid(self):
mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
with mock.patch('__builtin__.open', return_value=mock_file, create=True):
with mock.patch('os.listdir', return_value=['android-info.txt']):
self.assertTrue(self.fastboot._VerifyBoard('test'))
def testVerifyBoard_zipNotValidFileIs(self):
mock_file = io.StringIO(u'require board=%s\n' % _BOARD)
with mock.patch('__builtin__.open', return_value=mock_file, create=True):
with mock.patch('os.listdir', return_value=_INVALID_FILES):
self.assertTrue(self.fastboot._VerifyBoard('test'))
def testVerifyBoard_fileNotValidZipIs(self):
mock_file = io.StringIO(u'require board=WrongBoard')
with mock.patch('__builtin__.open', return_value=mock_file, create=True):
with mock.patch('os.listdir', return_value=_VALID_FILES):
self.assertFalse(self.fastboot._VerifyBoard('test'))
def testVerifyBoard_noBoardInFileValidZip(self):
mock_file = io.StringIO(u'Regex wont match')
with mock.patch('__builtin__.open', return_value=mock_file, create=True):
with mock.patch('os.listdir', return_value=_VALID_FILES):
self.assertTrue(self.fastboot._VerifyBoard('test'))
def testVerifyBoard_noBoardInFileInvalidZip(self):
mock_file = io.StringIO(u'Regex wont match')
with mock.patch('__builtin__.open', return_value=mock_file, create=True):
with mock.patch('os.listdir', return_value=_INVALID_FILES):
self.assertFalse(self.fastboot._VerifyBoard('test'))
class FastbootUtilsFindAndVerifyPartitionsAndImages(FastbootUtilsTest):
def testFindAndVerifyPartitionsAndImages_valid(self):
PARTITIONS = [
'bootloader', 'radio', 'boot', 'recovery', 'system', 'userdata', 'cache'
]
files = [
'bootloader-test-.img',
'radio123.img',
'boot.img',
'recovery.img',
'system.img',
'userdata.img',
'cache.img'
]
return_check = {
'bootloader': 'test/bootloader-test-.img',
'radio': 'test/radio123.img',
'boot': 'test/boot.img',
'recovery': 'test/recovery.img',
'system': 'test/system.img',
'userdata': 'test/userdata.img',
'cache': 'test/cache.img',
}
with mock.patch('os.listdir', return_value=files):
return_value = self.fastboot._FindAndVerifyPartitionsAndImages(
PARTITIONS, 'test')
self.assertDictEqual(return_value, return_check)
def testFindAndVerifyPartitionsAndImages_badPartition(self):
with mock.patch('os.listdir', return_value=['test']):
with self.assertRaises(KeyError):
self.fastboot._FindAndVerifyPartitionsAndImages(['test'], 'test')
def testFindAndVerifyPartitionsAndImages_noFile(self):
with mock.patch('os.listdir', return_value=['test']):
with self.assertRaises(device_errors.FastbootCommandFailedError):
self.fastboot._FindAndVerifyPartitionsAndImages(['cache'], 'test')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
|
#!/usr/bin/python
#
# Tobii controller for PsychoPy
# authors: Hiroyuki Sogo, Horea Christian
# - Tobii SDK 3.0 is required
#
import datetime
import os
from math import degrees, atan2
# from psychopy.tools.monitorunittools import pix2deg
from psychopy.misc import pix2deg
import tobii.sdk.mainloop
import tobii.sdk.time.clock
import tobii.sdk.time.sync
import tobii.sdk.browsing
import tobii.sdk.eyetracker
import psychopy.visual
import psychopy.event
import Image
import ImageDraw
from tobii.sdk.types import Point2D
from infant_eeg.config import DATA_DIR
class TobiiController:
def __init__(self, win):
self.eyetracker = None
self.eyetrackers = {}
self.win = win
self.gazeData = []
self.eventData = []
self.datafile = None
tobii.sdk.init()
self.clock = tobii.sdk.time.clock.Clock()
self.mainloop_thread = tobii.sdk.mainloop.MainloopThread()
self.browser = tobii.sdk.browsing.EyetrackerBrowser(self.mainloop_thread,
lambda t, n, i: self.on_eyetracker_browser_event(t, n, i))
self.mainloop_thread.start()
def waitForFindEyeTracker(self):
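# Busy-wait until on_eyetracker_browser_event has registered at least one tracker.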
while len(self.eyetrackers.keys()) == 0:
pass
def on_eyetracker_browser_event(self, event_type, event_name, eyetracker_info):
# When a new eyetracker is found, add it to the internal list of
# eyetracker_info objects
if event_type == tobii.sdk.browsing.EyetrackerBrowser.FOUND:
self.eyetrackers[eyetracker_info.product_id] = eyetracker_info
return False
# Otherwise remove the tracker from the eyetracker_info list...
del self.eyetrackers[eyetracker_info.product_id]
# ...and add it again if it is an update message
if event_type == tobii.sdk.browsing.EyetrackerBrowser.UPDATED:
self.eyetrackers[eyetracker_info.product_id] = eyetracker_info
return False
def destroy(self):
self.eyetracker = None
self.browser.stop()
self.browser = None
self.mainloop_thread.stop()
############################################################################
# activation methods
############################################################################
def activate(self, eyetracker):
eyetracker_info = self.eyetrackers[eyetracker]
print "Connecting to:", eyetracker_info
tobii.sdk.eyetracker.Eyetracker.create_async(self.mainloop_thread,
eyetracker_info,
lambda error, eyetracker: self.on_eyetracker_created(error,
eyetracker,
eyetracker_info))
while self.eyetracker is None:
pass
self.syncmanager = tobii.sdk.time.sync.SyncManager(self.clock, eyetracker_info, self.mainloop_thread)
def on_eyetracker_created(self, error, eyetracker, eyetracker_info):
if error:
print " Connection to %s failed because of an exception: %s" % (eyetracker_info, error)
if error == 0x20000402:
print "The selected unit is too old, a unit which supports protocol version 1.0 is required.\n\n<b>Details:</b> <i>%s</i>" % error
else:
print "Could not connect to %s" % eyetracker_info
return False
self.eyetracker = eyetracker
############################################################################
# calibration methods
############################################################################
def calibratePoint(self, clock, last_pos, p):
clock.reset()
currentTime = clock.getTime()
x_diff = p.x - last_pos.x
y_diff = p.y - last_pos.y
angle = degrees(atan2(y_diff, x_diff)) + 90
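# Orient the rocket from the previous point toward the new one; the +90 offset presumably compensates for the image's default orientation.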
self.rocket_img.setOri(angle)
while currentTime <= 1.5:
rel_pos = Point2D()
rel_pos.x = last_pos.x + ((currentTime / 1.5) * (p.x - last_pos.x))
rel_pos.y = last_pos.y + ((currentTime / 1.5) * (p.y - last_pos.y))
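# Move the rocket linearly from last_pos to p over 1.5 s while shrinking it toward the target point.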
self.rocket_img.setPos((pix2deg((rel_pos.x - 0.5) * self.win.size[0], self.win.monitor),
pix2deg((0.5 - rel_pos.y) * self.win.size[1], self.win.monitor)))
self.rocket_img.setSize((pix2deg(110.67 * (1.5 - currentTime) + 4, self.win.monitor),
pix2deg(196 * (1.5 - currentTime) + 4, self.win.monitor)))
psychopy.event.getKeys()
self.rocket_img.draw()
self.win.flip()
currentTime = clock.getTime()
self.add_point_completed = False
self.eyetracker.AddCalibrationPoint(p, lambda error, r: self.on_add_completed(error, r))
while not self.add_point_completed:
psychopy.event.getKeys()
self.rocket_img.draw()
self.win.flip()
def doCalibration(self, calibrationPoints, calib=None):
# Can only calibrate with eyetracker
if self.eyetracker is None:
return
# Points to calibrate
self.points = calibrationPoints
self.point_index = -1
# Rocket image
self.rocket_img = psychopy.visual.ImageStim(self.win, os.path.join(DATA_DIR, 'images', 'rocket.png'))
# Results image
img = Image.new('RGB', self.win.size)
draw = ImageDraw.Draw(img)
self.calresult = psychopy.visual.SimpleImageStim(self.win, img)
# Results message
self.calresultmsg = psychopy.visual.TextStim(self.win, pos=(pix2deg(0, self.win.monitor),
pix2deg(-self.win.size[1] / 4, self.win.monitor)))
# Calibration point labels
if calib is None:
self.point_labels=[]
# Start calibration instruction
self.calresultmsg.setText('Start calibration:SPACE')
# Left eye status
self.left_eye_status = psychopy.visual.Circle(self.win, radius=pix2deg(40, self.win.monitor),
pos=(pix2deg(-50, self.win.monitor),
pix2deg(-self.win.size[1] / 3, self.win.monitor)))
# Right eye status
self.right_eye_status = psychopy.visual.Circle(self.win, radius=pix2deg(40, self.win.monitor),
pos=(pix2deg(50, self.win.monitor),
pix2deg(-self.win.size[1] / 3, self.win.monitor)))
# Reset gaze and event data and start tracking
self.gazeData = []
self.eventData = []
self.eyetracker.events.OnGazeDataReceived += self.on_gazedata
self.eyetracker.StartTracking()
# Wait until space key is hit
waitkey = True
while waitkey:
for key in psychopy.event.getKeys():
if key == 'space':
waitkey = False
self.rocket_img.draw()
self.calresultmsg.draw()
self.left_eye_status.fillColor = 'red'
self.right_eye_status.fillColor = 'red'
if len(self.gazeData):
if self.gazeData[-1].LeftValidity != 4:
self.left_eye_status.fillColor = 'green'
if self.gazeData[-1].RightValidity != 4:
self.right_eye_status.fillColor = 'green'
self.left_eye_status.draw()
self.right_eye_status.draw()
self.win.flip()
# Stop tracking and reset gaze data
self.eyetracker.StopTracking()
self.eyetracker.events.OnGazeDataReceived -= self.on_gazedata
self.gazeData = []
self.eventData = []
# Initialize calibration
self.initcalibration_completed = False
print "Init calibration"
self.eyetracker.StartCalibration(lambda error, r: self.on_calib_start(error, r))
while not self.initcalibration_completed:
pass
# If we're updating a calibration
if calib is not None:
# Set calibration
self.setcalibration_completed=False
self.eyetracker.SetCalibration(self.calib,lambda error, r: self.on_calib_set(error, r))
while not self.setcalibration_completed:
pass
# Calibrate each point
clock = psychopy.core.Clock()
last_pos = Point2D(x=0.5, y=0.5)
for self.point_index in range(len(self.points)):
p = Point2D()
p.x, p.y = self.points[self.point_index]
self.calibratePoint(clock, last_pos, p)
last_pos = Point2D(x=p.x, y=p.y)
# Compute calibration
self.computeCalibration_completed = False
self.computeCalibration_succeeded = False
self.eyetracker.ComputeCalibration(lambda error, r: self.on_calib_compute(error, r))
while not self.computeCalibration_completed:
pass
# Stop calibration
self.eyetracker.StopCalibration(None)
self.win.flip()
# Get calibration
self.getcalibration_completed = False
self.calib = self.eyetracker.GetCalibration(lambda error, calib: self.on_calib_response(error, calib))
while not self.getcalibration_completed:
pass
draw.rectangle(((0, 0), tuple(self.win.size)), fill=(128, 128, 128))
can_accept=False
if not self.computeCalibration_succeeded:
#computeCalibration failed.
self.calresultmsg.setText('Not enough data was collected (Retry:r/Abort:ESC)')
elif self.calib is None:
#no calibration data
self.calresultmsg.setText('No calibration data (Retry:r/Abort:ESC)')
else:
can_accept=True
point_list = []
points = {}
for data in self.calib.plot_data:
points[data.true_point] = {'left': data.left, 'right': data.right}
point_list.append(data.true_point)
if len(point_list) == 0:
self.calresultmsg.setText('No true calibration data (Retry:r/Abort:ESC)')
else:
for idx,(x,y) in enumerate(self.points):
draw.ellipse(((x * self.win.size[0] - 10,
y * self.win.size[1] - 10),
(x * self.win.size[0] + 10,
y * self.win.size[1] + 10)),
outline=(0, 0, 0))
if calib is None:
num_txt=psychopy.visual.TextStim(self.win, pos=(pix2deg((x-0.5) * self.win.size[0] - 10, self.win.monitor),
pix2deg((0.5-y) * self.win.size[1] - 20, self.win.monitor)))
num_txt.setText(str(idx+1))
self.point_labels.append(num_txt)
for idx,p in enumerate(point_list):
d = points[p]
draw.ellipse(((p.x * self.win.size[0] - 10,
p.y * self.win.size[1] - 10),
(p.x * self.win.size[0] + 10,
p.y * self.win.size[1] + 10)),
outline=(0, 0, 0))
if d['left'].validity == 1:
draw.line(((p.x * self.win.size[0],
p.y * self.win.size[1]),
(d['left'].map_point.x * self.win.size[0],
d['left'].map_point.y * self.win.size[1])), fill=(255, 0, 0))
if d['right'].validity == 1:
draw.line(((p.x * self.win.size[0],
p.y * self.win.size[1]),
(d['right'].map_point.x * self.win.size[0],
d['right'].map_point.y * self.win.size[1])), fill=(0, 255, 0))
self.calresultmsg.setText('Accept calibration results (Accept:a/Redo:#/Retry:r/Abort:ESC)')
self.calresult.setImage(img)
return can_accept
def on_calib_start(self, error, r):
if error:
print "Could not start calibration because of error. (0x%0x)" % error
return False
self.initcalibration_completed = True
def on_add_completed(self, error, r):
if error:
print "Add Calibration Point failed because of error. (0x%0x)" % error
return False
self.add_point_completed = True
return False
def on_remove_completed(self, error):
if error:
print "Remove Calibration Point failed because of error. (0x%0x)" % error
self.remove_point_completed = True
return False
def on_calib_compute(self, error, r):
if error == 0x20000502:
print "CalibCompute failed because not enough data was collected:", error
print "Not enough data was collected during calibration procedure."
self.computeCalibration_succeeded = False
elif error != 0:
print "CalibCompute failed because of a server error:", error
print "Could not compute calibration because of a server error.\n\n<b>Details:</b>\n<i>%s</i>" % error
self.computeCalibration_succeeded = False
else:
print ""
self.computeCalibration_succeeded = True
self.computeCalibration_completed = True
return False
def on_calib_response(self, error, calib):
if error:
print "On_calib_response: Error =", error
self.calib = None
self.getcalibration_completed = True
return False
print "On_calib_response: Success"
self.calib = calib
self.getcalibration_completed = True
return False
def on_calib_set(self, error, r):
if error:
print "Set Calibration failed because of error. (0x%0x)" % error
return False
self.setcalibration_completed=True
return False
def on_calib_done(self, status, msg):
# When the calibration procedure is done we update the calibration plot
if not status:
print msg
self.calibration = None
return False
def startTracking(self):
self.gazeData = []
self.eventData = []
self.eyetracker.events.OnGazeDataReceived += self.on_gazedata
self.eyetracker.StartTracking()
def stopTracking(self):
self.eyetracker.StopTracking()
self.eyetracker.events.OnGazeDataReceived -= self.on_gazedata
self.flushData()
self.gazeData = []
self.eventData = []
def on_gazedata(self, error, gaze):
self.gazeData.append(gaze)
def getGazePosition(self, gaze):
return ((pix2deg((gaze.LeftGazePoint2D.x - 0.5) * self.win.size[0], self.win.monitor),
pix2deg((0.5 - gaze.LeftGazePoint2D.y) * self.win.size[1], self.win.monitor),
pix2deg((gaze.RightGazePoint2D.x - 0.5) * self.win.size[0], self.win.monitor),
pix2deg((0.5 - gaze.RightGazePoint2D.y) * self.win.size[1], self.win.monitor)))
def getCurrentGazePosition(self):
if len(self.gazeData) == 0:
return (None, None, None, None)
else:
return self.getGazePosition(self.gazeData[-1])
def setDataFile(self, filename, exp_info):
self.datafile = open(filename, 'w')
self.datafile.write('Recording date:\t' + datetime.datetime.now().strftime('%Y/%m/%d') + '\n')
self.datafile.write('Recording time:\t' + datetime.datetime.now().strftime('%H:%M:%S') + '\n')
self.datafile.write('Recording resolution\t%d x %d\n' % tuple(self.win.size))
for key, data in exp_info.iteritems():
self.datafile.write('%s:\t%s\n' % (key, data))
self.datafile.write('\n')
self.datafile.write('\t'.join(['TimeStamp',
'GazePointXLeft',
'GazePointYLeft',
'PupilLeft',
'ValidityLeft',
'GazePointXRight',
'GazePointYRight',
'PupilRight',
'ValidityRight',
'GazePointX',
'GazePointY',
'Event']) + '\n')
def closeDataFile(self):
print 'datafile closed'
if self.datafile is not None:
self.flushData()
self.datafile.close()
self.datafile = None
def recordEvent(self, event):
t = self.syncmanager.convert_from_local_to_remote(self.clock.get_time())
self.eventData.append((t, event.code, event.table))
def flushData(self):
if self.datafile is None:
print 'data file is not set.'
return
if len(self.gazeData)==0:
return
timeStampStart = self.gazeData[0].Timestamp
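# Merge gaze samples and recorded events into one list, time-stamped relative to the first gaze sample, and write them out in chronological order.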
gaze_events=[]
for g in self.gazeData:
gaze_events.append([(g.Timestamp-timeStampStart)/1000.0,g])
for e in self.eventData:
gaze_events.append([(e[0]-timeStampStart)/1000.0,e[1],e[2]])
gaze_events.sort(key=lambda tup: tup[0])
for gaze_event in gaze_events:
if len(gaze_event)==2:
time_stamp,g=gaze_event
self.datafile.write('%.1f\t%.4f\t%.4f\t%.4f\t%d\t%.4f\t%.4f\t%.4f\t%d' % (
time_stamp,
g.LeftGazePoint2D.x*self.win.size[0] if g.LeftValidity!=4 else -1.0,
g.LeftGazePoint2D.y*self.win.size[1] if g.LeftValidity!=4 else -1.0,
g.LeftPupil,
g.LeftValidity,
g.RightGazePoint2D.x*self.win.size[0] if g.RightValidity!=4 else -1.0,
g.RightGazePoint2D.y*self.win.size[1] if g.RightValidity!=4 else -1.0,
g.RightPupil,
g.RightValidity))
if g.LeftValidity == 4 and g.RightValidity == 4: #not detected
ave = (-1.0,-1.0)
elif g.LeftValidity == 4:
ave = (g.RightGazePoint2D.x*self.win.size[0],g.RightGazePoint2D.y*self.win.size[1])
elif g.RightValidity == 4:
ave = (g.LeftGazePoint2D.x*self.win.size[0],g.LeftGazePoint2D.y*self.win.size[1])
else:
ave = (.5*(g.LeftGazePoint2D.x+g.RightGazePoint2D.x)*self.win.size[0],
.5*(g.LeftGazePoint2D.y+g.RightGazePoint2D.y)*self.win.size[1])
self.datafile.write('\t%.4f\t%.4f\t'%ave)
self.datafile.write('\n')
else:
time_stamp,event_str,table=gaze_event
formatstr = '%.1f'+'\t'*11+'%s,%s\n'
table_str=','.join('%s:%s' % (key, val) for key, val in table.iteritems())
self.datafile.write(formatstr % (time_stamp,event_str,table_str))
self.gazeData = []
self.eventData = []
self.datafile.flush()
|
|
import os
import time
from collections import OrderedDict
from conans.paths import (CONANFILE, CONANINFO, CONANFILE_TXT, BUILD_INFO, CONANENV)
from conans.client.loader import ConanFileLoader
from conans.client.export import export_conanfile
from conans.client.deps_builder import DepsGraphBuilder
from conans.client.userio import UserIO
from conans.client.installer import ConanInstaller
from conans.util.files import save, load, rmdir, normalize
from conans.util.log import logger
from conans.client.uploader import ConanUploader
from conans.client.printer import Printer
from conans.errors import NotFoundException, ConanException
from conans.client.generators import write_generators
from conans.client.importer import run_imports, undo_imports
from conans.model.ref import ConanFileReference, PackageReference
from conans.client.remover import ConanRemover
from conans.model.info import ConanInfo
from conans.model.values import Values
from conans.model.options import OptionsValues
from conans.model.build_info import DepsCppInfo, CppInfo
from conans.client import packager
from conans.client.detect import detected_os
from conans.client.package_copier import PackageCopier
from conans.client.output import ScopedOutput
from conans.client.proxy import ConanProxy
from conans.client.remote_registry import RemoteRegistry
from conans.model.scope import Scopes
from conans.client.client_cache import ClientCache
from conans.client.source import config_source, config_source_local
from conans.client.manifest_manager import ManifestManager
from conans.model.env_info import EnvInfo, DepsEnvInfo
from conans.tools import environment_append
from conans.client.require_resolver import RequireResolver
from conans.model.profile import Profile
def get_user_channel(text):
tokens = text.split('/')
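# e.g. "myuser/stable" -> ("myuser", "stable"); if no channel part is given, "testing" is assumed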
try:
user = tokens[0]
channel = tokens[1]
except IndexError:
channel = "testing"
return user, channel
class ConanManager(object):
""" Manage all the commands logic The main entry point for all the client
business logic
"""
def __init__(self, client_cache, user_io, runner, remote_manager, search_manager):
assert isinstance(user_io, UserIO)
assert isinstance(client_cache, ClientCache)
self._client_cache = client_cache
self._user_io = user_io
self._runner = runner
self._remote_manager = remote_manager
self._current_scopes = None
self._search_manager = search_manager
def _loader(self, current_path=None, user_settings_values=None, package_settings=None,
user_options_values=None, scopes=None, env=None, package_env=None):
# The disk settings definition, already including the default disk values
settings = self._client_cache.settings
options = OptionsValues()
conaninfo_scopes = Scopes()
if current_path:
conan_info_path = os.path.join(current_path, CONANINFO)
if os.path.exists(conan_info_path):
existing_info = ConanInfo.load_file(conan_info_path)
settings.values = existing_info.full_settings
options = existing_info.full_options # Take existing options from conaninfo.txt
conaninfo_scopes = existing_info.scope
if user_settings_values:
aux_values = Values.from_list(user_settings_values)
settings.values = aux_values
if user_options_values is not None: # Install will pass an empty list []
# Install OVERWRITES options, existing options in CONANINFO are not taken
# into account, just those from CONANFILE + user command line
options = OptionsValues.from_list(user_options_values)
if scopes:
conaninfo_scopes.update_scope(scopes)
self._current_scopes = conaninfo_scopes
return ConanFileLoader(self._runner, settings, package_settings=package_settings,
options=options, scopes=conaninfo_scopes,
env=env, package_env=package_env)
def export(self, user, conan_file_path, keep_source=False):
""" Export the conans
param conanfile_path: the original source directory of the user containing a
conanfile.py
param user: the user under whom the package will be exported
param channel: string (stable, testing,...)
"""
assert conan_file_path
logger.debug("Exporting %s" % conan_file_path)
user_name, channel = get_user_channel(user)
conan_file = self._loader().load_class(os.path.join(conan_file_path, CONANFILE))
for field in ["url", "license", "description"]:
field_value = getattr(conan_file, field, None)
if not field_value:
self._user_io.out.warn("Conanfile doesn't have '%s'.\n"
"It is recommended to add it as attribute" % field)
conan_ref = ConanFileReference(conan_file.name, conan_file.version, user_name, channel)
conan_ref_str = str(conan_ref)
# Maybe a platform check could be added, but depends on disk partition
refs = self._search_manager.search(conan_ref_str, ignorecase=True)
if refs and conan_ref not in refs:
raise ConanException("Cannot export package with same name but different case\n"
"You exported '%s' but already existing '%s'"
% (conan_ref_str, " ".join(str(s) for s in refs)))
output = ScopedOutput(str(conan_ref), self._user_io.out)
export_conanfile(output, self._client_cache, conan_file.exports, conan_file_path,
conan_ref, conan_file.short_paths, keep_source)
def download(self, reference, package_ids, remote=None):
""" Download conanfile and specified packages to local repository
@param reference: ConanFileReference
@param package_ids: package ids to download, or empty to download all of them
@param remote: install only from that remote
"""
assert(isinstance(reference, ConanFileReference))
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager, remote)
package = remote_proxy.search(reference, None)
if not package: # Search the reference first, and raise if it doesn't exist
raise ConanException("'%s' not found in remote" % str(reference))
if package_ids:
remote_proxy.download_packages(reference, package_ids)
else:
packages_props = remote_proxy.search_packages(reference, None)
if not packages_props:
output = ScopedOutput(str(reference), self._user_io.out)
output.warn("No remote binary packages found in remote")
else:
remote_proxy.download_packages(reference, list(packages_props.keys()))
def _get_graph(self, reference, current_path, remote, options, settings, filename, update,
check_updates, manifest_manager, scopes, package_settings, env, package_env):
loader = self._loader(current_path, settings, package_settings, options, scopes, env, package_env)
# Don't check for updates for the info command; it'll be checked when the dep graph is built
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager, remote,
update=update, check_updates=check_updates,
manifest_manager=manifest_manager)
if isinstance(reference, ConanFileReference):
project_reference = None
conanfile = loader.load_virtual(reference, current_path)
is_txt = True
else:
conanfile_path = reference
project_reference = "PROJECT"
output = ScopedOutput(project_reference, self._user_io.out)
try:
if filename and filename.endswith(".txt"):
raise NotFoundException("")
conan_file_path = os.path.join(conanfile_path, filename or CONANFILE)
conanfile = loader.load_conan(conan_file_path, output, consumer=True)
is_txt = False
if conanfile.name is not None and conanfile.version is not None:
project_reference = "%s/%s@" % (conanfile.name, conanfile.version)
project_reference += "PROJECT"
except NotFoundException: # Load requirements.txt
conan_path = os.path.join(conanfile_path, filename or CONANFILE_TXT)
conanfile = loader.load_conan_txt(conan_path, output)
is_txt = True
# build deps graph and install it
local_search = None if update else self._search_manager
resolver = RequireResolver(self._user_io.out, local_search, remote_proxy)
builder = DepsGraphBuilder(remote_proxy, self._user_io.out, loader, resolver)
deps_graph = builder.load(None, conanfile)
# These lines are so the conaninfo stores the correct complete info
if is_txt:
conanfile.info.settings = loader._settings.values
conanfile.info.full_settings = loader._settings.values
conanfile.info.scope = self._current_scopes
conanfile.cpp_info = CppInfo(current_path)
conanfile.env_info = EnvInfo(current_path)
registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
return (builder, deps_graph, project_reference, registry, conanfile,
remote_proxy, loader)
def info(self, reference, current_path, remote=None, options=None, settings=None,
info=None, filename=None, update=False, check_updates=False, scopes=None,
build_order=None, build_mode=None, package_settings=None):
""" Fetch and build all dependencies for the given reference
@param reference: ConanFileReference or path to user space conanfile
@param current_path: where the output files will be saved
@param remote: install only from that remote
@param options: list of tuples: [(optionname, optionvalue), (optionname, optionvalue)...]
@param settings: list of tuples: [(settingname, settingvalue), (settingname, value)...]
@param package_settings: dict name=> settings: {"zlib": [(settingname, settingvalue), ...]}
"""
objects = self._get_graph(reference, current_path, remote, options, settings, filename,
update, check_updates, None, scopes, package_settings, None, None)
(builder, deps_graph, project_reference, registry, _, remote_proxy, _) = objects
if build_order:
result = deps_graph.build_order(build_order)
self._user_io.out.info(", ".join(str(s) for s in result))
return
if build_mode is not False: # build_mode is a policy or list of names (same as the install --build param)
installer = ConanInstaller(self._client_cache, self._user_io, remote_proxy)
nodes = installer.nodes_to_build(deps_graph, build_mode)
self._user_io.out.info(", ".join(str(ref) for ref, _ in nodes))
return
if check_updates:
graph_updates_info = builder.get_graph_updates_info(deps_graph)
else:
graph_updates_info = {}
Printer(self._user_io.out).print_info(deps_graph, project_reference,
info, registry, graph_updates_info,
remote)
def read_profile(self, profile_name, cwd):
if not profile_name:
return None
if os.path.isabs(profile_name):
profile_path = profile_name
folder = os.path.dirname(profile_name)
elif profile_name.startswith("."): # relative path name
profile_path = os.path.abspath(os.path.join(cwd, profile_name))
folder = os.path.dirname(profile_path)
else:
folder = self._client_cache.profiles_path
profile_path = self._client_cache.profile_path(profile_name)
try:
text = load(profile_path)
except Exception:
if os.path.exists(folder):
profiles = [name for name in os.listdir(folder) if not os.path.isdir(os.path.join(folder, name))]
else:
profiles = []
current_profiles = ", ".join(profiles) or "[]"
raise ConanException("Specified profile '%s' doesn't exist.\nExisting profiles: "
"%s" % (profile_name, current_profiles))
try:
return Profile.loads(text)
except ConanException as exc:
raise ConanException("Error reading '%s' profile: %s" % (profile_name, exc))
def install(self, reference, current_path, remote=None, options=None, settings=None,
build_mode=False, filename=None, update=False, check_updates=False,
manifest_folder=None, manifest_verify=False, manifest_interactive=False,
scopes=None, generators=None, profile_name=None, package_settings=None,
env=None, package_env=None, no_imports=False):
""" Fetch and build all dependencies for the given reference
@param reference: ConanFileReference or path to user space conanfile
@param current_path: where the output files will be saved
@param remote: install only from that remote
@param options: list of tuples: [(optionname, optionvalue), (optionname, optionvalue)...]
@param settings: list of tuples: [(settingname, settingvalue), (settingname, value)...]
@param package_settings: dict name=> settings: {"zlib": [(settingname, settingvalue), ...]}
@param profile: name of the profile to use
@param env: list of tuples for environment vars: [(var, value), (var2, value2)...]
@param package_env: package dict of list of tuples: {"package_name": [(var, value), (var2, value2)...]}
"""
generators = generators or []
if manifest_folder:
manifest_manager = ManifestManager(manifest_folder, user_io=self._user_io,
client_cache=self._client_cache,
verify=manifest_verify,
interactive=manifest_interactive)
else:
manifest_manager = None
profile = self.read_profile(profile_name, current_path)
# Mix Settings, Env vars and scopes between profile and command line
if profile:
profile.update_settings(settings)
profile.update_package_settings(package_settings)
settings = profile.settings
package_settings = profile.package_settings
profile.update_env(env)
profile.update_packages_env(package_env)
env = profile.env
package_env = profile.package_env
profile.update_scopes(scopes)
scopes = profile.scopes
objects = self._get_graph(reference, current_path, remote, options, settings, filename,
update, check_updates, manifest_manager, scopes, package_settings,
env, package_env)
(_, deps_graph, _, registry, conanfile, remote_proxy, loader) = objects
Printer(self._user_io.out).print_graph(deps_graph, registry)
# Warn if os doesn't match
try:
if detected_os() != loader._settings.os:
message = '''You are building this package with settings.os='%s' on a '%s' system.
If this is your intention, you can ignore this message.
If not:
- Check the passed settings (-s)
- Check your global settings in ~/.conan/conan.conf
- Remove conaninfo.txt to avoid bad cached settings
''' % (loader._settings.os, detected_os())
self._user_io.out.warn(message)
except ConanException: # Setting os doesn't exist
pass
installer = ConanInstaller(self._client_cache, self._user_io, remote_proxy)
installer.install(deps_graph, build_mode)
prefix = "PROJECT" if not isinstance(reference, ConanFileReference) else str(reference)
output = ScopedOutput(prefix, self._user_io.out)
# Write generators
tmp = list(conanfile.generators) # Add the command line specified generators
tmp.extend(generators)
conanfile.generators = tmp
write_generators(conanfile, current_path, output)
if not isinstance(reference, ConanFileReference):
content = normalize(conanfile.info.dumps())
save(os.path.join(current_path, CONANINFO), content)
output.info("Generated %s" % CONANINFO)
if not no_imports:
run_imports(conanfile, current_path, output)
if manifest_manager:
manifest_manager.print_log()
def _load_info_file(self, current_path, conanfile, output, info_file, error=False):
if info_file == BUILD_INFO:
class_, attr, gen = DepsCppInfo, "deps_cpp_info", "txt"
else:
class_, attr, gen = DepsEnvInfo, "deps_env_info", "env"
info_file_path = os.path.join(current_path, info_file)
try:
deps_info = class_.loads(load(info_file_path))
setattr(conanfile, attr, deps_info)
except IOError:
error_msg = ("%s file not found in %s\nIt is %s for this command\n"
"You can generate it using 'conan install -g %s'"
% (info_file, current_path, "required" if error else "recommended", gen))
if not error:
output.warn(error_msg)
else:
raise ConanException(error_msg)
except ConanException:
raise ConanException("Parse error in '%s' file in %s" % (info_file, current_path))
def _load_deps_info(self, current_path, conanfile, output, load_env=True, error=False):
self._load_info_file(current_path, conanfile, output, BUILD_INFO, error=error)
if load_env:
self._load_info_file(current_path, conanfile, output, CONANENV, error=error)
def source(self, current_path, reference, force):
if not isinstance(reference, ConanFileReference):
output = ScopedOutput("PROJECT", self._user_io.out)
conan_file_path = os.path.join(reference, CONANFILE)
conanfile = self._loader().load_conan(conan_file_path, output, consumer=True)
self._load_deps_info(current_path, conanfile, output)
export_folder = reference
config_source_local(export_folder, current_path, conanfile, output)
else:
output = ScopedOutput(str(reference), self._user_io.out)
conan_file_path = self._client_cache.conanfile(reference)
conanfile = self._loader().load_conan(conan_file_path, output, reference=reference)
self._load_deps_info(current_path, conanfile, output)
src_folder = self._client_cache.source(reference, conanfile.short_paths)
export_folder = self._client_cache.export(reference)
config_source(export_folder, src_folder, conanfile, output, force)
def imports_undo(self, current_path):
undo_imports(current_path, self._user_io.out)
def imports(self, current_path, reference, conan_file_path, dest_folder):
if not isinstance(reference, ConanFileReference):
output = ScopedOutput("PROJECT", self._user_io.out)
if not conan_file_path:
conan_file_path = os.path.join(reference, CONANFILE)
if not os.path.exists(conan_file_path):
conan_file_path = os.path.join(reference, CONANFILE_TXT)
if conan_file_path.endswith(".txt"):
conanfile = self._loader().load_conan_txt(conan_file_path, output)
else:
conanfile = self._loader().load_conan(conan_file_path, output, consumer=True)
else:
output = ScopedOutput(str(reference), self._user_io.out)
conan_file_path = self._client_cache.conanfile(reference)
conanfile = self._loader().load_conan(conan_file_path, output, reference=reference)
self._load_deps_info(current_path, conanfile, output, load_env=False, error=True)
run_imports(conanfile, dest_folder or current_path, output)
def local_package(self, current_path, build_folder):
if current_path == build_folder:
raise ConanException("Cannot 'conan package' to the build folder. "
"Please move to another folder and try again")
output = ScopedOutput("PROJECT", self._user_io.out)
conan_file_path = os.path.join(build_folder, CONANFILE)
conanfile = self._loader().load_conan(conan_file_path, output, consumer=True)
self._load_deps_info(build_folder, conanfile, output)
packager.create_package(conanfile, build_folder, current_path, output, local=True)
def package(self, reference, package_id):
# Package paths
conan_file_path = self._client_cache.conanfile(reference)
if not os.path.exists(conan_file_path):
raise ConanException("Package recipe '%s' does not exist" % str(reference))
if not package_id:
packages = [PackageReference(reference, packid)
for packid in self._client_cache.conan_builds(reference)]
if not packages:
raise NotFoundException("%s: Package recipe has not been built locally\n"
"Please read the 'conan package' command help\n"
"Use 'conan install' or 'conan test_package' to build and "
"create binaries" % str(reference))
else:
packages = [PackageReference(reference, package_id)]
for package_reference in packages:
build_folder = self._client_cache.build(package_reference, short_paths=None)
if not os.path.exists(build_folder):
raise NotFoundException("%s: Package binary '%s' folder doesn't exist\n"
"Please read the 'conan package' command help\n"
"Use 'conan install' or 'conan test_package' to build and "
"create binaries"
% (str(reference), package_reference.package_id))
# The package already exists, so we can use short_paths if they were defined
package_folder = self._client_cache.package(package_reference, short_paths=None)
# Will read current conaninfo with specified options and load conanfile with them
output = ScopedOutput(str(reference), self._user_io.out)
output.info("Re-packaging %s" % package_reference.package_id)
loader = self._loader(build_folder)
conanfile = loader.load_conan(conan_file_path, self._user_io.out,
reference=package_reference.conan)
self._load_deps_info(build_folder, conanfile, output)
rmdir(package_folder)
packager.create_package(conanfile, build_folder, package_folder, output)
def build(self, conanfile_path, current_path, test=False, filename=None, profile_name=None,
env=None, package_env=None):
""" Call to build() method saved on the conanfile.py
param conanfile_path: the original source directory of the user containing a
conanfile.py
"""
logger.debug("Building in %s" % current_path)
logger.debug("Conanfile in %s" % conanfile_path)
if filename and filename.endswith(".txt"):
raise ConanException("A conanfile.py is needed to call 'conan build'")
conanfile_file = os.path.join(conanfile_path, filename or CONANFILE)
try:
# Append env vars to the execution environment and clear them when the block ends
profile = self.read_profile(profile_name, current_path)
output = ScopedOutput("Project", self._user_io.out)
if profile:
profile.update_env(env)
profile.update_packages_env(package_env)
env = profile.env
package_env = profile.package_env
loader = self._loader(current_path, env=env, package_env=package_env)
conan_file = loader.load_conan(conanfile_file, output, consumer=True)
except NotFoundException:
# TODO: Auto generate conanfile from requirements file
raise ConanException("'%s' file is needed for build.\n"
"Create a '%s' and move manually the "
"requirements and generators from '%s' file"
% (CONANFILE, CONANFILE, CONANFILE_TXT))
try:
self._load_deps_info(current_path, conan_file, output)
os.chdir(current_path)
conan_file._conanfile_directory = conanfile_path
with environment_append(conan_file.env):
conan_file.build()
if test:
conan_file.test()
except ConanException:
raise # Re-raise so it is not caught by the generic Exception handler below (which would print a traceback)
except Exception:
import traceback
trace = traceback.format_exc().split('\n')
raise ConanException("Unable to build it successfully\n%s" % '\n'.join(trace[3:]))
def upload(self, conan_reference, package_id=None, remote=None, all_packages=None,
force=False):
t1 = time.time()
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager, remote)
uploader = ConanUploader(self._client_cache, self._user_io, remote_proxy)
# Load conanfile to check if the build policy is set to always
try:
conanfile_path = self._client_cache.conanfile(conan_reference)
conan_file = self._loader().load_class(conanfile_path)
except NotFoundException:
raise NotFoundException("There is no local conanfile exported as %s"
% str(conan_reference))
# Can't use build_policy_always here because it's not loaded (only load_class)
if conan_file.build_policy == "always" and (all_packages or package_id):
raise ConanException("Conanfile has build_policy='always', "
"no packages can be uploaded")
if package_id: # Upload package
uploader.upload_package(PackageReference(conan_reference, package_id))
else: # Upload conans
uploader.upload_conan(conan_reference, all_packages=all_packages, force=force)
logger.debug("====> Time manager upload: %f" % (time.time() - t1))
def search(self, pattern_or_reference=None, remote=None, ignorecase=True, packages_query=None):
""" Print the single information saved in conan.vars about all the packages
or the packages which match with a pattern
Attributes:
pattern = string to match packages
remote = search on another origin to get packages info
packages_pattern = String query with binary
packages properties: "arch=x86 AND os=Windows"
"""
printer = Printer(self._user_io.out)
if remote:
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager,
remote)
adapter = remote_proxy
else:
adapter = self._search_manager
if isinstance(pattern_or_reference, ConanFileReference):
packages_props = adapter.search_packages(pattern_or_reference, packages_query)
ordered_packages = OrderedDict(sorted(packages_props.items()))
try:
recipe_hash = self._client_cache.load_manifest(pattern_or_reference).summary_hash
except IOError: # It could not exist in local
recipe_hash = None
printer.print_search_packages(ordered_packages, pattern_or_reference,
recipe_hash, packages_query)
else:
references = adapter.search(pattern_or_reference, ignorecase)
printer.print_search_recipes(references, pattern_or_reference)
def copy(self, reference, package_ids, username, channel, force=False):
""" Copy or move conanfile (exported) and packages to another user and or channel
@param reference: ConanFileReference containing the packages to be moved
@param package_ids: list of package ids, or [] for all of them
@param username: Destination username
@param channel: Destination channel
@param remote: install only from that remote
"""
output = ScopedOutput(str(reference), self._user_io.out)
conan_file_path = self._client_cache.conanfile(reference)
conanfile = self._loader().load_conan(conan_file_path, output)
copier = PackageCopier(self._client_cache, self._user_io, conanfile.short_paths)
if not package_ids:
packages = self._client_cache.packages(reference)
if os.path.exists(packages):
package_ids = os.listdir(packages)
else:
package_ids = []
copier.copy(reference, package_ids, username, channel, force)
def remove(self, pattern, src=False, build_ids=None, package_ids_filter=None, force=False,
remote=None):
""" Remove conans and/or packages
@param pattern: string to match packages
@param package_ids_filter: list of package ids, or [] for all of them
@param remote: search on another origin to get packages info
@param force: if True, it will be deleted without requesting anything
"""
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager, remote)
remover = ConanRemover(self._client_cache, self._search_manager, self._user_io,
remote_proxy)
remover.remove(pattern, src, build_ids, package_ids_filter, force=force)
def user(self, remote=None, name=None, password=None):
remote_proxy = ConanProxy(self._client_cache, self._user_io, self._remote_manager, remote)
return remote_proxy.authenticate(name, password)
|
|
# -*- coding:utf-8 -*-
import os
import shutil
import unittest
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from requests.exceptions import ConnectionError
from .crawler import Crawler
from .exceptions import CrawlerError
from .proxies import ProxyPool
class TestAll(unittest.TestCase):
def setUp(self):
self.urls = {
'SIMPLE_HTML': 'https://httpbin.org/html',
'SIMPLE_TABLE': 'https://www.w3schools.com/html/html_tables.asp',
'USER_AGENT': 'https://httpbin.org/user-agent',
'POST': 'https://httpbin.org/post',
'COMPLEX_HTML': 'https://www.nytimes.com/',
'FORM': 'https://httpbin.org/forms/post',
'FORM2': 'https://eoryginalne.pl/kontakt',
'GOOGLE': 'https://www.google.pl/',
'LINKS': 'https://httpbin.org/links/10/0',
'ROBOTS': 'https://httpbin.org/robots.txt',
'IMAGE': 'https://httpbin.org/image/png',
'FILE_UPLOAD': 'http://cgi-lib.berkeley.edu/ex/fup.html',
'XML': 'https://httpbin.org/xml',
'REDIRECT': 'https://httpbin.org/redirect/1',
'REDIRECT_2_TIMES': 'https://httpbin.org/redirect/2',
'COOKIES': 'https://httpbin.org/cookies',
'BASIC_AUTH': 'https://httpbin.org/basic-auth/user/passwd',
'GZIP': 'https://httpbin.org/gzip',
'REST': 'https://jsonplaceholder.typicode.com/posts',
'SCRAPING_QUOTES': 'http://quotes.toscrape.com/',
'SCRAPING_BOOKSTORE': 'http://books.toscrape.com/',
'GAZETA': 'http://www.gazeta.pl/0,0.html',
'XKCD': 'https://xkcd.com/',
'PYTHON': 'https://www.python.org/',
'W3': 'https://www.w3schools.com/'
}
self.urls_list = [
"https://www.nytimes.com/",
"http://www.alexa.com/",
"https://www.mozilla.org/en-US/",
"https://www.amazon.com/",
"https://www.walmart.com/",
"http://www.biedronka.pl/pl",
"https://pl.aliexpress.com/",
"https://allegro.pl/",
"https://httpbin.org/html",
"https://github.com/"
]
self.test_dir = os.path.join(os.getcwd(), 'test')
self.upload_file = os.path.join(self.test_dir, 'upload.txt')
os.makedirs(self.test_dir, exist_ok=True)
with open(self.upload_file, 'wb') as f:
f.write(b"If the road is easy, you're likely going the wrong way..")
def tearDown(self):
shutil.rmtree(self.test_dir, ignore_errors=True)
def test_simple_http_page(self):
c = Crawler()
response = c.open(self.urls['SIMPLE_HTML'])
self.assertEqual(response.status_code, 200)
def test_find_links(self):
c = Crawler()
c.open(self.urls['GAZETA'])
links = c.links(filters={
'id': 'LinkArea:MT'
}, match='EQUAL'
)
self.assertTrue(links)
def test_parse_form(self):
c = Crawler()
response = c.open(self.urls['FORM2'])
self.assertEqual(response.status_code, 200)
forms = c.forms(filters={'id': 'searchbox'})
search_form = forms[0]
search_form.fields = {
'search_query': 'cute kittens'
}
self.assertEqual(search_form.fields['search_query'].get('value'), 'cute kittens')
def test_submit_form(self):
c = Crawler()
response = c.open(self.urls['FORM'])
self.assertEqual(response.status_code, 200)
forms = c.forms()
form = forms[0]
form.fields = {
'custname': 'aaa',
'delivery': '',
'custemail': 'test@email.com',
'comments': '',
'size': 'medium',
'topping': ['bacon', 'cheese'],
'custtel': '+48606505888'
}
c.submit(form, data={'extra_value': "I am your father."})
success = c.submit_check(
form,
phrase="I am your father.",
url=self.urls['POST'],
status_codes=[200]
)
self.assertEqual(success, True)
def test_response_history(self):
c = Crawler()
response = c.open(self.urls['REDIRECT_2_TIMES'])
self.assertEqual(len(response.history), 2)
def test_crawler_history(self):
c = Crawler()
c.open(self.urls['SIMPLE_HTML'])
c.open(self.urls['LINKS'])
history = c.history()
self.assertEqual(history[0].url, self.urls['SIMPLE_HTML'])
self.assertEqual(history[1].url, self.urls['LINKS'])
def test_crawler_flow(self):
c = Crawler()
c.open(self.urls['SIMPLE_HTML'])
c.open(self.urls['LINKS'])
self.assertEqual(len(c.flow()), 2)
def test_crawler_xpath(self):
c = Crawler()
c.open(self.urls['SIMPLE_HTML'])
p_text = c.xpath('//p/text()')
self.assertGreaterEqual(len(p_text), 1)
def test_crawler_xpath_filter(self):
c = Crawler()
c.open(self.urls['W3'])
filtered_results = c.xpath('//p').filter(filters={'class': 'w3-xlarge'})
self.assertEqual(filtered_results[0]['class'], 'w3-xlarge')
def test_crawler_css(self):
c = Crawler()
c.open(self.urls['SIMPLE_HTML'])
p_text = c.css('div')
self.assertGreaterEqual(len(p_text), 1)
def test_crawler_scraper_methods(self):
c = Crawler()
c.open(self.urls['SIMPLE_TABLE'])
self.assertTrue(c.tables())
self.assertTrue(c.title())
self.assertTrue(c.images())
def test_crawler_back_forward_navigation(self):
c = Crawler()
c.open(self.urls['SCRAPING_QUOTES'])
tags_links = c.links(filters={'class': 'tag'})
for link in tags_links:
c.follow(link)
history = c.history()
c.back()
self.assertEqual(c.get_url(), history[-2].url)
c.back()
self.assertEqual(c.get_url(), history[-3].url)
c.forward()
self.assertEqual(c.get_url(), history[-2].url)
self.assertRaises(CrawlerError, c.forward, step=5)
def test_crawler_clear_flow(self):
c = Crawler()
c.open(self.urls['SCRAPING_QUOTES'])
tags_links = c.links(filters={'class': 'tag'})
for link in tags_links:
c.follow(link)
self.assertTrue(c.history())
c.clear()
self.assertFalse(c.history())
def test_crawler_cookies_handling(self):
c = Crawler()
c.open(self.urls['COOKIES'], cookies={
'cookie_1': '1000101000101010',
'cookie_2': 'ABABHDBSBAJSLLWO',
})
response = c.response().json()
self.assertIn('cookie_1', response.get('cookies', {}))
def test_crawler_useragent_set(self):
c = Crawler()
c.useragent = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
json_response = c.open(self.urls['USER_AGENT']).json()
self.assertEqual(c.useragent, json_response['user-agent'])
def test_crawler_overridden_useragent(self):
c = Crawler()
c.useragent = "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
one_time_useragent = 'Godzilla'
json_response = c.open(
self.urls['USER_AGENT'],
headers={'user-agent': one_time_useragent}
).json()
self.assertEqual(one_time_useragent, json_response['user-agent'])
def test_download_file(self):
c = Crawler()
file_name = 'test.png'
c.download(
self.test_dir,
self.urls['IMAGE'],
name=file_name
)
self.assertTrue(os.path.isfile(os.path.join(self.test_dir, file_name)))
def test_download_images_list(self):
""" Download list of images in parallel.
"""
c = Crawler()
c.open(self.urls['XKCD'])
full_images_urls = [c.join_url(src) for src in c.images()]
downloaded_files = c.download_files(self.test_dir, files=full_images_urls)
self.assertEqual(len(full_images_urls), len(downloaded_files))
def test_form_file_upload(self):
"""Uploading a file through """
c = Crawler()
c.open(self.urls['FILE_UPLOAD'])
forms = c.forms()
upload_form = forms[0]
upload_form.fields = {
'note': 'Towel cat picture',
'upfile': open(self.upload_file, 'r')
}
c.submit(upload_form, action='http://cgi-lib.berkeley.edu/ex/fup.cgi')
success = c.submit_check(
upload_form,
phrase="road is easy",
status_codes=[200]
)
self.assertTrue(success)
def test_proxy_pool(self):
proxies = [
"117.143.109.159:80",
"117.143.109.163:80",
"162.243.108.161:3128",
"195.14.242.39:80",
"202.57.129.228:8080",
"212.124.171.144:80",
"216.249.79.140:21320",
"220.130.34.177:80"
"52.10.247.166:80",
"77.51.16.170:80",
]
proxy_pool = ProxyPool()
proxy_pool.load_proxies(proxies, test=False)
working = list(proxy_pool.working())
proxy_pool_len = len(proxy_pool)
self.assertEqual(len(proxies), proxy_pool_len)
self.assertEqual(len(working), proxy_pool_len)
def test_run_crawler_in_threads(self):
c = Crawler()
with ThreadPoolExecutor(max_workers=4) as executor:
results = executor.map(c.open, self.urls_list)
self.assertEqual(len(list(results)), 10)
def test_run_crawler_in_processes(self):
c = Crawler()
with ProcessPoolExecutor(max_workers=4) as executor:
results = executor.map(c.open, self.urls_list)
self.assertEqual(len(list(results)), 10)
def test_run_crawler_in_threads_download_images(self):
def open_and_download(url):
response = c.open(url)
full_images_urls = [c.join_url(src) for src in c.images()]
downloaded_files = c.download_files(self.test_dir, files=full_images_urls)
self.assertEqual(len(full_images_urls), len(downloaded_files))
return response
c = Crawler()
with ThreadPoolExecutor(max_workers=4) as executor:
results = executor.map(open_and_download, self.urls_list)
self.assertEqual(len(list(results)), 10)
def test_crawler_custom_submit(self):
c = Crawler()
data = {
'name': 'Luciano Ramalho',
'title': 'Fluent Python'
}
c.submit(
action=self.urls['POST'],
data=data
)
self.assertEqual(c.response().json().get('form'), data)
def test_crawler_open_retries(self):
c = Crawler()
c.max_retries = 3
c.logging = True
with self.assertRaises(ConnectionError):
c.open('http://www.delver.cg/404', data={'test': 'test data'})
self.assertEqual(c._retries, c.max_retries)
def test_crawler_random_timeout(self):
urls = [
'https://httpbin.org/html',
'https://www.w3schools.com/html/html_tables.asp',
'https://httpbin.org/user-agent'
]
c = Crawler()
c.random_timeout = (0, 5)
c.logging = True
for url in urls:
c.open(url)
if __name__ == '__main__':
unittest.main()
|
|
from __future__ import unicode_literals
import json
from pprint import pprint
from cloudmesh_client.cloud.hpc.BatchProvider import BatchProvider
from cloudmesh_client.comet.cluster import Cluster
from cloudmesh_client.comet.comet import Comet
from cloudmesh_client.common.hostlist import Parameter
from django.shortcuts import render, redirect
from sqlalchemy.orm import sessionmaker
from .charts import Chart
# noinspection PyPep8Naming
def Session():
from aldjemy.core import get_engine
engine = get_engine()
_Session = sessionmaker(bind=engine)
return _Session()
session = Session()
def dict_table(request, **kwargs):
context = kwargs
pprint(context)
return render(request, 'cloudmesh_portal_comet/dict_table.jinja', context)
def index(request):
return comet_ll(request)
def comet_dict_table(request, **kwargs):
context = kwargs
# pprint(context)
return render(request, 'cloudmesh_portal_comet/comet_dict_table.jinja', context)
def comet_logon(request):
c = None
    try:
        c = Comet.logon()
        print("LOGON OK")
        return c
    except Exception:
        return render(request,
                      'cloudmesh_portal_comet/logon_error.jinja')
def comet_status(request):
# noinspection PyUnusedLocal
c = comet_logon(request)
data = json.loads(Cluster.simple_list(format="json"))
# pprint(data)
# print (type(data))
clusters = []
for key in data:
total = data[key]["nodes"]
name = data[key]["name"]
if name == "comet-fe1":
name = "free"
cluster = {
"name": name,
"total": total,
"status": {
'active': 0,
'nostate': 0,
'down': 0,
'pending': 0,
'unkown': total
}
}
clusters.append(cluster)
details = json.loads(Cluster.list(format="json"))
counter = {}
for node in list(details.values()):
clustername = node["cluster"]
if clustername is not None:
if clustername not in counter:
counter[clustername] = {
'name': None,
'total': 0,
'status': {
'unkown': 0,
'active': 0,
'down': 0,
'pending': 0,
'nostate': 0,
'nostate-error': 0
}
}
# print (counter)
for key, node in list(details.items()):
if node['kind'] == 'compute':
name = node['cluster']
state = node['state']
if state in [None, 'None']:
state = 'unkown'
# print ("SSSSSSS", state, name, node['kind'])
counter[name]['status'][state] += 1
counter[name]['total'] += 1
counter[name]['name'] = name
pprint(counter)
#
# delete the free nodes for now
#
if 'comet-fe1' in counter:
for count in counter:
if count != "comet-fe1":
counter['comet-fe1']['total'] = \
counter['comet-fe1']['total'] - counter[count]['total']
counter['comet-fe1']['name'] = 'free'
counter_list = []
for key, cluster in list(counter.items()):
counter_list.append(cluster)
# context["clusters"] = counter_list
Chart.cluster_overview_pie(counter_list, filename='pie.svg')
#
# delete the overall count
#
if 'comet-fe1' in counter:
del counter['comet-fe1']
counter_list = []
for key, cluster in list(counter.items()):
counter_list.append(cluster)
Chart.cluster_overview_pie_vector(counter_list, filename='pie_vector.svg')
Chart.cluster_overview_radar(counter_list, filename='radar.svg')
context = {
'pid': str(Comet.find_tunnel()),
'tunnel': str(Comet.is_tunnel()),
'title': "Comet Status"
}
return render(request,
'cloudmesh_portal_comet/status.jinja',
context)
def comet_ll(request):
# noinspection PyUnusedLocal
c = comet_logon(request)
data = json.loads(Cluster.simple_list(format="json"))
pprint(data)
# data["terminal"] = Parameter.expand(data.keys())
for entry in data:
nodes = Parameter.expand(data[entry]["computes"])
nodes_linked = ["<a href=\"console/{}/{}\">{}</a>".format(data[entry]['name'], node, node) for node in nodes]
data[entry]["terminal"] = '<br>'.join(nodes_linked)
# pprint(type(data), data)
order = [
"name",
"project",
"nodes",
"computes",
"terminal",
"frontend name",
"frontend state",
"frontend type",
"description",
]
header = [
"Name",
"Project",
"Count",
"Nodes",
"Terminal",
"Frontend (Fe)",
"State (Fe)",
"Type (Fe)",
"Description",
]
return comet_dict_table(request, title="Comet List", data=data, header=header, order=order)
def comet_list(request):
# noinspection PyUnusedLocal
c = comet_logon(request)
data = json.loads(Cluster.list(format="json"))
dictionary = {}
for item in list(data.values()):
dictionary[item["name"]] = item
order = [
"name",
"state",
"kind",
"type",
"mac",
"ip",
"cpus",
"cluster",
"memory",
]
return comet_dict_table(request, title="Comet List", data=dictionary, order=order)
def comet_list_queue(request):
cluster = "comet"
output_format = "json"
order = [
"jobid",
"user",
"partition",
"nodes",
"st",
"name",
"nodelist",
"time",
]
provider = BatchProvider(cluster)
data = json.loads(provider.queue(cluster, format=output_format))
print (data)
return dict_table(request, title="Comet Queue", data=data, order=order)
def comet_info(request):
cluster = "comet"
output_format = "json"
order = [
'partition',
'nodes',
'state',
'avail',
'timelimit',
'cluster',
'nodelist',
# 'updated',
]
provider = BatchProvider(cluster)
data = json.loads(provider.info(cluster, format=output_format))
print (data)
return dict_table(request, title="Comet Queue", data=data, order=order)
def comet_console(request, cluster, node=None):
# noinspection PyUnusedLocal
c = comet_logon(request)
context = {"title": "Comet Virtual Cluster Console",
"cluster": cluster,
"node": node or "FE"}
url = Comet.console_url(cluster, node)
# print (url)
context["url"] = url
return render(request,
'cloudmesh_portal_comet/console.jinja',
context)
def comet_power(request, action, cluster, node=None):
# noinspection PyUnusedLocal
c = comet_logon(request)
# dispatching action and parameters
if not node:
subject = 'FE'
else:
try:
node = int(node)
subject = "COMPUTESET"
node = str(node)
except ValueError:
if '[' in node and ']' in node:
subject = "HOSTS"
else:
subject = "HOST"
Cluster.power(cluster, subject, node, action)
print (request.META.get('HTTP_REFERER'))
return redirect(request.META.get('HTTP_REFERER'))
|
|
# These surface visualization routines are based mainly on work of Julia Huntenburg and Sabine Oligschlaeger.
def plot_surf_stat_map(coords, faces, stat_map=None,
elev=0, azim=0,
cmap='jet',
threshold=None, bg_map=None,
bg_on_stat=False,
alpha='auto',
vmin=None, vmax=None,
cbar='sequential', # or'diverging'
symmetric_cbar="auto",
figsize=None,
labels=None, label_col=None, label_cpal=None,
mask=None, mask_lenient=None,
**kwargs):
'''
https://github.com/juhuntenburg/nilearn/tree/enh/surface_plotting
Helper function for symmetric colormap is copied from nilearn.
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
# load mesh and derive axes limits
faces = np.array(faces, dtype=int)
limits = [coords.min(), coords.max()]
# set alpha if in auto mode
if alpha == 'auto':
if bg_map is None:
alpha = .5
else:
alpha = 1
# if cmap is given as string, translate to matplotlib cmap
if type(cmap) == str:
cmap = plt.cm.get_cmap(cmap)
# initiate figure and 3d axes
if figsize is not None:
fig = plt.figure(figsize=figsize, frameon=False)
else:
fig = plt.figure(frameon=False)
ax = fig.add_subplot(111, projection='3d', xlim=limits, ylim=limits)
ax.view_init(elev=elev, azim=azim)
ax.set_axis_off()
# plot mesh without data
p3dcollec = ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
triangles=faces, linewidth=0.,
antialiased=False,
color='white')
# where mask is indices of nodes to include:
if mask is not None:
cmask = np.zeros(len(coords))
cmask[mask] = 1
cutoff = 2 # include triangles in cortex only if ALL nodes in mask
if mask_lenient: # include triangles in cortex if ANY are in mask
cutoff = 0
fmask = np.where(cmask[faces].sum(axis=1) > cutoff)[0]
    # If bg_map and/or stat_map are provided, map these onto the surface
# set_facecolors function of Poly3DCollection is used as passing the
# facecolors argument to plot_trisurf does not seem to work
if bg_map is not None or stat_map is not None:
face_colors = np.ones((faces.shape[0], 4))
face_colors[:, :3] = .5*face_colors[:, :3]
if bg_map is not None:
bg_data = bg_map
if bg_data.shape[0] != coords.shape[0]:
raise ValueError('The bg_map does not have the same number '
'of vertices as the mesh.')
bg_faces = np.mean(bg_data[faces], axis=1)
bg_faces = bg_faces - bg_faces.min()
bg_faces = bg_faces / bg_faces.max()
face_colors = plt.cm.gray_r(bg_faces)
# modify alpha values of background
face_colors[:, 3] = alpha*face_colors[:, 3]
if stat_map is not None:
stat_map_data = stat_map
stat_map_faces = np.mean(stat_map_data[faces], axis=1)
            if cbar == 'diverging':
                print(cbar)
# Call _get_plot_stat_map_params to derive symmetric vmin and vmax
# And colorbar limits depending on symmetric_cbar settings
cbar_vmin, cbar_vmax, vmin, vmax = \
_get_plot_stat_map_params(stat_map_faces, vmax,
symmetric_cbar, kwargs)
            if cbar == 'sequential':
if vmin is None:
vmin = stat_map_data.min()
if vmax is None:
vmax = stat_map_data.max()
if threshold is not None:
kept_indices = np.where(abs(stat_map_faces) >= threshold)[0]
stat_map_faces = stat_map_faces - vmin
stat_map_faces = stat_map_faces / (vmax-vmin)
if bg_on_stat:
face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) * face_colors[kept_indices]
else:
face_colors[kept_indices] = cmap(stat_map_faces[kept_indices])
else:
stat_map_faces = stat_map_faces - vmin
stat_map_faces = stat_map_faces / (vmax-vmin)
if bg_on_stat:
if mask is not None:
face_colors[fmask] = cmap(stat_map_faces)[fmask] * face_colors[fmask]
else:
face_colors = cmap(stat_map_faces) * face_colors
else:
if mask is not None:
face_colors[fmask] = cmap(stat_map_faces)[fmask]
else:
face_colors = cmap(stat_map_faces)
if labels is not None:
'''
labels requires a tuple of label/s, each a list/array of node indices
----------------------------------------------------------------------
color palette for labels
if label_cpal is None, outlines will be black
if it's a color palette name, a different color for each label will be generated
if it's a list of rgb or color names, these will be used
valid color names from http://xkcd.com/color/rgb/
'''
if label_cpal is not None:
if label_col is not None:
raise ValueError("Don't use label_cpal and label_col together.")
if type(label_cpal) == str:
cpal = sns.color_palette(label_cpal, len(labels))
if type(label_cpal) == list:
if len(label_cpal) < len(labels):
raise ValueError('There are not enough colors in the color list.')
try:
cpal = sns.color_palette(label_cpal)
except:
cpal = sns.xkcd_palette(label_cpal)
for n_label, label in enumerate(labels):
for n_face, face in enumerate(faces):
count = len(set(face).intersection(set(label)))
if (count > 0) & (count < 3):
if label_cpal is None:
if label_col is not None:
face_colors[n_face,0:3] = sns.xkcd_palette([label_col])[0]
else:
face_colors[n_face,0:3] = sns.xkcd_palette(["black"])[0]
else:
face_colors[n_face,0:3] = cpal[n_label]
p3dcollec.set_facecolors(face_colors)
return fig
def _get_plot_stat_map_params(stat_map_data, vmax, symmetric_cbar, kwargs,
force_min_stat_map_value=None):
    """ Internal function for setting value limits for plot_stat_map and
    plot_glass_brain.
    The limits for the colormap will always be set to range from -vmax to vmax.
    The limits for the colorbar depend on the symmetric_cbar argument, please
    refer to docstring of plot_stat_map.
    """
    import numpy as np
# make sure that the color range is symmetrical
if vmax is None or symmetric_cbar in ['auto', False]:
# Avoid dealing with masked_array:
if hasattr(stat_map_data, '_mask'):
stat_map_data = np.asarray(
stat_map_data[np.logical_not(stat_map_data._mask)])
stat_map_max = np.nanmax(stat_map_data)
        if force_min_stat_map_value is None:
stat_map_min = np.nanmin(stat_map_data)
else:
stat_map_min = force_min_stat_map_value
if symmetric_cbar == 'auto':
symmetric_cbar = stat_map_min < 0 and stat_map_max > 0
if vmax is None:
vmax = max(-stat_map_min, stat_map_max)
if 'vmin' in kwargs:
raise ValueError('this function does not accept a "vmin" '
'argument, as it uses a symmetrical range '
'defined via the vmax argument. To threshold '
'the map, use the "threshold" argument')
vmin = -vmax
if not symmetric_cbar:
negative_range = stat_map_max <= 0
positive_range = stat_map_min >= 0
if positive_range:
cbar_vmin = 0
cbar_vmax = None
elif negative_range:
cbar_vmax = 0
cbar_vmin = None
else:
cbar_vmin = stat_map_min
cbar_vmax = stat_map_max
else:
cbar_vmin, cbar_vmax = None, None
return cbar_vmin, cbar_vmax, vmin, vmax
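# Worked example (illustrative): for stat_map_data = [-2., 5.] with vmax=None and
# symmetric_cbar='auto', the data straddle zero, so symmetric_cbar evaluates to
# True, giving vmax = max(2., 5.) = 5., vmin = -5., and cbar_vmin = cbar_vmax = None
# (i.e. the colorbar shows the full symmetric range).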
def plot_surf_label(coords, faces,
labels=None,
elev=0, azim=0,
cpal='bright',
threshold=None,
bg_map=None,
bg_on_labels=False,
alpha='auto',
figsize=None,
**kwargs):
'''
- labels requires a tuple of label/s, each a list/array of node indices
- cpal takes either the name of a seaborn color palette or matplotlib color map,
or a list of rgb values or color names from http://xkcd.com/color/rgb/
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
# load mesh and derive axes limits
faces = np.array(faces, dtype=int)
limits = [coords.min(), coords.max()]
# set alpha if in auto mode
if alpha == 'auto':
if bg_map is None:
alpha = .5
else:
alpha = 1
    # if cpal is given as a string, translate to a seaborn color palette
if type(cpal) == str:
cpal = sns.color_palette(cpal, len(labels))
if type(cpal) == list:
if len(cpal) < len(labels):
raise ValueError('There are not enough colors in the color list.')
try:
cpal = sns.color_palette(cpal)
except:
cpal = sns.xkcd_palette(cpal)
# initiate figure and 3d axes
if figsize is not None:
fig = plt.figure(figsize=figsize)
else:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d', xlim=limits, ylim=limits)
ax.view_init(elev=elev, azim=azim)
ax.set_axis_off()
# plot mesh without data
p3dcollec = ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
triangles=faces, linewidth=0.,
antialiased=False,
color='white')
if bg_map is not None or labels is not None:
face_colors = np.ones((faces.shape[0], 4))
face_colors[:, :3] = .5*face_colors[:, :3]
if bg_map is not None:
bg_data = bg_map
if bg_data.shape[0] != coords.shape[0]:
raise ValueError('The bg_map does not have the same number '
'of vertices as the mesh.')
bg_faces = np.mean(bg_data[faces], axis=1)
bg_faces = bg_faces - bg_faces.min()
bg_faces = bg_faces / bg_faces.max()
face_colors = plt.cm.gray_r(bg_faces)
# modify alpha values of background
face_colors[:, 3] = alpha*face_colors[:, 3]
# color the labels, either overriding or overlaying bg_map
if labels is not None:
for n_label,label in enumerate(labels):
for n_face, face in enumerate(faces):
count = len(set(face).intersection(set(label)))
if count > 1:
if bg_on_labels:
face_colors[n_face,0:3] = cpal[n_label] * face_colors[n_face,0:3]
else:
face_colors[n_face,0:3] = cpal[n_label]
p3dcollec.set_facecolors(face_colors)
return fig
def crop_img(fig, margin=False):
# takes fig, returns image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
plt.tight_layout()
fig.savefig('./tempimage', bbox_inches='tight', orientation='landscape', pad_inches=-0.9, dpi=150)
plt.close(fig)
img = mpimg.imread('./tempimage.png')
os.remove('./tempimage.png')
'''
kept = {'rows':[], 'cols':[]}
for row in range(img.shape[0]):
if len(set(np.ndarray.flatten(img[row,:,:]))) > 1:
kept['rows'].append(row)
for col in range(img.shape[1]):
if len(set(np.ndarray.flatten(img[:,col,:]))) > 1:
kept['cols'].append(col)
if margin:
return img[min(kept['rows'])-margin:max(kept['rows'])+margin,
min(kept['cols'])-margin:max(kept['cols'])+margin]
else:
return img[kept['rows']][:,kept['cols']]
'''
return img
def create_fig(data=None, labels=None, label_col=None,
hemi=None, surf='pial',
sulc=True, alpha='auto',
cmap='jet', cpal='bright', cbar=False,
dmin=None, dmax=None,
mask=None, title=None):
import nibabel as nib, numpy as np
import matplotlib.pyplot as plt, matplotlib as mpl
from IPython.core.display import Image, display
import os
fsDir = '/afs/cbs.mpg.de/software/freesurfer/5.3.0/ubuntu-precise-amd64/subjects'
surf_f = '%s/fsaverage4/surf/%s.%s' % (fsDir, hemi, surf)
coords = nib.freesurfer.io.read_geometry(surf_f)[0]
faces = nib.freesurfer.io.read_geometry(surf_f)[1]
if sulc:
sulc_f = '%s/fsaverage4/surf/%s.sulc' % (fsDir, hemi)
sulc = nib.freesurfer.io.read_morph_data(sulc_f)
sulc_bool = True
else:
sulc = None
sulc_bool = False
# create images
imgs = []
for azim in [0, 180]:
if data is not None:
if dmin is None:
dmin = data[np.nonzero(data)].min()
if dmax is None:
dmax = data.max()
fig = plot_surf_stat_map(coords, faces, stat_map=data,
elev=0, azim=azim,
cmap=cmap,
bg_map=sulc,bg_on_stat=sulc_bool,
vmin=dmin, vmax=dmax,
labels=labels, label_col=label_col,
alpha=alpha,
mask=mask, mask_lenient=False)
#label_cpal=cpal)
else:
fig = plot_surf_label(coords, faces,
labels=labels,
elev=0, azim=azim,
bg_map=sulc,
cpal=cpal,
bg_on_labels=sulc_bool,
alpha=alpha)
# crop image
imgs.append((crop_img(fig, margin=5)),)
plt.close(fig)
# create figure with color bar
fig = plt.figure()
fig.set_size_inches(8, 4)
#ax1 = plt.subplot2grid((4,60), (0,0), colspan = 26, rowspan =4)
ax = plt.subplot2grid((4,60), (0,0), colspan = 26, rowspan =4)
plt.imshow(imgs[0])
#ax1.set_axis_off()
ax.set_axis_off()
#ax2 = plt.subplot2grid((4,60), (0,28), colspan = 26, rowspan =4)
ax = plt.subplot2grid((4,60), (0,28), colspan = 26, rowspan =4)
plt.imshow(imgs[1])
#ax2.set_axis_off()
ax.set_axis_off()
if cbar==True and data is not None:
cax = plt.subplot2grid((4,60), (1,59), colspan = 1, rowspan =2)
cmap = plt.cm.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=dmin, vmax=dmax)
cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
cb.set_ticks([dmin, dmax])
if title is not None:
fig.suptitle(title)
fig.savefig('./tempimage',dpi=150)
plt.close(fig)
display(Image(filename='./tempimage.png', width=800))
os.remove('./tempimage.png')
def create_fig_pdf(data=None, labels=None, label_col=None,
hemi=None, surf='pial',
sulc=True, alpha='auto',
cmap='jet', cpal='bright', cbar=False,
dmin=None, dmax=None,
                   mask=None, title=None, pdf=None):
import nibabel as nib, numpy as np
import matplotlib.pyplot as plt, matplotlib as mpl
from IPython.core.display import Image, display
import os
fsDir = '/afs/cbs.mpg.de/software/freesurfer/5.3.0/ubuntu-precise-amd64/subjects'
surf_f = '%s/fsaverage4/surf/%s.%s' % (fsDir, hemi, surf)
coords = nib.freesurfer.io.read_geometry(surf_f)[0]
faces = nib.freesurfer.io.read_geometry(surf_f)[1]
if sulc:
sulc_f = '%s/fsaverage4/surf/%s.sulc' % (fsDir, hemi)
sulc = nib.freesurfer.io.read_morph_data(sulc_f)
sulc_bool = True
else:
sulc = None
sulc_bool = False
# create images
imgs = []
for azim in [0, 180]:
if data is not None:
if dmin is None:
dmin = data[np.nonzero(data)].min()
if dmax is None:
dmax = data.max()
fig = plot_surf_stat_map(coords, faces, stat_map=data,
elev=0, azim=azim,
cmap=cmap,
bg_map=sulc,bg_on_stat=sulc_bool,
vmin=dmin, vmax=dmax,
labels=labels, label_col=label_col,
alpha=alpha,
mask=mask, mask_lenient=False)
#label_cpal=cpal)
else:
fig = plot_surf_label(coords, faces,
labels=labels,
elev=0, azim=azim,
bg_map=sulc,
cpal=cpal,
bg_on_labels=sulc_bool,
alpha=alpha)
# crop image
imgs.append((crop_img(fig, margin=15)),)
plt.close(fig)
# create figure with color bar
fig = plt.figure()
fig.set_size_inches(8, 4)
ax1 = plt.subplot2grid((4,60), (0,0), colspan = 26, rowspan =4)
plt.imshow(imgs[0])
ax1.set_axis_off()
ax2 = plt.subplot2grid((4,60), (0,28), colspan = 26, rowspan =4)
plt.imshow(imgs[1])
ax2.set_axis_off()
if cbar==True and data is not None:
cax = plt.subplot2grid((4,60), (1,59), colspan = 1, rowspan =2)
cmap = plt.cm.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=dmin, vmax=dmax)
cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
cb.set_ticks([dmin, dmax])
if title is not None:
fig.suptitle(title)
    if pdf is not None:
        # `pdf` is assumed to be an open matplotlib PdfPages object supplied by the caller
        pdf.savefig()
    plt.close(fig)
def create_fig_tojpeg(data=None, labels=None, label_col=None,
mesh = 'fsaverage4', mwall = False,
hemi=None, surf='pial',
sulc=True, alpha='auto',
cmap='jet', cpal='bright', cbar=False,
dmin=None, dmax=None,
mask=None, title=None, index = '', fname=None):
import nibabel as nib, numpy as np
import matplotlib.pyplot as plt, matplotlib as mpl
from IPython.core.display import Image, display
import os
coords = {'lh':None,'rh':None}
faces={'lh':None, 'rh':None}
sulc={'lh': None, 'rh':None}
fsDir = '/afs/cbs.mpg.de/software/freesurfer/5.3.0/ubuntu-precise-amd64/subjects'
surf_f_lh = '%s/%s/surf/lh.%s' % (fsDir, mesh, surf)
surf_f_rh = '%s/%s/surf/rh.%s' % (fsDir, mesh, surf)
coords['lh'] = nib.freesurfer.io.read_geometry(surf_f_lh)[0]
faces['lh'] = nib.freesurfer.io.read_geometry(surf_f_lh)[1]
coords['rh'] = nib.freesurfer.io.read_geometry(surf_f_rh)[0]
faces['rh'] = nib.freesurfer.io.read_geometry(surf_f_rh)[1]
nvph = coords['lh'].shape[0]
ind={'lh': range(nvph), 'rh': range(nvph,nvph*2)}
if mwall == False: # if the medial wall vertices are NOT present
lhcort = np.sort(nib.freesurfer.io.read_label('%s/%s/label/lh.cortex.label' % (fsDir, mesh)))
rhcort = np.sort(nib.freesurfer.io.read_label('%s/%s/label/rh.cortex.label' % (fsDir, mesh)))+nvph
cortex = np.hstack([lhcort,rhcort])
nsub = data.shape[0]-len(cortex)
subcortical = range(nvph*2, nvph*2+nsub)
vv = np.concatenate([cortex, subcortical])
data_new = np.zeros([nvph*2+nsub, data.shape[1]])
data_new[vv,:] = data
data = data_new
if sulc:
sulc_f_lh = '%s/%s/surf/lh.sulc' % (fsDir, mesh)
sulc_f_rh = '%s/%s/surf/rh.sulc' % (fsDir, mesh)
sulc['lh'] = nib.freesurfer.io.read_morph_data(sulc_f_lh)
sulc['rh'] = nib.freesurfer.io.read_morph_data(sulc_f_rh)
sulc_bool = True
else:
sulc = None
sulc_bool = False
if dmin is None:
dmin_calc = True
else:
dmin_calc = False
if dmax is None:
dmax_calc = True
else:
dmax_calc = False
    # create images
    # NOTE (assumption): the original body referenced an undefined component
    # index `c` and the lists `dmins`/`dmaxs`; a single component (c = 0) is
    # assumed here, mirroring create_template_fig.
    c = 0
    imgs = []
    dmins = []
    dmaxs = []
for hemi in ['lh','rh']:
for azim in [0, 180]:
if data is not None:
if dmin_calc is True:
dmin = data[:,c].min()
dmins.append(dmin)
else:
dmins.append(dmin)
if dmax_calc is True:
dmax = data[:,c].max()
dmaxs.append(dmax)
else:
dmaxs.append(dmax)
fig = plot_surf_stat_map(coords[hemi], faces[hemi], stat_map=data[ind[hemi],c],
elev=0, azim=azim,
cmap=cmap,
bg_map=sulc[hemi], bg_on_stat=sulc_bool,
vmin=dmin, vmax=dmax,
labels=labels, label_col=label_col,
alpha=alpha,
mask=mask, mask_lenient=False)
#label_cpal=cpal)
else:
fig = plot_surf_label(coords[hemi], faces[hemi],
labels=labels,
elev=0, azim=azim,
bg_map=sulc[hemi],
cpal=cpal,
bg_on_labels=sulc_bool,
alpha=alpha)
# crop image
imgs.append((crop_img(fig, margin=15)),)
plt.close(fig)
    # create figure with color bar
    fig = plt.figure()
    fig.set_size_inches(8, 8)
    # grid dimensions were undefined in the original; the values below follow
    # create_template_fig for a single component (assumption)
    nrows = 16
    ncols = 150
for i in range(len(imgs)):
row = int(np.floor(i/2))
col = np.mod(i,2)
comp = int(np.floor(i/4))
r = row*8
c = col*70 + 10
ax = plt.subplot2grid((nrows, ncols), (r,c), colspan = 52, rowspan = 8)
plt.imshow(imgs[i])
ax.set_axis_off()
#print "i = %d, comp %d, row %d, col %d" % (i, comp, row, col)
if i == (1 + comp*4) and cbar == True:
#print 'Yay!'
cax = plt.subplot2grid((nrows,ncols), (1+row*8,137), colspan = 2, rowspan = 14)
cmap = plt.cm.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=dmins[i], vmax=dmaxs[i])
cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
cb.set_ticks([dmins[i], dmaxs[i]])
ax = plt.subplot2grid((nrows, ncols), (row*8,0), colspan = 2, rowspan = 14)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
plt.axis('off')
plt.text(0, 0, "Diffusion map %d" % (comp+1), ha='center', va='bottom', rotation='vertical', size=18, alpha=.5)
if title is not None:
fig.suptitle(title, fontsize=20, alpha=0.8)
plt.savefig(fname + str(index) + ".png",dpi=300)
plt.close(fig)
def create_dense_fig(data=None, data_realigned=None,
mesh = 'fsaverage4', n_comps=3, mwall = False,
labels=None, label_col=None,
hemi=None, surf='inflated',
sulc=True, alpha='auto',
cmap='jet', cpal='bright', cbar=False,
dmin=None, dmax=None,
mask=None, title=None,
pdf=None, subj=None,
c1t="Column 1", c2t="Column 2"):
import nibabel as nib, numpy as np
import matplotlib.pyplot as plt, matplotlib as mpl
import matplotlib.gridspec as gridspec
from IPython.core.display import Image, display
import os
from matplotlib.backends.backend_pdf import PdfPages
coords = {'lh':None,'rh':None}
faces={'lh':None, 'rh':None}
sulc={'lh': None, 'rh':None}
fsDir = '/afs/cbs.mpg.de/software/freesurfer/5.3.0/ubuntu-precise-amd64/subjects'
surf_f_lh = '%s/%s/surf/lh.%s' % (fsDir, mesh, surf)
surf_f_rh = '%s/%s/surf/rh.%s' % (fsDir, mesh, surf)
coords['lh'] = nib.freesurfer.io.read_geometry(surf_f_lh)[0]
faces['lh'] = nib.freesurfer.io.read_geometry(surf_f_lh)[1]
coords['rh'] = nib.freesurfer.io.read_geometry(surf_f_rh)[0]
faces['rh'] = nib.freesurfer.io.read_geometry(surf_f_rh)[1]
nvph = coords['lh'].shape[0]
ind={'lh': range(nvph), 'rh': range(nvph,nvph*2)}
if mwall == False: # if the medial wall vertices are NOT present
lhcort = np.sort(nib.freesurfer.io.read_label('%s/%s/label/lh.cortex.label' % (fsDir, mesh)))
rhcort = np.sort(nib.freesurfer.io.read_label('%s/%s/label/rh.cortex.label' % (fsDir, mesh)))+nvph
cortex = np.hstack([lhcort,rhcort])
nsub = data.shape[0]-len(cortex)
subcortical = range(nvph*2, nvph*2+nsub)
vv = np.concatenate([cortex, subcortical])
data_new = np.zeros([nvph*2+nsub, data.shape[1]])
data_new[vv,:] = data
data = data_new
data2_new = np.zeros([nvph*2+nsub, data_realigned.shape[1]])
data2_new[vv,:] = data_realigned
data_realigned = data2_new
if sulc:
sulc_f_lh = '%s/%s/surf/lh.sulc' % (fsDir, mesh)
sulc_f_rh = '%s/%s/surf/rh.sulc' % (fsDir, mesh)
sulc['lh'] = nib.freesurfer.io.read_morph_data(sulc_f_lh)
sulc['rh'] = nib.freesurfer.io.read_morph_data(sulc_f_rh)
sulc_bool = True
else:
sulc = None
sulc_bool = False
if dmin is None:
dmin_calc = True
else:
dmin_calc = False
if dmax is None:
dmax_calc = True
else:
dmax_calc = False
# create images
imgs = []
dmins = []
dmaxs = []
for c in range(n_comps):
for hemi in ['lh','rh']:
for azim in [0, 180]:
if data is not None:
if dmin_calc is True:
dmin = data[:,c].min()
dmins.append(dmin)
else:
dmins.append(dmin)
if dmax_calc is True:
dmax = data[:,c].max()
dmaxs.append(dmax)
else:
dmaxs.append(dmax)
fig = plot_surf_stat_map(coords[hemi], faces[hemi], stat_map=data[ind[hemi],c],
elev=0, azim=azim,
cmap=cmap,
bg_map=sulc[hemi], bg_on_stat=sulc_bool,
vmin=dmin, vmax=dmax,
labels=labels, label_col=label_col,
alpha=alpha,
mask=mask, mask_lenient=False)
#label_cpal=cpal)
else:
fig = plot_surf_label(coords[hemi], faces[hemi],
labels=labels,
elev=0, azim=azim,
bg_map=sulc[hemi],
cpal=cpal,
bg_on_labels=sulc_bool,
alpha=alpha)
# crop image
imgs.append((crop_img(fig, margin=15)),)
plt.close(fig)
for azim in [0, 180]:
if data_realigned is not None:
if dmin_calc is True:
dmin = data_realigned[:,c].min()
dmins.append(dmin)
else:
dmins.append(dmin)
if dmax_calc is True:
dmax = data_realigned[:,c].max()
dmaxs.append(dmax)
else:
dmaxs.append(dmax)
fig = plot_surf_stat_map(coords[hemi], faces[hemi], stat_map=data_realigned[ind[hemi],c],
elev=0, azim=azim,
cmap=cmap,
bg_map=sulc[hemi], bg_on_stat=sulc_bool,
vmin=dmin, vmax=dmax,
labels=labels, label_col=label_col,
alpha=alpha,
mask=mask, mask_lenient=False)
#label_cpal=cpal)
else:
fig = plot_surf_label(coords[hemi], faces[hemi],
labels=labels,
elev=0, azim=azim,
bg_map=sulc[hemi],
cpal=cpal,
bg_on_labels=sulc_bool,
alpha=alpha)
# crop image
imgs.append((crop_img(fig, margin=15)),)
plt.close(fig)
# create figure with color bar
fig = plt.figure()
fig.set_size_inches(10, 3*n_comps)
nrows = 8*n_comps+2+15
ncols = 150
ax = plt.subplot2grid((nrows, ncols), (0,38), colspan = 20, rowspan =1)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
plt.axis('off')
plt.text(0, 0, c1t, ha='center', va='center', size=20, alpha=.5)
ax = plt.subplot2grid((nrows, ncols), (0,110), colspan = 20, rowspan =1)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
plt.axis('off')
plt.text(0, 0, c2t, ha='center', va='center', size=20, alpha=.5)
for i in range(len(imgs)):
row = int(np.floor(i/4))
col = np.mod(i,4)
comp = int(np.floor(i/8))
r = 2+row*4
c = col*30 + int(np.round((col+1)/4.))*10 + 10
ax = plt.subplot2grid((nrows, ncols), (r,c), colspan = 26, rowspan =4)
plt.imshow(imgs[i])
ax.set_axis_off()
#print "i = %d, comp %d, row %d, col %d" % (i, comp, row, col)
if i == (1 + comp*8) and cbar == True:
cax = plt.subplot2grid((nrows,ncols), (3+row*4,67), colspan = 2, rowspan = 6)
cmap = plt.cm.get_cmap(cmap)
if dmin_calc == True:
norm = mpl.colors.Normalize(vmin=dmins[i], vmax=dmaxs[i])
else:
norm = mpl.colors.Normalize(vmin=dmin, vmax=dmax)
cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
cb.set_ticks([dmins[i], dmaxs[i]])
ax = plt.subplot2grid((nrows, ncols), (3+row*4,0), colspan = 2, rowspan = 6)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
plt.axis('off')
plt.text(0, 0, "Map %d" % (comp+1), ha='center', va='bottom', rotation='vertical', size=18, alpha=.5)
if i == (3 + comp*8) and cbar == True:
cax = plt.subplot2grid((nrows,ncols), (3+row*4,137), colspan = 2, rowspan = 6)
cmap = plt.cm.get_cmap(cmap)
if dmin_calc == True:
norm = mpl.colors.Normalize(vmin=dmins[i], vmax=dmaxs[i])
else:
norm = mpl.colors.Normalize(vmin=dmin, vmax=dmax)
cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
cb.set_ticks([dmins[i], dmaxs[i]])
if subj is not None:
cormat = np.load("corrmats/3back/subject%d_3back.npy" % subj)
ax = plt.subplot2grid((nrows, ncols), (8*n_comps+3,0), colspan = 150, rowspan = 15)
cax = ax.matshow(cormat, cmap=cmap)
plt.colorbar(cax)
if title is not None:
fig.suptitle(title, fontsize=20, alpha=0.8)
#plt.tight_layout()
if pdf is not None:
#pp = PdfPages(fname)
pdf.savefig()
#pp.close()
plt.close(fig)
else:
fig.savefig('./tempimage',dpi=150)
plt.close(fig)
display(Image(filename='./tempimage.png', width=800))
os.remove('./tempimage.png')
def create_template_fig(data=None, n_comps=3,
mesh = 'fsaverage4', mwall = False,
labels=None, label_col=None,
hemi=None, surf='inflated',
sulc=True, alpha='auto',
cmap='jet', cpal='bright', cbar=False,
dmin=None, dmax=None,
mask=None, title=None,
pdf=None, subj=None):
import nibabel as nib, numpy as np
import matplotlib.pyplot as plt, matplotlib as mpl
import matplotlib.gridspec as gridspec
from IPython.core.display import Image, display
import os
from matplotlib.backends.backend_pdf import PdfPages
coords = {'lh':None,'rh':None}
faces={'lh':None, 'rh':None}
sulc={'lh': None, 'rh':None}
fsDir = '/afs/cbs.mpg.de/software/freesurfer/5.3.0/ubuntu-precise-amd64/subjects'
surf_f_lh = '%s/%s/surf/lh.%s' % (fsDir, mesh, surf)
surf_f_rh = '%s/%s/surf/rh.%s' % (fsDir, mesh, surf)
coords['lh'] = nib.freesurfer.io.read_geometry(surf_f_lh)[0]
faces['lh'] = nib.freesurfer.io.read_geometry(surf_f_lh)[1]
coords['rh'] = nib.freesurfer.io.read_geometry(surf_f_rh)[0]
faces['rh'] = nib.freesurfer.io.read_geometry(surf_f_rh)[1]
nvph = coords['lh'].shape[0]
ind={'lh': range(nvph), 'rh': range(nvph,nvph*2)}
if mwall == False: # if the medial wall vertices are NOT present
lhcort = np.sort(nib.freesurfer.io.read_label('%s/%s/label/lh.cortex.label' % (fsDir, mesh)))
rhcort = np.sort(nib.freesurfer.io.read_label('%s/%s/label/rh.cortex.label' % (fsDir, mesh)))+nvph
cortex = np.hstack([lhcort,rhcort])
nsub = data.shape[0]-len(cortex)
subcortical = range(nvph*2, nvph*2+nsub)
vv = np.concatenate([cortex, subcortical])
data_new = np.zeros([nvph*2+nsub, data.shape[1]])
data_new[vv,:] = data
data = data_new
if sulc:
sulc_f_lh = '%s/%s/surf/lh.sulc' % (fsDir, mesh)
sulc_f_rh = '%s/%s/surf/rh.sulc' % (fsDir, mesh)
sulc['lh'] = nib.freesurfer.io.read_morph_data(sulc_f_lh)
sulc['rh'] = nib.freesurfer.io.read_morph_data(sulc_f_rh)
sulc_bool = True
else:
sulc = None
sulc_bool = False
if dmin is None:
dmin_calc = True
else:
dmin_calc = False
if dmax is None:
dmax_calc = True
else:
dmax_calc = False
# create images
imgs = []
dmins = []
dmaxs = []
for c in range(n_comps):
for hemi in ['lh','rh']:
for azim in [0, 180]:
if data is not None:
if dmin_calc is True:
dmin = data[:,c].min()
dmins.append(dmin)
else:
dmins.append(dmin)
if dmax_calc is True:
dmax = data[:,c].max()
dmaxs.append(dmax)
else:
dmaxs.append(dmax)
fig = plot_surf_stat_map(coords[hemi], faces[hemi], stat_map=data[ind[hemi],c],
elev=0, azim=azim,
cmap=cmap,
bg_map=sulc[hemi], bg_on_stat=sulc_bool,
vmin=dmin, vmax=dmax,
labels=labels, label_col=label_col,
alpha=alpha,
mask=mask, mask_lenient=False)
#label_cpal=cpal)
else:
fig = plot_surf_label(coords[hemi], faces[hemi],
labels=labels,
elev=0, azim=azim,
bg_map=sulc[hemi],
cpal=cpal,
bg_on_labels=sulc_bool,
alpha=alpha)
# crop image
imgs.append((crop_img(fig, margin=5)))
plt.close(fig)
# create figure with color bar
fig = plt.figure()
fig.set_size_inches(6, 3*n_comps)
nrows = 16*n_comps
ncols = 150
for i in range(len(imgs)):
row = int(np.floor(i/2))
col = np.mod(i,2)
comp = int(np.floor(i/4))
r = row*8
c = col*70 + 10
ax = plt.subplot2grid((nrows, ncols), (r,c), colspan = 52, rowspan = 8)
plt.imshow(imgs[i])
ax.set_axis_off()
#print "i = %d, comp %d, row %d, col %d" % (i, comp, row, col)
if i == (1 + comp*4) and cbar == True:
#print 'Yay!'
cax = plt.subplot2grid((nrows,ncols), (1+row*8,137), colspan = 2, rowspan = 14)
cmap = plt.cm.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=dmins[i], vmax=dmaxs[i])
cb = mpl.colorbar.ColorbarBase(cax, cmap=cmap, norm=norm)
cb.set_ticks([dmins[i], dmaxs[i]])
ax = plt.subplot2grid((nrows, ncols), (row*8,0), colspan = 2, rowspan = 14)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
plt.axis('off')
plt.text(0, 0, "Diffusion map %d" % (comp+1), ha='center', va='bottom', rotation='vertical', size=18, alpha=.5)
if title is not None:
fig.suptitle(title, fontsize=20, alpha=0.8)
#plt.tight_layout()
if pdf is not None:
#pp = PdfPages(fname)
pdf.savefig()
#pp.close()
plt.close(fig)
else:
fig.savefig('./tempimage',dpi=150)
plt.close(fig)
display(Image(filename='./tempimage.png', width=800))
os.remove('./tempimage.png')
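# ---------------------------------------------------------------------------
# Minimal usage sketch for plot_surf_stat_map / plot_surf_label (assumption:
# the tetrahedron mesh and per-vertex values below are synthetic stand-ins for
# FreeSurfer geometry, used only so the sketch runs without external data).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    demo_coords = np.array([[0., 0., 0.],
                            [1., 0., 0.],
                            [0., 1., 0.],
                            [0., 0., 1.]])
    demo_faces = np.array([[0, 1, 2],
                           [0, 1, 3],
                           [0, 2, 3],
                           [1, 2, 3]])
    demo_stat = np.array([0.1, 0.5, -0.3, 0.9])   # one value per vertex
    fig = plot_surf_stat_map(demo_coords, demo_faces, stat_map=demo_stat,
                             cmap='jet', cbar='sequential')
    fig.savefig('demo_surf_stat_map.png', dpi=100)
    # labels is a tuple of index arrays, one entry per label to outline/fill
    fig = plot_surf_label(demo_coords, demo_faces,
                          labels=(np.array([0, 1, 2]),))
    fig.savefig('demo_surf_label.png', dpi=100)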
|
|
#
#
# Atrous Python XTension
#
# Copyright (C) 2014 Egor Zindy <egor.zindy@manchester.ac.uk>, MIT license
#
# <CustomTools>
# <Menu name = "Python plugins">
# <Item name="Wavelet analysis" icon="Python" tooltip="Wavelet analysis for Imaris (2D and 3D kernels).">
# <Command>PythonXT::XTAtrous(%i)</Command>
# </Item>
# </Menu>
# </CustomTools>
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
import hotswap
import AtrousDialog
import Tkinter as tk
import ttk
import ImarisLib
import BridgeLib
import time
import numpy as np
import libatrous
###########################################################################
## Main application module
###########################################################################
class MyModule:
def __init__(self,vImaris):
self.vImaris = vImaris
#Use a clone
self.vDataSet = vImaris.GetDataSet().Clone()
#Keep all these in memory
self.vdataset_nt = self.vDataSet.GetSizeT()
self.vdataset_nx = self.vDataSet.GetSizeX()
self.vdataset_ny = self.vDataSet.GetSizeY()
self.vdataset_nz = self.vDataSet.GetSizeZ()
self.vdataset_nc = self.vDataSet.GetSizeC()
resx, resy, resz = BridgeLib.GetVoxelSize(self.vDataSet)
#Setting the grid resolution
libatrous.set_grid(resx, resy, resz)
self.wavelet_data = None
#The raw data (might take some time to load, so keep it in memory)
self.raw_data = None
#For now, only save the parameters for the current channel. When changing channel, this data is erased.
self.arrayvar_last = None
self.InitDialog()
def InitDialog(self):
#Build the dialog
self.Dialog=AtrousDialog.AtrousDialog()
self.Dialog.set_icon(BridgeLib.GetIcon())
self.Dialog.ExitOK = self.ExitOK
self.Dialog.ExitCancel = self.ExitCancel
self.Dialog.Update = self.Update
self.Dialog.Preview = self.Preview
self.Dialog.Calculate = self.Calculate
self.names = []
self.indexes = []
self.indexdic = {}
for i in range(self.vdataset_nc):
cname = self.vDataSet.GetChannelName(i)
if ' (filtered)' in cname:
continue
elif cname == '' or cname == '(name not specified)':
cname = 'Channel %d' % (i+1)
self.vDataSet.SetChannelName(i,cname)
self.names.append(cname)
self.indexes.append(i)
self.indexdic[cname] = i
self.current_channel = 0
#Set filters and current filter
self.Dialog.SetKernels(libatrous.get_names(),0)
#Set channels and current channel
self.Dialog.SetChannels(self.names,self.current_channel)
#Threshold scale
self.SetThresholdScales()
self.Dialog.mainloop()
def SetThresholdScales(self,channel=None):
#The threshold values
if channel is None:
channel = self.current_channel
michan = self.vDataSet.GetChannelRangeMin(channel)
machan = self.vDataSet.GetChannelRangeMax(channel)
self.Dialog.ctrl_low_thresh.config(from_=michan, to=machan,tickinterval=(machan-michan)/8.)
self.Dialog.ctrl_high_thresh.config(from_=michan, to=machan,tickinterval=(machan-michan)/8.)
self.Dialog.arrayvar["low_thresh"] =michan
self.Dialog.arrayvar["high_thresh"] = machan
def GetUpdated(self,old,new):
"""Check which parameters have changed between old and new dic"""
return [x for x in set(old) & set(new) if old[x] != new[x]]
def Update(self, arrayvar, elementname):
'''Show the new value in a label'''
if elementname == "channel":
channel = self.indexdic[arrayvar[elementname]]
self.current_channel = channel
#Corresponding channel_out - Get any json information from its description
#We do not want to create a new channel at this point
channel_out = self.GetMatchedChannel(self.current_channel, create=False)
if channel_out != -1:
json = BridgeLib.GetChannelDescription(self.vDataSet,channel_out)
if json != "":
self.Dialog.arrayvar.set_json(json)
self.arrayvar_last = None
elif elementname == "check_channel":
self.arrayvar_last = None
if 1:
pass
#any condition (for now)
#channel_out = self.GetMatchedChannel(self.current_channel)
#print("updating threshold",self.current_channel,channel_out,elementname)
#self.SetThresholdScales(channel_out)
#Do we need to preview?
if (arrayvar["check_liveview"] == "on"):
self.Preview()
def GetMatchedChannel(self,cindex,create=True):
"""Finds the matched filtered channel for a particular channel index.
If none found and the keyword create is set to true, this method will create a new channel and return its new index"""
ret = -1
cname = self.vDataSet.GetChannelName(cindex)
nc = self.vDataSet.GetSizeC()
for i in range(nc):
name = self.vDataSet.GetChannelName(i)
if name == cname+" (filtered)":
ret = i
break
if create == True and ret == -1:
self.vDataSet.SetSizeC(nc+1)
ret = nc
self.vDataSet.SetChannelName(ret,cname+" (filtered)")
rgba = self.vDataSet.GetChannelColorRGBA(cindex)
#rgba = rgba ^ 0x00ffffff
self.vDataSet.SetChannelColorRGBA(ret,rgba)
return ret
def Calculate(self,preview=False):
"""Bulk of the calculation
preview: current timepoint, otherwise, calculate all
when applying threshold: In preview, will leave visibility alone
otherwise, turn visibility off during data transfer to Imaris (faster)
"""
#Check between last and current, what actually needs recomputing.
arrayvar = self.Dialog.arrayvar.get()
list_filters = libatrous.get_names()
kernel_type = list_filters.index(arrayvar["kernel_type"])
kernel = libatrous.get_kernel(kernel_type)
low_scale = int(arrayvar["low_scale"])
high_scale = int(arrayvar["high_scale"])
low_thresh = float(arrayvar["low_thresh"])
high_thresh = float(arrayvar["high_thresh"])
check_invert = (arrayvar["check_invert"] == "on")
check_lowpass = (arrayvar["check_lowpass"] == "on")
check_threshold = (arrayvar["check_threshold"] == "on")
#check_delete = (arrayvar["check_delete"] == "on")
check_normalise = (arrayvar["check_normalise"] == "on")
check_delete = False
#Two things we need to check. Do we need to update both wavelet and threshold
#Do we need to do one time point or all.
update_wavelet = False
update_threshold = False
#All the channels or just the current selected channel (from dropdown menu)
if self.Dialog.arrayvar["check_channel"] == "on":
channel_indexes = self.indexes
else:
channel_indexes = [self.current_channel]
#This is a sure sign that we have NOT done any calculations yet
if self.arrayvar_last is None:
update_wavelet = True
update_threshold = True
self.wavelet_data = {}
self.raw_data = {}
else:
changed = set(self.GetUpdated(self.arrayvar_last, arrayvar))
if set(["kernel_type", "check_invert", "check_lowpass", "low_scale", "high_scale"]) & changed:
update_wavelet = True
update_threshold = True
#elif set(["low_thresh", "high_thresh", "check_threshold", "check_delete", "check_normalise"]) & changed:
elif set(["low_thresh", "high_thresh", "check_threshold", "check_normalise"]) & changed:
update_threshold = True
#Do we need to update any of the wavelet data?
for channel in channel_indexes:
if channel not in self.wavelet_data.keys():
tps = range(self.vdataset_nt)
#Do the same thing both for the wavelet data and the raw data
self.raw_data[channel] = [None]*self.vdataset_nt
self.wavelet_data[channel] = [None]*self.vdataset_nt
#Now, this is where we define what the timepoints are. That depends on preview (single timepoint)
#Updating the preview keyword makes sure we only process one timepoint in preview
#... then process all the timepoints the second time round
if preview:
arrayvar["preview"] = True
tp = self.vImaris.GetVisibleIndexT()
tps = [tp]
                # When merely seeking to a new timepoint, check whether a filtered
                # image is already cached for it; if not, recompute.
if self.wavelet_data[channel][tp] is None:
update_wavelet = True
update_threshold = True
else:
arrayvar["preview"] = False
tps = range(self.vdataset_nt)
if self.arrayvar_last is not None and self.arrayvar_last["preview"]==True and preview==False:
update_wavelet = True
update_threshold = True
############################################################
# Update the wavelet if needed
############################################################
if update_wavelet:
miarr,maarr = None, None
for channel in channel_indexes:
michan,machan = BridgeLib.GetRange(self.vDataSet,channel)
for tp in tps:
if self.raw_data[channel][tp] is not None:
dataset = self.raw_data[channel][tp]
else:
if self.vdataset_nz == 1:
dataset = BridgeLib.GetDataSlice(self.vDataSet,0,channel,tp).astype(np.float32)
else:
dataset = BridgeLib.GetDataVolume(self.vDataSet,channel,tp).astype(np.float32)
if check_invert:
dataset = machan - dataset
self.raw_data[channel][tp] = dataset
atrous_sub = libatrous.get_bandpass(dataset,low_scale-1,high_scale-1,kernel,check_lowpass)
mi = np.min(atrous_sub)
ma = np.max(atrous_sub)
if miarr is None or mi < miarr:
miarr = mi
if maarr is None or ma > maarr:
maarr = ma
self.wavelet_data[channel][tp] = atrous_sub
self.Dialog.ctrl_progress["value"]=(100.*(tp/float(len(tps)*len(channel_indexes))))
self.Dialog.ctrl_progress.update()
time.sleep(0.2)
self.Dialog.ctrl_progress["value"]=0
self.Dialog.ctrl_progress.update()
self.Dialog.ctrl_low_thresh.config(from_=miarr, to=maarr,tickinterval=(maarr-miarr)/8.)
self.Dialog.ctrl_high_thresh.config(from_=miarr, to=maarr,tickinterval=(maarr-miarr)/8.)
############################################################
# Update the threshold if needed
############################################################
if update_threshold:
channel_visibility = []
if preview == False:
for i in range(self.vdataset_nc):
channel_visibility.append(self.vImaris.GetChannelVisibility(i))
self.vImaris.SetChannelVisibility(i,0)
i = 0
for channel in channel_indexes:
michan,machan = BridgeLib.GetRange(self.vDataSet,channel)
channel_out = self.GetMatchedChannel(channel)
#Update the channel description...
BridgeLib.SetChannelDescription(self.vDataSet,channel_out,self.Dialog.arrayvar.get_json())
for tp in tps:
array_out = self.wavelet_data[channel][tp].copy()
miarr = array_out.min()
maarr = array_out.max()
#do threshold
if check_threshold:
mi, ma = low_thresh, high_thresh
else:
mi, ma = miarr, maarr
if mi == ma:
ma = mi + (maarr-miarr)/100.
if check_delete:
pass
#array_out[array_out < mi] = mi
#deleting objects brighter than max value
#array_out[array_out > ma] = mi
else:
#we really don't want any hot background.
if check_normalise:
#when normalising, this isn't an issue as normalisation occurs after clipping.
array_out[array_out < mi] = mi
else:
#Here, we are not normalising so values below the minimum value mi should be set to 0 rather than the mi value...
#unless mi is itself below 0 (although that would be an odd thing to do, but still need to be accounted for).
if mi < 0:
zeromi = mi
else:
zeromi = 0
array_out[array_out < mi] = zeromi
array_out[array_out > ma] = ma
#do normalisation
mi = np.min(array_out)
ma = np.max(array_out)
if check_normalise:
array_out = (machan-michan)*(array_out-mi)/(ma-mi)+michan
mi = michan
ma = machan
#convert the data back to the original format...
array_out = array_out.astype(BridgeLib.GetType(self.vDataSet))
#push back
if self.vdataset_nz == 1:
BridgeLib.SetDataSlice(self.vDataSet,array_out,0,channel_out,tp)
else:
BridgeLib.SetDataVolume(self.vDataSet,array_out,channel_out,tp)
self.vDataSet.SetChannelRange(channel_out,int(round(mi)),int(round(ma)))
self.Dialog.ctrl_progress["value"]=(100.*(tp/float(len(tps)*len(channel_indexes))))
self.Dialog.ctrl_progress.update()
self.vImaris.SetDataSet(self.vDataSet)
if preview == False:
for i in range(self.vdataset_nc):
self.vImaris.SetChannelVisibility(i,channel_visibility[i])
time.sleep(0.2)
self.Dialog.ctrl_progress["value"]=0
self.Dialog.ctrl_progress.update()
#Keeping the arrayvar values
self.arrayvar_last = arrayvar
def Preview(self):
self.Calculate(preview=True)
def ExitOK(self):
'''OK button action'''
self.Dialog.destroy()
exit(0)
def ExitCancel(self):
'''Cancel button action'''
self.Dialog.destroy()
exit(0)
def XTAtrous(aImarisId):
# Create an ImarisLib object
vImarisLib = ImarisLib.ImarisLib()
# Get an imaris object with id aImarisId
vImaris = vImarisLib.GetApplication(aImarisId)
# Check if the object is valid
if vImaris is None:
print("Could not connect to Imaris!")
exit(1)
vDataSet = vImaris.GetDataSet()
if vDataSet is None:
print("No data available!")
exit(1)
#The hotswap module watcher...
_watcher = hotswap.ModuleWatcher()
_watcher.run()
aModule = MyModule(vImaris)
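# ---------------------------------------------------------------------------
# Standalone sketch of the wavelet band-pass step used in Calculate() above,
# outside of Imaris (assumptions: libatrous accepts a float32 numpy volume and
# the call signatures match the ones used in this file; the random test volume
# and voxel size are made up).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_volume = np.random.poisson(5, size=(16, 64, 64)).astype(np.float32)
    libatrous.set_grid(0.1, 0.1, 0.5)        # voxel size in x, y and z
    kernel = libatrous.get_kernel(0)          # first available kernel type
    # keep wavelet scales 2-4 and drop the residual low-pass plane
    bandpass = libatrous.get_bandpass(demo_volume, 1, 3, kernel, False)
    print("band-pass range: %.2f .. %.2f" % (bandpass.min(), bandpass.max()))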
|
|
import imp
import time
import yaml
import ConfigParser
from emonitor.extensions import db
from sqlalchemy.orm.collections import attribute_mapped_collection
from emonitor.modules.settings.settings import Settings
class AlarmType(db.Model):
"""AlarmType class"""
__tablename__ = 'alarmtypes'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(32))
keywords = db.Column(db.Text, default='')
interpreter = db.Column(db.String(64))
sections = db.relationship("AlarmSection", collection_class=attribute_mapped_collection('id'), cascade="all, delete-orphan")
_translations = db.Column('translations', db.Text)
_attributes = db.Column('attributes', db.Text)
def __repr__(self):
return self.name
def __init__(self, name, interpreter, keywords='', translations='', attributes=''):
self.name = name
self.keywords = keywords
self.interpreter = interpreter
self._translations = translations
self._attributes = attributes
@staticmethod
def buildFromConfigFile(cfile):
"""
create alarmtype from ini file definition
        :param cfile: open file-like object containing the .ini type definition
        :return: alarmtype instance, or None if required options are missing
"""
from emonitor.modules.alarms.alarmsection import AlarmSection
_cfg = ConfigParser.ConfigParser()
_cfg.readfp(cfile)
if not _cfg.has_section('global'): # global section needed
return None
else:
if 'name' not in _cfg.options('global') or 'class' not in _cfg.options('global'):
return None
alarmtype = AlarmType(_cfg.get('global', 'name'), _cfg.get('global', 'class'))
alarmtype.keywords = _cfg.get('global', 'keywords').replace(u';', u'\r\n')
t = {}
for _k, _v in yaml.safe_load(_cfg.get('global', 'translations')).items():
t[_k.strip()] = _v.strip().encode('utf-8')
alarmtype.translations = t
t = {}
for item in [i for i in _cfg.items('global') if i[0] not in ['keywords', 'name', 'class', 'translations']]:
t[item[0]] = item[1]
alarmtype.attributes = t
for section in _cfg.sections():
if section != 'global': # global parameters
if 'name' not in _cfg.options(section): # required attribute name missing in section
return None
_params = {}
for p in [param for param in _cfg.options(section) if param not in ['name', 'method']]:
_params[p] = _cfg.get(section, p)
if 'method' in [k[0] for k in _cfg.items(section)]:
_method = _cfg.get(section, 'method')
else:
_method = ""
alarmtype.sections[_cfg.sections().index(section)] = AlarmSection(alarmtype.id, _cfg.get(section, 'name').decode('utf-8'), section, 1, _method, _cfg.sections().index(section), attributes=yaml.safe_dump(_params, encoding="utf-8"))
return alarmtype
def getConfigFile(self):
"""
build config file from type definition in database
:return: string in .ini format with [global] and fields as section
"""
class MyConfigParser(ConfigParser.ConfigParser):
def getStr(self):
"""Write an .ini-format string representation of the configuration state."""
ret = []
if self._defaults:
ret.append(u"[DEFAULT]")
for (key, value) in self._defaults.items():
ret.append(u"{} = {}s".format(key, str(value).replace('\n', '\n\t')))
ret.append("\n")
for _section in self._sections:
ret.append(u"\n[{}]".format(_section))
for (key, value) in self._sections[_section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = u" = ".join((key, value.replace(u'\n', u'\n\t')))
ret.append(u"{}".format(key))
return u'\n'.join(ret)
if self.interpreterclass().configtype == 'generic': # only regex parsers use config file
_cfg = MyConfigParser()
_cfg.add_section('global')
_cfg.set('global', 'class', self.interpreter)
_cfg.set('global', 'keywords', self.keywords.replace('\r\n', ';'))
_cfg.set('global', 'translations', yaml.safe_dump(self.translations, encoding="utf-8"))
_cfg.set('global', 'name', self.name)
for k, v in self.attributes.items():
_cfg.set('global', k, v)
for section in [s for s in self.getSections() if s.active]:
_cfg.add_section(section.key)
_cfg.set(section.key, 'name', section.name)
if section.method:
_cfg.set(section.key, 'method', section.method)
for _k, _v in section.attributes.items():
_cfg.set(section.key, _k, _v)
return _cfg.getStr()
else:
return ""
@property
def translations(self):
        return yaml.safe_load(self._translations) or {}
@translations.setter
def translations(self, values):
self._translations = yaml.safe_dump(values, encoding='utf-8')
@property
def attributes(self):
"""
getter for attributes
:return: dict with type attributes
"""
        return yaml.safe_load(self._attributes) or {}
@attributes.setter
def attributes(self, values):
"""
setter for type attributes
        :param values: dict with key-value pairs
"""
self._attributes = yaml.safe_dump(values, encoding='utf-8')
def interpreterclass(self):
"""
Get type interpreter class from directory *emonitor/modules/alarms/inc/*
        :return: interpreter class instance :py:class:`emonitor.modules.alarms.alarmutils.AlarmFaxChecker`
"""
if self.interpreter:
cls = imp.load_source('emonitor.modules.alarms.inc', 'emonitor/modules/alarms/inc/%s' % self.interpreter)
return getattr(cls, cls.__all__[0])()
return None
def interpreterStrings(self):
"""
Get list of needed string for interpreter class
:return: list of strings
"""
if self.interpreterclass():
return sorted(self.interpreterclass().getDefaultConfig()['translations'])
return []
def translation(self, name):
if name in self.translations:
return self.translations[name]
return ""
def getSections(self):
"""
Get sorted list of possible sections of :py:class:`emonitor.modules.alarms.alarmtype.AlarmType`
:return: list of :py:class:`emonitor.modules.alarms.alarmsection.AlarmSection`
"""
return sorted(self.sections.values())
@staticmethod
def getVariables():
return Settings.get('alarms.evalfields').split('\r\n')
@staticmethod
def getAlarmTypes(id=0):
"""
Get list of alarm type objects filtered by parameter
:param optional id: id of alarm type or 0 for all types
:return: list of single object :py:class:`emonitor.modules.alarms.alarmtype.AlarmType`
"""
if id != 0:
return AlarmType.query.filter_by(id=id).first()
else:
return AlarmType.query.order_by('id').all()
@staticmethod
def getAlarmTypeByClassname(name):
"""
Get list of all alarm types by given class name
:param name: name of interpreter class
:return: list of :py:class:`emonitor.modules.alarms.alarmtype.AlarmType`
"""
return AlarmType.query.filter_by(interpreter=name).all() or []
@staticmethod
def handleEvent(eventname, **kwargs):
"""
Eventhandler for alarm type class: do type detection
:param eventname:
:param kwargs:
:return:
"""
stime = time.time()
if 'text' in kwargs.keys():
text = kwargs['text']
atype = None
for alarmtype in AlarmType.getAlarmTypes():
                for kw in alarmtype.keywords.splitlines():
                    if kw and kw in text:
atype = alarmtype
break
kwargs['type'] = 0
if atype:
kwargs['type'] = atype.id
if 'time' not in kwargs.keys():
kwargs['time'] = []
kwargs['time'].append('alarmtype: alarmtype detection done in %s sec.' % (time.time() - stime))
return kwargs
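# ---------------------------------------------------------------------------
# Illustrative sketch of the .ini layout buildFromConfigFile() expects
# (assumptions: run inside an initialised emonitor application context; the
# interpreter file name, keywords and field names below are hypothetical).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import StringIO
    sample = u"""[global]
name = demo type
class = demo_interpreter.py
keywords = Alarmfax;Einsatz
translations = {_o_: place, _k_: keyword}

[section1]
name = place
method = getPlace
"""
    demo_type = AlarmType.buildFromConfigFile(StringIO.StringIO(sample))
    print(demo_type.name if demo_type else "config rejected")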
|
|
"""
module to perform semi-synthetic simulations:
- take snps
- simulate phenotypes
- perform GWAS with different methods
- measure performance
"""
import logging
import os
import time
import sys
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab
import pylab
import fastlmm.association.gwas_eval as gw
from fastlmm.util.pickle_io import save, load
from fastlmm.util.runner import Local, Hadoop2, LocalMultiProc
from fastlmm.util import distributed_map
from pysnptools.standardizer import DiagKtoN
import split_data_helper
import semisynth_simulations
class LeaveTwoChrOutSimulation():
def __init__(self, snp_fn, out_prefix):
self.force_recompute = False
#self.base_path = base_path
self.snp_fn = snp_fn
from pysnptools.snpreader import Bed
self.snp_reader = Bed(snp_fn)
self.eigen_fn = self.snp_fn + "_pcs.pickle"
self.out_prefix = out_prefix
def precompute_pca(self):
"""
compute pcs
"""
logging.info("computing PCA on train set")
t0 = time.time()
if not os.path.isfile(self.eigen_fn) or self.force_recompute:
G = self.snp_reader.read(order='C').standardize().val
G.flags.writeable = False
chr1_idx, chr2_idx, rest_idx = split_data_helper.split_chr1_chr2_rest(self.snp_reader.pos)
G_train = G.take(rest_idx, axis=1)
from sklearn.decomposition import PCA
pca = PCA()
pcs = pca.fit_transform(G_train)
logging.info("saving eigendecomp to file %s" % self.eigen_fn)
eig_dec = {"pcs": pcs}
save(self.eigen_fn, eig_dec)
logging.info("time taken for pc computation: " + str(time.time()-t0))
else:
logging.info("pc file already exists: %s" % (self.eigen_fn))
def run(self, methods, num_causal, num_repeats, num_pcs, description, runner, seed=None, plot_fn=None):
self.precompute_pca()
input_files = [self.snp_fn + ext for ext in [".bed", ".fam", ".bim"]] + [self.eigen_fn]
input_args = [(methods, self.snp_fn, self.eigen_fn, num_causal, num_pcs, seed, sim_id) for sim_id in range(num_repeats)]
output_list = distributed_map.d_map(semisynth_simulations.compute_core, input_args, runner, input_files=input_files)
############################################
results_fn = "%s_results.runs_%i.causals_%i.pickle.bzip" % (description, num_repeats, num_causal)
reduced_results_fn = results_fn.replace("runs", "reduced.runs")
save(results_fn, output_list)
methods = output_list[0][0].keys()
arg_list = [(method, results_fn) for method in methods]
#reduce_runner = Hadoop(len(methods), mapmemory=90*1024, reducememory=90*1024, mkl_num_threads=1, queue="shared")
reduce_runner = Local()
combine_output = distributed_map.d_map(semisynth_simulations.combine_results, arg_list, reduce_runner, input_files=[results_fn])
save(reduced_results_fn, combine_output)
title = "%i causal, %i repeats" % (num_causal, num_repeats)
visualize_reduced_results(methods, combine_output, title=title, plot_fn=plot_fn)
return combine_output
def simulate_ascertained(methods, prevalence, iid_count, num_causal, num_repeats, description, snp_args, phenotype_args, runner=Local(), seed=None, plot_fn=None):
"""
run a synthetic simulation using ascertained data
:param methods: A list of functions implementing methods to be compared.
:type methods: list<function>
:param prevalence: Prior probability of a case, e.g. .1
:type prevalence: a float between 0.0 and 1.0 (exclusive)
:param iid_count: The number of individuals to generate.
:type iid_count: int
    :param num_causal: The number of causal SNPs in the simulation.
    :type num_causal: int
    :param num_repeats: The number of repeats in the simulation.
    :type num_repeats: int
    :param description: Short description string of the experiment (used in output file names)
    :type description: str
:param snp_args: arguments for an internal call to :func:`GWAS_benchmark.snp_gen`. Do not include
'iid_count' or 'seed'
:type snp_args: dictionary
:param phenotype_args: arguments for an internal call to :func:`.generate_phenotype`. Do not include
'snp_count' or 'seed'
:type phenotype_args: dictionary
:param runner: a Runner object (e.g. Local, Hadoop, HPC)
:type runner: Runner
:param seed: a random seed to control random number generation
:type seed: int
:param plot_fn: filename under which to save the output figure
:type plot_fn: str
"""
input_args = [(methods, num_causal, prevalence, iid_count, snp_args, phenotype_args, seed, sim_id) for sim_id in range(num_repeats)]
output_list = distributed_map.d_map(semisynth_simulations.compute_core_ascertained, input_args, runner)
############################################
results_fn = "%s_ascertained_results.runs_%i.causals_%i.pickle.bzip" % (description, num_repeats, num_causal)
reduced_results_fn = results_fn.replace("runs", "reduced.runs")
save(results_fn, output_list)
methods = output_list[0][0].keys()
arg_list = [(method, results_fn) for method in methods]
combine_output = distributed_map.d_map(semisynth_simulations.combine_results, arg_list, Local(), input_files=[results_fn])
save(reduced_results_fn, combine_output)
title = "%i causal, %i repeats" % (num_causal, num_repeats)
visualize_reduced_results(methods, combine_output, title=title, plot_fn=plot_fn)
return combine_output
#TODO: create a different subclass for ascertained data
#      and call the other subclass "existing phenotype"
def visualize_reduced_results(methods, combine_output, title="", plot_fn=None):
"""
set up plots: T1-error, ROC, PRC
"""
t0 = time.time()
fig = pylab.figure()
fig.set_size_inches(26,7)
for mi, method in enumerate(methods):
o = combine_output[mi]
pylab.subplot(131)
gw.draw_t1err_curve(o["t1err"][0], o["t1err"][1], method, o["num_trials"])
pylab.subplot(132)
draw_roc_curve(o["roc"][0], o["roc"][1], o["roc"][2], method)
pylab.subplot(133)
gw.draw_prc_curve(o["prc"][0], o["prc"][1], o["prc"][2], method)
pylab.suptitle(title)
print "time taken to draw figure", time.time()-t0
if plot_fn is None:
print "showing figure!"
pylab.show()
else:
print "saving figure!"
pylab.savefig(plot_fn, dpi=100)
def combine_results(input_tuple):
"""
compute performance statistics from p-values of method
"""
method, results_fn = input_tuple
logging.info("reading file: %s" % results_fn)
output_list = load(results_fn)
    p_values_all = []
    p_values_chr1 = []
    p_values_chr2 = []
    mask_all = []
t0 = time.time()
logging.info("concatenating p-values")
for result, idx in output_list:
causals_chr2_idx = np.intersect1d(idx["chr2_idx"], idx["causal_idx"])
assert len(result[method]) == len(idx["chr1_idx"]) + len(idx["chr2_idx"])
p_vals_t1_err = result[method][idx["chr1_idx"]]
p_vals_power = result[method][causals_chr2_idx]
p_values_chr1.extend(p_vals_t1_err)
p_values_chr2.extend(p_vals_power)
p_values_all.extend(p_vals_t1_err)
p_values_all.extend(p_vals_power)
mask_t1_err = np.zeros(len(idx["chr1_idx"]), dtype=np.bool)
mask_power = np.ones(len(causals_chr2_idx), dtype=np.bool)
mask_all.extend(mask_t1_err)
mask_all.extend(mask_power)
logging.info("done concatenating p-values (%s)" % (str(time.time()-t0)))
result = {}
t0 = time.time()
result["roc"] = gw.compute_roc_data(np.array(mask_all, dtype=np.bool), -np.array(p_values_all))
logging.info("computed roc in (%s)" % (str(time.time()-t0)))
t0 = time.time()
result["prc"] = gw.compute_prc_data(np.array(mask_all, dtype=np.bool), -np.array(p_values_all))
logging.info("computed prc in (%s)" % (str(time.time()-t0)))
t0 = time.time()
result["t1err"] = gw.compute_t1err_data(np.array(p_values_chr1), np.zeros(len(p_values_chr1), dtype=np.bool))
logging.info("computed t1err in (%s)" % (str(time.time()-t0)))
t0 = time.time()
result["power"] = gw.compute_power_data(np.array(p_values_chr2), np.ones(len(p_values_chr2), dtype=np.bool))
logging.info("computed power in (%s)" % (str(time.time()-t0)))
result["method"] = method
result["num_trials"] = len(p_values_chr1)
return result
def generate_phenotype(snp_data, causals, genetic_var, noise_var, seed=None):
"""
    generate a phenotype for the given genotype data
    'causals' can be either an array of indices of the causal snps or the number of causal snps desired.
"""
if seed is not None:
np.random.seed(int(seed % sys.maxint))
try:
num_causal = len(causals)
causal_idx = causals
except:
num_causal = causals
#the "if..else" is a work around because the linux version of np.random.choice doesn't like to select zero items from an empty list. We need to call random in either case so that the random seed ends up in the expected state
causal_idx = np.random.choice(sp.arange(snp_data.sid_count if num_causal>0 else 1),size=num_causal,replace=False)
num_phenotypes = 1
mean = 0.0
X = snp_data.val.copy()
X.flags.writeable = False
X_causal = X[:,causal_idx]
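    # scale the causal genotypes by 1/sqrt(#causals) so the total genetic
    # variance stays at genetic_var regardless of how many causals are drawn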
X_causal = 1./np.sqrt(X_causal.shape[1]) * X_causal
W = np.random.randn(num_causal, num_phenotypes) * np.sqrt(genetic_var) + mean #Weight of each causal SNP
XW = np.dot(X_causal, W)
noise_std = np.sqrt(noise_var)
Z = noise_std*sp.randn(X_causal.shape[0], num_phenotypes)
y = XW + Z
y = y[:,0]
return y
def generate_discrete_ascertained(prevalence, iid_count, snp_args, phenotype_args, seed=0):
"""
    Generate discrete ascertained data. Internally, cases will be generated at the requested
    prevalence. Before returning, however, the controls will be randomly sampled so
    that in the returned data, cases and controls have equal numbers of examples.
:param prevalence: Prior probability of a case, e.g. .1
    :type prevalence: a float between 0.0 (exclusive) and 0.5 (inclusive)
    :param iid_count: The number of examples desired in the returned data. Because of
        rounding during data generation the actual number may be lower. If this happens,
        a warning will be shown.
:type iid_count: int
:param snp_args: arguments for an internal call to :func:`GWAS_benchmark.snp_gen`. Do not include
'iid_count' or 'seed'
:type snp_args: dictionary
:param phenotype_args: arguments for an internal call to :func:`.generate_phenotype`. Do not include
'snp_count' or 'seed'
:type phenotype_args: dictionary
:param seed: a random seed to control random number generation
:type seed: int
    :rtype: a :class:`pysnptools.snpreader.SnpData` of genotype data and an nparray of 0,1 phenotype values.
:Example:
>>> snp_args = {"fst":.1,"dfr":.5,"sid_count":200,"maf_low":.05}
>>> phenotype_args = {"causals":10,"genetic_var":0.5, "noise_var":0.5}
>>> snps,pheno = generate_discrete_ascertained(prevalence=.1,iid_count=100,seed=5,snp_args=snp_args,phenotype_args=phenotype_args)
>>> print int(snps.val.shape[0]),int(snps.val.shape[1]),int(len(pheno))
98 200 98
"""
assert 0<prevalence and prevalence <= .5, "Expect prevalence to be between 0.0 (exclusive) and .5 (inclusive)"
assert int(iid_count) == iid_count and iid_count >= 0, "Expect iid_count to be a non-negative integer"
# generate more examples than we ultimately want
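    # (at the requested prevalence this yields roughly iid_count/2 cases, which
    #  are then matched by an equal number of randomly sampled controls)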
iid_count2 = int(float(iid_count) / 2.0 / prevalence)
from GWAS_benchmark import snp_gen
snp2 = snp_gen(iid_count=iid_count2, seed=seed, **snp_args)
pheno2 = generate_phenotype(snp_data=snp2, seed=seed, **phenotype_args)
    # Sort the snps by pheno2 value
snps2_sorted = snp2[pheno2.argsort(),:]
# we want the top snp_count*prevalence for cases
# and a random sample, of the same size, from the rest for control
case_count = int(snps2_sorted.iid_count * prevalence)
case_index = range(-1,-(case_count+1),-1) # e.g. if case_count is 3, then -1,-2,-3
control_count = case_count
if control_count + case_count != iid_count:
logging.warn("iid_count is {0} instead of {1} because of rounding".format(control_count + case_count, iid_count))
if seed is not None:
np.random.seed(int(seed % sys.maxint))
#print "gda", snps2_sorted.iid_count,case_count,control_count
#the "if..else" is a work around because the linux version of np.random.choice doesn't like to select zero items from an empty list. We need to call random in either case so that the random seed ends up in the expected state
control_index = np.random.choice(np.arange(snps2_sorted.iid_count-case_count if control_count > 0 else 1), control_count, replace=False)
snp_final = snps2_sorted[np.concatenate((control_index,case_index)),:].read()
pheno_final = np.zeros(control_count+case_count)
pheno_final[control_count:]=1
return snp_final, pheno_final
def compute_core(input_tuple):
"""
Leave-two-chromosome-out evaluation scheme:
Chr1: no causals, used for T1-error evaluation
Chr2: has causals, not conditioned on, used for power evaluation
Rest: has causals, conditioned on
T1 Pow [ cond ]
===== ===== ===== .... =====
x x x x xx
"""
methods, snp_fn, eigen_fn, num_causal, num_pcs, seed, sim_id = input_tuple
# partially load bed file
from pysnptools.snpreader import Bed
snp_reader = Bed(snp_fn)
# determine indices for generation and evaluation
##################################################################
chr1_idx, chr2_idx, rest_idx = split_data_helper.split_chr1_chr2_rest(snp_reader.pos)
causal_candidates_idx = np.concatenate((chr2_idx, rest_idx))
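    # causal SNPs may come from chr2 (power evaluation) or from the remaining,
    # conditioned-on chromosomes, but never from chr1 (type-1 error evaluation)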
# only compute t1-error (condition on all chr with causals on them)
#causal_candidates_idx = rest_idx
test_idx = np.concatenate((chr1_idx, chr2_idx))
if seed is not None:
np.random.seed(int(seed % sys.maxint))
causal_idx = np.random.permutation(causal_candidates_idx)[0:num_causal]
# generate phenotype
###################################################################
genetic_var = 0.5
noise_var = 0.5
y = generate_phenotype(Bed(snp_fn).read(order='C').standardize(), causal_idx, genetic_var, noise_var)
y.flags.writeable = False
############### only alter part until here --> modularize this
# load pcs
###################################################################
logging.info("loading eigendecomp from file %s" % eigen_fn)
eig_dec = load(eigen_fn)
G_pc = eig_dec["pcs"]
G_pc.flags.writeable = False
G_pc_ = G_pc[:,0:num_pcs]
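    # rescale the selected PCs so that the implied kinship matrix has trace
    # equal to the number of individuals (DiagKtoN standardization)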
G_pc_norm = DiagKtoN(G_pc_.shape[0]).standardize(G_pc_.copy())
G_pc_norm.flags.writeable = False
# run feature selection
#########################################################
# generate pheno data structure
pheno = {"iid": snp_reader.iid, "vals": y, "header": []}
covar = {"iid": snp_reader.iid, "vals": G_pc_norm, "header": []}
# subset readers
G0 = snp_reader[:,rest_idx]
test_snps = snp_reader[:,test_idx]
result = {}
fs_result = {}
# additional methods can be defined and included in the benchmark
for method_function in methods:
result_, fs_result_ = method_function(test_snps, pheno, G0, covar)
result.update(result_)
fs_result.update(fs_result_)
# save indices
indices = {"causal_idx": causal_idx, "chr1_idx": chr1_idx, "chr2_idx": chr2_idx, "input_tuple": input_tuple, "fs_result": fs_result}
#test_idx
return result, indices
def compute_core_ascertained(input_tuple):
"""
Leave-two-chromosome-out evaluation scheme:
Chr1: no causals, used for T1-error evaluation
Chr2: has causals, not conditioned on, used for power evaluation
Rest: has causals, conditioned on
T1 Pow [ cond ]
===== ===== ===== .... =====
x x x x xx
"""
methods, num_causal, prevalence, iid_count, snp_args, phenotype_args, seed, sim_id = input_tuple
# determine indices for generation and evaluation
##################################################################
chr1_idx, chr2_idx, rest_idx = range(0,1000), range(1000, 2000), range(2000, 10000)
causal_candidates_idx = np.concatenate((chr2_idx, rest_idx))
# only compute t1-error (condition on all chr with causals on them)
test_idx = np.concatenate((chr1_idx, chr2_idx))
if seed is not None:
np.random.seed(int(seed % sys.maxint))
causal_idx = np.random.permutation(causal_candidates_idx)[0:num_causal]
# generate phenotype
###################################################################
#y = generate_phenotype(Bed(snp_fn).read(order='C').standardize(), causal_idx, genetic_var, noise_var)
#y.flags.writeable = False
phenotype_args["causals"] = causal_idx
#import pdb; pdb.set_trace()
snp_reader, y = generate_discrete_ascertained(prevalence, iid_count, snp_args, phenotype_args, seed=seed)
# run feature selection
#########################################################
# generate pheno data structure
pheno = {"iid": snp_reader.iid, "vals": y, "header": []}
covar = {"iid": snp_reader.iid, "vals": np.ones((len(y),1)), "header": []}
# subset readers
G0 = snp_reader[:,rest_idx]
test_snps = snp_reader[:,test_idx]
result = {}
fs_result = {}
# additional methods can be defined and included in the benchmark
for method_function in methods:
result_, fs_result_ = method_function(test_snps, pheno, G0, covar)
result.update(result_)
fs_result.update(fs_result_)
# save indices
indices = {"causal_idx": causal_idx, "chr1_idx": chr1_idx, "chr2_idx": chr2_idx, "input_tuple": input_tuple, "fs_result": fs_result}
#test_idx
return result, indices
def draw_roc_curve(fpr, tpr, roc_auc, label):
"""
draw semi-log-scaled ROC curve
"""
if len(fpr) > 1000:
sub_idx = [int(a) for a in np.linspace(0, len(fpr)-1, num=1000, endpoint=True)]
fpr, tpr = fpr[sub_idx], tpr[sub_idx]
#pylab.semilogx(fpr, tpr, label='%s (area = %0.4f)' % (label, roc_auc))
pylab.semilogx(fpr, tpr, label=label)
#pylab.plot([0, 1], [0, 1], 'k--')
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
#pylab.xlabel('False Positive Rate')
pylab.xlabel('type I error', fontsize="large")
#pylab.ylabel('True Positive Rate (Power)')
pylab.ylabel('power', fontsize="large")
#pylab.title('Receiver operating characteristic example')
pylab.grid(True)
pylab.legend(loc="lower right")
def run_simulation(snp_fn, out_prefix, methods, num_causals, num_repeats, num_pcs, description, runner, plot_fn=None, seed=None):
sc = LeaveTwoChrOutSimulation(snp_fn, out_prefix)
return sc.run(methods, num_causals, num_repeats, num_pcs, "mouse_", runner, seed=seed, plot_fn=plot_fn)
def run_simulation_ascertained():
snp_args = {"fst": 0.2, "dfr": 0.1, "sid_count": 10000}
phenotype_args = {"genetic_var": 0.5, "noise_var": 0.5}
# make this a tuple of function and kwargs
from GWAS_benchmark.methods import execute_lmm, execute_linear_regression
methods = [execute_lmm] #, execute_linear_regression]
prevalence = 0.2
num_causal = 20
num_repeats = 50
    iid_count = 500
description = "ascertained"
simulate_ascertained(methods, prevalence, iid_count, num_causal, num_repeats, description, snp_args, phenotype_args)
def main():
logging.basicConfig(level=logging.INFO)
#snp_fn = "data/toydata.5chrom"
snp_fn = "data/mouse/alldata"
out_prefix = "results/mouse_"
description = "test_run"
queue = "shared"
#runner = Hadoop2(200, mapmemory=40*1024, reducememory=90*1024, mkl_num_threads=4, queue=queue)
print "using snps", snp_fn
#runner = LocalMultiProc(20)
runner = Local()
num_causals = 500
num_repeats = 3
num_pcs = 5
# make this a tuple of function and kwargs
from GWAS_benchmark.methods import execute_lmm, execute_linear_regression, execute_dual_fs, execute_fs
methods = [execute_fs, execute_linear_regression]
run_simulation(snp_fn, out_prefix, methods, num_causals, num_repeats, num_pcs, description, runner)
if __name__ == "__main__":
run_simulation_ascertained()
#main()
|
|
##########################################################################
#
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import time
import IECore
class LRUCacheTest( unittest.TestCase ) :
def test( self ) :
self.numGetterCalls = 0
def getter( key ) :
self.numGetterCalls += 1
return (
# value
{
"same" : key,
"times2" : key * 2,
"times4" : key * 4,
},
# cost
1
)
c = IECore.LRUCache( getter, 10 )
self.assertEqual( c.getMaxCost(), 10 )
c.setMaxCost( 20 )
self.assertEqual( c.getMaxCost(), 20 )
c.setMaxCost( 10 )
self.assertEqual( c.getMaxCost(), 10 )
v = c.get( 10 )
self.assertEqual( v,
{
"same" : 10,
"times2" : 20,
"times4" : 40,
}
)
self.assertEqual( c.currentCost(), 1 )
self.assertEqual( self.numGetterCalls, 1 )
v2 = c.get( 10 )
self.failUnless( v2 is v )
self.assertEqual( c.currentCost(), 1 )
self.assertEqual( self.numGetterCalls, 1 )
for k in range( 11, 10000 ) :
v = c.get( k )
self.assertEqual( v,
{
"same" : k,
"times2" : k * 2,
"times4" : k * 4,
}
)
self.failIf( c.currentCost() > 10 )
def testClearCausesReloads( self ) :
self.numGetterCalls = 0
self.multiplier = 2
def getter( key ) :
self.numGetterCalls += 1
return ( key * self.multiplier, 1 )
c = IECore.LRUCache( getter, 10 )
v = c.get( 10 )
self.assertEqual( v, 20 )
self.assertEqual( self.numGetterCalls, 1 )
v = c.get( 10 )
self.assertEqual( v, 20 )
self.assertEqual( self.numGetterCalls, 1 )
c.clear()
self.multiplier = 4
v = c.get( 10 )
self.assertEqual( v, 40 )
self.assertEqual( self.numGetterCalls, 2 )
def testThreadingAndLimitCost( self ) :
def getter( key ) :
return ( key * 2, 1 )
c = IECore.LRUCache( getter, 10 )
def thrash() :
for i in range( 0, 10000 ) :
v = c.get( i )
self.assertEqual( v, i * 2 )
threads = []
for i in range( 0, 10 ) :
thread = threading.Thread( target=thrash )
threads.append( thread )
thread.start()
for thread in threads :
thread.join()
def testThreadingAndClear( self ) :
def getter( key ) :
return ( key * 2, 1 )
c = IECore.LRUCache( getter, 100000 )
def f1() :
for i in range( 0, 10000 ) :
v = c.get( i )
self.assertEqual( v, i * 2 )
def f2() :
for i in range( 0, 10000 ) :
c.clear()
t1 = threading.Thread( target=f1 )
t2 = threading.Thread( target=f1 )
t3 = threading.Thread( target=f2 )
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
c.clear()
self.assertEqual( c.currentCost(), 0 )
def testYieldGILInGetter( self ) :
def getter( key ) :
# this call simulates the gil getting
# yielded for some reason - in the real world
# perhaps an Op call or just the python interpreter
# deciding to switch threads.
time.sleep( 0.1 )
return ( key, 1 )
c = IECore.LRUCache( getter, 100000 )
def f() :
c.get( 0 )
t1 = threading.Thread( target=f )
t2 = threading.Thread( target=f )
t1.start()
t2.start()
t1.join()
t2.join()
def testRemovalCallback( self ) :
def getter( key ) :
return ( key * 2, 1 )
removed = []
def removalCallback( key, value ) :
removed.append( ( key, value ) )
c = IECore.LRUCache( getter, removalCallback, 5 )
self.assertEqual( c.get( 1 ), 2 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 2 ), 4 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 3 ), 6 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 4 ), 8 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 5 ), 10 )
self.assertEqual( removed, [] )
self.assertEqual( c.get( 6 ), 12 )
self.assertEqual( len( removed ), 1 )
self.assertEqual( c.get( 7 ), 14 )
self.assertEqual( len( removed ), 2 )
c.clear()
self.assertEqual( len( removed ), 7 )
keys = [ x[0] for x in removed ]
for i in range( 1, 8 ) :
self.failUnless( i in keys )
def testSet( self ) :
def getter( key ) :
return ( None, 1 )
c = IECore.LRUCache( getter, 1000 )
c.set( 5, 10, 1 )
self.assertEqual( c.currentCost(), 1 )
self.assertEqual( c.get( 5 ), 10 )
self.assertEqual( c.currentCost(), 1 )
c.set( 5, 20, 100000 )
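		# an entry whose cost exceeds the cache limit cannot be held, so the
		# cache drops it and the next get() falls through to the getter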
self.assertEqual( c.currentCost(), 0 )
self.assertEqual( c.get( 5 ), None )
self.assertEqual( c.currentCost(), 1 )
def testCPPThreading( self ) :
# arguments are :
# iterations, number of unique values, maximum cost, clear frequency
# cache exactly the right size
IECore.testLRUCacheThreading( 100000, 100, 100 )
# cache not quite big enough
IECore.testLRUCacheThreading( 100000, 100, 90 )
# cache thrashing like crazy
IECore.testLRUCacheThreading( 100000, 1000, 2 )
# clearing all the time while doing concurrent lookups
IECore.testLRUCacheThreading( 100000, 1000, 90, 20 )
def testEraseAndCached( self ) :
def getter( key ) :
return ( key, 1 )
c = IECore.LRUCache( getter, 1000 )
self.assertFalse( c.cached( 1 ) )
self.assertEqual( c.currentCost(), 0 )
c.get( 1 )
self.assertTrue( c.cached( 1 ) )
self.assertEqual( c.currentCost(), 1 )
self.assertTrue( c.erase( 1 ) )
self.assertEqual( c.currentCost(), 0 )
self.assertFalse( c.cached( 1 ) )
self.assertFalse( c.erase( 1 ) )
self.assertEqual( c.currentCost(), 0 )
def testSerialRecursion( self ) :
# Cache big enough that nothing will be evicted
IECore.testSerialLRUCacheRecursion( 100 )
# Cache small enough that evictions are necessary
IECore.testSerialLRUCacheRecursion( 10 )
def testParallelRecursion( self ) :
# Cache big enough that nothing will be evicted
IECore.testParallelLRUCacheRecursion( 100000, 10000, 10000 )
# Cache small enough that evictions are necessary
IECore.testParallelLRUCacheRecursion( 100000, 1000, 100 )
def testExceptions( self ) :
calls = []
def getter( key ) :
calls.append( key )
raise ValueError( "Get failed for {0}".format( key ) )
c = IECore.LRUCache( getter, 1000 )
# Check that the exception thrown by the getter propagates back out to us.
self.assertRaisesRegexp( RuntimeError, "Get failed for 10", c.get, 10 )
self.assertEqual( calls, [ 10 ] )
# Check that calling a second time gives us the same error, but without
# calling the getter again.
self.assertRaisesRegexp( RuntimeError, "Get failed for 10", c.get, 10 )
self.assertEqual( calls, [ 10 ] )
# Check that clear erases exceptions, so that the getter will be called again.
c.clear()
self.assertRaisesRegexp( RuntimeError, "Get failed for 10", c.get, 10 )
self.assertEqual( calls, [ 10, 10 ] )
# And check that erase does the same.
c.erase( 10 )
self.assertRaisesRegexp( RuntimeError, "Get failed for 10", c.get, 10 )
self.assertEqual( calls, [ 10, 10, 10 ] )
def testSetLimitsCost( self ) :
c = IECore.LRUCache( lambda key : key, 2 )
c.set( "a", "a", 1 )
self.assertEqual( c.currentCost(), 1 )
c.set( "b", "b", 1 )
self.assertEqual( c.currentCost(), 2 )
c.set( "c", "c", 1 )
self.assertEqual( c.currentCost(), 2 )
c.set( "d", "d", 1 )
self.assertEqual( c.currentCost(), 2 )
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2016 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 7000 Series All-Flash Array ISCSI Driver
"""
import mock
from cinder import test
from cinder.tests import fake_vmem_xgtools_client as vmemclient
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v7000_common
from cinder.volume.drivers.violin import v7000_iscsi
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": VOLUME_ID,
"volume_name": "volume-" + VOLUME_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
"initiator": INITIATOR_IQN,
"host": "irrelevant",
"ip": "1.2.3.4",
}
TARGET = "iqn.2004-02.com.vmem:%s" % VOLUME['id']
GET_VOLUME_STATS_RESPONSE = {
'vendor_name': 'Violin Memory, Inc.',
'reserved_percentage': 0,
'QoS_support': False,
'free_capacity_gb': 4094,
'total_capacity_gb': 2558,
}
CLIENT_INFO = {
'issanip_enabled': False,
'sanclient_id': 7,
'ISCSIDevices':
[{'category': 'Virtual Device',
'sizeMB': VOLUME['size'] * 1024,
'name': VOLUME['id'],
'object_id': 'v0000058',
'access': 'ReadWrite',
'ISCSITarget':
{'name': TARGET,
'startingLun': '0',
'ipAddr': '192.168.91.1 192.168.92.1 192.168.93.1 192.168.94.1',
'object_id': '2c68c1a4-67bb-59b3-93df-58bcdf422a66',
'access': 'ReadWrite',
'isInfiniBand': 'false',
'iscsiurl': ''},
'type': 'SAN',
'lun': '8',
'size': VOLUME['size'] * 1024 * 1024}],
'name': 'lab-srv3377',
'isiscsi_enabled': True,
'clusterName': '',
'ipAddress': '',
'isclustered': False,
'username': '',
'isbmr_enabled': False,
'useracl': None,
'isfibrechannel_enabled': False,
'iSCSIPolicy':
{'initiators': ['iqn.1993-08.org.debian:01:1ebcd244a059'],
'authentication':
{'mutualCHAP':
{'enabled': False,
'user': ''},
'enabled': False,
'defaultUser': ''},
'accessType': 'stationary'},
'ISCSITargetList':
[{'name': 'iqn.2004-02.com.vmem:lab-fsp-mga.openstack',
'startingLun': '0',
'ipAddr': '192.168.91.1 192.168.92.1 192.168.93.1 192.168.94.1',
'object_id': '716cc60a-576a-55f1-bfe3-af4a21ca5554',
'access': 'ReadWrite',
'isInfiniBand': 'false',
'iscsiurl': ''}],
'type': 'Windows',
'persistent_reservation': True,
'isxboot_enabled': False}
class V7000ISCSIDriverTestCase(test.TestCase):
"""Test cases for VMEM ISCSI driver."""
def setUp(self):
super(V7000ISCSIDriverTestCase, self).setUp()
self.conf = self.setup_configuration()
self.driver = v7000_iscsi.V7000ISCSIDriver(configuration=self.conf)
self.driver.gateway_iscsi_ip_addresses = [
'192.168.91.1', '192.168.92.1', '192.168.93.1', '192.168.94.1']
self.stats = {}
self.driver.set_initialized()
def tearDown(self):
super(V7000ISCSIDriverTestCase, self).tearDown()
def setup_configuration(self):
config = mock.Mock(spec=conf.Configuration)
config.volume_backend_name = 'v7000_iscsi'
config.san_ip = '8.8.8.8'
config.san_login = 'admin'
config.san_password = ''
config.san_thin_provision = False
config.san_is_local = False
config.use_igroups = False
config.request_timeout = 300
return config
def setup_mock_concerto(self, m_conf=None):
"""Create a fake Concerto communication object."""
_m_concerto = mock.Mock(name='Concerto',
version='1.1.1',
spec=vmemclient.mock_client_conf)
if m_conf:
_m_concerto.configure_mock(**m_conf)
return _m_concerto
@mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
def test_check_for_setup_error(self, m_setup_func):
"""No setup errors are found."""
result = self.driver.check_for_setup_error()
m_setup_func.assert_called_with()
self.assertTrue(result is None)
def test_create_volume(self):
"""Volume created successfully."""
self.driver.common._create_lun = mock.Mock()
result = self.driver.create_volume(VOLUME)
self.driver.common._create_lun.assert_called_with(VOLUME)
self.assertTrue(result is None)
def test_create_volume_from_snapshot(self):
self.driver.common._create_volume_from_snapshot = mock.Mock()
result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
self.driver.common._create_volume_from_snapshot.assert_called_with(
SNAPSHOT, VOLUME)
self.assertTrue(result is None)
def test_create_cloned_volume(self):
self.driver.common._create_lun_from_lun = mock.Mock()
result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
self.driver.common._create_lun_from_lun.assert_called_with(
SRC_VOL, VOLUME)
self.assertTrue(result is None)
def test_delete_volume(self):
"""Volume deleted successfully."""
self.driver.common._delete_lun = mock.Mock()
result = self.driver.delete_volume(VOLUME)
self.driver.common._delete_lun.assert_called_with(VOLUME)
self.assertTrue(result is None)
def test_extend_volume(self):
"""Volume extended successfully."""
new_size = 10
self.driver.common._extend_lun = mock.Mock()
result = self.driver.extend_volume(VOLUME, new_size)
self.driver.common._extend_lun.assert_called_with(VOLUME, new_size)
self.assertTrue(result is None)
def test_create_snapshot(self):
self.driver.common._create_lun_snapshot = mock.Mock()
result = self.driver.create_snapshot(SNAPSHOT)
self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
self.assertTrue(result is None)
def test_delete_snapshot(self):
self.driver.common._delete_lun_snapshot = mock.Mock()
result = self.driver.delete_snapshot(SNAPSHOT)
self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
self.assertTrue(result is None)
def test_get_volume_stats(self):
self.driver._update_volume_stats = mock.Mock()
self.driver._update_volume_stats()
result = self.driver.get_volume_stats(True)
self.driver._update_volume_stats.assert_called_with()
self.assertEqual(self.driver.stats, result)
def test_update_volume_stats(self):
"""Makes a mock query to the backend to collect
stats on all physical devices.
"""
backend_name = self.conf.volume_backend_name
self.driver.common._get_volume_stats = mock.Mock(
return_value=GET_VOLUME_STATS_RESPONSE,
)
result = self.driver._update_volume_stats()
self.driver.common._get_volume_stats.assert_called_with(
self.conf.san_ip)
self.assertEqual(backend_name,
self.driver.stats['volume_backend_name'])
self.assertEqual('iSCSI',
self.driver.stats['storage_protocol'])
self.assertTrue(result is None)
def test_initialize_connection(self):
lun_id = 1
response = {'success': True, 'msg': 'None'}
conf = {
'client.create_client.return_value': response,
'client.create_iscsi_target.return_value': response,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
self.driver._get_iqn = mock.Mock(return_value=TARGET)
self.driver._export_lun = mock.Mock(return_value=lun_id)
props = self.driver.initialize_connection(VOLUME, CONNECTOR)
self.driver._export_lun.assert_called_with(VOLUME, TARGET, CONNECTOR)
self.assertEqual(props['driver_volume_type'], "iscsi")
self.assertEqual(props['data']['target_discovered'], False)
self.assertEqual(props['data']['target_iqn'], TARGET)
self.assertEqual(props['data']['target_lun'], lun_id)
self.assertEqual(props['data']['volume_id'], VOLUME['id'])
self.assertEqual(props['data']['access_mode'], 'rw')
def test_terminate_connection(self):
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver._get_iqn = mock.Mock(return_value=TARGET)
self.driver._unexport_lun = mock.Mock()
result = self.driver.terminate_connection(VOLUME, CONNECTOR)
self.driver._unexport_lun.assert_called_with(VOLUME, TARGET, CONNECTOR)
self.assertEqual(result, None)
def test_export_lun(self):
lun_id = '1'
response = {'success': True, 'msg': 'Assign device successfully'}
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver.common._send_cmd_and_verify = mock.Mock(
return_value=response)
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
result = self.driver._export_lun(VOLUME, TARGET, CONNECTOR)
self.driver.common._send_cmd_and_verify.assert_called_with(
self.driver.common.vmem_mg.lun.assign_lun_to_iscsi_target,
self.driver._is_lun_id_ready,
'Assign device successfully',
[VOLUME['id'], TARGET],
[VOLUME['id'], CONNECTOR['host']])
self.driver._get_lun_id.assert_called_with(
VOLUME['id'], CONNECTOR['host'])
self.assertEqual(lun_id, result)
def test_export_lun_fails_with_exception(self):
lun_id = '1'
response = {'success': False, 'msg': 'Generic error'}
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver.common._send_cmd_and_verify = mock.Mock(
side_effect=v7000_common.ViolinBackendErr(response['msg']))
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
self.assertRaises(v7000_common.ViolinBackendErr,
self.driver._export_lun,
VOLUME, TARGET, CONNECTOR)
def test_unexport_lun(self):
response = {'success': True, 'msg': 'Unassign device successfully'}
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver.common._send_cmd = mock.Mock(
return_value=response)
result = self.driver._unexport_lun(VOLUME, TARGET, CONNECTOR)
self.driver.common._send_cmd.assert_called_with(
self.driver.common.vmem_mg.lun.unassign_lun_from_iscsi_target,
"Unassign device successfully",
VOLUME['id'], TARGET, True)
self.assertTrue(result is None)
def test_is_lun_id_ready(self):
lun_id = '1'
self.driver.common.vmem_mg = self.setup_mock_concerto()
self.driver._get_lun_id = mock.Mock(return_value=lun_id)
result = self.driver._is_lun_id_ready(
VOLUME['id'], CONNECTOR['host'])
self.assertEqual(True, result)
def test_get_lun_id(self):
conf = {
'client.get_client_info.return_value': CLIENT_INFO,
}
self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host'])
self.assertEqual(8, result)
|
|
"""
Tests for the deckglue app: practice-deck listing, the next-practice view,
card and user signals, and DelayablePractice delaying behaviour.
"""
from datetime import datetime, timedelta
from django.http import HttpRequest
from django.utils.timezone import utc
from django.test import TestCase
from guardian.models import User
from guardian.shortcuts import assign_perm
from nose.tools import assert_equals
from cardbox.card_model import Card
from cardbox.deck_model import Deck
from deckglue.models import DelayablePractice
from deckglue.views import PracticeDeckList, next_practice_item
def delayCard(dpid):
dp = DelayablePractice.objects.get(id=dpid)
dp.next_practice = datetime.utcnow().replace(tzinfo=utc) + timedelta(minutes=1000)
dp.save()
return dp
def makeCardDue( dpid, in_minutes):
dp = DelayablePractice.objects.get(id=dpid)
dp.next_practice = datetime.utcnow().replace(tzinfo=utc) - timedelta(minutes=in_minutes)
dp.save()
return dp
class PracticeDeckListTest(TestCase):
"""
This is the PracticeDeckList TestSuite
"""
fixtures = ['test.json']
def test_get_query_no_decks(self):
"""Check whether no decks are returned when none should be returned"""
testuser = User.objects.create(username="TestUser", password="1234")
dl = PracticeDeckList()
dl.request = HttpRequest()
dl.request.user = testuser
lod = dl.get_queryset()
assert_equals(lod.count(),0)
def test_get_query_one_deck(self):
"""Check whether decks are returned when they should be returned"""
dl = PracticeDeckList()
dl.request = HttpRequest()
dl.request.user = User.objects.get(username="zirror")
lod = dl.get_queryset()
assert_equals(lod.count(),1)
def test_get_query_percentage_due_0_no_cards(self):
"""Check whether percentage of cards due is 0 when there are no cards in deck"""
testuser = User.objects.create(username="TestUser", password="1234")
testdeck = Deck.objects.create(ID=3, title="Test-Deck", description="test")
assign_perm('view_deck',testuser,testdeck)
dl = PracticeDeckList()
dl.request = HttpRequest()
dl.request.user = testuser
lod = dl.get_queryset()
assert_equals(lod[0].due_percentage,0)
def test_get_query_percentage_due_0_all_cards_learned(self):
"""Check whether percentage of cards due is 0 when all cards have been learned recently"""
dl = PracticeDeckList()
dl.request = HttpRequest()
dl.request.user = User.objects.get(username="zirror")
delayCard(1)
delayCard(2)
lod = dl.get_queryset()
assert_equals(lod[0].due_percentage,0)
def test_get_query_percentage_due_100_all_cards_due(self):
"""Check whether percentage of cards due is 100 when all cards are due"""
dl = PracticeDeckList()
dl.request = HttpRequest()
dl.request.user = User.objects.get(username="zirror")
lod = dl.get_queryset()
assert_equals(lod[0].due_percentage,100)
class NextPracticeTemplateTest(TestCase):
"""
Next Practice Template Text Suite
"""
fixtures = ['test.json']
def getNextPracticeTemplate(self):
npi = next_practice_item()
npi.template_name = "learning/learn_item.html"
npi.request = HttpRequest()
npi.request.user = User.objects.get(username="zirror")
return npi
def test_get_cards_when_cards_due(self):
"""Returns response for learning when cards are due"""
        npi = self.getNextPracticeTemplate()
makeCardDue(1,10)
itms = npi.get(npi.request,deck_id=1)
text = str(itms.render())
assert_equals("learning" in text, True)
    def test_get_no_cards_when_none_due(self):
        """Returns no cards left for learning when none are due"""
npi = self.getNextPracticeTemplate()
delayCard(1)
delayCard(2)
itms = npi.get(npi.request,deck_id=1)
assert_equals("Herzlichen" in str(itms), True)
def test_get_cards_when_force(self):
"""Returns response for learning when force mode but no cards"""
npi = self.getNextPracticeTemplate()
delayCard(1)
delayCard(2)
itms = npi.get(npi.request,deck_id=1,force=True)
text = str(itms.render())
assert_equals("learning" in text, True)
class SignalTests(TestCase):
"""
This is the Signal TestSuite
"""
fixtures = ['test.json']
def setUp(self):
pass
def test_create_card_signal(self):
"""Check If Practice objects get created via signal"""
user = User.objects.get(username="zirror")
delaypractice = DelayablePractice()
alldue = delaypractice.get_all_due_for_user(user)
assert_equals(alldue.count(), 2)
def test_delete_card_signal(self):
"""Check If Practice objects get deleted via signal when card is deleted"""
user = User.objects.get(username="zirror")
card = Card.objects.get(ID=1)
card.delete()
delaypractice = DelayablePractice()
alldue = delaypractice.get_all_due_for_user(user)
assert_equals(alldue.count(), 1)
def test_delete_user_signal(self):
"""Check If Practice objects get deleted via signal when user is deleted"""
user = User.objects.get(username="zirror")
user.delete()
allDps = DelayablePractice.objects.all()
assert_equals(allDps.count(), 0)
class DelayablePracticeTest(TestCase):
"""
This is the DelayablePractice TestSuite
"""
fixtures = ['test.json']
def setUp(self):
pass
def test_get_all_due_in_card_id_list(self):
"""Check If Get_all_due_in_card_id_list only returns due cards"""
user = User.objects.get(username="zirror")
dp = delayCard(1)
numberOfDueCardsInList = dp.get_all_due_in_card_id_list(user, [1,2]).count()
assert_equals(numberOfDueCardsInList,1)
def test_delay_nothing_if_no_cards_are_due(self):
"""Check If nothing gets delayed when no cards are left due"""
dp_1 = delayCard(1) # make not due
dp_2 = delayCard(2) # make not due
time_1 = dp_1.next_practice
time_2 = dp_2.next_practice
dp_1.delay()
dp_2.delay()
assert_equals(time_1,dp_1.next_practice)
assert_equals(time_2,dp_2.next_practice)
def test_delay_after_latest_card_are_due(self):
"""Check If latest due card is in less than 10 minutes, only delay by that much plus one millisecond is done"""
dp_1 = makeCardDue(1,5) # make due
dp_2 = makeCardDue(2,3) # make due, but not as due
pre_delay = dp_1.next_practice > dp_2.next_practice
dp_1.delay()
assert_equals(pre_delay,False)
assert_equals(dp_1.next_practice > dp_2.next_practice,True)
def test_delay_for_10_min(self):
"""Card gets delayed for 10 minutes, if there is a planned learning after that"""
makeCardDue(1,20) # make very due
dp_2 = makeCardDue(2,25) # make even duer
dp_2.delay()
assert_equals(dp_2.next_practice > datetime.utcnow().replace(tzinfo=utc),True)
|
|
import pytz
import unittest
from datetime import datetime
from freezegun import freeze_time
from hamcrest import assert_that, is_, has_entries, equal_to
from mock import patch, MagicMock
from backdrop.transformers.dispatch import (
entrypoint,
run_transform,
get_query_parameters,
get_or_get_and_create_output_dataset
)
class DispatchTestCase(unittest.TestCase):
@patch('backdrop.transformers.dispatch.AdminAPI')
@patch('backdrop.transformers.dispatch.app')
def test_entrypoint(self, mock_app, mock_adminAPI):
adminAPI_instance = mock_adminAPI.return_value
adminAPI_instance.get_data_set_transforms.return_value = [
{'type': 1}, {'type': 2}
]
adminAPI_instance.get_data_set_by_name.return_value = {
"group": "foo",
"type": "bar",
}
earliest = datetime(2014, 12, 10, 12, 00, 00, tzinfo=pytz.utc)
latest = datetime(2014, 12, 14, 12, 00, 00, tzinfo=pytz.utc)
entrypoint('dataset123', earliest, latest)
assert_that(mock_app.send_task.call_count, is_(2))
mock_app.send_task.assert_any_call(
'backdrop.transformers.dispatch.run_transform',
args=(
{"group": "foo", "type": "bar"},
{'type': 1},
earliest,
latest))
mock_app.send_task.assert_any_call(
'backdrop.transformers.dispatch.run_transform',
args=(
{"group": "foo", "type": "bar"},
{'type': 2},
earliest,
latest))
@patch('backdrop.transformers.dispatch.AdminAPI')
@patch('backdrop.transformers.dispatch.DataSet')
@patch('backdrop.transformers.tasks.debug.logging')
@freeze_time('2014-12-14')
def test_run_transform(
self,
mock_logging_task,
mock_data_set,
mock_adminAPI):
mock_logging_task.return_value = [{'new-data': 'point'}]
adminAPI_instance = mock_adminAPI.return_value
adminAPI_instance.get_data_set.return_value = {
"bearer_token": "foo2",
}
data_set_instance = MagicMock()
data_set_instance.get.return_value = {
'data': [
{'data': 'point'},
],
}
mock_data_set.from_group_and_type.return_value = data_set_instance
earliest = datetime(2014, 12, 10, 12, 00, 00, tzinfo=pytz.utc)
latest = datetime(2014, 12, 14, 12, 00, 00, tzinfo=pytz.utc)
run_transform({
'data_group': 'group',
'data_type': 'type',
'token': 'foo',
}, {
'type': {
'function': 'backdrop.transformers.tasks.debug.logging',
},
'query-parameters': {
'period': 'day',
},
'options': {},
'output': {
'data-group': 'other-group',
'data-type': 'other-type',
},
}, earliest, latest)
mock_data_set.from_group_and_type.assert_any_call(
'http://backdrop/data', 'group', 'type',
)
data_set_instance.get.assert_called_with(
query_parameters={
'period': 'day',
'flatten': 'true',
'start_at': '2014-12-10T00:00:00+00:00',
'end_at': '2014-12-14T00:00:00+00:00',
'inclusive': 'true',
},
)
mock_data_set.from_group_and_type.assert_any_call(
'http://backdrop/data', 'other-group', 'other-type', token='foo2',
)
data_set_instance.post.assert_called_with([{'new-data': 'point'}])
@patch('backdrop.transformers.dispatch.AdminAPI')
@patch('backdrop.transformers.dispatch.DataSet')
@patch('backdrop.transformers.tasks.debug.logging')
@freeze_time('2014-12-14')
def test_run_transform_applies_additional_fields(
self,
mock_logging_task,
mock_data_set,
mock_adminAPI):
mock_logging_task.return_value = [{'new-data': 'point'}]
adminAPI_instance = mock_adminAPI.return_value
adminAPI_instance.get_data_set.return_value = {
"bearer_token": "foo2",
}
data_set_instance = MagicMock()
data_set_instance.get.return_value = {
'data': [
{'data': 'point'},
],
}
mock_data_set.from_group_and_type.return_value = data_set_instance
earliest = datetime(2014, 12, 10, 12, 00, 00, tzinfo=pytz.utc)
latest = datetime(2014, 12, 14, 12, 00, 00, tzinfo=pytz.utc)
run_transform({
'data_group': 'group',
'data_type': 'type',
'token': 'foo',
}, {
'type': {
'function': 'backdrop.transformers.tasks.debug.logging',
},
'query-parameters': {
'period': 'day',
},
'options': {
'additionalFields': {
'foo': 'bar',
}
},
'output': {
'data-group': 'other-group',
'data-type': 'other-type',
},
}, earliest, latest)
mock_data_set.from_group_and_type.assert_any_call(
'http://backdrop/data', 'group', 'type',
)
data_set_instance.get.assert_called_with(
query_parameters={
'period': 'day',
'flatten': 'true',
'start_at': '2014-12-10T00:00:00+00:00',
'end_at': '2014-12-14T00:00:00+00:00',
'inclusive': 'true',
},
)
mock_data_set.from_group_and_type.assert_any_call(
'http://backdrop/data', 'other-group', 'other-type', token='foo2',
)
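        # the additionalFields entry is merged into every transformed record and
        # an _id is attached (here the base64 encoding of "_foo:bar")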
data_set_instance.post.assert_called_with([{
'_id': 'X2ZvbzpiYXI=',
'new-data': 'point',
'foo': 'bar'
}])
@patch('backdrop.transformers.dispatch.AdminAPI')
@patch('backdrop.transformers.dispatch.DataSet')
@patch('backdrop.transformers.tasks.debug.logging')
def test_run_transform_no_output_group(
self,
mock_logging_task,
mock_data_set,
mock_adminAPI):
mock_logging_task.return_value = [{'new-data': 'point'}]
adminAPI_instance = mock_adminAPI.return_value
adminAPI_instance.get_data_set.return_value = {
"bearer_token": "foo2",
}
data_set_instance = MagicMock()
data_set_instance.get.return_value = {
'data': [
{'data': 'point'},
],
}
mock_data_set.from_group_and_type.return_value = data_set_instance
earliest = datetime(2014, 12, 10, 12, 00, 00, tzinfo=pytz.utc)
latest = datetime(2014, 12, 14, 12, 00, 00, tzinfo=pytz.utc)
run_transform({
'data_group': 'group',
'data_type': 'type',
'bearer_token': 'foo',
}, {
'type': {
'function': 'backdrop.transformers.tasks.debug.logging',
},
'query-parameters': {
'period': 'day',
},
'options': {},
'output': {
'data-type': 'other-type',
},
}, earliest, latest)
mock_data_set.from_group_and_type.assert_any_call(
'http://backdrop/data', 'group', 'other-type', token='foo2',
)
@patch('backdrop.transformers.dispatch.AdminAPI')
@patch('backdrop.transformers.dispatch.DataSet')
def test_get_or_get_and_create_dataset_when_data_set_exists(
self,
mock_data_set,
mock_adminAPI):
transform_config = {
'output': {
'data-group': 'floop',
'data-type': 'wibble'
}
}
input_dataset_config = {
"bearer_token": "foo2",
'data_group': 'loop',
'data_type': 'flibble'
}
adminAPI_instance = mock_adminAPI.return_value
adminAPI_instance.get_data_set.return_value = {
"bearer_token": "foo2",
}
data_set_instance = MagicMock()
data_set_instance.get.return_value = {
'data': [
{'data': 'point'},
],
}
mock_data_set.from_group_and_type.return_value = data_set_instance
output_data_set = get_or_get_and_create_output_dataset(
transform_config,
input_dataset_config)
assert_that(output_data_set, equal_to(data_set_instance))
adminAPI_instance.get_data_set.assert_called_once_with(
'floop',
'wibble'
)
mock_data_set.from_group_and_type.assert_called_once_with(
'http://backdrop/data', 'floop', 'wibble', token='foo2',
)
@patch('backdrop.transformers.dispatch.AdminAPI')
@patch('backdrop.transformers.dispatch.DataSet')
def test_get_and_get_or_create_dataset_when_get_finds_nothing(
self,
mock_data_set,
mock_adminAPI):
transform_config = {
'output': {
'data-group': 'floop',
'data-type': 'wibble'
}
}
input_dataset_config = {
'name': 'loop_flibble',
'bearer_token': 'foo2',
'data_group': 'loop',
'data_type': 'flibble',
'realtime': False,
'auto_ids': 'aa,bb',
'max_age_expected': 86400,
'upload_filters': 'backdrop.filter.1',
'queryable': True,
'upload_format': '',
'raw_queries_allowed': True,
'published': False
}
adminAPI_instance = mock_adminAPI.return_value
adminAPI_instance.get_data_set.return_value = None
adminAPI_instance.create_data_set.return_value = {
'bearer_token': 'foo2',
'data_group': 'floop',
'data_type': 'wibble'
}
data_set_instance = MagicMock()
data_set_instance.get.return_value = {
'data': [
{'data': 'point'},
],
}
mock_data_set.from_group_and_type.return_value = data_set_instance
output_data_set = get_or_get_and_create_output_dataset(
transform_config,
input_dataset_config)
assert_that(output_data_set, equal_to(data_set_instance))
adminAPI_instance.create_data_set.assert_called_once_with({
'data_group': 'floop',
'data_type': 'wibble',
'bearer_token': 'foo2',
'realtime': False,
'max_age_expected': 86400,
'published': False
})
mock_data_set.from_group_and_type.assert_called_once_with(
'http://backdrop/data', 'floop', 'wibble', token='foo2',
)
class GetQueryParametersTestCase(unittest.TestCase):
def test_same_timestamps_period(self):
earliest = datetime(2014, 12, 14, 12, 00, 00, tzinfo=pytz.utc)
latest = datetime(2014, 12, 14, 12, 00, 00, tzinfo=pytz.utc)
transform = {
'query-parameters': {
'period': 'week',
}
}
query_parameters = get_query_parameters(transform, earliest, latest)
assert_that(query_parameters, has_entries({
'period': 'week',
'duration': 1,
'start_at': '2014-12-14T12:00:00+00:00',
}))
def test_same_timestamps_non_period(self):
earliest = datetime(2014, 12, 14, 12, 00, 00, tzinfo=pytz.utc)
latest = datetime(2014, 12, 14, 12, 00, 00, tzinfo=pytz.utc)
transform = {
'query-parameters': {
}
}
query_parameters = get_query_parameters(transform, earliest, latest)
assert_that(query_parameters, has_entries({
'start_at': '2014-12-14T12:00:00+00:00',
'end_at': '2014-12-14T12:00:00+00:00',
'inclusive': 'true',
}))
@freeze_time('2015-02-18')
def test_period_no_intra_week(self):
earliest = datetime(2015, 2, 10, 12, 00, 00, tzinfo=pytz.utc)
latest = datetime(2015, 2, 17, 12, 00, 00, tzinfo=pytz.utc)
transform = {
'query-parameters': {
'period': 'week',
}
}
query_parameters = get_query_parameters(transform, earliest, latest)
assert_that(query_parameters, has_entries({
'start_at': '2015-02-09T00:00:00+00:00',
'end_at': '2015-02-16T00:00:00+00:00',
'inclusive': 'true',
}))
@freeze_time('2015-02-24')
def test_period(self):
earliest = datetime(2015, 2, 10, 12, 00, 00, tzinfo=pytz.utc)
latest = datetime(2015, 2, 17, 12, 00, 00, tzinfo=pytz.utc)
transform = {
'query-parameters': {
'period': 'week',
}
}
query_parameters = get_query_parameters(transform, earliest, latest)
assert_that(query_parameters, has_entries({
'start_at': '2015-02-09T00:00:00+00:00',
'end_at': '2015-02-23T00:00:00+00:00',
'inclusive': 'true',
}))
|
|
import pytest
import socket
from aiohttp.parsers import StreamWriter, CORK
from unittest import mock
# nodelay
def test_nodelay_default(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
assert not writer.tcp_nodelay
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_set_nodelay_no_change(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_nodelay(False)
assert not writer.tcp_nodelay
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_set_nodelay_enable(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_nodelay(True)
assert writer.tcp_nodelay
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_set_nodelay_enable_and_disable(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_nodelay(True)
writer.set_tcp_nodelay(False)
assert not writer.tcp_nodelay
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
def test_set_nodelay_enable_ipv6(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_nodelay(True)
assert writer.tcp_nodelay
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
reason="requires unix sockets")
def test_set_nodelay_enable_unix(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_nodelay(True)
assert writer.tcp_nodelay
def test_set_nodelay_enable_no_socket(loop):
transport = mock.Mock()
transport.get_extra_info.return_value = None
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_nodelay(True)
assert writer.tcp_nodelay
assert writer._socket is None
# cork
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_cork_default(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
assert not writer.tcp_cork
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_no_change(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_cork(False)
assert not writer.tcp_cork
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_cork(True)
assert writer.tcp_cork
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_and_disable(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_cork(True)
writer.set_tcp_cork(False)
assert not writer.tcp_cork
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_ipv6(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_cork(True)
assert writer.tcp_cork
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
reason="requires unix sockets")
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_unix(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_cork(True)
assert writer.tcp_cork
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_cork_enable_no_socket(loop):
transport = mock.Mock()
transport.get_extra_info.return_value = None
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_cork(True)
assert writer.tcp_cork
assert writer._socket is None
# cork and nodelay interference
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_enabling_cork_disables_nodelay(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_nodelay(True)
writer.set_tcp_cork(True)
assert not writer.tcp_nodelay
assert not s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
assert writer.tcp_cork
assert s.getsockopt(socket.IPPROTO_TCP, CORK)
@pytest.mark.skipif(CORK is None, reason="TCP_CORK or TCP_NOPUSH required")
def test_set_enabling_nodelay_disables_cork(loop):
transport = mock.Mock()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
transport.get_extra_info.return_value = s
proto = mock.Mock()
reader = mock.Mock()
writer = StreamWriter(transport, proto, reader, loop)
writer.set_tcp_cork(True)
writer.set_tcp_nodelay(True)
assert writer.tcp_nodelay
assert s.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
assert not writer.tcp_cork
assert not s.getsockopt(socket.IPPROTO_TCP, CORK)
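# A minimal usage sketch (not part of the test suite above): corking the
# writer while several small chunks are queued, then uncorking and switching
# back to TCP_NODELAY for latency-sensitive traffic.  Only the
# set_tcp_cork/set_tcp_nodelay calls exercised by the tests are used; the
# mocked transport and the chunk contents are purely illustrative.
def _cork_usage_sketch(loop):
    transport = mock.Mock()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    transport.get_extra_info.return_value = sock
    writer = StreamWriter(transport, mock.Mock(), mock.Mock(), loop)
    writer.set_tcp_cork(True)   # batch the small writes into full segments
    for chunk in (b'HTTP/1.1 200 OK\r\n', b'Content-Length: 0\r\n', b'\r\n'):
        transport.write(chunk)
    writer.set_tcp_cork(False)    # uncork: flush whatever is buffered
    writer.set_tcp_nodelay(True)  # per the tests above, this also clears cork
    sock.close()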
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class QueryTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries("UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://autopilot.twilio.com/v1/Assistants/UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Queries/UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"language": "language",
"date_created": "2015-07-30T20:00:00Z",
"model_build_sid": "UGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"query": "query",
"date_updated": "2015-07-30T20:00:00Z",
"status": "status",
"sample_sid": "UFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"assistant_sid": "UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"results": {
"task": "name",
"fields": [
{
"name": "name",
"value": "value",
"type": "type"
}
]
},
"url": "https://autopilot.twilio.com/v1/Assistants/UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queries/UHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "UHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"source_channel": "voice",
"dialogue_sid": "UKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries("UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries.list()
self.holodeck.assert_has_request(Request(
'get',
'https://autopilot.twilio.com/v1/Assistants/UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Queries',
))
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"queries": [],
"meta": {
"previous_page_url": null,
"next_page_url": null,
"first_page_url": "https://autopilot.twilio.com/v1/Assistants/UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queries?Status=status&ModelBuild=UGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&DialogueSid=UKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Language=language&PageSize=50&Page=0",
"page": 0,
"key": "queries",
"url": "https://autopilot.twilio.com/v1/Assistants/UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queries?Status=status&ModelBuild=UGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&DialogueSid=UKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Language=language&PageSize=50&Page=0",
"page_size": 50
}
}
'''
))
actual = self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"queries": [
{
"language": "language",
"date_created": "2015-07-30T20:00:00Z",
"model_build_sid": "UGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"query": "query",
"date_updated": "2015-07-30T20:00:00Z",
"status": "status",
"sample_sid": "UFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"assistant_sid": "UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"results": {
"task": "name",
"fields": [
{
"name": "name",
"value": "value",
"type": "type"
}
]
},
"url": "https://autopilot.twilio.com/v1/Assistants/UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queries/UHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "UHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"source_channel": null,
"dialogue_sid": "UKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
],
"meta": {
"previous_page_url": null,
"next_page_url": null,
"first_page_url": "https://autopilot.twilio.com/v1/Assistants/UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queries?Status=status&ModelBuild=UGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&DialogueSid=UKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Language=language&PageSize=50&Page=0",
"page": 0,
"key": "queries",
"url": "https://autopilot.twilio.com/v1/Assistants/UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queries?Status=status&ModelBuild=UGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&DialogueSid=UKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa&Language=language&PageSize=50&Page=0",
"page_size": 50
}
}
'''
))
actual = self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries.list()
self.assertIsNotNone(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries.create(language="language", query="query")
values = {'Language': "language", 'Query': "query", }
self.holodeck.assert_has_request(Request(
'post',
'https://autopilot.twilio.com/v1/Assistants/UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Queries',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"language": "language",
"date_created": "2015-07-30T20:00:00Z",
"model_build_sid": "UGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"query": "query",
"date_updated": "2015-07-30T20:00:00Z",
"status": "status",
"sample_sid": "UFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"assistant_sid": "UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"results": {
"task": "name",
"fields": [
{
"name": "name",
"value": "value",
"type": "type"
}
]
},
"url": "https://autopilot.twilio.com/v1/Assistants/UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queries/UHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "UHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"source_channel": "voice",
"dialogue_sid": null
}
'''
))
actual = self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries.create(language="language", query="query")
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries("UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://autopilot.twilio.com/v1/Assistants/UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Queries/UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"language": "language",
"date_created": "2015-07-30T20:00:00Z",
"model_build_sid": "UGaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"query": "query",
"date_updated": "2015-07-30T20:00:00Z",
"status": "status",
"sample_sid": "UFaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"assistant_sid": "UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"results": {
"task": "name",
"fields": [
{
"name": "name",
"value": "value",
"type": "type"
}
]
},
"url": "https://autopilot.twilio.com/v1/Assistants/UAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queries/UHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "UHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"source_channel": "sms",
"dialogue_sid": "UKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
'''
))
actual = self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries("UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries("UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://autopilot.twilio.com/v1/Assistants/UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Queries/UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queries("UHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
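# A usage sketch outside the Holodeck harness: consuming the queries returned
# by .list().  The attribute names mirror the JSON fields in the responses
# above and are assumed to be exposed as properties on the returned instances.
#
#     queries = client.autopilot.v1.assistants("UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
#         .queries.list()
#     for query in queries:
#         print(query.sid, query.status, query.query)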
|
|
"""
Notes
-----
Important attributes of continuous (order > 0) :class:`Field` and
:class:`SurfaceField` instances:
- `vertex_remap` : `econn[:, :n_vertex] = vertex_remap[conn]`
- `vertex_remap_i` : `conn = vertex_remap_i[econn[:, :n_vertex]]`
where `conn` is the mesh vertex connectivity, `econn` is the
region-local field connectivity.
"""
import numpy as nm
from sfepy.base.base import output, get_default, assert_
from sfepy.base.base import Struct
import fea
from sfepy.discrete.common.fields import parse_shape, Field
from sfepy.discrete.fem.mesh import Mesh
from sfepy.discrete.fem.meshio import convert_complex_output
from sfepy.discrete.fem.utils import (extend_cell_data, prepare_remap,
invert_remap, get_min_value)
from sfepy.discrete.fem.fe_surface import FESurface
from sfepy.discrete.integrals import Integral
from sfepy.discrete.fem.linearizer import (get_eval_dofs, get_eval_coors,
create_output)
def get_eval_expression(expression,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None):
"""
Get the function for evaluating an expression given a list of elements,
and reference element coordinates.
"""
from sfepy.discrete.evaluate import eval_in_els_and_qp
def _eval(iels, coors):
val = eval_in_els_and_qp(expression, iels, coors,
fields, materials, variables,
functions=functions, mode=mode,
term_mode=term_mode,
extra_args=extra_args, verbose=verbose,
kwargs=kwargs)
return val[..., 0]
return _eval
def create_expression_output(expression, name, primary_field_name,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None,
min_level=0, max_level=1, eps=1e-4):
"""
Create output mesh and data for the expression using the adaptive
linearizer.
Parameters
----------
expression : str
The expression to evaluate.
name : str
The name of the data.
primary_field_name : str
The name of field that defines the element groups and polynomial
spaces.
fields : dict
The dictionary of fields used in `variables`.
materials : Materials instance
The materials used in the expression.
variables : Variables instance
The variables used in the expression.
functions : Functions instance, optional
The user functions for materials etc.
mode : one of 'eval', 'el_avg', 'qp'
The evaluation mode - 'qp' requests the values in quadrature points,
'el_avg' element averages and 'eval' means integration over
each term region.
term_mode : str
The term call mode - some terms support different call modes
and depending on the call mode different values are
returned.
extra_args : dict, optional
Extra arguments to be passed to terms in the expression.
verbose : bool
If False, reduce verbosity.
kwargs : dict, optional
The variables (dictionary of (variable name) : (Variable
instance)) to be used in the expression.
min_level : int
The minimum required level of mesh refinement.
max_level : int
The maximum level of mesh refinement.
eps : float
The relative tolerance parameter of mesh adaptivity.
Returns
-------
out : dict
The output dictionary.
"""
field = fields[primary_field_name]
vertex_coors = field.coors[:field.n_vertex_dof, :]
ap = field.ap
ps = ap.interp.poly_spaces['v']
gps = ap.interp.gel.interp.poly_spaces['v']
vertex_conn = ap.econn[:, :field.gel.n_vertex]
eval_dofs = get_eval_expression(expression,
fields, materials, variables,
functions=functions,
mode=mode, extra_args=extra_args,
verbose=verbose, kwargs=kwargs)
eval_coors = get_eval_coors(vertex_coors, vertex_conn, gps)
(level, coors, conn,
vdofs, mat_ids) = create_output(eval_dofs, eval_coors,
vertex_conn.shape[0], ps,
min_level=min_level,
max_level=max_level, eps=eps)
mesh = Mesh.from_data('linearized_mesh', coors, None, [conn], [mat_ids],
field.domain.mesh.descs)
out = {}
out[name] = Struct(name='output_data', mode='vertex',
data=vdofs, var_name=name, dofs=None,
mesh=mesh, level=level)
out = convert_complex_output(out)
return out
class FEField(Field):
"""
Base class for finite element fields.
Notes
-----
- interps and hence node_descs are per region (must have single
geometry!)
Field shape information:
- ``shape`` - the shape of the base functions in a point
- ``n_components`` - the number of DOFs per FE node
- ``val_shape`` - the shape of field value (the product of DOFs and
base functions) in a point
"""
def __init__(self, name, dtype, shape, region, approx_order=1):
"""
Create a finite element field.
Parameters
----------
name : str
The field name.
dtype : numpy.dtype
The field data type: float64 or complex128.
shape : int/tuple/str
The field shape: 1 or (1,) or 'scalar', space dimension (2, or (2,)
or 3 or (3,)) or 'vector', or a tuple. The field shape determines
the shape of the FE base functions and is related to the number of
components of variables and to the DOF per node count, depending
on the field kind.
region : Region
The region where the field is defined.
approx_order : int or tuple
The FE approximation order. The tuple form is (order, has_bubble),
e.g. (1, True) means order 1 with a bubble function.
Notes
-----
Assumes one cell type for the whole region!
"""
shape = parse_shape(shape, region.domain.shape.dim)
if not self._check_region(region):
raise ValueError('unsuitable region for field %s! (%s)' %
(name, region.name))
Struct.__init__(self, name=name, dtype=dtype, shape=shape,
region=region)
self.domain = self.region.domain
self._set_approx_order(approx_order)
self._setup_geometry()
self._setup_kind()
self._setup_shape()
self._create_interpolant()
self._setup_approximations()
self._setup_global_base()
self.setup_coors()
self.clear_mappings(clear_all=True)
def _set_approx_order(self, approx_order):
"""
Set a uniform approximation order.
"""
if isinstance(approx_order, tuple):
self.approx_order = approx_order[0]
self.force_bubble = approx_order[1]
else:
self.approx_order = approx_order
self.force_bubble = False
def _create_interpolant(self):
name = '%s_%s_%s_%d%s' % (self.gel.name, self.space,
self.poly_space_base, self.approx_order,
'B' * self.force_bubble)
self.interp = fea.Interpolant(name, self.gel, self.space,
self.poly_space_base, self.approx_order,
self.force_bubble)
def get_true_order(self):
"""
Get the true approximation order depending on the reference
element geometry.
For example, for P1 (linear) approximation the true order is 1,
while for Q1 (bilinear) approximation in 2D the true order is 2.
"""
gel = self.gel
if (gel.dim + 1) == gel.n_vertex:
order = self.approx_order
else:
order = gel.dim * self.approx_order
if self.force_bubble:
bubble_order = gel.dim + 1
order = max(order, bubble_order)
return order
def is_higher_order(self):
"""
Return True, if the field's approximation order is greater than one.
"""
return self.force_bubble or (self.approx_order > 1)
def _setup_global_base(self):
"""
Setup global DOF/base functions, their indices and connectivity of the
field. Called methods implemented in subclasses.
"""
self._setup_facet_orientations()
self._init_econn()
self.n_vertex_dof, self.vertex_remap = self._setup_vertex_dofs()
self.vertex_remap_i = invert_remap(self.vertex_remap)
aux = self._setup_edge_dofs()
self.n_edge_dof, self.edge_dofs, self.edge_remap = aux
aux = self._setup_face_dofs()
self.n_face_dof, self.face_dofs, self.face_remap = aux
aux = self._setup_bubble_dofs()
self.n_bubble_dof, self.bubble_dofs, self.bubble_remap = aux
self.n_nod = self.n_vertex_dof + self.n_edge_dof \
+ self.n_face_dof + self.n_bubble_dof
self._setup_esurface()
def _setup_esurface(self):
"""
Setup extended surface entities (edges in 2D, faces in 3D),
i.e. indices of surface entities into the extended connectivity.
"""
node_desc = self.node_desc
ap = self.ap
gel = ap.interp.gel
ap.efaces = gel.get_surface_entities().copy()
nd = node_desc.edge
if nd is not None:
efs = []
for eof in gel.get_edges_per_face():
efs.append(nm.concatenate([nd[ie] for ie in eof]))
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
ap.efaces = nm.hstack((ap.efaces, efs))
efs = node_desc.face
if efs is not None:
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
ap.efaces = nm.hstack((ap.efaces, efs))
def setup_coors(self, coors=None):
"""
Setup coordinates of field nodes.
"""
mesh = self.domain.mesh
self.coors = nm.empty((self.n_nod, mesh.dim), nm.float64)
if coors is None:
coors = mesh.coors
# Mesh vertex nodes.
if self.n_vertex_dof:
indx = self.vertex_remap_i
self.coors[:self.n_vertex_dof] = nm.take(coors,
indx.astype(nm.int32),
axis=0)
self.ap.eval_extra_coor(self.coors, coors)
def get_vertices(self):
"""
Return indices of vertices belonging to the field region.
"""
return self.vertex_remap_i
def _get_facet_dofs(self, rfacets, remap, dofs):
facets = remap[rfacets]
return dofs[facets[facets >= 0]].ravel()
def get_data_shape(self, integral, integration='volume', region_name=None):
"""
Get element data dimensions.
Parameters
----------
integral : Integral instance
The integral describing used numerical quadrature.
integration : 'volume', 'plate', 'surface', 'surface_extra' or 'point'
The term integration type.
region_name : str
The name of surface region, required when `shape_kind` is
'surface'.
Returns
-------
data_shape : 4 ints
The `(n_el, n_qp, dim, n_en)` for volume shape kind,
`(n_fa, n_qp, dim, n_fn)` for surface shape kind and
`(n_nod, 0, 0, 1)` for point shape kind.
Notes
-----
- `n_el`, `n_fa` = number of elements/facets
- `n_qp` = number of quadrature points per element/facet
- `dim` = spatial dimension
- `n_en`, `n_fn` = number of element/facet nodes
- `n_nod` = number of element nodes
"""
ap = self.ap
region = self.domain.regions[region_name]
shape = region.shape
dim = region.dim
if integration in ('surface', 'surface_extra'):
sd = ap.surface_data[region_name]
# This works also for surface fields.
key = sd.face_type
weights = ap.get_qp(key, integral).weights
n_qp = weights.shape[0]
if integration == 'surface':
data_shape = (sd.n_fa, n_qp, dim, ap.n_ep[key])
else:
data_shape = (sd.n_fa, n_qp, dim, ap.n_ep['v'])
elif integration in ('volume', 'plate'):
_, weights = integral.get_qp(self.gel.name)
n_qp = weights.shape[0]
data_shape = (shape.n_cell, n_qp, dim, ap.n_ep['v'])
elif integration == 'point':
dofs = self.get_dofs_in_region(region, merge=True)
data_shape = (dofs.shape[0], 0, 0, 1)
else:
raise NotImplementedError('unsupported integration! (%s)'
% integration)
return data_shape
def get_dofs_in_region(self, region, merge=True):
"""
Return indices of DOFs that belong to the given region and group.
"""
node_desc = self.node_desc
dofs = []
vdofs = nm.empty((0,), dtype=nm.int32)
if node_desc.vertex is not None:
vdofs = self.vertex_remap[region.vertices]
vdofs = vdofs[vdofs >= 0]
dofs.append(vdofs)
edofs = nm.empty((0,), dtype=nm.int32)
if node_desc.edge is not None:
edofs = self._get_facet_dofs(region.edges,
self.edge_remap,
self.edge_dofs)
dofs.append(edofs)
fdofs = nm.empty((0,), dtype=nm.int32)
if node_desc.face is not None:
fdofs = self._get_facet_dofs(region.faces,
self.face_remap,
self.face_dofs)
dofs.append(fdofs)
bdofs = nm.empty((0,), dtype=nm.int32)
if (node_desc.bubble is not None) and region.has_cells():
els = self.bubble_remap[region.cells]
bdofs = self.bubble_dofs[els[els >= 0]].ravel()
dofs.append(bdofs)
if merge:
dofs = nm.concatenate(dofs)
return dofs
def extend_dofs(self, dofs, fill_value=None):
"""
Extend DOFs to the whole domain using the `fill_value`, or the
smallest value in `dofs` if `fill_value` is None.
"""
if fill_value is None:
if nm.isrealobj(dofs):
fill_value = get_min_value(dofs)
else:
# Complex values - treat real and imaginary parts separately.
fill_value = get_min_value(dofs.real)
fill_value += 1j * get_min_value(dofs.imag)
if self.approx_order != 0:
indx = self.get_vertices()
n_nod = self.domain.shape.n_nod
new_dofs = nm.empty((n_nod, dofs.shape[1]), dtype=self.dtype)
new_dofs.fill(fill_value)
new_dofs[indx] = dofs[:indx.size]
else:
new_dofs = extend_cell_data(dofs, self.domain, self.region,
val=fill_value)
return new_dofs
def remove_extra_dofs(self, dofs):
"""
Remove DOFs defined in higher order nodes (order > 1).
"""
if self.approx_order != 0:
new_dofs = dofs[:self.n_vertex_dof]
else:
new_dofs = dofs
return new_dofs
def linearize(self, dofs, min_level=0, max_level=1, eps=1e-4):
"""
Linearize the solution for post-processing.
Parameters
----------
dofs : array, shape (n_nod, n_component)
The array of DOFs reshaped so that each column corresponds
to one component.
min_level : int
The minimum required level of mesh refinement.
max_level : int
The maximum level of mesh refinement.
eps : float
The relative tolerance parameter of mesh adaptivity.
Returns
-------
mesh : Mesh instance
The adapted, nonconforming, mesh.
vdofs : array
The DOFs defined in vertices of `mesh`.
levels : array of ints
The refinement level used for each element group.
"""
assert_(dofs.ndim == 2)
n_nod, dpn = dofs.shape
assert_(n_nod == self.n_nod)
assert_(dpn == self.shape[0])
vertex_coors = self.coors[:self.n_vertex_dof, :]
ap = self.ap
ps = ap.interp.poly_spaces['v']
gps = ap.interp.gel.interp.poly_spaces['v']
vertex_conn = ap.econn[:, :self.gel.n_vertex]
eval_dofs = get_eval_dofs(dofs, ap.econn, ps, ori=ap.ori)
eval_coors = get_eval_coors(vertex_coors, vertex_conn, gps)
(level, coors, conn,
vdofs, mat_ids) = create_output(eval_dofs, eval_coors,
vertex_conn.shape[0], ps,
min_level=min_level,
max_level=max_level, eps=eps)
mesh = Mesh.from_data('linearized_mesh', coors, None, [conn], [mat_ids],
self.domain.mesh.descs)
return mesh, vdofs, level
def get_output_approx_order(self):
"""
Get the approximation order used in the output file.
"""
return min(self.approx_order, 1)
def create_output(self, dofs, var_name, dof_names=None,
key=None, extend=True, fill_value=None,
linearization=None):
"""
Convert the DOFs corresponding to the field to a dictionary of
output data usable by Mesh.write().
Parameters
----------
dofs : array, shape (n_nod, n_component)
The array of DOFs reshaped so that each column corresponds
to one component.
var_name : str
The variable name corresponding to `dofs`.
dof_names : tuple of str
The names of DOF components.
key : str, optional
The key to be used in the output dictionary instead of the
variable name.
extend : bool
Extend the DOF values to cover the whole domain.
fill_value : float or complex
The value used to fill the missing DOF values if `extend` is True.
linearization : Struct or None
The linearization configuration for higher order approximations.
Returns
-------
out : dict
The output dictionary.
"""
linearization = get_default(linearization, Struct(kind='strip'))
out = {}
if linearization.kind is None:
out[key] = Struct(name='output_data', mode='full',
data=dofs, var_name=var_name,
dofs=dof_names, field_name=self.name)
elif ((not self.is_higher_order())
or (linearization.kind == 'strip')):
if extend:
ext = self.extend_dofs(dofs, fill_value)
else:
ext = self.remove_extra_dofs(dofs)
if ext is not None:
approx_order = self.get_output_approx_order()
if approx_order != 0:
# Has vertex data.
out[key] = Struct(name='output_data', mode='vertex',
data=ext, var_name=var_name,
dofs=dof_names)
else:
ext.shape = (ext.shape[0], 1, ext.shape[1], 1)
out[key] = Struct(name='output_data', mode='cell',
data=ext, var_name=var_name,
dofs=dof_names)
else:
mesh, vdofs, levels = self.linearize(dofs,
linearization.min_level,
linearization.max_level,
linearization.eps)
out[key] = Struct(name='output_data', mode='vertex',
data=vdofs, var_name=var_name, dofs=dof_names,
mesh=mesh, levels=levels)
out = convert_complex_output(out)
return out
def create_mesh(self, extra_nodes=True):
"""
Create a mesh from the field region, optionally including the field
extra nodes.
"""
mesh = self.domain.mesh
if self.approx_order != 0:
ap = self.ap
if extra_nodes:
conn = ap.econn
else:
conn = ap.econn[:, :self.gel.n_vertex]
conns = [conn]
mat_ids = [mesh.cmesh.cell_groups]
descs = mesh.descs[:1]
if extra_nodes:
coors = self.coors
else:
coors = self.coors[:self.n_vertex_dof]
mesh = Mesh.from_data(self.name, coors, None, conns,
mat_ids, descs)
return mesh
def get_evaluate_cache(self, cache=None, share_geometry=False,
verbose=False):
"""
Get the evaluate cache for :func:`Variable.evaluate_at()
<sfepy.discrete.variables.Variable.evaluate_at()>`.
Parameters
----------
cache : Struct instance, optional
Optionally, use the provided instance to store the cache data.
share_geometry : bool
Set to True to indicate that all the evaluations will work on the
same region. Certain data are then computed only for the first
probe and cached.
verbose : bool
If False, reduce verbosity.
Returns
-------
cache : Struct instance
The evaluate cache.
"""
import time
try:
from scipy.spatial import cKDTree as KDTree
except ImportError:
from scipy.spatial import KDTree
from sfepy.discrete.fem.geometry_element import create_geometry_elements
if cache is None:
cache = Struct(name='evaluate_cache')
tt = time.clock()
if (cache.get('cmesh', None) is None) or not share_geometry:
mesh = self.create_mesh(extra_nodes=False)
cache.cmesh = cmesh = mesh.cmesh
gels = create_geometry_elements()
cmesh.set_local_entities(gels)
cmesh.setup_entities()
cache.centroids = cmesh.get_centroids(cmesh.tdim)
if self.gel.name != '3_8':
cache.normals0 = cmesh.get_facet_normals()
cache.normals1 = None
else:
cache.normals0 = cmesh.get_facet_normals(0)
cache.normals1 = cmesh.get_facet_normals(1)
output('cmesh setup: %f s' % (time.clock()-tt), verbose=verbose)
tt = time.clock()
if (cache.get('kdtree', None) is None) or not share_geometry:
cache.kdtree = KDTree(cmesh.coors)
output('kdtree: %f s' % (time.clock()-tt), verbose=verbose)
return cache
def interp_to_qp(self, dofs):
"""
Interpolate DOFs into quadrature points.
The quadrature order is given by the field approximation order.
Parameters
----------
dofs : array
The array of DOF values of shape `(n_nod, n_component)`.
Returns
-------
data_qp : array
The values interpolated into the quadrature points.
integral : Integral
The corresponding integral defining the quadrature points.
"""
integral = Integral('i', order=self.approx_order)
ap = self.ap
bf = ap.get_base('v', False, integral)
bf = bf[:,0,:].copy()
data_qp = nm.dot(bf, dofs[ap.econn])
data_qp = nm.swapaxes(data_qp, 0, 1)
data_qp.shape = data_qp.shape + (1,)
return data_qp, integral
def get_coor(self, nods=None):
"""
Get coordinates of the field nodes.
Parameters
----------
nods : array, optional
The indices of the required nodes. If not given, the
coordinates of all the nodes are returned.
"""
if nods is None:
return self.coors
else:
return self.coors[nods]
def create_mapping(self, region, integral, integration):
"""
Create a new reference mapping.
"""
out = self.ap.describe_geometry(self, integration, region, integral,
return_mapping=True)
return out
class VolumeField(FEField):
"""
Finite element field base class over volume elements (element dimension
equals space dimension).
"""
def _check_region(self, region):
"""
Check whether the `region` can be used for the
field.
Returns
-------
ok : bool
True if the region is usable for the field.
"""
ok = True
domain = region.domain
if region.kind != 'cell':
output("bad region kind! (is: %r, should be: 'cell')"
% region.kind)
ok = False
elif (region.kind_tdim != domain.shape.tdim):
output('cells with a bad topological dimension! (%d == %d)'
% (region.kind_tdim, domain.shape.tdim))
ok = False
return ok
def _setup_geometry(self):
"""
Setup the field region geometry.
"""
cmesh = self.domain.cmesh
for key, gel in self.domain.geom_els.iteritems():
ct = cmesh.cell_types
if (ct[self.region.cells] == cmesh.key_to_index[gel.name]).all():
self.gel = gel
break
else:
raise ValueError('region %s of field %s contains multiple'
' reference geometries!'
% (self.region.name, self.name))
self.is_surface = False
def _create_interpolant(self):
name = '%s_%s_%s_%d%s' % (self.gel.name, self.space,
self.poly_space_base, self.approx_order,
'B' * self.force_bubble)
self.interp = fea.Interpolant(name, self.gel, self.space,
self.poly_space_base, self.approx_order,
self.force_bubble)
def _setup_approximations(self):
name = self.interp.name + '_%s' % self.region.name
self.ap = fea.Approximation(name, self.interp, self.region)
def _init_econn(self):
"""
Initialize the extended DOF connectivity.
"""
ap = self.ap
n_ep = ap.n_ep['v']
n_cell = self.region.get_n_cells()
ap.econn = nm.zeros((n_cell, n_ep), nm.int32)
def _setup_vertex_dofs(self):
"""
Setup vertex DOF connectivity.
"""
if self.node_desc.vertex is None:
return 0, None
region = self.region
cmesh = self.domain.cmesh
conn, offsets = cmesh.get_incident(0, region.cells, region.tdim,
ret_offsets=True)
vertices = nm.unique(conn)
remap = prepare_remap(vertices, region.n_v_max)
n_dof = vertices.shape[0]
aux = nm.unique(nm.diff(offsets))
assert_(len(aux) == 1, 'region with multiple reference geometries!')
offset = aux[0]
ap = self.ap
# Remap vertex node connectivity to field-local numbering.
aux = conn.reshape((-1, offset)).astype(nm.int32)
ap.econn[:, :offset] = nm.take(remap, aux)
return n_dof, remap
def setup_extra_data(self, geometry, info, is_trace):
dct = info.dc_type.type
if geometry is not None:
geometry_flag = 'surface' in geometry
else:
geometry_flag = False
if (dct == 'surface') or (geometry_flag):
reg = info.get_region()
self.domain.create_surface_group(reg)
self._setup_surface_data(reg, is_trace)
elif dct == 'edge':
raise NotImplementedError('dof connectivity type %s' % dct)
elif dct == 'point':
self._setup_point_data(self, info.region)
elif dct not in ('volume', 'scalar', 'plate'):
raise ValueError('unknown dof connectivity type! (%s)' % dct)
def _setup_surface_data(self, region, is_trace=False):
ap = self.ap
if region.name not in ap.surface_data:
ap.setup_surface_data(region)
if region.name in ap.surface_data and is_trace:
sd = ap.surface_data[region.name]
sd.setup_mirror_connectivity(region)
def _setup_point_data(self, field, region):
ap = self.ap
if region.name not in ap.point_data:
ap.setup_point_data(field, region)
def get_econn(self, conn_type, region, is_trace=False, integration=None):
"""
Get extended connectivity of the given type in the given region.
"""
ct = conn_type.type if isinstance(conn_type, Struct) else conn_type
ap = self.ap
if ct in ('volume', 'plate'):
if region.name == self.region.name:
conn = ap.econn
else:
tco = integration in ('volume', 'plate')
cells = region.get_cells(true_cells_only=tco)
ii = self.region.get_cell_indices(cells, true_cells_only=tco)
conn = nm.take(ap.econn, ii, axis=0)
elif ct == 'surface':
sd = ap.surface_data[region.name]
conn = sd.get_connectivity(is_trace=is_trace)
elif ct == 'edge':
raise NotImplementedError('connectivity type %s' % ct)
elif ct == 'point':
conn = ap.point_data[region.name]
else:
raise ValueError('unknown connectivity type! (%s)' % ct)
return conn
def average_qp_to_vertices(self, data_qp, integral):
"""
Average data given in quadrature points in region elements into
region vertices.
.. math::
u_n = \sum_e (u_{e,avg} * volume_e) / \sum_e volume_e
= \sum_e \int_{volume_e} u / \sum_e volume_e
"""
region = self.region
n_cells = region.get_n_cells()
if n_cells != data_qp.shape[0]:
msg = 'incompatible shape! (%d == %d)' % (n_cells,
data_qp.shape[0])
raise ValueError(msg)
n_vertex = self.n_vertex_dof
nc = data_qp.shape[2]
nod_vol = nm.zeros((n_vertex,), dtype=nm.float64)
data_vertex = nm.zeros((n_vertex, nc), dtype=nm.float64)
ap = self.ap
vg = ap.describe_geometry(self, 'volume', ap.region, integral)
volume = nm.squeeze(vg.volume)
iels = ap.region.get_cells()
data_e = nm.zeros((volume.shape[0], 1, nc, 1), dtype=nm.float64)
vg.integrate(data_e, data_qp[iels])
ir = nm.arange(nc, dtype=nm.int32)
conn = ap.econn[:, :self.gel.n_vertex]
for ii, cc in enumerate(conn):
# Assumes unique nodes in cc!
ind2, ind1 = nm.meshgrid(ir, cc)
data_vertex[ind1,ind2] += data_e[iels[ii],0,:,0]
nod_vol[cc] += volume[ii]
data_vertex /= nod_vol[:,nm.newaxis]
return data_vertex
class SurfaceField(FEField):
"""
Finite element field base class over surface (element dimension is one
less than space dimension).
"""
def _check_region(self, region):
"""
Check whether the `region` can be used for the
field.
Returns
-------
ok : bool
True if the region is usable for the field.
"""
ok = ((region.kind_tdim == (region.tdim - 1))
and (region.get_n_cells(True) > 0))
return ok
def _setup_geometry(self):
"""
Setup the field region geometry.
"""
for key, vgel in self.domain.geom_els.iteritems():
self.gel = vgel.surface_facet
break
if self.gel is None:
raise ValueError('cells with no surface!')
self.is_surface = True
def _create_interpolant(self):
name = '%s_%s_%s_%d%s' % (self.gel.name, self.space,
self.poly_space_base, self.approx_order,
'B' * self.force_bubble)
self.interp = fea.SurfaceInterpolant(name, self.gel, self.space,
self.poly_space_base,
self.approx_order,
self.force_bubble)
def _setup_approximations(self):
name = self.interp.name + '_%s' % self.region.name
self.ap = fea.SurfaceApproximation(name, self.interp, self.region)
def setup_extra_data(self, geometry, info, is_trace):
dct = info.dc_type.type
if dct != 'surface':
msg = "dof connectivity type must be 'surface'! (%s)" % dct
raise ValueError(msg)
reg = info.get_region()
ap = self.ap
if reg.name not in ap.surface_data:
# Defined in setup_vertex_dofs()
msg = 'no surface data of surface field! (%s)' % reg.name
raise ValueError(msg)
if reg.name in ap.surface_data and is_trace:
sd = ap.surface_data[reg.name]
sd.setup_mirror_connectivity(reg)
def _init_econn(self):
"""
Initialize the extended DOF connectivity.
"""
ap = self.ap
n_ep = ap.n_ep['v']
n_cell = self.region.get_n_cells(True)
ap.econn = nm.zeros((n_cell, n_ep), nm.int32)
def _setup_vertex_dofs(self):
"""
Setup vertex DOF connectivity.
"""
if self.node_desc.vertex is None:
return 0, None
region = self.region
remap = prepare_remap(region.vertices, region.n_v_max)
n_dof = region.vertices.shape[0]
ap = self.ap
# Remap vertex node connectivity to field-local numbering.
conn, gel = self.domain.get_conn(ret_gel=True)
faces = gel.get_surface_entities()
aux = FESurface('aux', region, faces, conn)
ap.econn[:, :aux.n_fp] = aux.leconn
ap.surface_data[region.name] = aux
return n_dof, remap
def _setup_bubble_dofs(self):
"""
Setup bubble DOF connectivity.
"""
return 0, None, None
def get_econn(self, conn_type, region, is_trace=False,
integration=None):
"""
Get extended connectivity of the given type in the given region.
"""
ct = conn_type.type if isinstance(conn_type, Struct) else conn_type
if ct != 'surface':
msg = 'connectivity type must be "surface"! (%s)' % ct
raise ValueError(msg)
ap = self.ap
sd = ap.surface_data[region.name]
conn = sd.get_connectivity(local=True, is_trace=is_trace)
return conn
def average_qp_to_vertices(self, data_qp, integral):
"""
Average data given in quadrature points in region elements into
region vertices.
.. math::
u_n = \sum_e (u_{e,avg} * area_e) / \sum_e area_e
= \sum_e \int_{area_e} u / \sum_e area_e
"""
region = self.region
n_cells = region.get_n_cells(True)
if n_cells != data_qp.shape[0]:
msg = 'incompatible shape! (%d == %d)' % (n_cells,
data_qp.shape[0])
raise ValueError(msg)
n_vertex = len(region.vertices)
nc = data_qp.shape[2]
nod_vol = nm.zeros((n_vertex,), dtype=nm.float64)
data_vertex = nm.zeros((n_vertex, nc), dtype=nm.float64)
ap = self.ap
sg = ap.describe_geometry(self, 'surface', ap.region, integral)
area = nm.squeeze(sg.volume)
n_cells = region.get_n_cells(True)
iels = nm.arange(n_cells, dtype=nm.int32)
data_e = nm.zeros((area.shape[0], 1, nc, 1), dtype=nm.float64)
sg.integrate(data_e, data_qp[iels])
ir = nm.arange(nc, dtype=nm.int32)
sd = self.domain.surface_groups[region.name]
# Should be vertex connectivity!
conn = sd.get_connectivity(local=True)
for ii, cc in enumerate(conn):
# Assumes unique nodes in cc!
ind2, ind1 = nm.meshgrid(ir, cc)
data_vertex[ind1,ind2] += data_e[iels[ii],0,:,0]
nod_vol[cc] += area[ii]
data_vertex /= nod_vol[:,nm.newaxis]
return data_vertex
class H1Mixin(Struct):
"""
Methods of fields specific to H1 space.
"""
def _setup_shape(self):
"""
Setup the field's shape-related attributes, see :class:`Field`.
"""
self.n_components = nm.prod(self.shape)
self.val_shape = self.shape
|
|
import pandas as pd
from bokeh.embed import components
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
# creates your plot
date_formatter = DatetimeTickFormatter(microseconds=['%f'],
milliseconds=['%S.%2Ns'],
seconds=[':%Ss'],
minsec=[':%Mm:%Ss'],
minutes=['%H:%M:%S'],
hourmin=['%H:%M:'],
hours=["%H:%M"],
days=["%d %b"],
months=["%d %b %Y"],
years=["%b %Y"])
@data_quality(name='temp_xcam', caption='')
def temp_xcam_plot(start_date, end_date):
"""Return a <div> element with a HRS RCAM and BCAM temperature plot.
The plot shows the HRS temperature for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the temperature plot.
"""
title = "HRS Red and Blue Camera Temperature"
y_axis_label = 'Temperature (K)'
# creates your query
table = 'FitsHeaderHrs'
column1 = 'TEM_BCAM'
column2 = 'TEM_RCAM'
logic = " and FileName like 'H%%'"
logic2 = " and FileName like 'R%%'"
sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column1, start_date=start_date, end_date=end_date, table=table, logic=logic)
sql2 = sql.format(column=column2, start_date=start_date, end_date=end_date, table=table, logic=logic2)
df = pd.read_sql(sql1, db.engine)
df2 = pd.read_sql(sql2, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Temperature: </span>
<span style="font-size: 15px;"> @TEMP</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date', y_axis_label=y_axis_label,
x_axis_type='datetime', tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='TEMP', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='TEMP', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.xaxis[0].formatter = date_formatter
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
return p
@data_quality(name='temp_air', caption='')
def temp_air_plot(start_date, end_date):
"""Return a <div> element with a HRS temperature plot.
The plot shows the HRS temperature for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the temperature plot.
"""
title = "HRS Environment Air Temperature"
y_axis_label = 'Temperature (K)'
# creates your query
table = 'FitsHeaderHrs'
column = 'TEM_AIR'
logic = " and FileName like 'H%%'"
logic2 = " and FileName like 'R%%'"
sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic)
sql2 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic2)
df = pd.read_sql(sql1, db.engine)
df2 = pd.read_sql(sql2, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Temperature: </span>
<span style="font-size: 15px;"> @TEMP</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date', y_axis_label=y_axis_label,
x_axis_type='datetime', tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='TEMP', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='TEMP', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.xaxis[0].formatter = date_formatter
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
return p
@data_quality(name='temp_vac', caption='')
def temp_vac_plot(start_date, end_date):
"""Return a <div> element with a HRS temperature plot.
The plot shows the HRS temperature for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the temperature plot.
"""
title = "HRS Vacuum Chamber Wall Temperature"
y_axis_label = 'Temperature (K)'
# creates your query
table = 'FitsHeaderHrs'
column = 'TEM_VAC'
logic = " and FileName like 'H%%'"
logic2 = " and FileName like 'R%%'"
sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic)
sql2 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic2)
df = pd.read_sql(sql1, db.engine)
df2 = pd.read_sql(sql2, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Temperature: </span>
<span style="font-size: 15px;"> @TEMP</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date', y_axis_label=y_axis_label,
x_axis_type='datetime', tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='TEMP', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='TEMP', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='temp_rmir', caption='')
def temp_rmir_plot(start_date, end_date):
"""Return a <div> element with a HRS temperature plot.
The plot shows the HRS temperature for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the temperature plot.
"""
title = "HRS Red Pupil Mirror Cell Temperature"
y_axis_label = 'Temperature (K)'
# creates your query
table = 'FitsHeaderHrs'
column = 'TEM_RMIR'
logic = " and FileName like 'H%%'"
logic2 = " and FileName like 'R%%'"
sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic)
sql2 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic2)
df = pd.read_sql(sql1, db.engine)
df2 = pd.read_sql(sql2, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Temperature: </span>
<span style="font-size: 15px;"> @TEMP</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date', y_axis_label=y_axis_label,
x_axis_type='datetime', tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='TEMP', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='TEMP', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='temp_coll', caption='')
def temp_coll_plot(start_date, end_date):
"""Return a <div> element with a HRS temperature plot.
The plot shows the HRS temperature for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the temperature plot.
"""
title = "HRS Collimator Mount Temperature"
y_axis_label = 'Temperature (K)'
# creates your query
table = 'FitsHeaderHrs'
column = 'TEM_COLL'
logic = " and FileName like 'H%%'"
logic2 = " and FileName like 'R%%'"
sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic)
sql2 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic2)
df = pd.read_sql(sql1, db.engine)
df2 = pd.read_sql(sql2, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Temperature: </span>
<span style="font-size: 15px;"> @TEMP</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date', y_axis_label=y_axis_label,
x_axis_type='datetime', tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='TEMP', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='TEMP', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.xaxis[0].formatter = date_formatter
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
return p
@data_quality(name='temp_ech', caption='')
def temp_ech_plot(start_date, end_date):
"""Return a <div> element with a HRS temperature plot.
The plot shows the HRS temperature for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the temperature plot.
"""
title = "HRS Echelle Mount Temperature"
y_axis_label = 'Temperature (K)'
# creates your query
table = 'FitsHeaderHrs'
column = 'TEM_ECH'
logic = " and FileName like 'H%%'"
logic2 = " and FileName like 'R%%'"
sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic)
sql2 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic2)
df = pd.read_sql(sql1, db.engine)
df2 = pd.read_sql(sql2, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Temperature: </span>
<span style="font-size: 15px;"> @TEMP</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date', y_axis_label=y_axis_label,
x_axis_type='datetime', tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='TEMP', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='TEMP', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.xaxis[0].formatter = date_formatter
p.legend.location = "top_right"
p.legend.click_policy = "hide"
p.legend.background_fill_alpha = 0.3
p.legend.inactive_fill_alpha = 0.8
return p
@data_quality(name='temp_ob', caption='')
def temp_ob_plot(start_date, end_date):
"""Return a <div> element with a HRS temperature plot.
The plot shows the HRS temperature for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the temperature plot.
"""
title = "HRS Optical Bench Temperature"
y_axis_label = 'Temperature (K)'
# creates your query
table = 'FitsHeaderHrs'
column = 'TEM_OB'
logic = " and FileName like 'H%%'"
logic2 = " and FileName like 'R%%'"
sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic)
sql2 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic2)
df = pd.read_sql(sql1, db.engine)
df2 = pd.read_sql(sql2, db.engine)
source = ColumnDataSource(df)
source2 = ColumnDataSource(df2)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Temperature: </span>
<span style="font-size: 15px;"> @TEMP</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date', y_axis_label=y_axis_label,
x_axis_type='datetime', tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='TEMP', color='blue', fill_alpha=0.2, size=12, legend='Blue Arm')
p.scatter(source=source2, x='UTStart', y='TEMP', color='red', fill_alpha=0.2, size=10, legend='Red Arm')
p.xaxis[0].formatter = date_formatter
return p
@data_quality(name='temp_iod', caption='')
def temp_iod_plot(start_date, end_date):
"""Return a <div> element with a HRS temperature plot.
The plot shows the HRS temperature for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the temperature plot.
"""
title = "HRS Iodine Cell Heater Temperature"
y_axis_label = 'Temperature (K)'
# creates your query
table = 'FitsHeaderHrs'
column = 'TEM_IOD'
logic = " and FileName like 'H%%'"
sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
" from {table} join FileData using (FileData_Id) " \
" where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
sql1 = sql.format(column=column, start_date=start_date, end_date=end_date, table=table, logic=logic)
df = pd.read_sql(sql1, db.engine)
source = ColumnDataSource(df)
tool_list = "pan,reset,save,wheel_zoom, box_zoom"
_hover = HoverTool(
tooltips="""
<div>
<div>
<span style="font-size: 15px; font-weight: bold;">Date: </span>
<span style="font-size: 15px;"> @Time</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Temperature: </span>
<span style="font-size: 15px;"> @TEMP</span>
</div>
<div>
<span style="font-size: 15px; font-weight: bold;">Filename: </span>
<span style="font-size: 15px;"> @FileName</span>
</div>
</div>
"""
)
p = figure(title=title,
x_axis_label='Date', y_axis_label=y_axis_label,
x_axis_type='datetime', tools=[tool_list, _hover])
p.scatter(source=source, x='UTStart', y='TEMP', color='purple', fill_alpha=0.2, size=12, legend='Iodine Cell')
p.xaxis[0].formatter = date_formatter
return p
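# The plot functions above differ only in the FITS header column, the plot
# title and, for the iodine cell, the single-arm query.  A possible helper
# factoring out the shared query-and-plot boilerplate is sketched below; the
# helper name is illustrative and it is not registered with @data_quality.
def _hrs_two_arm_temperature_plot(start_date, end_date, column, title):
    """Build a blue-arm/red-arm HRS temperature plot for one header column."""
    sql = "select UTStart, {column} as TEMP, FileName, CONVERT(UTStart,char) AS Time " \
          " from FitsHeaderHrs join FileData using (FileData_Id) " \
          " where UTStart > '{start_date}' and UTStart <'{end_date}' {logic}"
    sources = []
    for logic in (" and FileName like 'H%%'", " and FileName like 'R%%'"):
        query = sql.format(column=column, start_date=start_date,
                           end_date=end_date, logic=logic)
        sources.append(ColumnDataSource(pd.read_sql(query, db.engine)))
    p = figure(title=title, x_axis_label='Date', y_axis_label='Temperature (K)',
               x_axis_type='datetime', tools="pan,reset,save,wheel_zoom,box_zoom")
    p.scatter(source=sources[0], x='UTStart', y='TEMP', color='blue',
              fill_alpha=0.2, size=12, legend='Blue Arm')
    p.scatter(source=sources[1], x='UTStart', y='TEMP', color='red',
              fill_alpha=0.2, size=10, legend='Red Arm')
    p.xaxis[0].formatter = date_formatter
    p.legend.location = "top_right"
    p.legend.click_policy = "hide"
    p.legend.background_fill_alpha = 0.3
    p.legend.inactive_fill_alpha = 0.8
    return p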
|
|
#!/usr/bin/env python3
# To directly call tably from shell, set a symbolic link by running
# ln -sf $PWD/tably.py /usr/local/bin/tably
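# Example invocations (a sketch; the full option list is defined by the
# argument parser further down):
#   tably data.csv                  # print a LaTeX table to stdout
#   tably data.csv -o table.tex     # save the table to a file
#   tably a.csv b.csv -oo           # one .tex file next to each input .csv
#   tably data.csv -k 1 -n -f       # tabular fragment, skipping the header row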
import argparse
import csv
import os
PREAMBLE = r"""\documentclass[11pt, a4paper]{article}
\usepackage{booktabs}
\begin{document}"""
HEADER = r"""\begin{{table}}[htb]
{indent}\centering{caption}{label}
{indent}\begin{{tabular}}{{@{{}}{align}@{{}}}}
{indent}{indent}\toprule"""
FOOTER = r"""{indent}{indent}\bottomrule
{indent}\end{{tabular}}
\end{{table}}"""
LABEL = '\n{indent}\\label{{{label}}}'
CAPTION = '\n{indent}\\caption{{{caption}}}'
class Tably:
"""Object which holds parsed arguments.
Methods:
run: selects the appropriate methods to generate LaTeX code/files
create_table: for each specified file, creates a LaTeX table
create_row: creates a row based on `line` content
combine_tables: combines all tables from input files together
save_single_table: creates and saves a single LaTeX table
get_units: writes the units as a row of the LaTeX table
"""
def __init__(self, args):
"""
Attributes:
files (string): name(s) of the .csv file(s)
no_header (bool): if the .csv contains only content, without a
header (names for the columns)
caption (string): the name of the table, printed above it
label (string): a label by which the table can be referenced
align (string): wanted alignment of the columns
no_indent (bool): if True, do not indent the LaTeX code (normally
indented with 4 spaces per block); doesn't affect the final look
of the table.
outfile (string): name of the file in which to save the results.
separate_outfiles (list): names of the files where each table is saved
skip (int): number of rows in .csv to skip
preamble(bool): create a preamble
sep (string): column separator
units (list): units for each column
fragment (bool): only output content in tabular environment
fragment_skip_header (bool): shortcut of passing -k 1 -n -f
replace (bool): replace existing output file if -o is passed
tex_str (function): escape LaTeX special characters or do nothing
"""
self.files = args.files
self.no_header = args.no_header
self.caption = args.caption
self.label = args.label
self.align = args.align
self.no_indent = args.no_indent
self.outfile = args.outfile
self.separate_outfiles = args.separate_outfiles
self.skip = args.skip
self.preamble = args.preamble
self.sep = get_sep(args.sep)
self.units = args.units
self.fragment = args.fragment
self.fragment_skip_header = args.fragment_skip_header
self.replace = args.replace
self.tex_str = escape if not args.no_escape else lambda x: x
def run(self):
"""The main method.
If all tables need to be put into a single file,
calls `combine_tables` method to generate LaTeX code
and then calls `save_content` function if `outfile` is provided;
otherwise, prints to the console.
If each table needs to be put into a separate file,
calls `save_single_table` method to create and save each table separately.
"""
if self.fragment_skip_header:
self.skip = 1
self.no_header = True
self.fragment = True
if self.fragment:
self.no_indent = True
self.label = None
self.preamble = False
# if all tables need to be put into one file
if self.outfile or self.separate_outfiles is None:
final_content = self.combine_tables()
if not final_content:
return
if self.outfile:
try:
save_content(final_content, self.outfile, self.replace)
except FileNotFoundError:
print('{} is not a valid/known path. Could not save there.'.format(self.outfile))
else:
print(final_content)
# if -oo is passed (could be [])
if self.separate_outfiles is not None:
outs = self.separate_outfiles
if len(outs) == 0:
outs = [ os.path.splitext(file)[0]+'.tex' for file in self.files ]
elif os.path.isdir(outs[0]):
outs = [ os.path.join(outs[0], os.path.splitext(os.path.basename(file))[0])+'.tex' for file in self.files ]
elif len(outs) != len(self.files):
print('WARNING: Number of .csv files and number of output files do not match!')
for file, out in zip(self.files, outs):
self.save_single_table(file, out)
def create_table(self, file):
"""Creates a table from a given .csv file.
        This method implements the conversion of a .csv file into a LaTeX table.
Unless -f is specified, the output is a ready-to-use LaTeX table environment.
All other methods that need to obtain a LaTeX table from a .csv file call this method.
"""
rows = []
indent = 4*' ' if not self.no_indent else ''
try:
with open(file) as infile:
for i, columns in enumerate(csv.reader(infile, delimiter=self.sep)):
if i < self.skip:
continue
rows.append(self.create_row(columns, indent))
except FileNotFoundError:
print("File {} doesn't exist!!\n".format(file))
return ''
if not rows:
print("No table created from the {} file. Check if the file is empty "
"or you used too high skip value.\n".format(file))
return ''
if not self.no_header:
rows.insert(1, r'{0}{0}\midrule'.format(indent))
if self.units:
rows[0] = rows[0] + r'\relax' # fixes problem with \[
units = self.get_units()
rows.insert(1, r'{0}{0}{1} \\'.format(indent, units))
content = '\n'.join(rows)
if not self.fragment:
header = HEADER.format(
label=add_label(self.label, indent),
caption=add_caption(self.caption, indent),
align=format_alignment(self.align, len(columns)),
indent=indent,
)
footer = FOOTER.format(indent=indent)
return '\n'.join((header, content, footer))
else:
return content
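    # Illustrative example (added comment, not in the original source): for a
    # two-column .csv with a header row, create_table() produces roughly
    #   \begin{table}[htb]
    #       \centering
    #       \begin{tabular}{@{}cc@{}}
    #           \toprule
    #           colA & colB \\
    #           \midrule
    #           1 & 2 \\
    #           \bottomrule
    #       \end{tabular}
    #   \end{table}
    # with caption/label lines added only when -c/-l are passed.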
def create_row(self, line, indent):
"""Creates a row based on `line` content"""
return r'{indent}{indent}{content} \\'.format(
indent=indent,
content=' & '.join(self.tex_str(line)))
def combine_tables(self):
"""Combine all tables together and add a preamble if required.
Unless -oo is specified, this is how input tables are arranged.
"""
all_tables = []
if self.label and len(self.files) > 1:
all_tables.append("% don't forget to manually re-label the tables")
for file in self.files:
table = self.create_table(file)
if table:
all_tables.append(table)
if not all_tables:
return None
if self.preamble:
all_tables.insert(0, PREAMBLE)
all_tables.append('\\end{document}\n')
return '\n\n'.join(all_tables)
def save_single_table(self, file, out):
"""Creates and saves a single LaTeX table"""
        # create_table() returns '' on failure; only write a file when there is content
        table = self.create_table(file)
        if table:
            table = [table]
if self.preamble:
table.insert(0, PREAMBLE)
table.append('\\end{document}\n')
final_content = '\n\n'.join(table)
try:
save_content(final_content, out, self.replace)
except FileNotFoundError:
print('{} is not a valid/known path. Could not save there.'.format(out))
def get_units(self):
"""Writes the units as a row of the LaTeX table"""
formatted_units = []
for unit in self.tex_str(self.units):
if unit in '-/0':
formatted_units.append('')
else:
formatted_units.append('[{}]'.format(unit))
return ' & '.join(formatted_units)
def get_sep(sep):
if sep.lower() in ['t', 'tab', '\\t']:
return '\t'
elif sep.lower() in ['s', 'semi', ';']:
return ';'
elif sep.lower() in ['c', 'comma', ',']:
return ','
else:
return sep
def escape(line):
"""Escapes special LaTeX characters by prefixing them with backslash"""
for char in '#$%&_}{':
line = [column.replace(char, '\\'+char) for column in line]
return line
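# Example (illustrative comment, not in the original source):
#   escape(['50%', 'a_b', 'x&y'])  ->  ['50\\%', 'a\\_b', 'x\\&y']
# i.e. every special LaTeX character in every column gets a backslash prefix.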
def format_alignment(align, length):
"""Makes sure that provided alignment is valid:
1. the length of alignment is either 1 or the same as the number of columns
2. valid characters are `l`, `c` and `r`
If there is an invalid character, all columns are set to centered alignment.
If alignment length is too long, it is stripped to fit the number of columns.
If alignment length is too short, it is padded with `c` for the missing
columns.
"""
if any(ch not in 'lcr' for ch in align):
align = 'c'
if len(align) == 1:
return length * align
elif len(align) == length:
return align
else:
return '{:c<{l}.{l}}'.format(align, l=length)
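# Examples (illustrative comment, not in the original source):
#   format_alignment('c', 3)    -> 'ccc'   (single character repeated)
#   format_alignment('lcr', 3)  -> 'lcr'   (used as-is)
#   format_alignment('lr', 4)   -> 'lrcc'  (too short: padded with 'c')
#   format_alignment('lxr', 3)  -> 'ccc'   (invalid character: all centered)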
def add_label(label, indent):
"""Creates a table label"""
return LABEL.format(label=label, indent=indent) if label else ''
def add_caption(caption, indent):
"""Creates a table caption"""
return CAPTION.format(caption=caption, indent=indent) if caption else ''
def save_content(content, outfile, replace):
"""Saves the content to a file.
If an existing file is provided, the content is appended to the end
of the file by default. If -r is passed, the file is overwritten.
"""
if replace:
with open(outfile, 'w') as out:
out.writelines(content)
print('The content is written to', outfile)
else:
with open(outfile, 'a') as out:
out.writelines(content)
print('The content is appended to', outfile)
def arg_parser():
"""Parses command line arguments and provides --help"""
parser = argparse.ArgumentParser(description="Creates LaTeX tables from .csv files")
parser.add_argument(
'files',
nargs='+',
help='.csv file(s) containing the data you want to export.'
)
parser.add_argument(
'-a', '--align',
default='c',
help='Alignment for the columns of the table. '
'Use `l`, `c`, and `r` for left, center and right. '
'Either one character for all columns, or one character per column. '
'Default: c'
)
parser.add_argument(
'-c', '--caption',
help='Caption of the table. '
'Default: None'
)
parser.add_argument(
'-i', '--no-indent',
action='store_true',
help='Pass this if you do not want to indent LaTeX source code '
             'with 4 spaces per nesting level. No difference in the final result (pdf). '
'Default: False'
)
parser.add_argument(
'-k', '--skip',
type=int,
default=0,
help='Number of rows in .csv to skip. Default: 0'
)
parser.add_argument(
'-l', '--label',
help='Label of the table, for referencing it. Default: None'
)
parser.add_argument(
'-n', '--no-header',
action='store_true',
help='By default, the first row of .csv is used as a table header. '
'Pass this option if there is no header. Default: False'
)
parser.add_argument(
'-o', '--outfile',
help='Choose an output file to save the results. '
'The results are appended to the file (added after the last line). '
'Default: None, prints to console.'
)
parser.add_argument(
'-oo', '--separate-outfiles',
metavar='PATH',
nargs='*',
help='When multiple .csv files need to be processed, '
'pass -oo to save each individual table in a separate .tex file. '
             'To specify each individual output file, '
'pass a list of filenames after -oo. '
'Alternatively, pass a directory that will store all the output files. '
'If no filename/directory is passed after -oo, '
'filenames of .csv files will be used (with .tex extension).'
)
parser.add_argument(
'-p', '--preamble',
action='store_true',
help='If selected, makes a whole .tex document (including the preamble) '
'ready to be built as .pdf. Useful when trying to make a quick report. '
'Default: False'
)
parser.add_argument(
'-s', '--sep',
default=',',
help=r'Choose a separator between columns. If a file is tab-separated, '
r'pass `t` or `tab`. If a file is semicolon-separated, '
r'pass `s`, `semi` or `\;`.'
r'Default: `,` (comma-separated)'
)
parser.add_argument(
'-u', '--units',
nargs='+',
help='Provide units for each column. If column has no unit, denote it '
'by passing either `-`, `/` or `0`. If `--no-header` is used, '
'this argument is ignored.'
)
parser.add_argument(
'-e', '--no-escape',
action='store_true',
help='If selected, do not escape special LaTeX characters.'
)
parser.add_argument(
'-f', '--fragment',
action='store_true',
help='If selected, only output content inside tabular environment '
'(no preamble, table environment, etc.).'
)
parser.add_argument(
'-ff', '--fragment-skip-header',
action='store_true',
help='Equivalent to passing -k 1 -n -f '
             '(skip the header row of the .csv and output only the tabular fragment).'
)
parser.add_argument(
'-r', '--replace',
action='store_true',
help='If selected and -o or -oo is passed, overwrite any existing output file.'
)
return parser.parse_args()
def main():
options = arg_parser()
tably = Tably(options)
tably.run()
if __name__ == '__main__':
main()
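# Illustrative invocations (added comment; the filenames are hypothetical):
#   tably data.csv                          # print a LaTeX table to stdout
#   tably data.csv -c "Results" -l tab:res  # add a caption and a label
#   tably a.csv b.csv -oo out/              # one .tex file per .csv, saved in out/
#   tably data.csv -p -o report.tex -r      # full document, overwriting report.tex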
|
|
import warnings
import numpy as np
import pandas.util.testing as tm
from pandas import (Series, DataFrame, MultiIndex,
Int64Index, UInt64Index, Float64Index,
IntervalIndex, CategoricalIndex,
IndexSlice, concat, date_range)
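# Note (added comment, not in the original source): the classes below follow the
# airspeed velocity (asv) benchmark conventions -- `params`/`param_names` define
# the parameter grid, `setup`/`setup_cache` build the data for each combination,
# and every `time_*` method is timed separately by the asv runner.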
class NumericSeriesIndexing:
params = [
(Int64Index, UInt64Index, Float64Index),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
def setup(self, index, index_structure):
N = 10**6
indices = {
'unique_monotonic_inc': index(range(N)),
'nonunique_monotonic_inc': index(
list(range(55)) + [54] + list(range(55, N - 1))),
}
self.data = Series(np.random.rand(N), index=indices[index_structure])
self.array = np.arange(10000)
self.array_list = self.array.tolist()
def time_getitem_scalar(self, index, index_structure):
self.data[800000]
def time_getitem_slice(self, index, index_structure):
self.data[:800000]
def time_getitem_list_like(self, index, index_structure):
self.data[[800000]]
def time_getitem_array(self, index, index_structure):
self.data[self.array]
def time_getitem_lists(self, index, index_structure):
self.data[self.array_list]
def time_iloc_array(self, index, index_structure):
self.data.iloc[self.array]
def time_iloc_list_like(self, index, index_structure):
self.data.iloc[[800000]]
def time_iloc_scalar(self, index, index_structure):
self.data.iloc[800000]
def time_iloc_slice(self, index, index_structure):
self.data.iloc[:800000]
def time_ix_array(self, index, index_structure):
self.data.ix[self.array]
def time_ix_list_like(self, index, index_structure):
self.data.ix[[800000]]
def time_ix_scalar(self, index, index_structure):
self.data.ix[800000]
def time_ix_slice(self, index, index_structure):
self.data.ix[:800000]
def time_loc_array(self, index, index_structure):
self.data.loc[self.array]
def time_loc_list_like(self, index, index_structure):
self.data.loc[[800000]]
def time_loc_scalar(self, index, index_structure):
self.data.loc[800000]
def time_loc_slice(self, index, index_structure):
self.data.loc[:800000]
class NonNumericSeriesIndexing:
params = [
('string', 'datetime'),
('unique_monotonic_inc', 'nonunique_monotonic_inc'),
]
param_names = ['index_dtype', 'index_structure']
def setup(self, index, index_structure):
N = 10**6
indexes = {'string': tm.makeStringIndex(N),
'datetime': date_range('1900', periods=N, freq='s')}
index = indexes[index]
if index_structure == 'nonunique_monotonic_inc':
index = index.insert(item=index[2], loc=2)[:-1]
self.s = Series(np.random.rand(N), index=index)
self.lbl = index[80000]
def time_getitem_label_slice(self, index, index_structure):
self.s[:self.lbl]
def time_getitem_pos_slice(self, index, index_structure):
self.s[:80000]
def time_get_value(self, index, index_structure):
with warnings.catch_warnings(record=True):
self.s.get_value(self.lbl)
def time_getitem_scalar(self, index, index_structure):
self.s[self.lbl]
def time_getitem_list_like(self, index, index_structure):
self.s[[self.lbl]]
class DataFrameStringIndexing:
def setup(self):
index = tm.makeStringIndex(1000)
columns = tm.makeStringIndex(30)
self.df = DataFrame(np.random.randn(1000, 30), index=index,
columns=columns)
self.idx_scalar = index[100]
self.col_scalar = columns[10]
self.bool_indexer = self.df[self.col_scalar] > 0
self.bool_obj_indexer = self.bool_indexer.astype(object)
def time_get_value(self):
with warnings.catch_warnings(record=True):
self.df.get_value(self.idx_scalar, self.col_scalar)
def time_ix(self):
self.df.ix[self.idx_scalar, self.col_scalar]
def time_loc(self):
self.df.loc[self.idx_scalar, self.col_scalar]
def time_getitem_scalar(self):
self.df[self.col_scalar][self.idx_scalar]
def time_boolean_rows(self):
self.df[self.bool_indexer]
def time_boolean_rows_object(self):
self.df[self.bool_obj_indexer]
class DataFrameNumericIndexing:
def setup(self):
self.idx_dupe = np.array(range(30)) * 99
self.df = DataFrame(np.random.randn(10000, 5))
self.df_dup = concat([self.df, 2 * self.df, 3 * self.df])
self.bool_indexer = [True] * 5000 + [False] * 5000
def time_iloc_dups(self):
self.df_dup.iloc[self.idx_dupe]
def time_loc_dups(self):
self.df_dup.loc[self.idx_dupe]
def time_iloc(self):
self.df.iloc[:100, 0]
def time_loc(self):
self.df.loc[:100, 0]
def time_bool_indexer(self):
self.df[self.bool_indexer]
class Take:
params = ['int', 'datetime']
param_names = ['index']
def setup(self, index):
N = 100000
indexes = {'int': Int64Index(np.arange(N)),
'datetime': date_range('2011-01-01', freq='S', periods=N)}
index = indexes[index]
self.s = Series(np.random.rand(N), index=index)
self.indexer = [True, False, True, True, False] * 20000
def time_take(self, index):
self.s.take(self.indexer)
class MultiIndexing:
def setup(self):
mi = MultiIndex.from_product([range(1000), range(1000)])
self.s = Series(np.random.randn(1000000), index=mi)
self.df = DataFrame(self.s)
n = 100000
self.mdt = DataFrame({'A': np.random.choice(range(10000, 45000, 1000),
n),
'B': np.random.choice(range(10, 400), n),
'C': np.random.choice(range(1, 150), n),
'D': np.random.choice(range(10000, 45000), n),
'x': np.random.choice(range(400), n),
'y': np.random.choice(range(25), n)})
self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000]
self.mdt = self.mdt.set_index(['A', 'B', 'C', 'D']).sort_index()
def time_series_ix(self):
self.s.ix[999]
def time_frame_ix(self):
self.df.ix[999]
def time_index_slice(self):
self.mdt.loc[self.idx, :]
class IntervalIndexing:
def setup_cache(self):
idx = IntervalIndex.from_breaks(np.arange(1000001))
monotonic = Series(np.arange(1000000), index=idx)
return monotonic
def time_getitem_scalar(self, monotonic):
monotonic[80000]
def time_loc_scalar(self, monotonic):
monotonic.loc[80000]
def time_getitem_list(self, monotonic):
monotonic[80000:]
def time_loc_list(self, monotonic):
monotonic.loc[80000:]
class CategoricalIndexIndexing:
params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']
param_names = ['index']
def setup(self, index):
N = 10**5
values = list('a' * N + 'b' * N + 'c' * N)
indices = {
'monotonic_incr': CategoricalIndex(values),
'monotonic_decr': CategoricalIndex(reversed(values)),
'non_monotonic': CategoricalIndex(list('abc' * N))}
self.data = indices[index]
self.int_scalar = 10000
self.int_list = list(range(10000))
self.cat_scalar = 'b'
self.cat_list = ['a', 'c']
def time_getitem_scalar(self, index):
self.data[self.int_scalar]
def time_getitem_slice(self, index):
self.data[:self.int_scalar]
def time_getitem_list_like(self, index):
self.data[[self.int_scalar]]
def time_getitem_list(self, index):
self.data[self.int_list]
def time_getitem_bool_array(self, index):
self.data[self.data == self.cat_scalar]
def time_get_loc_scalar(self, index):
self.data.get_loc(self.cat_scalar)
def time_get_indexer_list(self, index):
self.data.get_indexer(self.cat_list)
class MethodLookup:
def setup_cache(self):
s = Series()
return s
def time_lookup_iloc(self, s):
s.iloc
def time_lookup_ix(self, s):
s.ix
def time_lookup_loc(self, s):
s.loc
class GetItemSingleColumn:
def setup(self):
self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=['A'])
self.df_int_col = DataFrame(np.random.randn(3000, 1))
def time_frame_getitem_single_column_label(self):
self.df_string_col['A']
def time_frame_getitem_single_column_int(self):
self.df_int_col[0]
class AssignTimeseriesIndex:
def setup(self):
N = 100000
idx = date_range('1/1/2000', periods=N, freq='H')
self.df = DataFrame(np.random.randn(N, 1), columns=['A'], index=idx)
def time_frame_assign_timeseries_index(self):
self.df['date'] = self.df.index
class InsertColumns:
def setup(self):
self.N = 10**3
self.df = DataFrame(index=range(self.N))
def time_insert(self):
np.random.seed(1234)
for i in range(100):
self.df.insert(0, i, np.random.randn(self.N),
allow_duplicates=True)
def time_assign_with_setitem(self):
np.random.seed(1234)
for i in range(100):
self.df[i] = np.random.randn(self.N)
from .pandas_vb_common import setup # noqa: F401
|
|
"""The lock tests for the august platform."""
import datetime
from unittest.mock import Mock
from aiohttp import ClientResponseError
import pytest
from yalexs.pubnub_async import AugustPubNub
from homeassistant.components.lock import (
DOMAIN as LOCK_DOMAIN,
STATE_JAMMED,
STATE_LOCKING,
STATE_UNLOCKING,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_LOCK,
SERVICE_UNLOCK,
STATE_LOCKED,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
STATE_UNLOCKED,
)
from homeassistant.helpers import device_registry as dr, entity_registry as er
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.august.mocks import (
_create_august_with_devices,
_mock_activities_from_fixture,
_mock_doorsense_enabled_august_lock_detail,
_mock_lock_from_fixture,
)
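# Note (added comment, not in the original source): the tests below exercise the
# August lock entity against mocked API fixtures, activity feeds and, where used,
# a fake PubNub connection; no real cloud requests are made.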
async def test_lock_device_registry(hass):
"""Test creation of a lock with doorsense and bridge ands up in the registry."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
await _create_august_with_devices(hass, [lock_one])
device_registry = dr.async_get(hass)
reg_device = device_registry.async_get_device(
identifiers={("august", "online_with_doorsense")}
)
assert reg_device.model == "AUG-MD01"
assert reg_device.sw_version == "undefined-4.3.0-1.8.14"
assert reg_device.name == "online_with_doorsense Name"
assert reg_device.manufacturer == "August Home Inc."
async def test_lock_changed_by(hass):
"""Test creation of a lock with doorsense and bridge."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(hass, "get_activity.lock.json")
await _create_august_with_devices(hass, [lock_one], activities=activities)
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
assert (
lock_online_with_doorsense_name.attributes.get("changed_by")
== "Your favorite elven princess"
)
async def test_state_locking(hass):
"""Test creation of a lock with doorsense and bridge that is locking."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(hass, "get_activity.locking.json")
await _create_august_with_devices(hass, [lock_one], activities=activities)
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKING
async def test_state_unlocking(hass):
"""Test creation of a lock with doorsense and bridge that is unlocking."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(
hass, "get_activity.unlocking.json"
)
await _create_august_with_devices(hass, [lock_one], activities=activities)
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_UNLOCKING
async def test_state_jammed(hass):
"""Test creation of a lock with doorsense and bridge that is jammed."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(hass, "get_activity.jammed.json")
await _create_august_with_devices(hass, [lock_one], activities=activities)
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_JAMMED
async def test_one_lock_operation(hass):
"""Test creation of a lock with doorsense and bridge."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
await _create_august_with_devices(hass, [lock_one])
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
assert lock_online_with_doorsense_name.attributes.get("battery_level") == 92
assert (
lock_online_with_doorsense_name.attributes.get("friendly_name")
== "online_with_doorsense Name"
)
data = {ATTR_ENTITY_ID: "lock.online_with_doorsense_name"}
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, data, blocking=True
)
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_UNLOCKED
assert lock_online_with_doorsense_name.attributes.get("battery_level") == 92
assert (
lock_online_with_doorsense_name.attributes.get("friendly_name")
== "online_with_doorsense Name"
)
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, data, blocking=True
)
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
# No activity means it will be unavailable until the activity feed has data
entity_registry = er.async_get(hass)
lock_operator_sensor = entity_registry.async_get(
"sensor.online_with_doorsense_name_operator"
)
assert lock_operator_sensor
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").state
== STATE_UNKNOWN
)
async def test_one_lock_operation_pubnub_connected(hass):
"""Test lock and unlock operations are async when pubnub is connected."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
assert lock_one.pubsub_channel == "pubsub"
pubnub = AugustPubNub()
await _create_august_with_devices(hass, [lock_one], pubnub=pubnub)
pubnub.connected = True
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
assert lock_online_with_doorsense_name.attributes.get("battery_level") == 92
assert (
lock_online_with_doorsense_name.attributes.get("friendly_name")
== "online_with_doorsense Name"
)
data = {ATTR_ENTITY_ID: "lock.online_with_doorsense_name"}
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, data, blocking=True
)
await hass.async_block_till_done()
pubnub.message(
pubnub,
Mock(
channel=lock_one.pubsub_channel,
timetoken=(dt_util.utcnow().timestamp() + 1) * 10000000,
message={
"status": "kAugLockState_Unlocked",
},
),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_UNLOCKED
assert lock_online_with_doorsense_name.attributes.get("battery_level") == 92
assert (
lock_online_with_doorsense_name.attributes.get("friendly_name")
== "online_with_doorsense Name"
)
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, data, blocking=True
)
await hass.async_block_till_done()
pubnub.message(
pubnub,
Mock(
channel=lock_one.pubsub_channel,
timetoken=(dt_util.utcnow().timestamp() + 2) * 10000000,
message={
"status": "kAugLockState_Locked",
},
),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
# No activity means it will be unavailable until the activity feed has data
entity_registry = er.async_get(hass)
lock_operator_sensor = entity_registry.async_get(
"sensor.online_with_doorsense_name_operator"
)
assert lock_operator_sensor
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").state
== STATE_UNKNOWN
)
async def test_lock_jammed(hass):
"""Test lock gets jammed on unlock."""
def _unlock_return_activities_side_effect(access_token, device_id):
raise ClientResponseError(None, None, status=531)
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
await _create_august_with_devices(
hass,
[lock_one],
api_call_side_effects={
"unlock_return_activities": _unlock_return_activities_side_effect
},
)
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
assert lock_online_with_doorsense_name.attributes.get("battery_level") == 92
assert (
lock_online_with_doorsense_name.attributes.get("friendly_name")
== "online_with_doorsense Name"
)
data = {ATTR_ENTITY_ID: "lock.online_with_doorsense_name"}
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, data, blocking=True
)
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_JAMMED
async def test_lock_throws_exception_on_unknown_status_code(hass):
"""Test lock throws exception."""
def _unlock_return_activities_side_effect(access_token, device_id):
raise ClientResponseError(None, None, status=500)
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
await _create_august_with_devices(
hass,
[lock_one],
api_call_side_effects={
"unlock_return_activities": _unlock_return_activities_side_effect
},
)
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
assert lock_online_with_doorsense_name.attributes.get("battery_level") == 92
assert (
lock_online_with_doorsense_name.attributes.get("friendly_name")
== "online_with_doorsense Name"
)
data = {ATTR_ENTITY_ID: "lock.online_with_doorsense_name"}
with pytest.raises(ClientResponseError):
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, data, blocking=True
)
await hass.async_block_till_done()
async def test_one_lock_unknown_state(hass):
"""Test creation of a lock with doorsense and bridge."""
lock_one = await _mock_lock_from_fixture(
hass,
"get_lock.online.unknown_state.json",
)
await _create_august_with_devices(hass, [lock_one])
lock_brokenid_name = hass.states.get("lock.brokenid_name")
assert lock_brokenid_name.state == STATE_UNKNOWN
async def test_lock_bridge_offline(hass):
"""Test creation of a lock with doorsense and bridge that goes offline."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(
hass, "get_activity.bridge_offline.json"
)
await _create_august_with_devices(hass, [lock_one], activities=activities)
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_UNAVAILABLE
async def test_lock_bridge_online(hass):
"""Test creation of a lock with doorsense and bridge that goes offline."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(
hass, "get_activity.bridge_online.json"
)
await _create_august_with_devices(hass, [lock_one], activities=activities)
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
async def test_lock_update_via_pubnub(hass):
"""Test creation of a lock with doorsense and bridge."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
assert lock_one.pubsub_channel == "pubsub"
pubnub = AugustPubNub()
activities = await _mock_activities_from_fixture(hass, "get_activity.lock.json")
config_entry = await _create_august_with_devices(
hass, [lock_one], activities=activities, pubnub=pubnub
)
pubnub.connected = True
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKED
pubnub.message(
pubnub,
Mock(
channel=lock_one.pubsub_channel,
timetoken=dt_util.utcnow().timestamp() * 10000000,
message={
"status": "kAugLockState_Unlocking",
},
),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_UNLOCKING
pubnub.message(
pubnub,
Mock(
channel=lock_one.pubsub_channel,
timetoken=(dt_util.utcnow().timestamp() + 1) * 10000000,
message={
"status": "kAugLockState_Locking",
},
),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKING
async_fire_time_changed(hass, dt_util.utcnow() + datetime.timedelta(seconds=30))
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKING
pubnub.connected = True
async_fire_time_changed(hass, dt_util.utcnow() + datetime.timedelta(seconds=30))
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKING
# Ensure pubnub status is always preserved
async_fire_time_changed(hass, dt_util.utcnow() + datetime.timedelta(hours=2))
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_LOCKING
pubnub.message(
pubnub,
Mock(
channel=lock_one.pubsub_channel,
timetoken=(dt_util.utcnow().timestamp() + 2) * 10000000,
message={
"status": "kAugLockState_Unlocking",
},
),
)
await hass.async_block_till_done()
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_UNLOCKING
async_fire_time_changed(hass, dt_util.utcnow() + datetime.timedelta(hours=4))
await hass.async_block_till_done()
lock_online_with_doorsense_name = hass.states.get("lock.online_with_doorsense_name")
assert lock_online_with_doorsense_name.state == STATE_UNLOCKING
await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Packet class. Binding mechanism. fuzz() method.
"""
import time,itertools,os
import copy
from fields import StrField,ConditionalField,Emph,PacketListField
from config import conf
from base_classes import BasePacket,Gen,SetGen,Packet_metaclass,NewDefaultValues
from volatile import VolatileValue
from utils import import_hexcap,tex_escape,colgen,get_temp_file
from error import Scapy_Exception,log_runtime
import subprocess
import pprint
class CGlobal:
ONCE =False
try:
import pyx
except ImportError:
pass
class RawVal:
def __init__(self, val=""):
self.val = val
def __str__(self):
return str(self.val)
def __repr__(self):
return "<RawVal [%r]>" % self.val
class CPacketRes:
    pass
def hexstr(x):
s = []
for c in x:
s.append("%02x" % ord(c))
return " ".join(s)
class Packet(BasePacket):
__metaclass__ = Packet_metaclass
name=None
fields_desc = []
aliastypes = []
overload_fields = {}
underlayer = None
sent_time = None
payload_guess = []
initialized = 0
show_indent=1
explicit = 0
raw_packet_cache = None
@classmethod
def from_hexcap(cls):
return cls(import_hexcap())
@classmethod
def upper_bonds(self):
for fval,upper in self.payload_guess:
print "%-20s %s" % (upper.__name__, ", ".join("%-12s" % ("%s=%r"%i) for i in fval.iteritems()))
@classmethod
def lower_bonds(self):
for lower,fval in self.overload_fields.iteritems():
print "%-20s %s" % (lower.__name__, ", ".join("%-12s" % ("%s=%r"%i) for i in fval.iteritems()))
def __init__(self, _pkt="", post_transform=None, _internal=0, _underlayer=None, **fields):
self.time = time.time()
self.sent_time = 0
if self.name is None:
self.name = self.__class__.__name__
self.aliastypes = [ self.__class__ ] + self.aliastypes
self.default_fields = {}
self._offset=0; # offset of the object
self._length = 0
        self.offset_fields = {} # offset of each field
self.overloaded_fields = {}
self.fields={}
self.fieldtype={}
self.packetfields=[]
self.__dict__["payload"] = NoPayload()
self.init_fields()
self.underlayer = _underlayer
self.initialized = 1
self.original = _pkt
if _pkt:
self.dissect(_pkt)
if not _internal:
self.dissection_done(self)
for f in fields.keys():
self.fields[f] = self.get_field(f).any2i(self,fields[f])
if type(post_transform) is list:
self.post_transforms = post_transform
elif post_transform is None:
self.post_transforms = []
else:
self.post_transforms = [post_transform]
def init_fields(self):
self.do_init_fields(self.fields_desc)
def do_init_fields(self, flist):
for f in flist:
self.default_fields[f.name] = copy.deepcopy(f.default)
self.fieldtype[f.name] = f
if f.holds_packets:
self.packetfields.append(f)
def dissection_done(self,pkt):
"""DEV: will be called after a dissection is completed"""
self.post_dissection(pkt)
self.payload.dissection_done(pkt)
def post_dissection(self, pkt):
"""DEV: is called after the dissection of the whole packet"""
if self.payload:
self.payload._offset = self._offset + self._length
def get_field(self, fld):
"""DEV: returns the field instance from the name of the field"""
return self.fieldtype[fld]
def add_payload(self, payload):
if payload is None:
return
elif not isinstance(self.payload, NoPayload):
self.payload.add_payload(payload)
else:
if isinstance(payload, Packet):
self.__dict__["payload"] = payload
payload.add_underlayer(self)
for t in self.aliastypes:
if payload.overload_fields.has_key(t):
self.overloaded_fields = payload.overload_fields[t]
break
elif type(payload) is str:
self.__dict__["payload"] = conf.raw_layer(load=payload)
else:
raise TypeError("payload must be either 'Packet' or 'str', not [%s]" % repr(payload))
def remove_payload(self):
self.payload.remove_underlayer(self)
self.__dict__["payload"] = NoPayload()
self.overloaded_fields = {}
def add_underlayer(self, underlayer):
self.underlayer = underlayer
def remove_underlayer(self,other):
self.underlayer = None
def copy(self):
"""Returns a deep copy of the instance."""
clone = self.__class__()
clone.fields = self.fields.copy()
for k in clone.fields:
clone.fields[k] = self.get_field(k).do_copy(clone.fields[k])
clone.default_fields = self.default_fields.copy()
clone.overloaded_fields = self.overloaded_fields.copy()
clone.overload_fields = self.overload_fields.copy()
clone._offset=self._offset
clone.underlayer = self.underlayer
clone.explicit = self.explicit
clone.raw_packet_cache = self.raw_packet_cache
clone.post_transforms = self.post_transforms[:]
clone.__dict__["payload"] = self.payload.copy()
clone.payload.add_underlayer(clone)
return clone
def dump_offsets (self):
print "obj-id ",id(self),
print self.name ,self._offset
if self.payload:
self.payload.dump_offsets()
def dump_offsets_tree(self, indent = '', base_offset = 0):
ct = conf.color_theme
print("%s%s %s %s" % (indent,
ct.punct("###["),
ct.layer_name(self.name),
ct.punct("]###")))
for f in self.fields_desc:
if isinstance(f, ConditionalField) and not f._evalcond(self):
continue
fvalue = self.getfieldval(f.name)
if isinstance(fvalue, Packet) or (f.islist and f.holds_packets and type(fvalue) is list):
print('\n%s %s: %s' % (indent, f.name, base_offset + f._offset))
fvalue_gen = SetGen(fvalue, _iterpacket = 0)
fvalue_bu = None
for fvalue in fvalue_gen:
if fvalue_bu:
fvalue._offset = fvalue_bu._offset + len(fvalue_bu)
print('%s %s: %s' % (indent, fvalue.name, base_offset + f._offset + fvalue._offset))
fvalue.dump_offsets_tree(' ' + indent, base_offset + f._offset + fvalue._offset)
fvalue_bu = fvalue
else:
print('%s %s: %s' % (indent, f.name, base_offset + f._offset))
if self.payload:
print('---- payload ----')
self.payload.dump_offsets_tree(indent, base_offset + self._length)
def getfieldval(self, attr):
if attr in self.fields:
return self.fields[attr]
if attr in self.overloaded_fields:
return self.overloaded_fields[attr]
if attr in self.default_fields:
return self.default_fields[attr]
return self.payload.getfieldval(attr)
def getfield_and_val(self, attr):
if attr in self.fields:
return self.get_field(attr),self.fields[attr]
if attr in self.overloaded_fields:
return self.get_field(attr),self.overloaded_fields[attr]
if attr in self.default_fields:
return self.get_field(attr),self.default_fields[attr]
return self.payload.getfield_and_val(attr)
def __getattr__(self, attr):
if self.initialized:
fld,v = self.getfield_and_val(attr)
if fld is not None:
return fld.i2h(self, v)
return v
raise AttributeError(attr)
def setfieldval(self, attr, val):
if self.default_fields.has_key(attr):
fld = self.get_field(attr)
if fld is None:
any2i = lambda x,y: y
else:
any2i = fld.any2i
self.fields[attr] = any2i(self, val)
self.explicit = 0
self.raw_packet_cache = None
elif attr == "payload":
self.remove_payload()
self.add_payload(val)
else:
self.payload.setfieldval(attr,val)
def __setattr__(self, attr, val):
if self.initialized:
try:
self.setfieldval(attr,val)
except AttributeError:
pass
else:
return
self.__dict__[attr] = val
def delfieldval(self, attr):
if self.fields.has_key(attr):
del(self.fields[attr])
self.explicit = 0 # in case a default value must be explicited
self.raw_packet_cache = None
elif self.default_fields.has_key(attr):
pass
elif attr == "payload":
self.remove_payload()
else:
self.payload.delfieldval(attr)
def __delattr__(self, attr):
if self.initialized:
try:
self.delfieldval(attr)
except AttributeError:
pass
else:
return
if self.__dict__.has_key(attr):
del(self.__dict__[attr])
else:
raise AttributeError(attr)
def __repr__(self):
s = ""
ct = conf.color_theme
for f in self.fields_desc:
if isinstance(f, ConditionalField) and not f._evalcond(self):
continue
if f.name in self.fields:
val = f.i2repr(self, self.fields[f.name])
elif f.name in self.overloaded_fields:
val = f.i2repr(self, self.overloaded_fields[f.name])
else:
continue
if isinstance(f, Emph) or f in conf.emph:
ncol = ct.emph_field_name
vcol = ct.emph_field_value
else:
ncol = ct.field_name
vcol = ct.field_value
s += " %s%s%s" % (ncol(f.name),
ct.punct("="),
vcol(val))
return "%s%s %s %s%s%s"% (ct.punct("<"),
ct.layer_name(self.__class__.__name__),
s,
ct.punct("|"),
repr(self.payload),
ct.punct(">"))
def __str__(self):
return self.build()
def __div__(self, other):
if isinstance(other, Packet):
cloneA = self.copy()
cloneB = other.copy()
cloneA.add_payload(cloneB)
return cloneA
elif type(other) is str:
return self/conf.raw_layer(load=other)
else:
return other.__rdiv__(self)
__truediv__ = __div__
def __rdiv__(self, other):
if type(other) is str:
return conf.raw_layer(load=other)/self
else:
raise TypeError
__rtruediv__ = __rdiv__
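    # Illustrative comment (not in the original source): __div__/__rdiv__ are what
    # make layer stacking work, e.g. (with the usual layers defined elsewhere):
    #   pkt = IP(dst="10.0.0.1")/TCP(dport=80)/"GET / HTTP/1.0\r\n\r\n"
    # A trailing string is wrapped in conf.raw_layer by __div__, a leading string
    # by __rdiv__.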
def __mul__(self, other):
if type(other) is int:
return [self]*other
else:
raise TypeError
def __rmul__(self,other):
return self.__mul__(other)
def __nonzero__(self):
return True
def __len__(self):
return len(self.__str__())
def dump_fields_offsets (self):
for f in self.fields_desc:
print "field %-40s %02d %02d" % (f.name, f._offset,f.get_size_bytes ());
def self_build(self, field_pos_list=None):
if self.raw_packet_cache is not None:
return self.raw_packet_cache
p=""
for f in self.fields_desc:
if type(p) is tuple :
f._offset=len(p[0])
else:
assert(type(p)==str)
f._offset=len(p)
val = self.getfieldval(f.name)
if isinstance(val, RawVal):
sval = str(val)
p += sval
if field_pos_list is not None:
field_pos_list.append( (f.name, sval.encode("string_escape"), len(p), len(sval) ) )
f._offset= val
else:
try:
p = f.addfield(self, p, val)
except Exception as e:
print 'Error in %s adding %s, %s' % (self.name, f.name, e)
raise
return p
def do_build_payload(self):
return self.payload.do_build(None)
def do_update_payload_offset(self,pkt):
#print "obj-id ",id(self)
#print "current offset ",self.name," ",self._offset
#print "current header size ",len(pkt)
self.payload._offset = self._offset + len(pkt)
def dump_layers_offset (self):
p=self;
while True:
print p.name, "offset :",p._offset
p=p.payload
if p ==None or isinstance(p,NoPayload):
break;
def do_build(self, result = None):
if not self.explicit:
self = self.__iter__().next()
pkt = self.self_build()
for t in self.post_transforms:
pkt = t(pkt)
self.do_update_payload_offset(pkt)
pay = self.do_build_payload()
p = self.post_build(pkt,pay)
if result != None:
result.pkt = self;
return p
def build_padding(self):
return self.payload.build_padding()
def update_build_info (self,other):
p=self;
o=other;
while True:
assert p.aliastypes == o.aliastypes, (p, o)
assert type(p) == type(o), (type(p), type(o))
#copy
p._offset=o._offset
#next
p=p.payload
o=o.payload
if p ==None or isinstance(p,NoPayload):
break;
def build(self):
result = CPacketRes();
p = self.do_build(result)
p += self.build_padding()
p = self.build_done(p)
self.update_build_info (result.pkt)
return p
def post_build(self, pkt, pay):
"""DEV: called right after the current layer is build."""
return pkt+pay
def build_done(self, p):
return self.payload.build_done(p)
def do_build_ps(self):
p=""
pl = []
q=""
for f in self.fields_desc:
if isinstance(f, ConditionalField) and not f._evalcond(self):
continue
p = f.addfield(self, p, self.getfieldval(f.name) )
if type(p) is str:
r = p[len(q):]
q = p
else:
r = ""
pl.append( (f, f.i2repr(self,self.getfieldval(f.name)), r) )
pkt,lst = self.payload.build_ps(internal=1)
p += pkt
lst.append( (self, pl) )
return p,lst
def build_ps(self,internal=0):
p,lst = self.do_build_ps()
# if not internal:
# pkt = self
# while pkt.haslayer(conf.padding_layer):
# pkt = pkt.getlayer(conf.padding_layer)
# lst.append( (pkt, [ ("loakjkjd", pkt.load, pkt.load) ] ) )
# p += pkt.load
# pkt = pkt.payload
return p,lst
def psdump(self, filename=None, **kargs):
"""psdump(filename=None, layer_shift=0, rebuild=1)
Creates an EPS file describing a packet. If filename is not provided a temporary file is created and gs is called."""
canvas = self.canvas_dump(**kargs)
if filename is None:
fname = get_temp_file(autoext=".eps")
canvas.writeEPSfile(fname)
subprocess.Popen([conf.prog.psreader, fname+".eps"])
else:
canvas.writeEPSfile(filename)
def pdfdump(self, filename=None, **kargs):
"""pdfdump(filename=None, layer_shift=0, rebuild=1)
Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called."""
canvas = self.canvas_dump(**kargs)
if filename is None:
fname = get_temp_file(autoext=".pdf")
canvas.writePDFfile(fname)
subprocess.Popen([conf.prog.pdfreader, fname+".pdf"])
else:
canvas.writePDFfile(filename)
def canvas_dump(self, layer_shift=0, rebuild=1):
canvas = pyx.canvas.canvas()
if rebuild:
p,t = self.__class__(str(self)).build_ps()
else:
p,t = self.build_ps()
YTXT=len(t)
for n,l in t:
YTXT += len(l)
YTXT = float(YTXT)
YDUMP=YTXT
XSTART = 1
XDSTART = 10
y = 0.0
yd = 0.0
xd = 0
XMUL= 0.55
YMUL = 0.4
backcolor=colgen(0.6, 0.8, 1.0, trans=pyx.color.rgb)
forecolor=colgen(0.2, 0.5, 0.8, trans=pyx.color.rgb)
# backcolor=makecol(0.376, 0.729, 0.525, 1.0)
def hexstr(x):
s = []
for c in x:
s.append("%02x" % ord(c))
return " ".join(s)
def make_dump_txt(x,y,txt):
return pyx.text.text(XDSTART+x*XMUL, (YDUMP-y)*YMUL, r"\tt{%s}"%hexstr(txt), [pyx.text.size.Large])
def make_box(o):
return pyx.box.rect(o.left(), o.bottom(), o.width(), o.height(), relcenter=(0.5,0.5))
def make_frame(lst):
if len(lst) == 1:
b = lst[0].bbox()
b.enlarge(pyx.unit.u_pt)
return b.path()
else:
fb = lst[0].bbox()
fb.enlarge(pyx.unit.u_pt)
lb = lst[-1].bbox()
lb.enlarge(pyx.unit.u_pt)
if len(lst) == 2 and fb.left() > lb.right():
return pyx.path.path(pyx.path.moveto(fb.right(), fb.top()),
pyx.path.lineto(fb.left(), fb.top()),
pyx.path.lineto(fb.left(), fb.bottom()),
pyx.path.lineto(fb.right(), fb.bottom()),
pyx.path.moveto(lb.left(), lb.top()),
pyx.path.lineto(lb.right(), lb.top()),
pyx.path.lineto(lb.right(), lb.bottom()),
pyx.path.lineto(lb.left(), lb.bottom()))
else:
# XXX
gb = lst[1].bbox()
if gb != lb:
gb.enlarge(pyx.unit.u_pt)
kb = lst[-2].bbox()
if kb != gb and kb != lb:
kb.enlarge(pyx.unit.u_pt)
return pyx.path.path(pyx.path.moveto(fb.left(), fb.top()),
pyx.path.lineto(fb.right(), fb.top()),
pyx.path.lineto(fb.right(), kb.bottom()),
pyx.path.lineto(lb.right(), kb.bottom()),
pyx.path.lineto(lb.right(), lb.bottom()),
pyx.path.lineto(lb.left(), lb.bottom()),
pyx.path.lineto(lb.left(), gb.top()),
pyx.path.lineto(fb.left(), gb.top()),
pyx.path.closepath(),)
def make_dump(s, shift=0, y=0, col=None, bkcol=None, larg=16):
c = pyx.canvas.canvas()
tlist = []
while s:
dmp,s = s[:larg-shift],s[larg-shift:]
txt = make_dump_txt(shift, y, dmp)
tlist.append(txt)
shift += len(dmp)
if shift >= 16:
shift = 0
y += 1
if col is None:
col = pyx.color.rgb.red
if bkcol is None:
                bkcol = pyx.color.rgb.white
c.stroke(make_frame(tlist),[col,pyx.deco.filled([bkcol]),pyx.style.linewidth.Thick])
for txt in tlist:
c.insert(txt)
return c, tlist[-1].bbox(), shift, y
last_shift,last_y=0,0.0
while t:
bkcol = backcolor.next()
proto,fields = t.pop()
y += 0.5
pt = pyx.text.text(XSTART, (YTXT-y)*YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % proto.name, [ pyx.text.size.Large])
y += 1
ptbb=pt.bbox()
ptbb.enlarge(pyx.unit.u_pt*2)
canvas.stroke(ptbb.path(),[pyx.color.rgb.black, pyx.deco.filled([bkcol])])
canvas.insert(pt)
for fname, fval, fdump in fields:
col = forecolor.next()
ft = pyx.text.text(XSTART, (YTXT-y)*YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(fname.name))
if isinstance(fval, str):
if len(fval) > 18:
fval = fval[:18]+"[...]"
else:
fval=""
vt = pyx.text.text(XSTART+3, (YTXT-y)*YMUL, r"\font\cmssfont=cmss10\cmssfont{%s}" % tex_escape(fval))
y += 1.0
if fdump:
dt,target,last_shift,last_y = make_dump(fdump, last_shift, last_y, col, bkcol)
dtb = dt.bbox()
dtb=target
vtb = vt.bbox()
bxvt = make_box(vtb)
bxdt = make_box(dtb)
dtb.enlarge(pyx.unit.u_pt)
try:
if yd < 0:
cnx = pyx.connector.curve(bxvt,bxdt,absangle1=0, absangle2=-90)
else:
cnx = pyx.connector.curve(bxvt,bxdt,absangle1=0, absangle2=90)
except:
pass
else:
canvas.stroke(cnx,[pyx.style.linewidth.thin,pyx.deco.earrow.small,col])
canvas.insert(dt)
canvas.insert(ft)
canvas.insert(vt)
last_y += layer_shift
return canvas
def extract_padding(self, s):
"""DEV: to be overloaded to extract current layer's padding. Return a couple of strings (actual layer, padding)"""
return s,None
def post_dissect(self, s):
"""DEV: is called right after the current layer has been dissected"""
return s
def pre_dissect(self, s):
"""DEV: is called right before the current layer is dissected"""
return s
def do_dissect(self, s):
flist = self.fields_desc[:]
flist.reverse()
raw = s
offset = 0
while s and flist:
f = flist.pop()
f._offset = offset
try:
s,fval = f.getfield(self, s)
offset = len(raw) - (len(s[0]) if type(s) is tuple else len(s))
if getattr(f, 'passon', False): # fix for DNS
offset += s[1]
self.fields[f.name] = fval
except Exception as e:
print 'Error parsing field %s of layer %s: %s' % (f.name, self.name, e)
raise
assert(raw.endswith(s))
if s:
self.raw_packet_cache = raw[:-len(s)]
else:
self.raw_packet_cache = raw
self.explicit = 1
return s
def do_dissect_payload(self, s):
if s:
cls = self.guess_payload_class(s)
try:
p = cls(s, _internal=1, _underlayer=self)
except KeyboardInterrupt:
raise
except:
if conf.debug_dissector:
if isinstance(cls,type) and issubclass(cls,Packet):
log_runtime.error("%s dissector failed" % cls.name)
else:
log_runtime.error("%s.guess_payload_class() returned [%s]" % (self.__class__.__name__,repr(cls)))
if cls is not None:
raise
p = conf.raw_layer(s, _internal=1, _underlayer=self)
self.add_payload(p)
def dissect(self, s):
start_len = len(s)
s = self.pre_dissect(s)
s = self.do_dissect(s)
self._length = start_len - len(s)
s = self.post_dissect(s)
payl,pad = self.extract_padding(s)
self.do_dissect_payload(payl)
if pad and conf.padding:
self.add_payload(conf.padding_layer(pad))
def guess_payload_class(self, payload):
"""DEV: Guesses the next payload class from layer bonds. Can be overloaded to use a different mechanism."""
for t in self.aliastypes:
for fval, cls in t.payload_guess:
ok = 1
for k in fval.keys():
if not hasattr(self, k) or fval[k] != self.getfieldval(k):
ok = 0
break
if ok:
return cls
return self.default_payload_class(payload)
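    # Illustrative comment (not in the original source): payload_guess holds
    # (field-constraint dict, upper-layer class) pairs, normally registered via
    # bind_layers() elsewhere in Scapy. For example bind_layers(UDP, DNS, dport=53)
    # roughly adds ({'dport': 53}, DNS) to UDP.payload_guess, so a UDP layer whose
    # dport is 53 will guess DNS as its payload class here.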
def default_payload_class(self, payload):
"""DEV: Returns the default payload class if nothing has been found by the guess_payload_class() method."""
return conf.raw_layer
def hide_defaults(self):
"""Removes fields' values that are the same as default values."""
for k in self.fields.keys():
if self.default_fields.has_key(k):
if self.default_fields[k] == self.fields[k]:
del(self.fields[k])
self.payload.hide_defaults()
def clone_with(self, payload=None, **kargs):
pkt = self.__class__()
pkt.explicit = 1
pkt.fields = kargs
pkt._offset=self._offset
pkt.time = self.time
pkt.underlayer = self.underlayer
pkt.overload_fields = self.overload_fields.copy()
pkt.post_transforms = self.post_transforms
if payload is not None:
pkt.add_payload(payload)
return pkt
def __iter__(self):
def loop(todo, done, self=self):
if todo:
eltname = todo.pop()
elt = self.getfieldval(eltname)
if not isinstance(elt, Gen):
if self.get_field(eltname).islist:
elt = SetGen([elt])
else:
elt = SetGen(elt)
for e in elt:
done[eltname]=e
for x in loop(todo[:], done):
yield x
else:
if isinstance(self.payload,NoPayload):
payloads = [None]
else:
payloads = self.payload
for payl in payloads:
done2=done.copy()
for k in done2:
if isinstance(done2[k], VolatileValue):
done2[k] = done2[k]._fix()
pkt = self.clone_with(payload=payl, **done2)
yield pkt
if self.explicit:
todo = []
done = self.fields
else:
todo = [ k for (k,v) in itertools.chain(self.default_fields.iteritems(),
self.overloaded_fields.iteritems())
if isinstance(v, VolatileValue) ] + self.fields.keys()
done = {}
return loop(todo, done)
def __gt__(self, other):
"""True if other is an answer from self (self ==> other)."""
if isinstance(other, Packet):
return other < self
elif type(other) is str:
return 1
else:
raise TypeError((self, other))
def __lt__(self, other):
"""True if self is an answer from other (other ==> self)."""
if isinstance(other, Packet):
return self.answers(other)
elif type(other) is str:
return 1
else:
raise TypeError((self, other))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for f in self.fields_desc:
if f not in other.fields_desc:
return False
if self.getfieldval(f.name) != other.getfieldval(f.name):
return False
return self.payload == other.payload
def __ne__(self, other):
return not self.__eq__(other)
def hashret(self):
"""DEV: returns a string that has the same value for a request and its answer."""
return self.payload.hashret()
def answers(self, other):
"""DEV: true if self is an answer from other"""
if other.__class__ == self.__class__:
return self.payload.answers(other.payload)
return 0
def haslayer(self, cls):
"""true if self has a layer that is an instance of cls. Superseded by "cls in self" syntax."""
if self.__class__ == cls or self.__class__.__name__ == cls:
return 1
for f in self.packetfields:
fvalue_gen = self.getfieldval(f.name)
if fvalue_gen is None:
continue
if not f.islist:
fvalue_gen = SetGen(fvalue_gen,_iterpacket=0)
for fvalue in fvalue_gen:
if isinstance(fvalue, Packet):
ret = fvalue.haslayer(cls)
if ret:
return ret
return self.payload.haslayer(cls)
def getlayer(self, cls, nb=1, _track=None):
"""Return the nb^th layer that is an instance of cls."""
if type(cls) is int:
nb = cls+1
cls = None
if type(cls) is str and "." in cls:
ccls,fld = cls.split(".",1)
else:
ccls,fld = cls,None
if cls is None or self.__class__ == cls or self.__class__.name == ccls:
if nb == 1:
if fld is None:
return self
else:
return self.getfieldval(fld)
else:
nb -=1
for f in self.packetfields:
fvalue_gen = self.getfieldval(f.name)
if fvalue_gen is None:
continue
if not f.islist:
fvalue_gen = SetGen(fvalue_gen,_iterpacket=0)
for fvalue in fvalue_gen:
if isinstance(fvalue, Packet):
track=[]
ret = fvalue.getlayer(cls, nb, _track=track)
if ret is not None:
return ret
nb = track[0]
return self.payload.getlayer(cls,nb,_track=_track)
def firstlayer(self):
q = self
while q.underlayer is not None:
q = q.underlayer
return q
def __getitem__(self, cls):
if type(cls) is slice:
lname = cls.start
if cls.stop:
ret = self.getlayer(cls.start, cls.stop)
else:
ret = self.getlayer(cls.start)
if ret is None and cls.step is not None:
ret = cls.step
else:
lname=cls
ret = self.getlayer(cls)
if ret is None:
if type(lname) is Packet_metaclass:
lname = lname.__name__
elif type(lname) is not str:
lname = repr(lname)
raise IndexError("Layer [%s] not found" % lname)
return ret
def __delitem__(self, cls):
del(self[cls].underlayer.payload)
def __setitem__(self, cls, val):
self[cls].underlayer.payload = val
def __contains__(self, cls):
""""cls in self" returns true if self has a layer which is an instance of cls."""
return self.haslayer(cls)
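    # Illustrative comment (not in the original source): layers can be tested and
    # fetched in several equivalent ways, e.g. for pkt = IP()/TCP():
    #   TCP in pkt          # __contains__ -> haslayer()
    #   pkt[TCP]            # __getitem__  -> getlayer(); raises IndexError if absent
    #   pkt[IP:2]           # slice form: fetch the 2nd IP layer (e.g. IP-in-IP)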
def route(self):
return (None,None,None)
def fragment(self, *args, **kargs):
return self.payload.fragment(*args, **kargs)
def display(self,*args,**kargs): # Deprecated. Use show()
"""Deprecated. Use show() method."""
self.show(*args,**kargs)
def show(self, indent=3, lvl="", label_lvl=""):
"""Prints a hierarchical view of the packet. "indent" gives the size of indentation for each layer."""
ct = conf.color_theme
print "%s%s %s %s" % (label_lvl,
ct.punct("###["),
ct.layer_name(self.name),
ct.punct("]###"))
for f in self.fields_desc:
if isinstance(f, ConditionalField) and not f._evalcond(self):
continue
if isinstance(f, Emph) or f in conf.emph:
ncol = ct.emph_field_name
vcol = ct.emph_field_value
else:
ncol = ct.field_name
vcol = ct.field_value
fvalue = self.getfieldval(f.name)
if isinstance(fvalue, Packet) or (f.islist and f.holds_packets and type(fvalue) is list):
print "%s \\%-10s\\" % (label_lvl+lvl, ncol(f.name))
fvalue_gen = SetGen(fvalue,_iterpacket=0)
for fvalue in fvalue_gen:
fvalue.show(indent=indent, label_lvl=label_lvl+lvl+" |")
else:
try: # to get rid of "long"
if type(fvalue) is long and int(fvalue) == fvalue:
fvalue = int(fvalue)
except:
pass
begn = "%s %-10s%s " % (label_lvl+lvl,
ncol(f.name),
ct.punct("="),)
reprval = f.i2repr(self,fvalue)
if type(reprval) is str:
reprval = reprval.replace("\n", "\n"+" "*(len(label_lvl)
+len(lvl)
+len(f.name)
+4))
print "%s%s" % (begn,vcol(reprval))
self.payload.show(indent=indent, lvl=lvl+(" "*indent*self.show_indent), label_lvl=label_lvl)
def is_whole_explicit(self):
l = self
while l:
if not l.explicit:
return False
l = l.payload
return True
def show2(self):
"""Prints a hierarchical view of an assembled version of the packet, so that automatic fields are calculated (checksums, etc.)"""
if self.is_whole_explicit():
self.show()
else:
self.__class__(str(self)).show()
def sprintf(self, fmt, relax=1):
"""sprintf(format, [relax=1]) -> str
where format is a string that can include directives. A directive begins and
ends by % and has the following format %[fmt[r],][cls[:nb].]field%.
fmt is a classic printf directive, "r" can be appended for raw substitution
(ex: IP.flags=0x18 instead of SA), nb is the number of the layer we want
(ex: for IP/IP packets, IP:2.src is the src of the upper IP layer).
Special case : "%.time%" is the creation time.
Ex : p.sprintf("%.time% %-15s,IP.src% -> %-15s,IP.dst% %IP.chksum% "
"%03xr,IP.proto% %r,TCP.flags%")
Moreover, the format string can include conditional statements. A conditional
statement looks like : {layer:string} where layer is a layer name, and string
is the string to insert in place of the condition if it is true, i.e. if layer
is present. If layer is preceded by a "!", the result is inverted. Conditions
can be nested. A valid statement can be :
p.sprintf("This is a{TCP: TCP}{UDP: UDP}{ICMP:n ICMP} packet")
p.sprintf("{IP:%IP.dst% {ICMP:%ICMP.type%}{TCP:%TCP.dport%}}")
A side effect is that, to obtain "{" and "}" characters, you must use
"%(" and "%)".
"""
escape = { "%": "%",
"(": "{",
")": "}" }
# Evaluate conditions
while "{" in fmt:
i = fmt.rindex("{")
j = fmt[i+1:].index("}")
cond = fmt[i+1:i+j+1]
k = cond.find(":")
if k < 0:
raise Scapy_Exception("Bad condition in format string: [%s] (read sprintf doc!)"%cond)
cond,format = cond[:k],cond[k+1:]
res = False
if cond[0] == "!":
res = True
cond = cond[1:]
if self.haslayer(cond):
res = not res
if not res:
format = ""
fmt = fmt[:i]+format+fmt[i+j+2:]
# Evaluate directives
s = ""
while "%" in fmt:
i = fmt.index("%")
s += fmt[:i]
fmt = fmt[i+1:]
if fmt and fmt[0] in escape:
s += escape[fmt[0]]
fmt = fmt[1:]
continue
try:
i = fmt.index("%")
sfclsfld = fmt[:i]
fclsfld = sfclsfld.split(",")
if len(fclsfld) == 1:
f = "s"
clsfld = fclsfld[0]
elif len(fclsfld) == 2:
f,clsfld = fclsfld
else:
raise Scapy_Exception
if "." in clsfld:
cls,fld = clsfld.split(".")
else:
cls = self.__class__.__name__
fld = clsfld
num = 1
if ":" in cls:
cls,num = cls.split(":")
num = int(num)
fmt = fmt[i+1:]
except:
raise Scapy_Exception("Bad format string [%%%s%s]" % (fmt[:25], fmt[25:] and "..."))
else:
if fld == "time":
val = time.strftime("%H:%M:%S.%%06i", time.localtime(self.time)) % int((self.time-int(self.time))*1000000)
elif cls == self.__class__.__name__ and hasattr(self, fld):
if num > 1:
val = self.payload.sprintf("%%%s,%s:%s.%s%%" % (f,cls,num-1,fld), relax)
f = "s"
elif f[-1] == "r": # Raw field value
val = getattr(self,fld)
f = f[:-1]
if not f:
f = "s"
else:
val = getattr(self,fld)
if fld in self.fieldtype:
val = self.fieldtype[fld].i2repr(self,val)
else:
val = self.payload.sprintf("%%%s%%" % sfclsfld, relax)
f = "s"
s += ("%"+f) % val
s += fmt
return s
def mysummary(self):
"""DEV: can be overloaded to return a string that summarizes the layer.
Only one mysummary() is used in a whole packet summary: the one of the upper layer,
        except if a mysummary() also returns (as a tuple) a list of layers whose
mysummary() must be called if they are present."""
return ""
def _do_summary(self):
found,s,needed = self.payload._do_summary()
if s:
s = " / "+s
ret = ""
if not found or self.__class__ in needed:
ret = self.mysummary()
if type(ret) is tuple:
ret,n = ret
needed += n
if ret or needed:
found = 1
if not ret:
ret = self.__class__.__name__
if self.__class__ in conf.emph:
impf = []
for f in self.fields_desc:
if f in conf.emph:
impf.append("%s=%s" % (f.name, f.i2repr(self, self.getfieldval(f.name))))
ret = "%s [%s]" % (ret," ".join(impf))
ret = "%s%s" % (ret,s)
return found,ret,needed
def summary(self, intern=0):
"""Prints a one line summary of a packet."""
found,s,needed = self._do_summary()
return s
def lastlayer(self,layer=None):
"""Returns the uppest layer of the packet"""
return self.payload.lastlayer(self)
def decode_payload_as(self,cls):
"""Reassembles the payload and decode it using another packet class"""
s = str(self.payload)
self.payload = cls(s, _internal=1, _underlayer=self)
pp = self
while pp.underlayer is not None:
pp = pp.underlayer
self.payload.dissection_done(pp)
def libnet(self):
"""Not ready yet. Should give the necessary C code that interfaces with libnet to recreate the packet"""
print "libnet_build_%s(" % self.__class__.name.lower()
det = self.__class__(str(self))
for f in self.fields_desc:
val = det.getfieldval(f.name)
if val is None:
val = 0
elif type(val) is int:
val = str(val)
else:
val = '"%s"' % str(val)
print "\t%s, \t\t/* %s */" % (val,f.name)
print ");"
def command(self):
"""Returns a string representing the command you have to type to obtain the same packet"""
f = []
for fn,fv in self.fields.items():
try: # to get rid of "long"
if type(fv) is long and int(fv) == fv:
fv = int(fv)
except:
pass
fld = self.get_field(fn)
if isinstance(fv, Packet):
fv = fv.command()
elif fld.islist and fld.holds_packets and type(fv) is list:
fv = "[%s]" % ",".join( map(Packet.command, fv))
elif not isinstance(fld, ConditionalField) or fld.cond(self):
fv = repr(fv)
f.append("%s=%s" % (fn, fv))
c = "%s(%s)" % (self.__class__.__name__, ", ".join(f))
pc = self.payload.command()
if pc:
c += "/"+pc
return c
class NoPayload(Packet):
def __new__(cls, *args, **kargs):
singl = cls.__dict__.get("__singl__")
if singl is None:
cls.__singl__ = singl = Packet.__new__(cls)
Packet.__init__(singl)
return singl
def __init__(self, *args, **kargs):
pass
def dissection_done(self,pkt):
return
def add_payload(self, payload):
raise Scapy_Exception("Can't add payload to NoPayload instance")
def remove_payload(self):
pass
def add_underlayer(self,underlayer):
pass
def remove_underlayer(self,other):
pass
def copy(self):
return self
def __repr__(self):
return ""
def __str__(self):
return ""
def __nonzero__(self):
return False
def do_build(self,result = None):
return ""
def build(self):
return ""
def build_padding(self):
return ""
def build_done(self, p):
return p
def build_ps(self, internal=0):
return "",[]
def getfieldval(self, attr):
raise AttributeError(attr)
def getfield_and_val(self, attr):
raise AttributeError(attr)
def setfieldval(self, attr, val):
raise AttributeError(attr)
def delfieldval(self, attr):
raise AttributeError(attr)
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
elif attr in self.__class__.__dict__:
return self.__class__.__dict__[attr]
else:
raise AttributeError, attr
def hide_defaults(self):
pass
def __iter__(self):
return iter([])
def __eq__(self, other):
if isinstance(other, NoPayload):
return True
return False
def hashret(self):
return ""
def answers(self, other):
return isinstance(other, NoPayload) or isinstance(other, conf.padding_layer)
def haslayer(self, cls):
return 0
def getlayer(self, cls, nb=1, _track=None):
if _track is not None:
_track.append(nb)
return None
def fragment(self, *args, **kargs):
raise Scapy_Exception("cannot fragment this packet")
def show(self, indent=3, lvl="", label_lvl=""):
pass
def sprintf(self, fmt, relax):
if relax:
return "??"
else:
raise Scapy_Exception("Format not found [%s]"%fmt)
def _do_summary(self):
return 0,"",[]
def lastlayer(self,layer):
return layer
def command(self):
return ""
####################
## packet classes ##
####################
class Raw(Packet):
name = "Raw"
fields_desc = [ StrField("load", "") ]
def answers(self, other):
return 1
# s = str(other)
# t = self.load
# l = min(len(s), len(t))
# return s[:l] == t[:l]
def mysummary(self):
cs = conf.raw_summary
if cs:
if callable(cs):
return "Raw %s" % cs(self.load)
else:
return "Raw %r" % self.load
return Packet.mysummary(self)
class Padding(Raw):
name = "Padding"
def self_build(self):
return ""
def build_padding(self):
return (self.load if self.raw_packet_cache is None
else self.raw_packet_cache) + self.payload.build_padding()
conf.raw_layer = Raw
conf.padding_layer = Padding
if conf.default_l2 is None:
conf.default_l2 = Raw
#################
## Bind layers ##
#################
def bind_bottom_up(lower, upper, __fval=None, **fval):
if __fval is not None:
fval.update(__fval)
lower.payload_guess = lower.payload_guess[:]
lower.payload_guess.append((fval, upper))
def bind_top_down(lower, upper, __fval=None, **fval):
if __fval is not None:
fval.update(__fval)
upper.overload_fields = upper.overload_fields.copy()
upper.overload_fields[lower] = fval
@conf.commands.register
def bind_layers(lower, upper, __fval=None, **fval):
"""Bind 2 layers on some specific fields' values"""
if __fval is not None:
fval.update(__fval)
bind_top_down(lower, upper, **fval)
bind_bottom_up(lower, upper, **fval)
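# Editor's sketch (hedged): a tiny illustration of what bind_layers() records.
# The _Demo* layers are hypothetical, not part of Scapy, and live inside an
# uncalled helper so that importing this module is unaffected.
def _bind_layers_demo():
    class _DemoLower(Packet):
        name = "DemoLower"
        fields_desc = [ StrField("kind", "") ]
    class _DemoUpper(Packet):
        name = "DemoUpper"
        fields_desc = [ StrField("data", "") ]
    bind_layers(_DemoLower, _DemoUpper, kind="u")
    # Bottom-up: _DemoLower.payload_guess now contains ({"kind": "u"}, _DemoUpper),
    # so dissecting a _DemoLower whose kind field equals "u" tries _DemoUpper next.
    # Top-down: building _DemoLower()/_DemoUpper() overloads kind with "u".
    return _DemoLower.payload_guess, _DemoUpper.overload_fields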
def split_bottom_up(lower, upper, __fval=None, **fval):
if __fval is not None:
fval.update(__fval)
def do_filter((f,u),upper=upper,fval=fval):
if u != upper:
return True
for k in fval:
if k not in f or f[k] != fval[k]:
return True
return False
lower.payload_guess = filter(do_filter, lower.payload_guess)
def split_top_down(lower, upper, __fval=None, **fval):
if __fval is not None:
fval.update(__fval)
if lower in upper.overload_fields:
ofval = upper.overload_fields[lower]
for k in fval:
if k not in ofval or ofval[k] != fval[k]:
return
upper.overload_fields = upper.overload_fields.copy()
del(upper.overload_fields[lower])
@conf.commands.register
def split_layers(lower, upper, __fval=None, **fval):
"""Split 2 layers previously bound"""
if __fval is not None:
fval.update(__fval)
split_bottom_up(lower, upper, **fval)
split_top_down(lower, upper, **fval)
@conf.commands.register
def ls(obj=None):
"""List available layers, or infos on a given layer"""
if obj is None:
import __builtin__
all = __builtin__.__dict__.copy()
all.update(globals())
objlst = sorted(conf.layers, key=lambda x:x.__name__)
for o in objlst:
print "%-10s : %s" %(o.__name__,o.name)
else:
if isinstance(obj, type) and issubclass(obj, Packet):
for f in obj.fields_desc:
print "%-10s : %-20s = (%s)" % (f.name, f.__class__.__name__, repr(f.default))
elif isinstance(obj, Packet):
for f in obj.fields_desc:
print "%-10s : %-20s = %-15s (%s)" % (f.name, f.__class__.__name__, repr(getattr(obj,f.name)), repr(f.default))
if not isinstance(obj.payload, NoPayload):
print "--"
ls(obj.payload)
else:
print "Not a packet class. Type 'ls()' to list packet classes."
#############
## Fuzzing ##
#############
@conf.commands.register
def fuzz(p, _inplace=0):
"""Transform a layer into a fuzzy layer by replacing some default values by random objects"""
if not _inplace:
p = p.copy()
q = p
while not isinstance(q, NoPayload):
for f in q.fields_desc:
if isinstance(f, PacketListField):
for r in getattr(q, f.name):
print "fuzzing", repr(r)
fuzz(r, _inplace=1)
elif f.default is not None:
rnd = f.randval()
if rnd is not None:
q.default_fields[f.name] = rnd
q = q.payload
return p
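# Hedged usage note (editor's addition): fuzz() is normally combined with layers
# and send routines defined elsewhere in Scapy; IP/TCP below are assumptions, not
# names from this module.
#
#   pkt = fuzz(IP()/TCP())   # default field values replaced by volatile random objects
#   pkt.show2()              # each build/render draws fresh random values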
|
|
#!/usr/bin/env python
#
# Copyright (C) 2013-2014 Mikkel Krautz <mikkel@krautz.dk>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the Mumble Developers nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# `AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# About the tool
# --------------
# sign-msi.py is a tool that takes an unsigned (or optionally, signed)
# Mumble .MSI file and code signs it according to a 'strategy'.
#
# A strategy is a file that lists the files in the .MSI that need
# to be signed to get a 'proper' build. Besides the files listed in
# the strategy, the tool will also sign the .MSI file itself.
#
# Here is a sample strategy file:
#
# --->8---
# # Beginning of strategy file. This line is a comment.
#
# test.exe # This is the main binary of the program.
# helper.exe # this is a helper executable.
# util.dll # DLLs are allowed too, of course!
# --->8----
#
# The mumble-releng repository holds a collection of these files
# in the msi-strategy directory in the root of the repository.
#
# Using the tool
# --------------
# To sign mumble-1.2.4-unsigned.msi according
# to the 1.2.4.strategy file, and output the
# resulting, signed, .MSI to mumble-1.2.4.msi,
# one would do the following:
#
# $ python sign-msi.py \
# --input=mumble-1.2.4-unsigned.msi \
# --output=mumble-1.2.4.msi \
# --strategy=1.2.4.strategy
#
# Signtool parameters
# -------------------
# By default, the tool uses the '/a' parameter to signtool.exe.
#
# Since this is a very personal preference, sign-msi.py can also
# read signtool.exe parameters from the file %USERPROFILE%\.sign-msi.cfg,
# which is the tool's configuration file.
#
# The configuration file uses JSON. For example, to sign using
# a certificate in your personal certificate store and timestamp
# via timestamp.example.com, you could use something like this:
#
# --->8---
# {
# "signtool-args": ["/n", "SubjectName", "/tr", "http://timestamp.example.com"]
# }
# --->8---
#
# Note: as mentioned above, if no parameters are specified,
# sign-msi.py will use the "/a" parameter, which asks signtool
# to select the best signing cert automatically.
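#
# Additional configuration keys
# -----------------------------
# The read_cfg() and sign() functions below also consult a few more keys when
# signing with osslsigncode on non-Windows hosts. The snippet below is only an
# illustrative sketch; the values are placeholders, not recommendations:
#
# --->8---
# {
#   "osslsigncode-args": ["-pkcs12", "codesign.p12", "-t", "http://timestamp.example.com"],
#   "nest": false,
#   "osslsigncode-nest-args": [],
#   "allow-already-signed-content": false,
#   "re-sign-already-signed-content": false,
#   "trusted-leaf-sha512s": []
# }
# --->8---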
import os
import sys
import subprocess
import tempfile
import shutil
import json
import platform
import distutils.spawn
from optparse import OptionParser
altconfig = None
def homedir():
'''
homedir returns the user's home directory.
'''
return os.getenv('USERPROFILE', os.getenv('HOME'))
class winpath(str):
'''
winpath is a str subclass that the cmd() function uses
to translate Unix-style paths to Windows-style paths when
invoking Windows binaries through Wine.
'''
def to_windows(self, cwd=None):
if cwd is None:
cwd = os.getcwd()
path = str(self)
if not os.path.isabs(path):
path = os.path.normpath(os.path.join(cwd, path))
return 'z:' + path.replace('/', '\\')
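# Illustrative sketch (editor's addition): with cwd='/home/builder', a relative
# winpath is rewritten for Wine roughly as follows:
#   winpath('contents/Mumble.cab').to_windows('/home/builder')
#   -> 'z:\\home\\builder\\contents\\Mumble.cab'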
def lookupExe(fn, default):
'''
lookupExe tries to look up the executable specified by fn in the
user's PATH. If that fails, lookupExe returns the value of the
default parameter.
'''
exe = distutils.spawn.find_executable(fn)
if exe is not None:
return exe
else:
return default
def msidb():
return lookupExe('msidb.exe', 'C:\\Program Files (x86)\\Windows Kits\\8.0\\bin\\x86\\msidb.exe')
def signtool():
return lookupExe('signtool.exe', 'C:\\Program Files (x86)\\Windows Kits\\8.0\\bin\\x86\\signtool.exe')
def osslsigncode():
return lookupExe('osslsigncode', '/usr/local/osslsigncode')
def cmd(args, cwd=None):
'''
cmd executes the requested program and throws an exception
    if the program returns a non-zero return code.
'''
# Translate from Unix-style to Windows-style paths if needed.
if platform.system() != "Windows":
for i, arg in enumerate(args):
if isinstance(arg, winpath):
args[i] = arg.to_windows(cwd)
ret = subprocess.Popen(args, cwd=cwd).wait()
if ret != 0:
raise Exception('command "%s" exited with exit status %i' % (args[0], ret))
def hasSignature(absFn):
'''
hasSignature returns true if absFn has a digital signature.
'''
if platform.system() == 'Windows':
raise Exception('not supported on Windows')
ret = subprocess.Popen([osslsigncode(), 'extract-signature', '-in', absFn, '-out', '/dev/null']).wait()
if ret == 0:
return True
elif ret == 1 or ret == 255:
return False
else:
raise Exception('unexpected osslsigncode return code: %i' % ret)
def signatureLeafHashMatches(absFn, sha512):
'''
signatureLeafHashMatches performs a signature check on absFn.
The signature check determines whether the sha512 digest of
the leaf certificate of the signature's certificate chain
matches the passed-in sha512 digest.
'''
if platform.system() == 'Windows':
raise Exception('not supported on Windows')
args = ['osslsigncode', 'verify', '-require-leaf-hash', 'sha512:'+sha512, '-in', absFn]
return subprocess.Popen(args).wait() == 0
def hasTrustedSignature(absFn):
'''
hasTrustedSignature checks whether absFn has a leaf hash
that matches one of the trusted leaf hashes as found in
the sign-msi.py configuration file.
'''
if platform.system() == "Windows":
raise Exception('not supported on Windows')
cfg = read_cfg()
trustedSignatures = cfg.get('trusted-leaf-sha512s', [])
for trusted in trustedSignatures:
if signatureLeafHashMatches(absFn, trusted):
return True
return False
def sign(files, cwd=None, force=False, productDescription=None, productURL=None):
'''
sign invokes signtool (on Windows) or osslsigncode (on everything else)
to sign the given files.
Passing in force=True will always replace the signature
of the files to be signed, without respecting the
allow-already-signed configuration flag. (Which also means
that objects signed with force=True aren't checked against
the trusted-leaf-sha512s of the configuration file, either.)
'''
if cwd is None:
cwd = os.getcwd()
cfg = read_cfg()
if platform.system() == "Windows":
signtool_product_args = []
if productDescription:
signtool_product_args.extend(['/d', productDescription])
if productURL:
signtool_product_args.extend(['/du', productURL])
signtool_extra_args = ['/a']
if cfg.has_key('signtool-args'):
signtool_extra_args = cfg['signtool-args']
nest = cfg.get('nest', False)
if nest:
raise Exception('nested signing is not yet implemented in sign-msi.py for signtool')
cmd([signtool(), 'sign'] + signtool_product_args + signtool_extra_args + files, cwd=cwd)
else:
osslsigncode_product_args = []
if productDescription:
osslsigncode_product_args.extend(['-n', productDescription])
if productURL:
osslsigncode_product_args.extend(['-i', productURL])
osslsigncode_args = cfg.get('osslsigncode-args', [])
nest = cfg.get('nest', False)
osslsigncode_nest_args = cfg.get('osslsigncode-nest-args', [])
allowAlreadySignedContent = cfg.get('allow-already-signed-content', False)
reSignAlreadySignedContent = cfg.get('re-sign-already-signed-content', False)
if reSignAlreadySignedContent == True and allowAlreadySignedContent == False:
        raise Exception('cannot have re-sign-already-signed-content == true when allow-already-signed-content == false')
for fn in files:
absFn = os.path.join(cwd, fn)
if force is False and hasSignature(absFn):
if not allowAlreadySignedContent:
raise Exception('object "%s" is already signed; cfg disallows that.' % fn)
if not hasTrustedSignature(absFn):
raise Exception('object "%s" has a bad signature.' % fn)
if not reSignAlreadySignedContent:
print 'Skipping %s - signed by a trusted leaf.' % fn
continue
print 'Signing %s' % fn
os.rename(absFn, absFn+'.orig')
cmd([osslsigncode(), 'sign'] + osslsigncode_product_args + osslsigncode_args + [absFn+'.orig', absFn+'.1st'])
if nest:
cmd([osslsigncode(), 'sign', '-nest'] + osslsigncode_product_args + osslsigncode_nest_args + [absFn+'.1st', absFn+'.2nd'])
os.rename(absFn+'.2nd', absFn)
else:
os.rename(absFn+'.1st', absFn)
def extractCab(absMsiFn, workDir):
'''
extractCab extracts the Mumble.cab file from the MSI file
given by absMsiFn into workDir.
'''
ret = cmd([msidb(), '-d', winpath(absMsiFn), '-x', 'Mumble.cab'], cwd=workDir)
if not os.path.exists(os.path.join(workDir, 'Mumble.cab')):
raise Exception('no Mumble.cab found in workDir')
def unarchiveCab(workDir):
'''
unarchiveCab extracts the content of the Mumble.cab file
in workDir into a subdirectory of workDir called contents.
'''
contentsDir = os.path.join(workDir, 'contents')
os.mkdir(contentsDir)
cmd(['expand.exe', winpath(os.path.join('..', 'Mumble.cab')), '-F:*', '.'], cwd=contentsDir)
def cabContents(workDir):
'''
cabContents returns a directory listing of the
contents directory, sorted in a 'CAB correct' manner.
'''
return sorted(os.listdir(os.path.join(workDir, 'contents')), key=str.lower)
def writeCabDirective(workDir):
'''
writeCabDirective writes a Mumble.ddf file to the
root of workDir.
    This file can be used as an input to makecab.exe
to re-create a Mumble.cab.
'''
directiveFn = os.path.join(workDir, 'Mumble.ddf')
allFiles = cabContents(workDir)
f = open(directiveFn, 'w')
ddfStr = '''.OPTION EXPLICIT
.Set MaxDiskSize=0
.Set MaxCabinetSize=0
.set DiskDirectoryTemplate=
.Set CabinetNameTemplate=Mumble.cab
.Set Cabinet=on
.Set Compress=on
.Set CompressionType=LZX
'''
for fn in allFiles:
ddfStr += fn + '\n'
f.write(ddfStr)
f.close()
def signContentFiles(workDir, files=None):
'''
signContentFiles code-signs the files specified
in the files parameter in the contents directory
of the workDir.
If files is None, signContentFiles will sign all
.EXE and .DLL files in the 'contents' subdirectory
of the workDir.
'''
contentsDir = os.path.join(workDir, 'contents')
if files is None:
def isSignable(fn):
fn = fn.lower()
return fn.endswith('.exe') or fn.endswith('.dll')
files = [fn for fn in os.listdir(contentsDir) if isSignable(fn)]
sign(files, cwd=contentsDir)
def makeCab(workDir):
'''
makeCab creates a new Mumble.cab using the
files in the contents directory.
'''
contentsDir = os.path.join(workDir, 'contents')
cmd(['makecab.exe', '/f', winpath(os.path.join('..', 'Mumble.ddf'))], cwd=contentsDir)
def reassembleMsi(absMsiFn, workDir, outFn):
'''
reassembleMsi copies the target MSI to the contents
directory and does the following:
1. Removes the old Mumble.cab file from it.
2. Inserts the new Mumble.cab file into it.
3. Copies the re-assembled MSI to the outFn.
'''
contentsDir = os.path.join(workDir, 'contents')
contentMsi = os.path.join(contentsDir, 'Mumble.msi')
shutil.copyfile(absMsiFn, contentMsi)
# Remove old
cmd([msidb(), '-d', 'Mumble.msi', '-k', 'Mumble.cab'], cwd=contentsDir)
# Add new
cmd([msidb(), '-d', 'Mumble.msi', '-a', 'Mumble.cab'], cwd=contentsDir)
# Copy to outFn
shutil.copyfile(contentMsi, outFn)
def signMsi(outFn, productDescription=None, productURL=None):
'''
signMsi code-signs the .MSI file specified
in outFn.
'''
sign([outFn], force=True, productDescription=productDescription, productURL=productURL)
def read_cfg():
'''
read_cfg returns a dictionary of configuration
keys for sign-msi.py.
'''
global altconfig
fn = os.path.join(homedir(), '.sign-msi.cfg')
if altconfig is not None:
fn = altconfig
try:
with open(fn) as f:
s = f.read()
return json.loads(s)
except (IOError, ValueError):
pass
return {}
def read_strategy(fn):
'''
read_strategy reads a signing strategy, ignoring all comments.
It returns all the files from the strategy that need to be code signed.
'''
signfiles = []
for line in open(fn):
if len(line) == 0:
continue
idx = line.find('#')
if idx != -1:
line = line[:idx]
line = line.strip()
if len(line) == 0:
continue
signfiles.append(line)
return signfiles
def main():
p = OptionParser(usage='sign-msi.py --input=<in.msi> --output=<out.msi> [--strategy=<ver.strategy>]')
p.add_option('', '--input', dest='input', help='Input MSI file')
p.add_option('', '--output', dest='output', help='Output MSI file')
p.add_option('', '--strategy', dest='strategy', help='Strategy file describing which files to sign (optional; if not present, all files will be signed)')
p.add_option('', '--keep-tree', action='store_true', dest='keep_tree', help='Keep the working tree after signing')
p.add_option('', '--config', dest='config', help='Load the specified config file instead of $HOME/.sign-msi.cfg')
opts, args = p.parse_args()
if opts.input is None:
p.error('missing --input')
if opts.output is None:
p.error('missing --output')
if opts.config is not None:
global altconfig
altconfig = opts.config
absMsiFn = os.path.abspath(opts.input)
workDir = tempfile.mkdtemp()
extractCab(absMsiFn, workDir)
unarchiveCab(workDir)
writeCabDirective(workDir)
contentToSign = None
if opts.strategy is not None:
contentToSign = read_strategy(opts.strategy)
signContentFiles(workDir, contentToSign)
makeCab(workDir)
reassembleMsi(absMsiFn, workDir, opts.output)
productName = os.path.basename(opts.output)
signMsi(opts.output, productDescription=productName)
if opts.keep_tree:
print ''
print 'Working tree: %s' % workDir
else:
shutil.rmtree(workDir, ignore_errors=True)
print ''
print 'Signed MSI available at %s' % opts.output
if __name__ == '__main__':
main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader and processing.
Defines input_fn of Mask-RCNN for TF Estimator. The input_fn includes training
data for category classification, bounding box regression, and number of
positive examples to normalize the loss during training.
"""
from __future__ import division
import functools
import math
import tensorflow.google as tf
from REDACTED.mask_rcnn import anchors
from REDACTED.mask_rcnn import mask_rcnn_params
from REDACTED.mask_rcnn import spatial_transform
from REDACTED.mask_rcnn.object_detection import preprocessor
from REDACTED.mask_rcnn.object_detection import tf_example_decoder
MAX_NUM_INSTANCES = 100
class InputProcessor(object):
"""Base class of Input processor."""
def __init__(self, image, output_size, short_side_image_size,
long_side_max_image_size):
"""Initializes a new `InputProcessor`.
This InputProcessor is tailored for MLPerf. The reference implementation
resizes images as the following:
1. Resize the short side to 800 pixels while keeping the aspect ratio.
2. Clip the long side at a maximum of 1333 pixels.
Args:
image: The input image before processing.
      output_size: An integer tuple of the output image size in the form of
        (short_side, long_side) after calling the resize_and_crop_image function.
      short_side_image_size: The image size for the short side. This is analogous
        to cfg.TRAIN.scales in the MLPerf reference model.
      long_side_max_image_size: The maximum image size for the long side. This is
        analogous to cfg.TRAIN.max_size in the MLPerf reference model.
"""
self._image = image
self._output_size = output_size
self._short_side_image_size = short_side_image_size
self._long_side_max_image_size = long_side_max_image_size
# Parameters to control rescaling and shifting during preprocessing.
# Image scale defines scale from original image to scaled image.
self._image_scale = tf.constant(1.0)
# The integer height and width of scaled image.
self._scaled_height = tf.shape(image)[0]
self._scaled_width = tf.shape(image)[1]
self._ori_height = tf.shape(image)[0]
self._ori_width = tf.shape(image)[1]
def normalize_image(self):
"""Normalize the image to zero mean and unit variance."""
# The image normalization is identical to Cloud TPU ResNet.
self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)
offset = tf.constant([0.485, 0.456, 0.406])
offset = tf.expand_dims(offset, axis=0)
offset = tf.expand_dims(offset, axis=0)
self._image -= offset
    # This is similar to `PIXEL_MEANS` in the reference. Reference: https://github.com/ddkang/Detectron/blob/80f329530843e66d07ca39e19901d5f3e5daf009/lib/core/config.py#L909 # pylint: disable=line-too-long
scale = tf.constant([0.229, 0.224, 0.225])
scale = tf.expand_dims(scale, axis=0)
scale = tf.expand_dims(scale, axis=0)
self._image /= scale
def set_scale_factors_to_mlperf_reference_size(self):
"""Set the parameters to resize the image according to MLPerf reference."""
# Compute the scale_factor using rounded scaled image size.
height = tf.shape(self._image)[0]
width = tf.shape(self._image)[1]
# Recompute the accurate scale_factor using rounded scaled image size.
# https://github.com/ddkang/Detectron/blob/80f329530843e66d07ca39e19901d5f3e5daf009/lib/utils/blob.py#L70 # pylint: disable=line-too-long
min_image_size = tf.to_float(tf.minimum(height, width))
max_image_size = tf.to_float(tf.maximum(height, width))
short_side_scale = tf.to_float(self._short_side_image_size) / min_image_size
long_side_scale = (
tf.to_float(self._long_side_max_image_size) / max_image_size)
image_scale = tf.minimum(short_side_scale, long_side_scale)
scaled_height = tf.to_int32(tf.to_float(height) * image_scale)
scaled_width = tf.to_int32(tf.to_float(width) * image_scale)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
return image_scale
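  # Worked example (editor's sketch, assuming the MLPerf defaults of
  # short_side_image_size=800 and long_side_max_image_size=1333): for a 400x1000
  # input, short_side_scale = 800/400 = 2.0 and long_side_scale = 1333/1000 ~ 1.33,
  # so image_scale = min(2.0, 1.33) ~ 1.33 and the image is scaled to roughly
  # 533x1333 before being padded to output_size by resize_and_crop_image().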
def resize_and_crop_image(self, method=tf.image.ResizeMethod.BILINEAR):
"""Resize input image and crop it to the self._output dimension."""
scaled_image = tf.image.resize_images(
self._image, [self._scaled_height, self._scaled_width], method=method)
is_height_short_side = tf.less(self._scaled_height, self._scaled_width)
output_image = tf.cond(
is_height_short_side,
lambda: tf.image.pad_to_bounding_box(scaled_image, 0, 0, self._output_size[0], self._output_size[1]), # pylint: disable=line-too-long
lambda: tf.image.pad_to_bounding_box(scaled_image, 0, 0, self._output_size[1], self._output_size[0]) # pylint: disable=line-too-long
)
return output_image
def get_image_info(self):
"""Returns image information for scaled and original height and width."""
return tf.stack([
tf.to_float(self._scaled_height),
tf.to_float(self._scaled_width),
1.0 / self._image_scale,
tf.to_float(self._ori_height),
tf.to_float(self._ori_width)])
class InstanceSegmentationInputProcessor(InputProcessor):
"""Input processor for object detection."""
def __init__(self, image, output_size, short_side_image_size,
long_side_max_image_size, boxes=None, classes=None, masks=None):
InputProcessor.__init__(self, image, output_size, short_side_image_size,
long_side_max_image_size)
self._boxes = boxes
self._classes = classes
self._masks = masks
def random_horizontal_flip(self):
"""Randomly flip input image and bounding boxes."""
self._image, self._boxes, self._masks = preprocessor.random_horizontal_flip(
self._image, boxes=self._boxes, masks=self._masks)
def clip_boxes(self, boxes):
"""Clip boxes to fit in an image."""
boxes = tf.where(tf.less(boxes, 0), tf.zeros_like(boxes), boxes)
is_height_short_side = tf.less(self._scaled_height, self._scaled_width)
bound = tf.where(
is_height_short_side,
tf.convert_to_tensor(
[self._output_size[0] - 1, self._output_size[1] - 1] * 2,
dtype=tf.float32),
tf.convert_to_tensor(
[self._output_size[1] - 1, self._output_size[0] - 1] * 2,
dtype=tf.float32))
boxes = tf.where(
tf.greater(boxes, bound), bound * tf.ones_like(boxes), boxes)
return boxes
def resize_and_crop_boxes(self):
"""Resize boxes and crop it to the self._output dimension."""
boxlist = preprocessor.box_list.BoxList(self._boxes)
boxes = preprocessor.box_list_scale(
boxlist, self._scaled_height, self._scaled_width).get()
# Clip the boxes.
boxes = self.clip_boxes(boxes)
# Filter out ground truth boxes that are all zeros and corresponding classes
# and masks.
indices = tf.where(tf.not_equal(tf.reduce_sum(boxes, axis=1), 0))
boxes = tf.gather_nd(boxes, indices)
classes = tf.gather_nd(self._classes, indices)
self._masks = tf.gather_nd(self._masks, indices)
return boxes, classes
def crop_gt_masks(self, gt_mask_size):
"""Crops the ground truth binary masks and resize to fixed-size masks."""
num_boxes = tf.shape(self._boxes)[0]
num_masks = tf.shape(self._masks)[0]
assert_length = tf.Assert(
tf.equal(num_boxes, num_masks), [num_masks])
def padded_bounding_box_fn():
return tf.reshape(self._masks, [-1, self._ori_height, self._ori_width, 1])
def zeroed_box_fn():
return tf.zeros([0, self._ori_height, self._ori_width, 1])
num_masks = tf.shape(self._masks)[0]
# Check if there is any instance in this image or not.
scaled_masks = tf.cond(num_masks > 0, padded_bounding_box_fn, zeroed_box_fn)
with tf.control_dependencies([assert_length]):
cropped_gt_masks = tf.image.crop_and_resize(
image=scaled_masks, boxes=self._boxes,
box_ind=tf.range(num_masks, dtype=tf.int32),
crop_size=[gt_mask_size, gt_mask_size],
method='bilinear')[:, :, :, 0]
cropped_gt_masks = tf.pad(
cropped_gt_masks, paddings=tf.constant([[0, 0,], [2, 2,], [2, 2]]),
mode='CONSTANT', constant_values=0.)
return cropped_gt_masks
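  # Editor's note (sketch): the 2-pixel constant padding on each side above is why
  # the training targets later reshape to (gt_mask_size + 4) x (gt_mask_size + 4).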
def pad_to_fixed_size(data, pad_value, output_shape):
"""Pad data to a fixed length at the first dimension.
Args:
data: Tensor to be padded to output_shape.
pad_value: A constant value assigned to the paddings.
output_shape: The output shape of a 2D tensor.
Returns:
The Padded tensor with output_shape [max_num_instances, dimension].
"""
max_num_instances = output_shape[0]
dimension = output_shape[1]
data = tf.reshape(data, [-1, dimension])
num_instances = tf.shape(data)[0]
assert_length = tf.Assert(
tf.less_equal(num_instances, max_num_instances), [num_instances])
with tf.control_dependencies([assert_length]):
pad_length = max_num_instances - num_instances
paddings = pad_value * tf.ones([pad_length, dimension])
padded_data = tf.concat([data, paddings], axis=0)
padded_data = tf.reshape(padded_data, output_shape)
return padded_data
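# Hedged example (editor's addition): with MAX_NUM_INSTANCES = 100, a boxes tensor
# of shape [3, 4] passed through pad_to_fixed_size(boxes, -1, [100, 4]) keeps its
# 3 rows and appends 97 rows of -1, yielding a tensor with static shape [100, 4].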
class InputReader(object):
"""Input reader for dataset."""
def __init__(self, file_pattern, mode=tf.estimator.ModeKeys.TRAIN,
use_fake_data=False, distributed_eval=False):
if mode not in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.PREDICT]:
raise ValueError('InputReader supports only TRAIN or PREDICT modes.')
self._file_pattern = file_pattern
self._max_num_instances = MAX_NUM_INSTANCES
self._mode = mode
self._use_fake_data = use_fake_data
self._distributed_eval = distributed_eval
def __call__(self, params, num_examples=0):
image_size = params['image_size']
input_anchors = anchors.Anchors(
params['min_level'], params['max_level'], params['num_scales'],
params['aspect_ratios'], params['anchor_scale'], image_size)
anchor_labeler = anchors.AnchorLabeler(
input_anchors, params['num_classes'], params['rpn_positive_overlap'],
params['rpn_negative_overlap'], params['rpn_batch_size_per_im'],
params['rpn_fg_fraction'])
height_long_side_image_size = image_size[::-1]
height_long_side_input_anchors = anchors.Anchors(
params['min_level'], params['max_level'], params['num_scales'],
params['aspect_ratios'], params['anchor_scale'],
height_long_side_image_size)
height_long_side_anchor_labeler = anchors.AnchorLabeler(
height_long_side_input_anchors, params['num_classes'],
params['rpn_positive_overlap'], params['rpn_negative_overlap'],
params['rpn_batch_size_per_im'], params['rpn_fg_fraction'])
example_decoder = tf_example_decoder.TfExampleDecoder(
use_instance_mask=True)
def _dataset_parser(value):
"""Parse data to a fixed dimension input image and learning targets.
Args:
        value: A dictionary that contains an image and groundtruth annotations.
Returns:
features: A dictionary that contains the image and auxiliary
information. The following describes {key: value} pairs in the
dictionary.
image: An image tensor that is preprocessed to have normalized value
and fixed dimension [image_size, image_size, 3]
image_info: Image information that includes the original height and
width, the scale of the processed image to the original image, and
the scaled height and width.
source_ids: Source image id. Default value -1 if the source id is
empty in the groundtruth annotation.
labels: (only for training) A dictionary that contains groundtruth
labels. The following describes {key: value} pairs in the dictionary.
score_targets_dict: An ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of objectiveness score at l-th level.
box_targets_dict: An ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
gt_boxes: Groundtruth bounding box annotations. The box is represented
          in [y1, x1, y2, x2] format. The tensor is padded with -1 to the
          fixed dimension [self._max_num_instances, 4].
        gt_classes: Groundtruth classes annotations. The tensor is padded
with -1 to the fixed dimension [self._max_num_instances].
cropped_gt_masks: Groundtruth masks cropped by the bounding box and
resized to a fixed size determined by params['gt_mask_size']
"""
with tf.name_scope('parser'):
data = example_decoder.decode(value)
image = data['image']
source_id = data['source_id']
source_id = tf.where(tf.equal(source_id, tf.constant('')), '-1',
source_id)
source_id = tf.string_to_number(source_id)
if self._mode == tf.estimator.ModeKeys.PREDICT:
input_processor = InstanceSegmentationInputProcessor(
image, image_size, params['short_side_image_size'],
params['long_side_max_image_size'])
input_processor.normalize_image()
input_processor.set_scale_factors_to_mlperf_reference_size()
image = input_processor.resize_and_crop_image()
if params['use_bfloat16']:
image = tf.cast(image, dtype=tf.bfloat16)
image_info = input_processor.get_image_info()
return {'images': image, 'image_info': image_info,
'source_ids': source_id}
# The following part is for training.
instance_masks = data['groundtruth_instance_masks']
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
if not params['use_category']:
classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)
if (params['skip_crowd_during_training'] and
self._mode == tf.estimator.ModeKeys.TRAIN):
indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
instance_masks = tf.gather_nd(instance_masks, indices)
input_processor = InstanceSegmentationInputProcessor(
image, image_size, params['short_side_image_size'],
params['long_side_max_image_size'], boxes, classes,
instance_masks)
input_processor.normalize_image()
if params['input_rand_hflip']:
input_processor.random_horizontal_flip()
input_processor.set_scale_factors_to_mlperf_reference_size()
image = input_processor.resize_and_crop_image()
boxes, classes = input_processor.resize_and_crop_boxes()
cropped_gt_masks = input_processor.crop_gt_masks(
params['gt_mask_size'])
image_info = input_processor.get_image_info()
# Assign anchors.
is_height_short_side = tf.less(image_info[3], image_info[4])
score_targets, box_targets = tf.cond(
is_height_short_side,
lambda: anchor_labeler.label_anchors(boxes, classes),
lambda: height_long_side_anchor_labeler.label_anchors(boxes, classes)) # pylint: disable=line-too-long
# Pad groundtruth data.
boxes *= image_info[2]
boxes = pad_to_fixed_size(boxes, -1, [self._max_num_instances, 4])
classes = pad_to_fixed_size(classes, -1, [self._max_num_instances, 1])
# Pads cropped_gt_masks.
cropped_gt_masks = tf.reshape(
cropped_gt_masks, [-1, (params['gt_mask_size'] + 4) ** 2])
cropped_gt_masks = pad_to_fixed_size(
cropped_gt_masks, -1,
[self._max_num_instances, (params['gt_mask_size'] + 4) ** 2])
cropped_gt_masks = tf.reshape(
cropped_gt_masks,
[self._max_num_instances, params['gt_mask_size'] + 4,
params['gt_mask_size'] + 4])
if params['use_bfloat16']:
image = tf.cast(image, dtype=tf.bfloat16)
features = {}
features['images'] = image
features['image_info'] = image_info
features['source_ids'] = source_id
labels = {}
for level in range(params['min_level'], params['max_level'] + 1):
labels['score_targets_%d' % level] = score_targets[level]
labels['box_targets_%d' % level] = box_targets[level]
labels['gt_boxes'] = boxes
labels['gt_classes'] = classes
labels['cropped_gt_masks'] = cropped_gt_masks
return features, labels
batch_size = params['batch_size'] if 'batch_size' in params else 1
dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False)
if self._mode == tf.estimator.ModeKeys.TRAIN:
      # Shard and shuffle the image files so each shard has a distinctive and
      # random set of images.
      # To improve model convergence under a large number of hosts, multiple
      # hosts may share the same dataset shard. This allows a host to get more
      # training images.
if 'dataset_num_shards' in params:
train_actual_num_shards = int(params['dataset_num_shards'] //
params['hosts_per_dataset_shard'])
dataset = dataset.shard(
train_actual_num_shards,
params['dataset_index'] // params['hosts_per_dataset_shard'])
if not self._use_fake_data:
dataset = dataset.shuffle(tf.to_int64(256 // train_actual_num_shards))
if self._distributed_eval:
dataset = dataset.shard(params['dataset_num_shards'],
params['dataset_index'])
# Prefetch data from files.
def _prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(filename).prefetch(1)
return dataset
shuffle_data = (
self._mode == tf.estimator.ModeKeys.TRAIN) and not self._use_fake_data
concurrent_files = 16
dataset = dataset.interleave(
_prefetch_dataset,
cycle_length=concurrent_files,
block_length=1,
num_parallel_calls=concurrent_files)
if shuffle_data:
      # Cache the raw images and shuffle them with a reasonably large buffer.
dataset = dataset.cache().shuffle(
params['shuffle_buffer_size'],
reshuffle_each_iteration=True).repeat()
# Parse the fetched records to input tensors for model function.
dataset = dataset.map(_dataset_parser, num_parallel_calls=64)
def horizontal_image(*args):
image_info = args[0]['image_info']
return tf.less(image_info[3], image_info[4])
def vertical_image(*args):
return tf.logical_not(horizontal_image(*args))
# Pad dataset to the desired size and mark if the dataset is padding.
# During PREDICT, if batch_size_per_shard * num_shards > 5000, the
# original dataset size won't be evenly divisible by the number of shards.
# Note that 5000 is the number of eval samples in COCO dataset. In this
# case, the eval dataset will take (batch_per_shard * num_shards - 5000)
# samples from the original dataset and mark those extra samples as
# `is_padding` and the original data as `is_not_padding`. This ensures
# correctness of evaluation on only 5000 samples.
# Appends the dataset padding to the original dataset (only in PREDICT).
if (self._mode == tf.estimator.ModeKeys.PREDICT and
num_examples > params['eval_samples']):
def _mark_is_padding(features):
features[mask_rcnn_params.IS_PADDED] = tf.constant(
True, dtype=tf.bool, shape=[1])
return features
def _mark_is_not_padding(features):
features[mask_rcnn_params.IS_PADDED] = tf.constant(
False, dtype=tf.bool, shape=[1])
return features
dataset_padding = dataset
      # Pad an equal number of horizontal and vertical images and interleave them.
pad_size = int(math.ceil(num_examples - params['eval_samples']))
dataset_padding_hor = dataset_padding.filter(horizontal_image).map(
_mark_is_padding).take(pad_size)
dataset_padding_ver = dataset_padding.filter(vertical_image).map(
_mark_is_padding).take(pad_size)
interleaved_dataset_padding = tf.data.experimental.choose_from_datasets(
[dataset_padding_hor, dataset_padding_ver],
tf.data.Dataset.range(2).repeat(pad_size))
if self._distributed_eval:
dataset = dataset.map(_mark_is_not_padding).take(
int(
math.ceil(params['eval_samples'] /
params['dataset_num_shards'])))
else:
dataset = dataset.map(_mark_is_not_padding).take(params['eval_samples'])
dataset = dataset.concatenate(interleaved_dataset_padding)
def key_func(*args):
return tf.cast(horizontal_image(*args), dtype=tf.int64)
def reduce_func(unused_key, dataset):
return dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.apply(
tf.data.experimental.group_by_window(
key_func=key_func,
reduce_func=reduce_func,
window_size=(params['batch_size'] * params['replicas_per_host'])))
dataset = dataset.map(
functools.partial(self._transform_images, params),
num_parallel_calls=16)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
if (self._mode == tf.estimator.ModeKeys.TRAIN and
num_examples > 0):
dataset = dataset.take(num_examples)
# Make eval dataset repeat to get rid of eval dataset init per epoch.
if self._distributed_eval:
dataset = dataset.take(
int(num_examples / params['dataset_num_shards'] /
params['batch_size'])).cache().repeat()
if self._use_fake_data:
      # Turn this dataset into a semi-fake dataset which always loops at the
# first batch. This reduces variance in performance and is useful in
# testing.
dataset = dataset.take(1).cache().repeat()
deterministic = (not shuffle_data)
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
options.experimental_deterministic = deterministic
dataset = dataset.with_options(options)
return dataset
def _transform_images(self, params, features, labels=None):
"""Transforms images."""
images = features['images']
batch_size, _, _, c = images.get_shape().as_list()
if params['conv0_space_to_depth_block_size'] != 0:
# Transforms (space-to-depth) images for TPU performance.
def _fused_transform(images, image_size):
return spatial_transform.fused_transpose_and_space_to_depth(
images, image_size, params['conv0_space_to_depth_block_size'],
params['transpose_input'])
images = tf.cond(
tf.less(features['image_info'][0, 3], features['image_info'][0, 4]),
lambda: _fused_transform(images, params['image_size']),
lambda: _fused_transform(images, params['image_size'][::-1]))
else:
# Transposes images for TPU performance.
image_area = params['image_size'][0] * params['image_size'][1]
if params['transpose_input']:
images = tf.transpose(images, [1, 2, 0, 3])
# Flattens spatial dimensions so that the image tensor has a static
# shape.
images = tf.reshape(images, [image_area, batch_size, c])
else:
images = tf.reshape(images, [batch_size, image_area, c])
if params['use_bfloat16']:
images = tf.cast(images, dtype=tf.bfloat16)
features['images'] = images
if labels is not None:
return features, labels
else:
return features, tf.zeros([batch_size])
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the old L{twisted.web.client} APIs, C{getPage} and friends.
"""
from __future__ import division, absolute_import
import os
from errno import ENOSPC
try:
from urlparse import urlparse, urljoin
except ImportError:
from urllib.parse import urlparse, urljoin
from twisted.python.compat import _PY3, networkString, nativeString, intToBytes
from twisted.trial import unittest
from twisted.web import server, client, error, resource
from twisted.web.static import Data
from twisted.web.util import Redirect
from twisted.internet import reactor, defer, interfaces
from twisted.python.filepath import FilePath
from twisted.python.log import msg
from twisted.protocols.policies import WrappingFactory
from twisted.test.proto_helpers import StringTransport
try:
from twisted.internet import ssl
except:
ssl = None
from twisted import test
serverPEM = FilePath(test.__file__.encode("utf-8")).sibling(b'server.pem')
serverPEMPath = nativeString(serverPEM.path)
_PY3DownloadSkip = "downloadPage will be ported to Python 3 in ticket #6197."
class ExtendedRedirect(resource.Resource):
"""
Redirection resource.
The HTTP status code is set according to the C{code} query parameter.
@type lastMethod: C{str}
@ivar lastMethod: Last handled HTTP request method
"""
isLeaf = True
lastMethod = None
def __init__(self, url):
resource.Resource.__init__(self)
self.url = url
def render(self, request):
if self.lastMethod:
self.lastMethod = request.method
return b"OK Thnx!"
else:
self.lastMethod = request.method
code = int(request.args[b'code'][0])
return self.redirectTo(self.url, request, code)
def getChild(self, name, request):
return self
def redirectTo(self, url, request, code):
request.setResponseCode(code)
request.setHeader(b"location", url)
return b"OK Bye!"
class ForeverTakingResource(resource.Resource):
"""
L{ForeverTakingResource} is a resource which never finishes responding
to requests.
"""
def __init__(self, write=False):
resource.Resource.__init__(self)
self._write = write
def render(self, request):
if self._write:
request.write(b'some bytes')
return server.NOT_DONE_YET
class CookieMirrorResource(resource.Resource):
def render(self, request):
l = []
for k,v in sorted(list(request.received_cookies.items())):
l.append((nativeString(k), nativeString(v)))
l.sort()
return networkString(repr(l))
class RawCookieMirrorResource(resource.Resource):
def render(self, request):
header = request.getHeader(b'cookie')
if header is None:
return b'None'
return networkString(repr(nativeString(header)))
class ErrorResource(resource.Resource):
def render(self, request):
request.setResponseCode(401)
if request.args.get(b"showlength"):
request.setHeader(b"content-length", b"0")
return b""
class NoLengthResource(resource.Resource):
def render(self, request):
return b"nolength"
class HostHeaderResource(resource.Resource):
"""
A testing resource which renders itself as the value of the host header
from the request.
"""
def render(self, request):
return request.requestHeaders.getRawHeaders(b"host")[0]
class PayloadResource(resource.Resource):
"""
A testing resource which renders itself as the contents of the request body
    as long as the request body is 100 bytes long, and otherwise renders
    itself as C{"ERROR"}.
"""
def render(self, request):
data = request.content.read()
contentLength = request.requestHeaders.getRawHeaders(b"content-length")[0]
if len(data) != 100 or int(contentLength) != 100:
return b"ERROR"
return data
class DelayResource(resource.Resource):
def __init__(self, seconds):
self.seconds = seconds
def render(self, request):
def response():
request.write(b'some bytes')
request.finish()
reactor.callLater(self.seconds, response)
return server.NOT_DONE_YET
class BrokenDownloadResource(resource.Resource):
def render(self, request):
# only sends 3 bytes even though it claims to send 5
request.setHeader(b"content-length", b"5")
request.write(b'abc')
return b''
class CountingRedirect(Redirect):
"""
A L{Redirect} resource that keeps track of the number of times the
resource has been accessed.
"""
def __init__(self, *a, **kw):
Redirect.__init__(self, *a, **kw)
self.count = 0
def render(self, request):
self.count += 1
return Redirect.render(self, request)
class CountingResource(resource.Resource):
"""
A resource that keeps track of the number of times it has been accessed.
"""
def __init__(self):
resource.Resource.__init__(self)
self.count = 0
def render(self, request):
self.count += 1
return b"Success"
class URLJoinTests(unittest.TestCase):
"""
Tests for L{client._urljoin}.
"""
def test_noFragments(self):
"""
L{client._urljoin} does not include a fragment identifier in the
resulting URL if neither the base nor the new path include a fragment
identifier.
"""
self.assertEqual(
client._urljoin(b'http://foo.com/bar', b'/quux'),
b'http://foo.com/quux')
self.assertEqual(
client._urljoin(b'http://foo.com/bar#', b'/quux'),
b'http://foo.com/quux')
self.assertEqual(
client._urljoin(b'http://foo.com/bar', b'/quux#'),
b'http://foo.com/quux')
def test_preserveFragments(self):
"""
L{client._urljoin} preserves the fragment identifier from either the
new path or the base URL respectively, as specified in the HTTP 1.1 bis
draft.
@see: U{https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-22#section-7.1.2}
"""
self.assertEqual(
client._urljoin(b'http://foo.com/bar#frag', b'/quux'),
b'http://foo.com/quux#frag')
self.assertEqual(
client._urljoin(b'http://foo.com/bar', b'/quux#frag2'),
b'http://foo.com/quux#frag2')
self.assertEqual(
client._urljoin(b'http://foo.com/bar#frag', b'/quux#frag2'),
b'http://foo.com/quux#frag2')
class HTTPPageGetterTests(unittest.TestCase):
"""
    Tests for L{HTTPPageGetter}, the HTTP client protocol implementation
used to implement L{getPage}.
"""
def test_earlyHeaders(self):
"""
        When a connection is made, L{HTTPPageGetter} sends the headers from
its factory's C{headers} dict. If I{Host} or I{Content-Length} is
present in this dict, the values are not sent, since they are sent with
special values before the C{headers} dict is processed. If
I{User-Agent} is present in the dict, it overrides the value of the
C{agent} attribute of the factory. If I{Cookie} is present in the
dict, its value is added to the values from the factory's C{cookies}
attribute.
"""
factory = client.HTTPClientFactory(
b'http://foo/bar',
agent=b"foobar",
cookies={b'baz': b'quux'},
postdata=b"some data",
headers={
b'Host': b'example.net',
b'User-Agent': b'fooble',
b'Cookie': b'blah blah',
b'Content-Length': b'12981',
b'Useful': b'value'})
transport = StringTransport()
protocol = client.HTTPPageGetter()
protocol.factory = factory
protocol.makeConnection(transport)
result = transport.value()
for expectedHeader in [
b"Host: example.net\r\n",
b"User-Agent: foobar\r\n",
b"Content-Length: 9\r\n",
b"Useful: value\r\n",
b"connection: close\r\n",
b"Cookie: blah blah; baz=quux\r\n"]:
self.assertIn(expectedHeader, result)
class WebClientTests(unittest.TestCase):
def _listen(self, site):
return reactor.listenTCP(0, site, interface="127.0.0.1")
def setUp(self):
self.agent = None # for twisted.web.client.Agent test
self.cleanupServerConnections = 0
r = resource.Resource()
r.putChild(b"file", Data(b"0123456789", "text/html"))
r.putChild(b"redirect", Redirect(b"/file"))
self.infiniteRedirectResource = CountingRedirect(b"/infiniteRedirect")
r.putChild(b"infiniteRedirect", self.infiniteRedirectResource)
r.putChild(b"wait", ForeverTakingResource())
r.putChild(b"write-then-wait", ForeverTakingResource(write=True))
r.putChild(b"error", ErrorResource())
r.putChild(b"nolength", NoLengthResource())
r.putChild(b"host", HostHeaderResource())
r.putChild(b"payload", PayloadResource())
r.putChild(b"broken", BrokenDownloadResource())
r.putChild(b"cookiemirror", CookieMirrorResource())
r.putChild(b'delay1', DelayResource(1))
r.putChild(b'delay2', DelayResource(2))
self.afterFoundGetCounter = CountingResource()
r.putChild(b"afterFoundGetCounter", self.afterFoundGetCounter)
r.putChild(b"afterFoundGetRedirect", Redirect(b"/afterFoundGetCounter"))
miscasedHead = Data(b"miscased-head GET response content", "major/minor")
miscasedHead.render_Head = lambda request: b"miscased-head content"
r.putChild(b"miscased-head", miscasedHead)
self.extendedRedirect = ExtendedRedirect(b'/extendedRedirect')
r.putChild(b"extendedRedirect", self.extendedRedirect)
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.port = self._listen(self.wrapper)
self.portno = self.port.getHost().port
def tearDown(self):
if self.agent:
# clean up connections for twisted.web.client.Agent test.
self.agent.closeCachedConnections()
self.agent = None
# If the test indicated it might leave some server-side connections
# around, clean them up.
connections = list(self.wrapper.protocols.keys())
# If there are fewer server-side connections than requested,
# that's okay. Some might have noticed that the client closed
# the connection and cleaned up after themselves.
for n in range(min(len(connections), self.cleanupServerConnections)):
proto = connections.pop()
msg("Closing %r" % (proto,))
proto.transport.loseConnection()
if connections:
msg("Some left-over connections; this test is probably buggy.")
return self.port.stopListening()
def getURL(self, path):
host = "http://127.0.0.1:%d/" % self.portno
return networkString(urljoin(host, nativeString(path)))
def testPayload(self):
s = b"0123456789" * 10
return client.getPage(self.getURL("payload"), postdata=s
).addCallback(self.assertEqual, s
)
def test_getPageBrokenDownload(self):
"""
If the connection is closed before the number of bytes indicated by
I{Content-Length} have been received, the L{Deferred} returned by
L{getPage} fails with L{PartialDownloadError}.
"""
d = client.getPage(self.getURL("broken"))
d = self.assertFailure(d, client.PartialDownloadError)
d.addCallback(lambda exc: self.assertEqual(exc.response, b"abc"))
return d
def test_downloadPageBrokenDownload(self):
"""
If the connection is closed before the number of bytes indicated by
I{Content-Length} have been received, the L{Deferred} returned by
L{downloadPage} fails with L{PartialDownloadError}.
"""
# test what happens when download gets disconnected in the middle
path = FilePath(self.mktemp())
d = client.downloadPage(self.getURL("broken"), path.path)
d = self.assertFailure(d, client.PartialDownloadError)
def checkResponse(response):
"""
The HTTP status code from the server is propagated through the
C{PartialDownloadError}.
"""
self.assertEqual(response.status, b"200")
self.assertEqual(response.message, b"OK")
return response
d.addCallback(checkResponse)
def cbFailed(ignored):
self.assertEqual(path.getContent(), b"abc")
d.addCallback(cbFailed)
return d
def test_downloadPageLogsFileCloseError(self):
"""
If there is an exception closing the file being written to after the
connection is prematurely closed, that exception is logged.
"""
class BrokenFile:
def write(self, bytes):
pass
def close(self):
raise IOError(ENOSPC, "No file left on device")
d = client.downloadPage(self.getURL("broken"), BrokenFile())
d = self.assertFailure(d, client.PartialDownloadError)
def cbFailed(ignored):
self.assertEqual(len(self.flushLoggedErrors(IOError)), 1)
d.addCallback(cbFailed)
return d
def testHostHeader(self):
        # If we pass the Host header explicitly, it should be used; otherwise
        # it should be extracted from the url.
return defer.gatherResults([
client.getPage(self.getURL("host")).addCallback(
self.assertEqual, b"127.0.0.1:" + intToBytes(self.portno)),
client.getPage(self.getURL("host"),
headers={b"Host": b"www.example.com"}).addCallback(
self.assertEqual, b"www.example.com")])
def test_getPage(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the body of the response if the default method B{GET} is used.
"""
d = client.getPage(self.getURL("file"))
d.addCallback(self.assertEqual, b"0123456789")
return d
def test_getPageHEAD(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the empty string if the method is I{HEAD} and there is a successful
response code.
"""
d = client.getPage(self.getURL("file"), method=b"HEAD")
d.addCallback(self.assertEqual, b"")
return d
def test_getPageNotQuiteHEAD(self):
"""
If the request method is a different casing of I{HEAD} (ie, not all
capitalized) then it is not a I{HEAD} request and the response body
is returned.
"""
d = client.getPage(self.getURL("miscased-head"), method=b'Head')
d.addCallback(self.assertEqual, b"miscased-head content")
return d
def test_timeoutNotTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and the page is
retrieved before the timeout period elapses, the L{Deferred} is
called back with the contents of the page.
"""
d = client.getPage(self.getURL("host"), timeout=100)
d.addCallback(self.assertEqual,
networkString("127.0.0.1:%s" % (self.portno,)))
return d
def test_timeoutTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and that many
seconds elapse before the server responds to the request, the
L{Deferred} is errbacked with a L{error.TimeoutError}.
"""
# This will probably leave some connections around.
self.cleanupServerConnections = 1
return self.assertFailure(
client.getPage(self.getURL("wait"), timeout=0.000001),
defer.TimeoutError)
def testDownloadPage(self):
downloads = []
downloadData = [(b"file", self.mktemp(), b"0123456789"),
(b"nolength", self.mktemp(), b"nolength")]
for (url, name, data) in downloadData:
d = client.downloadPage(self.getURL(url), name)
d.addCallback(self._cbDownloadPageTest, data, name)
downloads.append(d)
return defer.gatherResults(downloads)
def _cbDownloadPageTest(self, ignored, data, name):
bytes = file(name, "rb").read()
self.assertEqual(bytes, data)
def testDownloadPageError1(self):
class errorfile:
def write(self, data):
raise IOError("badness happened during write")
def close(self):
pass
ef = errorfile()
return self.assertFailure(
client.downloadPage(self.getURL("file"), ef),
IOError)
def testDownloadPageError2(self):
class errorfile:
def write(self, data):
pass
def close(self):
raise IOError("badness happened during close")
ef = errorfile()
return self.assertFailure(
client.downloadPage(self.getURL("file"), ef),
IOError)
def testDownloadPageError3(self):
# make sure failures in open() are caught too. This is tricky.
# Might only work on posix.
tmpfile = open("unwritable", "wb")
tmpfile.close()
os.chmod("unwritable", 0) # make it unwritable (to us)
d = self.assertFailure(
client.downloadPage(self.getURL("file"), "unwritable"),
IOError)
d.addBoth(self._cleanupDownloadPageError3)
return d
def _cleanupDownloadPageError3(self, ignored):
os.chmod("unwritable", 0o700)
os.unlink("unwritable")
return ignored
def _downloadTest(self, method):
dl = []
for (url, code) in [("nosuchfile", b"404"), ("error", b"401"),
("error?showlength=1", b"401")]:
d = method(url)
d = self.assertFailure(d, error.Error)
d.addCallback(lambda exc, code=code: self.assertEqual(exc.args[0], code))
dl.append(d)
return defer.DeferredList(dl, fireOnOneErrback=True)
def testServerError(self):
return self._downloadTest(lambda url: client.getPage(self.getURL(url)))
def testDownloadServerError(self):
return self._downloadTest(lambda url: client.downloadPage(self.getURL(url), url.split('?')[0]))
def testFactoryInfo(self):
url = self.getURL('file')
uri = client.URI.fromBytes(url)
factory = client.HTTPClientFactory(url)
reactor.connectTCP(nativeString(uri.host), uri.port, factory)
return factory.deferred.addCallback(self._cbFactoryInfo, factory)
def _cbFactoryInfo(self, ignoredResult, factory):
self.assertEqual(factory.status, b'200')
self.assert_(factory.version.startswith(b'HTTP/'))
self.assertEqual(factory.message, b'OK')
self.assertEqual(factory.response_headers[b'content-length'][0], b'10')
def test_followRedirect(self):
"""
By default, L{client.getPage} follows redirects and returns the content
of the target resource.
"""
d = client.getPage(self.getURL("redirect"))
d.addCallback(self.assertEqual, b"0123456789")
return d
def test_noFollowRedirect(self):
"""
If C{followRedirect} is passed a false value, L{client.getPage} does not
follow redirects and returns a L{Deferred} which fails with
L{error.PageRedirect} when it encounters one.
"""
d = self.assertFailure(
client.getPage(self.getURL("redirect"), followRedirect=False),
error.PageRedirect)
d.addCallback(self._cbCheckLocation)
return d
def _cbCheckLocation(self, exc):
self.assertEqual(exc.location, b"/file")
def test_infiniteRedirection(self):
"""
When more than C{redirectLimit} HTTP redirects are encountered, the
page request fails with L{InfiniteRedirection}.
"""
def checkRedirectCount(*a):
self.assertEqual(f._redirectCount, 13)
self.assertEqual(self.infiniteRedirectResource.count, 13)
f = client._makeGetterFactory(
self.getURL('infiniteRedirect'),
client.HTTPClientFactory,
redirectLimit=13)
d = self.assertFailure(f.deferred, error.InfiniteRedirection)
d.addCallback(checkRedirectCount)
return d
def test_isolatedFollowRedirect(self):
"""
C{client.HTTPPageGetter} instances each obey the C{followRedirect}
value passed to the L{client.getPage} call which created them.
"""
d1 = client.getPage(self.getURL('redirect'), followRedirect=True)
d2 = client.getPage(self.getURL('redirect'), followRedirect=False)
d = self.assertFailure(d2, error.PageRedirect
).addCallback(lambda dummy: d1)
return d
def test_afterFoundGet(self):
"""
Enabling unsafe redirection behaviour overwrites the method of
redirected C{POST} requests with C{GET}.
"""
url = self.getURL('extendedRedirect?code=302')
f = client.HTTPClientFactory(url, followRedirect=True, method=b"POST")
self.assertFalse(
f.afterFoundGet,
"By default, afterFoundGet must be disabled")
def gotPage(page):
self.assertEqual(
self.extendedRedirect.lastMethod,
b"GET",
"With afterFoundGet, the HTTP method must change to GET")
d = client.getPage(
url, followRedirect=True, afterFoundGet=True, method=b"POST")
d.addCallback(gotPage)
return d
def test_downloadAfterFoundGet(self):
"""
Passing C{True} for C{afterFoundGet} to L{client.downloadPage} invokes
the same kind of redirect handling as passing that argument to
L{client.getPage} invokes.
"""
url = self.getURL('extendedRedirect?code=302')
def gotPage(page):
self.assertEqual(
self.extendedRedirect.lastMethod,
b"GET",
"With afterFoundGet, the HTTP method must change to GET")
d = client.downloadPage(url, "downloadTemp",
followRedirect=True, afterFoundGet=True, method="POST")
d.addCallback(gotPage)
return d
def test_afterFoundGetMakesOneRequest(self):
"""
When C{afterFoundGet} is C{True}, L{client.getPage} only issues one
request to the server when following the redirect. This is a regression
test, see #4760.
"""
def checkRedirectCount(*a):
self.assertEqual(self.afterFoundGetCounter.count, 1)
url = self.getURL('afterFoundGetRedirect')
d = client.getPage(
url, followRedirect=True, afterFoundGet=True, method=b"POST")
d.addCallback(checkRedirectCount)
return d
def testPartial(self):
name = self.mktemp()
f = open(name, "wb")
f.write(b"abcd")
f.close()
partialDownload = [(True, b"abcd456789"),
(True, b"abcd456789"),
(False, b"0123456789")]
d = defer.succeed(None)
for (partial, expectedData) in partialDownload:
d.addCallback(self._cbRunPartial, name, partial)
d.addCallback(self._cbPartialTest, expectedData, name)
return d
testPartial.skip = "Cannot test until webserver can serve partial data properly"
def _cbRunPartial(self, ignored, name, partial):
return client.downloadPage(self.getURL("file"), name, supportPartial=partial)
def _cbPartialTest(self, ignored, expectedData, filename):
bytes = file(filename, "rb").read()
self.assertEqual(bytes, expectedData)
def test_downloadTimeout(self):
"""
If the timeout indicated by the C{timeout} parameter to
L{client.HTTPDownloader.__init__} elapses without the complete response
being received, the L{defer.Deferred} returned by
L{client.downloadPage} fires with a L{Failure} wrapping a
L{defer.TimeoutError}.
"""
self.cleanupServerConnections = 2
# Verify the behavior if no bytes are ever written.
first = client.downloadPage(
self.getURL("wait"),
self.mktemp(), timeout=0.01)
# Verify the behavior if some bytes are written but then the request
# never completes.
second = client.downloadPage(
self.getURL("write-then-wait"),
self.mktemp(), timeout=0.01)
return defer.gatherResults([
self.assertFailure(first, defer.TimeoutError),
self.assertFailure(second, defer.TimeoutError)])
def test_downloadHeaders(self):
"""
After L{client.HTTPDownloader.deferred} fires, the
L{client.HTTPDownloader} instance's C{status} and C{response_headers}
attributes are populated with the values from the response.
"""
def checkHeaders(factory):
self.assertEqual(factory.status, b'200')
self.assertEqual(factory.response_headers[b'content-type'][0], b'text/html')
self.assertEqual(factory.response_headers[b'content-length'][0], b'10')
os.unlink(factory.fileName)
factory = client._makeGetterFactory(
self.getURL('file'),
client.HTTPDownloader,
fileOrName=self.mktemp())
return factory.deferred.addCallback(lambda _: checkHeaders(factory))
def test_downloadCookies(self):
"""
The C{cookies} dict passed to the L{client.HTTPDownloader}
initializer is used to populate the I{Cookie} header included in the
request sent to the server.
"""
output = self.mktemp()
factory = client._makeGetterFactory(
self.getURL('cookiemirror'),
client.HTTPDownloader,
fileOrName=output,
cookies={b'foo': b'bar'})
def cbFinished(ignored):
self.assertEqual(
FilePath(output).getContent(),
"[('foo', 'bar')]")
factory.deferred.addCallback(cbFinished)
return factory.deferred
def test_downloadRedirectLimit(self):
"""
When more than C{redirectLimit} HTTP redirects are encountered, the
page request fails with L{InfiniteRedirection}.
"""
def checkRedirectCount(*a):
self.assertEqual(f._redirectCount, 7)
self.assertEqual(self.infiniteRedirectResource.count, 7)
f = client._makeGetterFactory(
self.getURL('infiniteRedirect'),
client.HTTPDownloader,
fileOrName=self.mktemp(),
redirectLimit=7)
d = self.assertFailure(f.deferred, error.InfiniteRedirection)
d.addCallback(checkRedirectCount)
return d
if _PY3:
for method in (
test_downloadPageBrokenDownload,
test_downloadPageLogsFileCloseError,
testDownloadPage,
testDownloadPageError1,
testDownloadPageError2,
testDownloadPageError3,
testDownloadServerError,
test_downloadAfterFoundGet,
testPartial,
test_downloadTimeout,
test_downloadHeaders,
test_downloadCookies,
test_downloadRedirectLimit):
method.skip = _PY3DownloadSkip
del method
def test_setURL(self):
"""
L{client.HTTPClientFactory.setURL} alters the scheme, host, port and
path for absolute URLs.
"""
url = b'http://example.com'
f = client.HTTPClientFactory(url)
self.assertEqual(
(url, b'http', b'example.com', 80, b'/'),
(f.url, f.scheme, f.host, f.port, f.path))
def test_setURLRemovesFragment(self):
"""
L{client.HTTPClientFactory.setURL} removes the fragment identifier from
the path component.
"""
f = client.HTTPClientFactory(b'http://example.com')
url = b'https://foo.com:8443/bar;123?a#frag'
f.setURL(url)
self.assertEqual(
(url, b'https', b'foo.com', 8443, b'/bar;123?a'),
(f.url, f.scheme, f.host, f.port, f.path))
def test_setURLRelativePath(self):
"""
L{client.HTTPClientFactory.setURL} alters the path in a relative URL.
"""
f = client.HTTPClientFactory(b'http://example.com')
url = b'/hello'
f.setURL(url)
self.assertEqual(
(url, b'http', b'example.com', 80, b'/hello'),
(f.url, f.scheme, f.host, f.port, f.path))
class WebClientSSLTests(WebClientTests):
def _listen(self, site):
return reactor.listenSSL(
0, site,
contextFactory=ssl.DefaultOpenSSLContextFactory(
serverPEMPath, serverPEMPath),
interface="127.0.0.1")
def getURL(self, path):
return networkString("https://127.0.0.1:%d/%s" % (self.portno, path))
def testFactoryInfo(self):
url = self.getURL('file')
uri = client.URI.fromBytes(url)
factory = client.HTTPClientFactory(url)
reactor.connectSSL(nativeString(uri.host), uri.port, factory,
ssl.ClientContextFactory())
# The base class defines _cbFactoryInfo correctly for this
return factory.deferred.addCallback(self._cbFactoryInfo, factory)
class WebClientRedirectBetweenSSLandPlainTextTests(unittest.TestCase):
def getHTTPS(self, path):
return networkString("https://127.0.0.1:%d/%s" % (self.tlsPortno, path))
def getHTTP(self, path):
return networkString("http://127.0.0.1:%d/%s" % (self.plainPortno, path))
def setUp(self):
plainRoot = Data(b'not me', 'text/plain')
tlsRoot = Data(b'me neither', 'text/plain')
plainSite = server.Site(plainRoot, timeout=None)
tlsSite = server.Site(tlsRoot, timeout=None)
self.tlsPort = reactor.listenSSL(
0, tlsSite,
contextFactory=ssl.DefaultOpenSSLContextFactory(
serverPEMPath, serverPEMPath),
interface="127.0.0.1")
self.plainPort = reactor.listenTCP(0, plainSite, interface="127.0.0.1")
self.plainPortno = self.plainPort.getHost().port
self.tlsPortno = self.tlsPort.getHost().port
plainRoot.putChild(b'one', Redirect(self.getHTTPS('two')))
tlsRoot.putChild(b'two', Redirect(self.getHTTP('three')))
plainRoot.putChild(b'three', Redirect(self.getHTTPS('four')))
tlsRoot.putChild(b'four', Data(b'FOUND IT!', 'text/plain'))
def tearDown(self):
ds = list(
map(defer.maybeDeferred,
[self.plainPort.stopListening, self.tlsPort.stopListening]))
return defer.gatherResults(ds)
def testHoppingAround(self):
return client.getPage(self.getHTTP("one")
).addCallback(self.assertEqual, b"FOUND IT!"
)
class CookieTests(unittest.TestCase):
def _listen(self, site):
return reactor.listenTCP(0, site, interface="127.0.0.1")
def setUp(self):
root = Data(b'El toro!', 'text/plain')
root.putChild(b"cookiemirror", CookieMirrorResource())
root.putChild(b"rawcookiemirror", RawCookieMirrorResource())
site = server.Site(root, timeout=None)
self.port = self._listen(site)
self.portno = self.port.getHost().port
def tearDown(self):
return self.port.stopListening()
def getHTTP(self, path):
return networkString("http://127.0.0.1:%d/%s" % (self.portno, path))
def testNoCookies(self):
return client.getPage(self.getHTTP("cookiemirror")
).addCallback(self.assertEqual, b"[]"
)
def testSomeCookies(self):
cookies = {b'foo': b'bar', b'baz': b'quux'}
return client.getPage(self.getHTTP("cookiemirror"), cookies=cookies
).addCallback(self.assertEqual, b"[('baz', 'quux'), ('foo', 'bar')]"
)
def testRawNoCookies(self):
return client.getPage(self.getHTTP("rawcookiemirror")
).addCallback(self.assertEqual, b"None"
)
def testRawSomeCookies(self):
cookies = {b'foo': b'bar', b'baz': b'quux'}
return client.getPage(self.getHTTP("rawcookiemirror"), cookies=cookies
).addCallback(self.assertIn,
(b"'foo=bar; baz=quux'", b"'baz=quux; foo=bar'")
)
def testCookieHeaderParsing(self):
factory = client.HTTPClientFactory(b'http://foo.example.com/')
proto = factory.buildProtocol('127.42.42.42')
transport = StringTransport()
proto.makeConnection(transport)
for line in [
b'200 Ok',
b'Squash: yes',
b'Hands: stolen',
b'Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT',
b'Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/',
b'Set-Cookie: SHIPPING=FEDEX; path=/foo',
b'',
b'body',
b'more body',
]:
proto.dataReceived(line + b'\r\n')
self.assertEqual(transport.value(),
b'GET / HTTP/1.0\r\n'
b'Host: foo.example.com\r\n'
b'User-Agent: Twisted PageGetter\r\n'
b'\r\n')
self.assertEqual(factory.cookies,
{
b'CUSTOMER': b'WILE_E_COYOTE',
b'PART_NUMBER': b'ROCKET_LAUNCHER_0001',
b'SHIPPING': b'FEDEX',
})
class HostHeaderTests(unittest.TestCase):
"""
Test that L{HTTPClientFactory} includes the port in the host header
if needed.
"""
def _getHost(self, bytes):
"""
Retrieve the value of the I{Host} header from the serialized
request given by C{bytes}.
"""
for line in bytes.split(b'\r\n'):
try:
name, value = line.split(b':', 1)
if name.strip().lower() == b'host':
return value.strip()
except ValueError:
pass
def test_HTTPDefaultPort(self):
"""
No port should be included in the host header when connecting to the
default HTTP port.
"""
factory = client.HTTPClientFactory(b'http://foo.example.com/')
proto = factory.buildProtocol(b'127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com')
def test_HTTPPort80(self):
"""
No port should be included in the host header when connecting to the
default HTTP port even if it is in the URL.
"""
factory = client.HTTPClientFactory(b'http://foo.example.com:80/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com')
def test_HTTPNotPort80(self):
"""
The port should be included in the host header when connecting to a
non-default HTTP port.
"""
factory = client.HTTPClientFactory(b'http://foo.example.com:8080/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com:8080')
def test_HTTPSDefaultPort(self):
"""
No port should be included in the host header when connecting to the
default HTTPS port.
"""
factory = client.HTTPClientFactory(b'https://foo.example.com/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com')
def test_HTTPSPort443(self):
"""
No port should be included in the host header when connecting to the
default HTTPS port even if it is in the URL.
"""
factory = client.HTTPClientFactory(b'https://foo.example.com:443/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com')
def test_HTTPSNotPort443(self):
"""
The port should be included in the host header when connecting to a
non-default HTTPS port.
"""
factory = client.HTTPClientFactory(b'https://foo.example.com:8080/')
proto = factory.buildProtocol('127.42.42.42')
proto.makeConnection(StringTransport())
self.assertEqual(self._getHost(proto.transport.value()),
b'foo.example.com:8080')
if ssl is None or not hasattr(ssl, 'DefaultOpenSSLContextFactory'):
for case in [WebClientSSLTests, WebClientRedirectBetweenSSLandPlainTextTests]:
case.skip = "OpenSSL not present"
if not interfaces.IReactorSSL(reactor, None):
for case in [WebClientSSLTests, WebClientRedirectBetweenSSLandPlainTextTests]:
case.skip = "Reactor doesn't support SSL"
class URITests(unittest.TestCase):
"""
Tests for L{twisted.web.client.URI}.
"""
def assertURIEquals(self, uri, scheme, netloc, host, port, path,
params=b'', query=b'', fragment=b''):
"""
Assert that all of a L{client.URI}'s components match the expected
values.
@param uri: L{client.URI} instance whose attributes will be checked
for equality.
@type scheme: L{bytes}
@param scheme: URI scheme specifier.
@type netloc: L{bytes}
@param netloc: Network location component.
@type host: L{bytes}
@param host: Host name.
@type port: L{int}
@param port: Port number.
@type path: L{bytes}
@param path: Hierarchical path.
@type params: L{bytes}
@param params: Parameters for last path segment, defaults to C{b''}.
@type query: L{bytes}
@param query: Query string, defaults to C{b''}.
@type fragment: L{bytes}
@param fragment: Fragment identifier, defaults to C{b''}.
"""
self.assertEqual(
(scheme, netloc, host, port, path, params, query, fragment),
(uri.scheme, uri.netloc, uri.host, uri.port, uri.path, uri.params,
uri.query, uri.fragment))
def test_parseDefaultPort(self):
"""
L{client.URI.fromBytes} by default assumes port 80 for the I{http}
scheme and 443 for the I{https} scheme.
"""
uri = client.URI.fromBytes(b'http://example.com')
self.assertEqual(80, uri.port)
# Weird (but commonly accepted) structure uses default port.
uri = client.URI.fromBytes(b'http://example.com:')
self.assertEqual(80, uri.port)
uri = client.URI.fromBytes(b'https://example.com')
self.assertEqual(443, uri.port)
def test_parseCustomDefaultPort(self):
"""
L{client.URI.fromBytes} accepts a C{defaultPort} parameter that
overrides the normal default port logic.
"""
uri = client.URI.fromBytes(b'http://example.com', defaultPort=5144)
self.assertEqual(5144, uri.port)
uri = client.URI.fromBytes(b'https://example.com', defaultPort=5144)
self.assertEqual(5144, uri.port)
def test_netlocHostPort(self):
"""
Parsing a I{URI} splits the network location component into I{host} and
I{port}.
"""
uri = client.URI.fromBytes(b'http://example.com:5144')
self.assertEqual(5144, uri.port)
self.assertEqual(b'example.com', uri.host)
self.assertEqual(b'example.com:5144', uri.netloc)
# Spaces in the hostname are trimmed, the default path is /.
uri = client.URI.fromBytes(b'http://example.com ')
self.assertEqual(b'example.com', uri.netloc)
def test_path(self):
"""
Parse the path from a I{URI}.
"""
uri = b'http://example.com/foo/bar'
parsed = client.URI.fromBytes(uri)
self.assertURIEquals(
parsed,
scheme=b'http',
netloc=b'example.com',
host=b'example.com',
port=80,
path=b'/foo/bar')
self.assertEqual(uri, parsed.toBytes())
def test_noPath(self):
"""
The path of a I{URI} that has no path is the empty string.
"""
uri = b'http://example.com'
parsed = client.URI.fromBytes(uri)
self.assertURIEquals(
parsed,
scheme=b'http',
netloc=b'example.com',
host=b'example.com',
port=80,
path=b'')
self.assertEqual(uri, parsed.toBytes())
def test_emptyPath(self):
"""
The path of a I{URI} with an empty path is C{b'/'}.
"""
uri = b'http://example.com/'
self.assertURIEquals(
client.URI.fromBytes(uri),
scheme=b'http',
netloc=b'example.com',
host=b'example.com',
port=80,
path=b'/')
def test_param(self):
"""
Parse I{URI} parameters from a I{URI}.
"""
uri = b'http://example.com/foo/bar;param'
parsed = client.URI.fromBytes(uri)
self.assertURIEquals(
parsed,
scheme=b'http',
netloc=b'example.com',
host=b'example.com',
port=80,
path=b'/foo/bar',
params=b'param')
self.assertEqual(uri, parsed.toBytes())
def test_query(self):
"""
Parse the query string from a I{URI}.
"""
uri = b'http://example.com/foo/bar;param?a=1&b=2'
parsed = client.URI.fromBytes(uri)
self.assertURIEquals(
parsed,
scheme=b'http',
netloc=b'example.com',
host=b'example.com',
port=80,
path=b'/foo/bar',
params=b'param',
query=b'a=1&b=2')
self.assertEqual(uri, parsed.toBytes())
def test_fragment(self):
"""
Parse the fragment identifier from a I{URI}.
"""
uri = b'http://example.com/foo/bar;param?a=1&b=2#frag'
parsed = client.URI.fromBytes(uri)
self.assertURIEquals(
parsed,
scheme=b'http',
netloc=b'example.com',
host=b'example.com',
port=80,
path=b'/foo/bar',
params=b'param',
query=b'a=1&b=2',
fragment=b'frag')
self.assertEqual(uri, parsed.toBytes())
def test_originForm(self):
"""
L{client.URI.originForm} produces an absolute I{URI} path including
the I{URI} path.
"""
uri = client.URI.fromBytes(b'http://example.com/foo')
self.assertEqual(b'/foo', uri.originForm)
def test_originFormComplex(self):
"""
L{client.URI.originForm} produces an absolute I{URI} path including
the I{URI} path, parameters and query string but excludes the fragment
identifier.
"""
uri = client.URI.fromBytes(b'http://example.com/foo;param?a=1#frag')
self.assertEqual(b'/foo;param?a=1', uri.originForm)
def test_originFormNoPath(self):
"""
L{client.URI.originForm} produces a path of C{b'/'} when the I{URI}
specifies no path.
"""
uri = client.URI.fromBytes(b'http://example.com')
self.assertEqual(b'/', uri.originForm)
def test_originFormEmptyPath(self):
"""
L{client.URI.originForm} produces a path of C{b'/'} when the I{URI}
specifies an empty path.
"""
uri = client.URI.fromBytes(b'http://example.com/')
self.assertEqual(b'/', uri.originForm)
def test_externalUnicodeInterference(self):
"""
L{client.URI.fromBytes} parses the scheme, host, and path elements
into L{bytes}, even when passed an URL which has previously been passed
to L{urlparse} as a L{unicode} string.
"""
goodInput = b'http://example.com/path'
badInput = goodInput.decode('ascii')
urlparse(badInput)
uri = client.URI.fromBytes(goodInput)
self.assertIsInstance(uri.scheme, bytes)
self.assertIsInstance(uri.host, bytes)
self.assertIsInstance(uri.path, bytes)
|
|
#!/usr/bin/python
"""
####################################################################################################
TITLE : HPE XP7 Migration, Prepare
DESCRIPTION : Prepare does the setup and start of the CaJ replication
AUTHOR : Koen Schets / StorageTeam
VERSION : Based on previous ODR framework
1.0 Initial version
CONFIG : xpmig.ini
LOG : xpmig_prepare.log
TODO :
Read the source - target mapping file (hostgroup,source box,source ldev,source ldev size,target box,target ldev); see the example line below
Show summary of what is in the mapping file
Request operator confirmation to proceed with the external storage discovery
Check external luns :
- to be not in use already ( no external grp should be defined on it already )
- all mapped ldevs should have an external lun of at least the same size
Define external grps
Add paths to external grps
Define LDEVs on the external grps
Define CaJ relations source - target volumes and start the replication
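Example mapping file line (illustrative values only; the assumed CSV layout follows
the six Mig_tuple fields parsed by this script: hostgroup, source box S/N,
source ldev, source ldev size in blocks, target box S/N, target ldev):
HOSTGRP01,85753,00:10,209715200,358832,64:00
Lines whose first field starts with # are treated as comments and skipped.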
####################################################################################################
raidcom discover external_storage -port CL5-K -I11
root@omep4040:~# raidcom discover external_storage -port CL5-K -I11
PORT WWN PM USED Serial# VENDOR_ID PRODUCT_ID
CL5-K 50060e80164ef920 M NO 85753 HPE P9500
raidcom discover external_storage -port CL6-K -I11
root@omep4040:~# raidcom discover external_storage -port CL6-K -I11
PORT WWN PM USED Serial# VENDOR_ID PRODUCT_ID
CL6-K 50060e80164ef930 M NO 85753 HPE P9500
root@oldp4001:~# raidcom discover lun -port CL5-K -external_wwn 50060e80164ef920 -I11
PORT WWN LUN VOL_Cap(BLK) PRODUCT_ID E_VOL_ID_C
CL5-K 50060e80164ef920 0 209715200 OPEN-V R500 00085753012809
CL5-K 50060e80164ef920 1 209715200 OPEN-V R500 00085753012862
root@oldp4001:~# raidcom discover lun -port CL6-K -external_wwn 50060e80164ef930 -I11
PORT WWN LUN VOL_Cap(BLK) PRODUCT_ID E_VOL_ID_C
CL6-K 50060e80164ef930 0 209715200 OPEN-V R500 00085753012809
CL6-K 50060e80164ef930 1 209715200 OPEN-V R500 00085753012862
root@oldp4001:~# raidcom add external_grp -path_grp 1 -external_grp_id 1-1 -port CL5-K -external_wwn 50060e80164ef920 -lun_id 0 -I11
root@oldp4001:~# raidcom get external_grp -I11
T GROUP Num_LDEV U(%) AV_CAP(GB) R_LVL E_TYPE SL CL DRIVE_TYPE
E 1-1 0 0 202 - OPEN-V 0 0 OPEN-V
root@oldp4001:~# raidcom get external_grp -external_grp_id 1-1 -I11
T GROUP P_NO LDEV# STS LOC_LBA SIZE_LBA Serial#
E 1-1 0 - NML 0x000000000000 0x0000194a6000 358832
--------------------------------------------------------------------------------------------
Add all to migrate external luns as external_grp
================================================
root@oldp4001:~# raidcom add external_grp -path_grp 1 -external_grp_id 1-2 -port CL5-K -external_wwn 50060e80164ef920 -lun_id 1 -I11
root@oldp4001:~# raidcom get external_grp -I11
T GROUP Num_LDEV U(%) AV_CAP(GB) R_LVL E_TYPE SL CL DRIVE_TYPE
E 1-1 0 0 202 - OPEN-V 0 0 OPEN-V
E 1-2 0 0 101 - OPEN-V 0 0 OPEN-V
--------------------------------------------------------------------------------------------
Add paths on all new external_grp
=================================
root@oldp4001:~# raidcom get path -path_grp 1 -I11
PHG GROUP STS CM IF MP# PORT WWN PR LUN PHS Serial# PRODUCT_ID LB PM DM
1 1-1 NML E D 0 CL5-K 50060e80164ef920 1 0 NML 85753 P9500 N M D
1 1-2 NML E D 1 CL5-K 50060e80164ef920 1 1 NML 85753 P9500 N M D
root@oldp4001:~# raidcom add path -path_grp 1 -port CL6-K -external_wwn 50060e80164ef930 -I11
root@oldp4001:~# raidcom get path -path_grp 1 -I11
PHG GROUP STS CM IF MP# PORT WWN PR LUN PHS Serial# PRODUCT_ID LB PM DM
1 1-1 NML E D 0 CL5-K 50060e80164ef920 1 0 NML 85753 P9500 N M D
1 1-1 NML E D 0 CL6-K 50060e80164ef930 2 0 NML 85753 P9500 N M D
1 1-2 NML E D 1 CL5-K 50060e80164ef920 1 1 NML 85753 P9500 N M D
1 1-2 NML E D 1 CL6-K 50060e80164ef930 2 1 NML 85753 P9500 N M D
---------------------------------------------------------------------------------------------
Create ldev's on external_grp's with exactly the same size as the original source lun
======================================================================================
root@oldp4001:~# raidcom discover lun -port CL5-K -external_wwn 50060e80164ef920 -I11
PORT WWN LUN VOL_Cap(BLK) PRODUCT_ID E_VOL_ID_C
CL5-K 50060e80164ef920 0 424304640 OPEN-V R500 00085753012308
CL5-K 50060e80164ef920 1 212152320 OPEN-V R500 00085753012309
root@oldp4001:~# raidcom add ldev -external_grp_id 1-1 -ldev_id 64:00 -capacity 424304640 -I11
root@oldp4001:~# raidcom add ldev -external_grp_id 1-2 -ldev_id 64:01 -capacity 212152320 -I11
root@oldp4001:~# raidcom get ldev -ldev_id 64:00 -fx -I11
Serial# : 358832
LDEV : 6400
SL : 0
CL : 0
VOL_TYPE : OPEN-V-CVS
VOL_Capacity(BLK) : 424304640
NUM_PORT : 0
PORTs :
F_POOLID : NONE
VOL_ATTR : CVS : ELUN
E_VendorID : HP
E_ProductID : OPEN-V
E_VOLID : 523530302030303038353735333031323330382000000000000000000000000000000000
E_VOLID_C : R500 00085753012308 ................
NUM_E_PORT : 2
E_PORTs : CL5-K-0 0 50060e80164ef920 : CL6-K-0 0 50060e80164ef930
LDEV_NAMING :
STS : NML
OPE_TYPE : NONE
OPE_RATE : 100
MP# : 0
SSID : 000E
ALUA : Disable
RSGID : 0
root@oldp4001:~# raidcom get ldev -ldev_id 64:01 -fx -I11
Serial# : 358832
LDEV : 6401
SL : 0
CL : 0
VOL_TYPE : OPEN-V-CVS
VOL_Capacity(BLK) : 212152320
NUM_PORT : 0
PORTs :
F_POOLID : NONE
VOL_ATTR : CVS : ELUN
E_VendorID : HP
E_ProductID : OPEN-V
E_VOLID : 523530302030303038353735333031323330392000000000000000000000000000000000
E_VOLID_C : R500 00085753012309 ................
NUM_E_PORT : 2
E_PORTs : CL5-K-0 1 50060e80164ef920 : CL6-K-0 1 50060e80164ef930
LDEV_NAMING :
STS : NML
OPE_TYPE : NONE
OPE_RATE : 100
MP# : 1
SSID : 000E
ALUA : Disable
RSGID : 0
root@oldp4001:~# raidcom get external_grp -I11
T GROUP Num_LDEV U(%) AV_CAP(GB) R_LVL E_TYPE SL CL DRIVE_TYPE
E 1-1 1 100 0 - OPEN-V 0 0 OPEN-V
E 1-2 1 100 0 - OPEN-V 0 0 OPEN-V
------------------------------------------------------------------------------------------------
Name all created external ldevs
===============================
root@oldp4001:~# raidcom modify ldev -ldev_id 64:00 -ldev_name AP0758_6400 -I11
root@oldp4001:~# raidcom modify ldev -ldev_id 64:01 -ldev_name AP0758_6401 -I11
root@oldp4001:~# raidcom get ldev -ldev_id 64:00 -fx -cnt 2 -I11
Serial# : 358832
LDEV : 6400
SL : 0
CL : 0
VOL_TYPE : OPEN-V-CVS
VOL_Capacity(BLK) : 424304640
NUM_PORT : 0
PORTs :
F_POOLID : NONE
VOL_ATTR : CVS : ELUN
E_VendorID : HP
E_ProductID : OPEN-V
E_VOLID : 523530302030303038353735333031323330382000000000000000000000000000000000
E_VOLID_C : R500 00085753012308 ................
NUM_E_PORT : 2
E_PORTs : CL5-K-0 0 50060e80164ef920 : CL6-K-0 0 50060e80164ef930
LDEV_NAMING : AP0758_6400
STS : NML
OPE_TYPE : NONE
OPE_RATE : 100
MP# : 0
SSID : 000E
ALUA : Disable
RSGID : 0
Serial# : 358832
LDEV : 6401
SL : 0
CL : 0
VOL_TYPE : OPEN-V-CVS
VOL_Capacity(BLK) : 212152320
NUM_PORT : 0
PORTs :
F_POOLID : NONE
VOL_ATTR : CVS : ELUN
E_VendorID : HP
E_ProductID : OPEN-V
E_VOLID : 523530302030303038353735333031323330392000000000000000000000000000000000
E_VOLID_C : R500 00085753012309 ................
NUM_E_PORT : 2
E_PORTs : CL5-K-0 1 50060e80164ef920 : CL6-K-0 1 50060e80164ef930
LDEV_NAMING : AP0758_6401
STS : NML
OPE_TYPE : NONE
OPE_RATE : 100
MP# : 1
SSID : 000E
ALUA : Disable
RSGID : 0
Delete external ldevs:
raidcom delete ldev -ldev_id 94:00 -I11
raidcom delete ldev -ldev_id 94:01 -I11
Delete external_grp's:
raidcom disconnect external_grp -external_grp_id 1-1 -I11
raidcom disconnect external_grp -external_grp_id 1-2 -I11
raidcom delete external_grp -external_grp_id 1-1 -I11
raidcom delete external_grp -external_grp_id 1-2 -I11
"""
import curses
from curses import panel
import re
import logging
import logging.handlers
from ConfigParser import ConfigParser
import sys
import os
import os.path
import csv
import string
import xp7
import miglog
import collections
####################################################################################################
### VARIABLES
####################################################################################################
linelen = 100
Mig_tuple = collections.namedtuple("Mig_tuple",["hostgroup","source_box_sn","source_ldev_nbr","source_ldev_size","target_box_sn","target_ldev_nbr"])
mig_list = []
target_storage_dict = {}
comment_re = re.compile("^#")
name_by_serial_dict = {}
serial_by_name_dict = {}
instance_dict = {}
####################################################################################################
### FUNCTIONS
####################################################################################################
####################################################################################################
### CLASSES
####################################################################################################
class Menu(object):
def __init__(self,window,items,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
self.position = 0
self.items = items
self.items.append(("exit","exit"))
def navigate(self,n):
self.position += n
if self.position < 0:
self.position = 0
elif self.position >= len(self.items):
self.position = len(self.items) - 1
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
while True:
self.window.refresh()
curses.doupdate()
for index,item in enumerate(self.items):
if index == self.position:
mode = curses.A_STANDOUT
else:
mode = curses.A_NORMAL
# line = "{}: {}".format(index,item[0])
line = "{}".format(item[0])
if len(line) >= self.width:
line = line[:self.width-1]
self.window.addstr(1+index,2,line,mode)
key = self.window.getch()
if key in [curses.KEY_ENTER,ord("\n"),ord("B"),ord("b")]:
if self.position == len(self.items) - 1:
break
else:
### call the next menu item ###
self.items[self.position][1]()
elif key == curses.KEY_UP:
self.navigate(-1)
elif key == curses.KEY_DOWN:
self.navigate(1)
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.doupdate()
class Map_menu(object):
def __init__(self,window,map_dir,selection,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.window.keypad(1)
self.panel = panel.new_panel(self.window)
self.panel.hide()
panel.update_panels()
self.map_file_list = []
self.slice_start = 0
self.slice_end = 0
self.slice_len = 0
self.position = 0
self.map_dir = map_dir
self.selection = selection
def update(self):
"""
Read the map_dir and fill the list of map files
"""
if os.path.exists(self.map_dir):
del(self.map_file_list[:])
self.map_file_list = [f for f in os.listdir(self.map_dir) if os.path.isfile(os.path.join(self.map_dir,f)) and re.match(r".+\.map$",f,flags=re.IGNORECASE)]
self.map_file_list.append("exit")
self.slice_len = min(len(self.map_file_list)-1, self.heigth-6)
self.slice_start = 0
self.slice_end = self.slice_start + self.slice_len
self.position = 0
def navigate(self,n):
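### move the highlight by n (clamped to the list bounds) and slide the visible ###
### slice [slice_start..slice_end] along with it so the selection stays on screen ###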
self.position += n
if self.position < 0:
self.position = 0
elif self.position >= len(self.map_file_list):
self.position = len(self.map_file_list)-1
if n < 0:
if self.position - self.slice_start < 2 and self.slice_start >= 1:
### slide slice up ###
self.slice_start += n
if self.slice_start < 0:
self.slice_start = 0
self.slice_end = self.slice_start + self.slice_len
elif n > 0:
if self.slice_end - self.position < 2 and self.slice_end < len(self.map_file_list) - 1:
### slide slice down ###
self.slice_end += n
if self.slice_end > len(self.map_file_list) - 1:
self.slice_end = len(self.map_file_list) - 1
self.slice_start = self.slice_end - self.slice_len
def display(self):
self.panel.top()
self.panel.show()
self.window.clear()
self.update()
while True:
self.window.clear()
self.window.refresh()
curses.doupdate()
### show the list of map files ###
for index,item in enumerate(self.map_file_list):
if index == self.position:
mode = curses.A_STANDOUT
else:
mode = curses.A_NORMAL
line = "{}".format(item)
if len(line) >= self.width:
line = line[:self.width-1]
### only add lines in the slice ###
if self.slice_start <= index <= self.slice_end:
self.window.addstr(1+(index-self.slice_start),2,line,mode)
key = self.window.getch()
if key in [ord("b"),ord("B")]:
break
elif key in [curses.KEY_ENTER,ord("\n")]:
if self.position == len(self.map_file_list)-1:
break
else:
logger.info("MAP FILE: {} select for processing".format(self.map_file_list[self.position]))
self.window.clear()
self.window.refresh()
### read & parse the map file ###
map_file = os.path.join(self.map_dir,self.map_file_list[self.position])
self.window.addstr(2,2,"Processing {}".format(map_file))
map_file_ok = True
line_nbr = 0
with open(map_file,"rt") as f:
map_file_reader = csv.reader(f)
for row in map_file_reader:
line_nbr += 1
if row and not comment_re.match(row[0]):
if len(row) == 6:
mig_tuple = Mig_tuple._make(row)
mig_list.append(mig_tuple)
logger.debug("added {}".format(mig_tuple))
else:
map_file_ok = False
logger.error("{} map file contains invalid data, please correct & re-run".format(map_file))
logger.error("{}: {}".format(line_nbr,row))
self.window.addstr(3,2,"Map file contains an invalid line, please correct & re-run")
self.window.addstr(4,2,"{}: {}".format(line_nbr,row))
key = self.window.getch()
break
if map_file_ok:
self.window.addstr(3,2,"Map file read OK, {} source - target relations discovered".format(len(mig_list)))
else:
self.window.addstr(3,2,"Map file could not be processed, please correct & re-run")
key = self.window.getch()
break
### request confirmation to proceed with external storage discovery ###
self.window.addstr(4,2,"Are all target volumes mapped to XP7 ? (y/N)")
key = self.window.getch()
if key in [ord("y"),ord("Y")]:
### proceed with external storage discovery ###
self.window.addstr(5,2,"Proceeding with target storage discovery..")
### check which target storage is in scope ###
target_sn_set = set([x.target_box_sn for x in mig_list])
for target_sn in target_sn_set:
if target_sn in name_by_serial_dict:
target_name = name_by_serial_dict[target_sn]
### now we're able to find the IO ports to scan
if target_name in target_storage_dict:
ext_storage_ports = target_storage_dict[target_name]
for external_storage_port in ext_storage_ports:
### get the wwn ###
self.window.addstr(6,2,"External storage discovery on port {}".format(external_storage_port))
else:
logger.error("No target IO ports defined for target {},skipping..".format(target_name))
else:
logger.error("Unknown target box S/N {}, skipping".format(target_sn)
else:
logger.info("MAP FILE: exit processing {}, target storage not mapped..".format(map_file))
break
elif key == curses.KEY_UP:
self.navigate(-1)
elif key == curses.KEY_DOWN:
self.navigate(1)
elif key == curses.KEY_PPAGE:
self.navigate(-10)
elif key == curses.KEY_NPAGE:
self.navigate(10)
self.window.clear()
self.panel.hide()
panel.update_panels()
curses.doupdate()
class Selection(object):
def __init__(self,window,title,stdscr):
self.window = window
self.heigth,self.width = self.window.getmaxyx()
self.title = title
self.selection = []
def display(self):
self.window.clear()
line = "{} : {}".format(self.title, ",".join(["{}-{}".format(x[0],x[1]) for x in self.selection]))
if len(line) >= self.width:
line = line[:self.width-1]
self.window.addstr(1,2,line)
self.window.border()
self.window.refresh()
curses.doupdate()
def add(self,item):
current_set = set(self.selection)
current_set.add(item)
self.selection = list(sorted(current_set))
self.display()
def clear(self):
del self.selection[:]
self.display()
def get(self):
return self.selection
####################################################################################################
### MAIN
####################################################################################################
def main(stdscr):
### clear screen ###
stdscr.clear()
### check window height and width ###
if curses.COLS < 20 or curses.LINES < 20:
sys.stderr.write("Window not large enough, exiting..\n")
sys.exit(1)
### define title_win ###
title_win = stdscr.subwin(3,curses.COLS,0,0)
title_win.addstr(1,2,"HPE P9500 to XP7 MIGRATION PREPARE")
title_win.border()
### define selection_win ###
select_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0)
selection = Selection(select_win,"SELECTED HOSTGROUP(s)",stdscr)
selection.display()
### define menu_win ###
menu_win = stdscr.subwin(curses.LINES-7,curses.COLS,3,0)
main_menu_items = []
map_menu = Map_menu(menu_win,map_file_dir,selection,stdscr)
main_menu_items.append(("Process MAP file",map_menu.display))
### define status_win ###
# status_win = stdscr.subwin(3,curses.COLS,curses.LINES-4,0)
# status_win.addstr(1,2,"STATUS:")
# status_win.border()
### fire up the main menu ###
main_menu = Menu(menu_win,main_menu_items,stdscr)
main_menu.display()
### getting here means we exited the menu ###
stdscr.refresh()
configfile = "xpmig.ini"
cfg = ConfigParser()
cfg.read(configfile)
for mandatory_section in ("dir","target"):
if not cfg.has_section(mandatory_section):
sys.stderr.write("{} section missing in the config file {}, exiting..".format(mandatory_section,configfile))
sys.exit(1)
for name,value in cfg.items("target"):
target_storage_dict[name] = value.split(",")
try:
map_file_dir = cfg.get("dir","map")
except:
map_file_dir = "/tmp"
try:
loglevel = cfg.getint("log","level")
except:
loglevel = 30
try:
logdir = cfg.get("dir","log")
except:
logdir = "/tmp/log"
try:
logsize = cfg.getint("log","size")
except:
logsize = 100000000
try:
logversions = cfg.getint("log","maxversions")
except:
logversions = 5
for name,value in cfg.items("serialnbr"):
serial_by_name_dict[name] = value
name_by_serial_dict[value] = name
for name,value in cfg.items("instance"):
instance_dict[name] = value
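### Illustrative xpmig.ini layout (all values are placeholders, not taken from a ###
### real environment); only the sections and options read above are shown:       ###
#
# [dir]
# map = /tmp/xpmig/map
# log = /tmp/xpmig/log
#
# [target]
# # per target box name: comma separated list of IO ports to scan for external storage
# xp7_box1 = CL5-K,CL6-K
#
# [log]
# level = 20
# size = 100000000
# maxversions = 5
#
# [serialnbr]
# # box name -> serial number
# xp7_box1 = 358832
#
# [instance]
# # box name -> raidcom instance number (assumed use, based on the -I<instance> examples above)
# xp7_box1 = 11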
#####################
### start logging ###
#####################
logger = logging.getLogger("xpmig_prepare")
logger.setLevel(loglevel)
logfile = os.path.join(logdir,"xpmig_prepare.log")
fh = logging.handlers.RotatingFileHandler(logfile,maxBytes=logsize,backupCount=logversions)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s","%Y/%m/%d-%H:%M:%S")
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info("#" * linelen)
logger.info("XPMIG PREPARE started")
logger.info("#" * linelen)
logger.info("Configuration settings :")
logger.info("MAPPING dir: {}".format(map_file_dir))
miglog = miglog.Miglog(logdir,"PREPARE")
#####################
### start menu ###
#####################
curses.wrapper(main)
#####################
### stop logging ###
#####################
logger.info("#" * linelen)
logger.info("XPMIG PREPARE ended")
logger.info("#" * linelen)
|
|
#!/usr/bin/python
"""
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
HTTP server for our HTML rebaseline viewer.
"""
# System-level imports
import argparse
import BaseHTTPServer
import json
import logging
import os
import posixpath
import re
import shutil
import socket
import subprocess
import thread
import threading
import time
import urlparse
# Must fix up PYTHONPATH before importing from within Skia
import fix_pythonpath # pylint: disable=W0611
# Imports from within Skia
from py.utils import gs_utils
import gm_json
# Imports from local dir
#
# pylint: disable=C0301
# Note: we import results under a different name, to avoid confusion with the
# Server.results() property. See discussion at
# https://codereview.chromium.org/195943004/diff/1/gm/rebaseline_server/server.py#newcode44
# pylint: enable=C0301
import compare_configs
import compare_to_expectations
import download_actuals
import imagediffdb
import imagepairset
import results as results_mod
PATHSPLIT_RE = re.compile('/([^/]+)/(.+)')
# A simple dictionary of file name extensions to MIME types. The empty string
# entry is used as the default when no extension was given or if the extension
# has no entry in this dictionary.
MIME_TYPE_MAP = {'': 'application/octet-stream',
'html': 'text/html',
'css': 'text/css',
'png': 'image/png',
'js': 'application/javascript',
'json': 'application/json'
}
# Keys that server.py uses to create the toplevel content header.
# NOTE: Keep these in sync with static/constants.js
KEY__EDITS__MODIFICATIONS = 'modifications'
KEY__EDITS__OLD_RESULTS_HASH = 'oldResultsHash'
KEY__EDITS__OLD_RESULTS_TYPE = 'oldResultsType'
DEFAULT_ACTUALS_DIR = results_mod.DEFAULT_ACTUALS_DIR
DEFAULT_GM_SUMMARIES_BUCKET = download_actuals.GM_SUMMARIES_BUCKET
DEFAULT_JSON_FILENAME = download_actuals.DEFAULT_JSON_FILENAME
DEFAULT_PORT = 8888
PARENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
TRUNK_DIRECTORY = os.path.dirname(os.path.dirname(PARENT_DIRECTORY))
# Directory, relative to PARENT_DIRECTORY, within which the server will serve
# out live results (not static files).
RESULTS_SUBDIR = 'results'
# Directory, relative to PARENT_DIRECTORY, within which the server will serve
# out static files.
STATIC_CONTENTS_SUBDIR = 'static'
# All of the GENERATED_*_SUBDIRS are relative to STATIC_CONTENTS_SUBDIR
GENERATED_HTML_SUBDIR = 'generated-html'
GENERATED_IMAGES_SUBDIR = 'generated-images'
GENERATED_JSON_SUBDIR = 'generated-json'
# How often (in seconds) clients should reload while waiting for initial
# results to load.
RELOAD_INTERVAL_UNTIL_READY = 10
SUMMARY_TYPES = [
results_mod.KEY__HEADER__RESULTS_FAILURES,
results_mod.KEY__HEADER__RESULTS_ALL,
]
# If --compare-configs is specified, compare these configs.
CONFIG_PAIRS_TO_COMPARE = [('8888', 'gpu')]
_HTTP_HEADER_CONTENT_LENGTH = 'Content-Length'
_HTTP_HEADER_CONTENT_TYPE = 'Content-Type'
_SERVER = None # This gets filled in by main()
def _run_command(args, directory):
"""Runs a command and returns stdout as a single string.
Args:
args: the command to run, as a list of arguments
directory: directory within which to run the command
Returns: stdout, as a string
Raises an Exception if the command failed (exited with nonzero return code).
"""
logging.debug('_run_command: %s in directory %s' % (args, directory))
proc = subprocess.Popen(args, cwd=directory,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
raise Exception('command "%s" failed in dir "%s": %s' %
(args, directory, stderr))
return stdout
def _get_routable_ip_address():
"""Returns routable IP address of this host (the IP address of its network
interface that would be used for most traffic, not its localhost
interface). See http://stackoverflow.com/a/166589 """
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('8.8.8.8', 80))
host = sock.getsockname()[0]
sock.close()
return host
def _create_index(file_path, config_pairs):
"""Creates an index file linking to all results available from this server.
Prior to https://codereview.chromium.org/215503002 , we had a static
index.html within our repo. But now that the results may or may not include
config comparisons, index.html needs to be generated differently depending
on which results are included.
TODO(epoger): Instead of including raw HTML within the Python code,
consider restoring the index.html file as a template and using django (or
similar) to fill in dynamic content.
Args:
file_path: path on local disk to write index to; any directory components
of this path that do not already exist will be created
config_pairs: what pairs of configs (if any) we compare actual results of
"""
dir_path = os.path.dirname(file_path)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
with open(file_path, 'w') as file_handle:
file_handle.write(
'<!DOCTYPE html><html>'
'<head><title>rebaseline_server</title></head>'
'<body><ul>')
if SUMMARY_TYPES:
file_handle.write('<li>Expectations vs Actuals</li><ul>')
for summary_type in SUMMARY_TYPES:
file_handle.write(
'<li><a href="/{static_subdir}/view.html#/view.html?'
'resultsToLoad=/{results_subdir}/{summary_type}">'
'{summary_type}</a></li>'.format(
results_subdir=RESULTS_SUBDIR,
static_subdir=STATIC_CONTENTS_SUBDIR,
summary_type=summary_type))
file_handle.write('</ul>')
if config_pairs:
file_handle.write('<li>Comparing configs within actual results</li><ul>')
for config_pair in config_pairs:
file_handle.write('<li>%s vs %s:' % config_pair)
for summary_type in SUMMARY_TYPES:
file_handle.write(
' <a href="/%s/view.html#/view.html?'
'resultsToLoad=/%s/%s/%s-vs-%s_%s.json">%s</a>' % (
STATIC_CONTENTS_SUBDIR, STATIC_CONTENTS_SUBDIR,
GENERATED_JSON_SUBDIR, config_pair[0], config_pair[1],
summary_type, summary_type))
file_handle.write('</li>')
file_handle.write('</ul>')
file_handle.write('</ul></body></html>')
class Server(object):
""" HTTP server for our HTML rebaseline viewer. """
def __init__(self,
actuals_dir=DEFAULT_ACTUALS_DIR,
json_filename=DEFAULT_JSON_FILENAME,
gm_summaries_bucket=DEFAULT_GM_SUMMARIES_BUCKET,
port=DEFAULT_PORT, export=False, editable=True,
reload_seconds=0, config_pairs=None, builder_regex_list=None):
"""
Args:
actuals_dir: directory under which we will check out the latest actual
GM results
json_filename: basename of the JSON summary file to load for each builder
gm_summaries_bucket: Google Storage bucket to download json_filename
files from; if None or '', don't fetch new actual-results files
at all, just compare to whatever files are already in actuals_dir
port: which TCP port to listen on for HTTP requests
export: whether to allow HTTP clients on other hosts to access this server
editable: whether HTTP clients are allowed to submit new baselines
reload_seconds: polling interval with which to check for new results;
if 0, don't check for new results at all
config_pairs: List of (string, string) tuples; for each tuple, compare
actual results of these two configs. If None or empty,
don't compare configs at all.
builder_regex_list: List of regular expressions specifying which builders
we will process. If None, process all builders.
"""
self._actuals_dir = actuals_dir
self._json_filename = json_filename
self._gm_summaries_bucket = gm_summaries_bucket
self._port = port
self._export = export
self._editable = editable
self._reload_seconds = reload_seconds
self._config_pairs = config_pairs or []
self._builder_regex_list = builder_regex_list
_create_index(
file_path=os.path.join(
PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR, GENERATED_HTML_SUBDIR,
"index.html"),
config_pairs=config_pairs)
# Reentrant lock that must be held whenever updating EITHER of:
# 1. self._results
# 2. the expected or actual results on local disk
self.results_rlock = threading.RLock()
# These will be filled in by calls to update_results()
self._results = None
self._image_diff_db = None
@property
def results(self):
""" Returns the most recently generated results, or None if we don't have
any valid results (update_results() has not completed yet). """
return self._results
@property
def is_exported(self):
""" Returns true iff HTTP clients on other hosts are allowed to access
this server. """
return self._export
@property
def is_editable(self):
""" Returns true iff HTTP clients are allowed to submit new baselines. """
return self._editable
@property
def reload_seconds(self):
""" Returns the result reload period in seconds, or 0 if we don't reload
results. """
return self._reload_seconds
def update_results(self, invalidate=False):
""" Create or update self._results, based on the latest expectations and
actuals.
We hold self.results_rlock while we do this, to guarantee that no other
thread attempts to update either self._results or the underlying files at
the same time.
Args:
invalidate: if True, invalidate self._results immediately upon entry;
otherwise, we will let readers see those results until we
replace them
"""
with self.results_rlock:
if invalidate:
self._results = None
if self._gm_summaries_bucket:
logging.info(
'Updating GM result summaries in %s from gm_summaries_bucket %s ...'
% (self._actuals_dir, self._gm_summaries_bucket))
# Clean out actuals_dir first, in case some builders have gone away
# since we last ran.
if os.path.isdir(self._actuals_dir):
shutil.rmtree(self._actuals_dir)
# Get the list of builders we care about.
all_builders = download_actuals.get_builders_list(
summaries_bucket=self._gm_summaries_bucket)
if self._builder_regex_list:
matching_builders = []
for builder in all_builders:
for regex in self._builder_regex_list:
if re.match(regex, builder):
matching_builders.append(builder)
break # go on to the next builder, no need to try more regexes
else:
matching_builders = all_builders
# Download the JSON file for each builder we care about.
#
# TODO(epoger): When this is a large number of builders, we would be
# better off downloading them in parallel!
for builder in matching_builders:
gs_utils.download_file(
source_bucket=self._gm_summaries_bucket,
source_path=posixpath.join(builder, self._json_filename),
dest_path=os.path.join(self._actuals_dir, builder,
self._json_filename),
create_subdirs_if_needed=True)
# We only update the expectations dir if the server was run with a
# nonzero --reload argument; otherwise, we expect the user to maintain
# her own expectations as she sees fit.
#
# Because the Skia repo is hosted using git, and git does not
# support updating a single directory tree, we have to update the entire
# repo checkout.
#
# Because Skia uses depot_tools, we have to update using "gclient sync"
# instead of raw git commands.
#
# TODO(epoger): Fetch latest expectations in some other way.
# Eric points out that our official documentation recommends an
# unmanaged Skia checkout, so "gclient sync" will not bring down updated
# expectations from origin/master-- you'd have to do a "git pull" of
# some sort instead.
# However, the live rebaseline_server at
# http://skia-tree-status.appspot.com/redirect/rebaseline-server (which
# is probably the only user of the --reload flag!) uses a managed
# checkout, so "gclient sync" works in that case.
# Probably the best idea is to avoid all of this nonsense by fetching
# updated expectations into a temp directory, and leaving the rest of
# the checkout alone. This could be done using "git show", or by
# downloading individual expectation JSON files from
# skia.googlesource.com .
if self._reload_seconds:
logging.info(
'Updating expected GM results in %s by syncing Skia repo ...' %
compare_to_expectations.DEFAULT_EXPECTATIONS_DIR)
_run_command(['gclient', 'sync'], TRUNK_DIRECTORY)
if not self._image_diff_db:
self._image_diff_db = imagediffdb.ImageDiffDB(
storage_root=os.path.join(
PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR,
GENERATED_IMAGES_SUBDIR))
self._results = compare_to_expectations.ExpectationComparisons(
image_diff_db=self._image_diff_db,
actuals_root=self._actuals_dir,
diff_base_url=posixpath.join(
os.pardir, STATIC_CONTENTS_SUBDIR, GENERATED_IMAGES_SUBDIR),
builder_regex_list=self._builder_regex_list)
json_dir = os.path.join(
PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR, GENERATED_JSON_SUBDIR)
if not os.path.isdir(json_dir):
os.makedirs(json_dir)
for config_pair in self._config_pairs:
config_comparisons = compare_configs.ConfigComparisons(
configs=config_pair,
actuals_root=self._actuals_dir,
generated_images_root=os.path.join(
PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR,
GENERATED_IMAGES_SUBDIR),
diff_base_url=posixpath.join(
os.pardir, GENERATED_IMAGES_SUBDIR),
builder_regex_list=self._builder_regex_list)
for summary_type in SUMMARY_TYPES:
gm_json.WriteToFile(
config_comparisons.get_packaged_results_of_type(
results_type=summary_type),
os.path.join(
json_dir, '%s-vs-%s_%s.json' % (
config_pair[0], config_pair[1], summary_type)))
def _result_loader(self, reload_seconds=0):
""" Call self.update_results(), either once or periodically.
Params:
reload_seconds: integer; if nonzero, reload results at this interval
(in which case, this method will never return!)
"""
self.update_results()
logging.info('Initial results loaded. Ready for requests on %s' % self._url)
if reload_seconds:
while True:
time.sleep(reload_seconds)
self.update_results()
def run(self):
arg_tuple = (self._reload_seconds,) # start_new_thread needs a tuple,
# even though it holds just one param
thread.start_new_thread(self._result_loader, arg_tuple)
if self._export:
server_address = ('', self._port)
host = _get_routable_ip_address()
if self._editable:
logging.warning('Running with combination of "export" and "editable" '
'flags. Users on other machines will '
'be able to modify your GM expectations!')
else:
host = '127.0.0.1'
server_address = (host, self._port)
# pylint: disable=W0201
http_server = BaseHTTPServer.HTTPServer(server_address, HTTPRequestHandler)
self._url = 'http://%s:%d' % (host, http_server.server_port)
logging.info('Listening for requests on %s' % self._url)
http_server.serve_forever()
class HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
""" HTTP request handlers for various types of queries this server knows
how to handle (static HTML and Javascript, expected/actual results, etc.)
"""
def do_GET(self):
"""
Handles all GET requests, forwarding them to the appropriate
do_GET_* dispatcher.
If we see any Exceptions, return a 404. This fixes http://skbug.com/2147
"""
try:
logging.debug('do_GET: path="%s"' % self.path)
      if self.path in ('', '/', '/index.html'):
self.redirect_to('/%s/%s/index.html' % (
STATIC_CONTENTS_SUBDIR, GENERATED_HTML_SUBDIR))
return
      if self.path == '/favicon.ico':
self.redirect_to('/%s/favicon.ico' % STATIC_CONTENTS_SUBDIR)
return
# All requests must be of this form:
# /dispatcher/remainder
# where 'dispatcher' indicates which do_GET_* dispatcher to run
# and 'remainder' is the remaining path sent to that dispatcher.
normpath = posixpath.normpath(self.path)
(dispatcher_name, remainder) = PATHSPLIT_RE.match(normpath).groups()
dispatchers = {
RESULTS_SUBDIR: self.do_GET_results,
STATIC_CONTENTS_SUBDIR: self.do_GET_static,
}
dispatcher = dispatchers[dispatcher_name]
dispatcher(remainder)
except:
self.send_error(404)
raise
def do_GET_results(self, results_type):
""" Handle a GET request for GM results.
Args:
results_type: string indicating which set of results to return;
must be one of the results_mod.RESULTS_* constants
"""
logging.debug('do_GET_results: sending results of type "%s"' % results_type)
# Since we must make multiple calls to the ExpectationComparisons object,
# grab a reference to it in case it is updated to point at a new
# ExpectationComparisons object within another thread.
#
# TODO(epoger): Rather than using a global variable for the handler
# to refer to the Server object, make Server a subclass of
# HTTPServer, and then it could be available to the handler via
# the handler's .server instance variable.
results_obj = _SERVER.results
if results_obj:
response_dict = results_obj.get_packaged_results_of_type(
results_type=results_type, reload_seconds=_SERVER.reload_seconds,
is_editable=_SERVER.is_editable, is_exported=_SERVER.is_exported)
else:
now = int(time.time())
response_dict = {
imagepairset.KEY__ROOT__HEADER: {
results_mod.KEY__HEADER__SCHEMA_VERSION: (
results_mod.VALUE__HEADER__SCHEMA_VERSION),
results_mod.KEY__HEADER__IS_STILL_LOADING: True,
results_mod.KEY__HEADER__TIME_UPDATED: now,
results_mod.KEY__HEADER__TIME_NEXT_UPDATE_AVAILABLE: (
now + RELOAD_INTERVAL_UNTIL_READY),
},
}
self.send_json_dict(response_dict)
def do_GET_static(self, path):
""" Handle a GET request for a file under STATIC_CONTENTS_SUBDIR .
Only allow serving of files within STATIC_CONTENTS_SUBDIR that is a
filesystem sibling of this script.
Args:
path: path to file (within STATIC_CONTENTS_SUBDIR) to retrieve
"""
# Strip arguments ('?resultsToLoad=all') from the path
path = urlparse.urlparse(path).path
logging.debug('do_GET_static: sending file "%s"' % path)
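    # Resolve both the static root and the requested file to absolute,
    # symlink-free paths, then require that the file lies inside the static
    # root; this containment check is what blocks '..' path-traversal requests.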
static_dir = os.path.realpath(os.path.join(
PARENT_DIRECTORY, STATIC_CONTENTS_SUBDIR))
full_path = os.path.realpath(os.path.join(static_dir, path))
if full_path.startswith(static_dir):
self.send_file(full_path)
else:
logging.error(
'Attempted do_GET_static() of path [%s] outside of static dir [%s]'
% (full_path, static_dir))
self.send_error(404)
def do_POST(self):
""" Handles all POST requests, forwarding them to the appropriate
do_POST_* dispatcher. """
# All requests must be of this form:
# /dispatcher
# where 'dispatcher' indicates which do_POST_* dispatcher to run.
logging.debug('do_POST: path="%s"' % self.path)
normpath = posixpath.normpath(self.path)
dispatchers = {
'/edits': self.do_POST_edits,
}
try:
dispatcher = dispatchers[normpath]
dispatcher()
self.send_response(200)
except:
self.send_error(404)
raise
def do_POST_edits(self):
""" Handle a POST request with modifications to GM expectations, in this
format:
{
KEY__EDITS__OLD_RESULTS_TYPE: 'all', # type of results that the client
# loaded and then made
# modifications to
KEY__EDITS__OLD_RESULTS_HASH: 39850913, # hash of results when the client
# loaded them (ensures that the
# client and server apply
# modifications to the same base)
KEY__EDITS__MODIFICATIONS: [
# as needed by compare_to_expectations.edit_expectations()
...
],
}
Raises an Exception if there were any problems.
"""
if not _SERVER.is_editable:
raise Exception('this server is not running in --editable mode')
content_type = self.headers[_HTTP_HEADER_CONTENT_TYPE]
if content_type != 'application/json;charset=UTF-8':
raise Exception('unsupported %s [%s]' % (
_HTTP_HEADER_CONTENT_TYPE, content_type))
content_length = int(self.headers[_HTTP_HEADER_CONTENT_LENGTH])
json_data = self.rfile.read(content_length)
data = json.loads(json_data)
logging.debug('do_POST_edits: received new GM expectations data [%s]' %
data)
# Update the results on disk with the information we received from the
# client.
# We must hold _SERVER.results_rlock while we do this, to guarantee that
# no other thread updates expectations (from the Skia repo) while we are
# updating them (using the info we received from the client).
with _SERVER.results_rlock:
oldResultsType = data[KEY__EDITS__OLD_RESULTS_TYPE]
oldResults = _SERVER.results.get_results_of_type(oldResultsType)
oldResultsHash = str(hash(repr(
oldResults[imagepairset.KEY__ROOT__IMAGEPAIRS])))
if oldResultsHash != data[KEY__EDITS__OLD_RESULTS_HASH]:
raise Exception('results of type "%s" changed while the client was '
'making modifications. The client should reload the '
'results and submit the modifications again.' %
oldResultsType)
_SERVER.results.edit_expectations(data[KEY__EDITS__MODIFICATIONS])
# Read the updated results back from disk.
# We can do this in a separate thread; we should return our success message
# to the UI as soon as possible.
thread.start_new_thread(_SERVER.update_results, (True,))
def redirect_to(self, url):
""" Redirect the HTTP client to a different url.
Args:
url: URL to redirect the HTTP client to
"""
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
def send_file(self, path):
""" Send the contents of the file at this path, with a mimetype based
on the filename extension.
Args:
path: path of file whose contents to send to the HTTP client
"""
# Grab the extension if there is one
extension = os.path.splitext(path)[1]
if len(extension) >= 1:
extension = extension[1:]
# Determine the MIME type of the file from its extension
mime_type = MIME_TYPE_MAP.get(extension, MIME_TYPE_MAP[''])
# Open the file and send it over HTTP
if os.path.isfile(path):
with open(path, 'rb') as sending_file:
self.send_response(200)
self.send_header('Content-type', mime_type)
self.end_headers()
self.wfile.write(sending_file.read())
else:
self.send_error(404)
def send_json_dict(self, json_dict):
""" Send the contents of this dictionary in JSON format, with a JSON
mimetype.
Args:
json_dict: dictionary to send
"""
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
json.dump(json_dict, self.wfile)
def main():
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('--actuals-dir',
help=('Directory into which we will check out the latest '
'actual GM results. If this directory does not '
'exist, it will be created. Defaults to %(default)s'),
default=DEFAULT_ACTUALS_DIR)
# TODO(epoger): Before https://codereview.chromium.org/310093003 ,
# when this tool downloaded the JSON summaries from skia-autogen,
# it had an --actuals-revision the caller could specify to download
# actual results as of a specific point in time. We should add similar
# functionality when retrieving the summaries from Google Storage.
parser.add_argument('--builders', metavar='BUILDER_REGEX', nargs='+',
help=('Only process builders matching these regular '
'expressions. If unspecified, process all '
'builders.'))
parser.add_argument('--compare-configs', action='store_true',
help=('In addition to generating differences between '
'expectations and actuals, also generate '
'differences between these config pairs: '
+ str(CONFIG_PAIRS_TO_COMPARE)))
parser.add_argument('--editable', action='store_true',
help=('Allow HTTP clients to submit new baselines.'))
parser.add_argument('--export', action='store_true',
help=('Instead of only allowing access from HTTP clients '
'on localhost, allow HTTP clients on other hosts '
'to access this server. WARNING: doing so will '
'allow users on other hosts to modify your '
'GM expectations, if combined with --editable.'))
parser.add_argument('--gm-summaries-bucket',
help=('Google Cloud Storage bucket to download '
'JSON_FILENAME files from. '
'Defaults to %(default)s ; if set to '
'empty string, just compare to actual-results '
'already found in ACTUALS_DIR.'),
default=DEFAULT_GM_SUMMARIES_BUCKET)
parser.add_argument('--json-filename',
help=('JSON summary filename to read for each builder; '
'defaults to %(default)s.'),
default=DEFAULT_JSON_FILENAME)
parser.add_argument('--port', type=int,
help=('Which TCP port to listen on for HTTP requests; '
'defaults to %(default)s'),
default=DEFAULT_PORT)
parser.add_argument('--reload', type=int,
help=('How often (a period in seconds) to update the '
'results. If specified, both expected and actual '
'results will be updated by running "gclient sync" '
'on your Skia checkout as a whole. '
'By default, we do not reload at all, and you '
'must restart the server to pick up new data.'),
default=0)
args = parser.parse_args()
if args.compare_configs:
config_pairs = CONFIG_PAIRS_TO_COMPARE
else:
config_pairs = None
global _SERVER
_SERVER = Server(actuals_dir=args.actuals_dir,
json_filename=args.json_filename,
gm_summaries_bucket=args.gm_summaries_bucket,
port=args.port, export=args.export, editable=args.editable,
reload_seconds=args.reload, config_pairs=config_pairs,
builder_regex_list=args.builders)
_SERVER.run()
if __name__ == '__main__':
main()
|
|
from __future__ import annotations
import abc
import shutil
import functools
from pathlib import Path
import urllib.parse
from typing import (
Callable, Any, TypeVar, cast, Tuple, Dict, Optional,
Union, Hashable,
)
import logging
from edgar_code.types import PathLike, Serializer, UserDict
from edgar_code.util.picklable_threading import RLock
logger = logging.getLogger(__name__)
CacheKey = TypeVar('CacheKey')
CacheReturn = TypeVar('CacheReturn')
CacheFunc = TypeVar('CacheFunc', bound=Callable[..., Any])
class Cache:
@classmethod
def decor(
cls,
obj_store: Callable[[str], ObjectStore[CacheKey, CacheReturn]],
hit_msg: bool = False, miss_msg: bool = False, suffix: str = '',
) -> Callable[[CacheFunc], CacheFunc]:
'''Decorator that creates a cached function
        >>> @Cache.decor(MemoryStore.create())
        ... def foo():
... pass
'''
def decor_(function: CacheFunc) -> CacheFunc:
return cast(
CacheFunc,
functools.wraps(function)(
cls(obj_store, function, hit_msg, miss_msg, suffix)
)
)
return decor_
disabled: bool
#pylint: disable=too-many-arguments
def __init__(
self,
obj_store: Callable[[str], ObjectStore[CacheKey, CacheReturn]],
function: CacheFunc,
hit_msg: bool = False, miss_msg: bool = False, suffix: str = ''
) -> None:
'''Cache a function.
Note this uses `function.__qualname__` to determine the file
name. If this is not unique within your program, define
suffix.
Note this uses `function.version` when defined, so objects of
the same functions of different versions will not collide.
'''
self.function = function
self.name = '-'.join(filter(bool, [
self.function.__qualname__,
suffix,
getattr(self.function, 'version', ''),
]))
self.obj_store = obj_store(self.name)
self.hit_msg = hit_msg
self.miss_msg = miss_msg
self.sem = RLock()
self.__qualname__ = f'Cache({self.name})'
self.disabled = False
def __call__(self, *pos_args: Any, **kwargs: Any) -> Any:
if self.disabled:
return self.function(*pos_args, **kwargs)
else:
with self.sem:
args_key = self.obj_store.args2key(pos_args, kwargs)
if args_key in self.obj_store:
if self.hit_msg:
logger.info('hit %s with %s, %s',
self.name, pos_args, kwargs)
res = self.obj_store[args_key]
else:
if self.miss_msg:
logger.info('miss %s with %s, %s',
self.name, pos_args, kwargs)
res = self.function(*pos_args, **kwargs)
self.obj_store[args_key] = res
return res
def clear(self) -> None:
'''Removes all cached items'''
self.obj_store.clear()
def __str__(self) -> str:
store_type = type(self.obj_store).__name__
return f'Cache of {self.name} with {store_type}'
ObjectStoreKey = TypeVar('ObjectStoreKey')
ObjectStoreValue = TypeVar('ObjectStoreValue')
class ObjectStore(UserDict[ObjectStoreKey, ObjectStoreValue], abc.ABC):
@classmethod
def create(
cls, *args: Any, **kwargs: Any
) -> Callable[[str], ObjectStore[ObjectStoreKey, ObjectStoreValue]]:
'''Curried init. Name will be applied later.'''
@functools.wraps(cls)
def create_(name: str) -> ObjectStore[ObjectStoreKey, ObjectStoreValue]:
return cls(*args, name=name, **kwargs) # type: ignore
return create_
def __init__(self, name: str) -> None:
super().__init__()
self.name = name
@abc.abstractmethod
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> ObjectStoreKey:
# pylint: disable=unused-argument,no-self-use
...
class MemoryStore(ObjectStore[Hashable, Any]):
def __init__(self, name: str):
# pylint: disable=non-parent-init-called
ObjectStore.__init__(self, name)
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Hashable:
# pylint: disable=no-self-use
return to_hashable((args, kwargs))
class FileStore(MemoryStore):
'''An obj_store that persists at ./${CACHE_PATH}/${FUNCTION_NAME}_cache.pickle'''
def __init__(
self, cache_path: PathLike, name: str, serializer: Optional[Serializer] = None,
):
# pylint: disable=non-parent-init-called,super-init-not-called
ObjectStore.__init__(self, name)
if serializer is None:
import pickle
self.serializer = cast(Serializer, pickle)
else:
self.serializer = serializer
self.cache_path = pathify(cache_path) / (self.name + '_cache.pickle')
self.loaded = False
self.data = {}
def load_if_not_loaded(self) -> None:
if not self.loaded:
self.loaded = True
if self.cache_path.exists():
with self.cache_path.open('rb') as fil:
self.data = self.serializer.load(fil)
else:
self.cache_path.parent.mkdir(parents=True, exist_ok=True)
self.data = {}
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Hashable:
# pylint: disable=no-self-use
return to_hashable((args, kwargs))
def commit(self) -> None:
self.load_if_not_loaded()
if self.data:
with self.cache_path.open('wb') as fil:
self.serializer.dump(self.data, fil)
else:
if self.cache_path.exists():
                logger.debug('deleting %s', self.cache_path)
self.cache_path.unlink()
def __setitem__(self, key: Hashable, obj: Any) -> None:
self.load_if_not_loaded()
super().__setitem__(key, obj)
self.commit()
def __delitem__(self, key: Hashable) -> None:
self.load_if_not_loaded()
super().__delitem__(key)
self.commit()
def clear(self) -> None:
self.load_if_not_loaded()
super().clear()
self.commit()
class DirectoryStore(ObjectStore[PathLike, Any]):
'''Stores objects at ./${CACHE_PATH}/${FUNCTION_NAME}/${urlencode(args)}.pickle'''
def __init__(
self, object_path: PathLike, name: str,
serializer: Optional[Serializer] = None
) -> None:
# pylint: disable=non-parent-init-called
ObjectStore.__init__(self, name)
if serializer is None:
import pickle
self.serializer = cast(Serializer, pickle)
else:
self.serializer = serializer
self.cache_path = pathify(object_path) / self.name
def args2key(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> PathLike:
if kwargs:
args = args + (kwargs,)
fname = urllib.parse.quote(f'{safe_str(args)}.pickle', safe='')
return self.cache_path / fname
def __setitem__(self, path: PathLike, obj: Any) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with path.open('wb') as fil:
self.serializer.dump(obj, fil)
def __delitem__(self, path: PathLike) -> None:
path.unlink()
def __getitem__(self, path: PathLike) -> Any:
with path.open('rb') as fil:
return self.serializer.load(fil)
def __contains__(self, path: Any) -> bool:
if hasattr(path, 'exists'):
return bool(path.exists())
else:
return False
def clear(self) -> None:
        logger.debug('deleting %s', self.cache_path)
if hasattr(self.cache_path, 'rmtree'):
cast(Any, self.cache_path).rmtree()
else:
shutil.rmtree(str(self.cache_path))
def to_hashable(obj: Any) -> Hashable:
'''Converts args and kwargs into a hashable type (overridable)'''
try:
hash(obj)
except TypeError:
if hasattr(obj, 'items'):
# turn dictionaries into frozenset((key, val))
# sorting is necessary to make equal dictionaries map to equal things
# sorted(..., key=hash)
return tuple(sorted(
[(keyf, to_hashable(val)) for keyf, val in obj.items()],
key=hash
))
elif hasattr(obj, '__iter__'):
# turn iterables into tuples
return tuple(to_hashable(val) for val in obj)
else:
raise TypeError(f"I don't know how to hash {obj} ({type(obj)})")
else:
return cast(Hashable, obj)
def safe_str(obj: Any) -> str:
'''
Safe names are compact, unique, urlsafe, and equal when the objects are equal
str does not work because x == y does not imply str(x) == str(y).
>>> a = dict(d=1, e=1)
>>> b = dict(e=1, d=1)
>>> a == b
True
>>> str(a) == str(b)
False
>>> safe_str(a) == safe_str(b)
True
'''
if isinstance(obj, int):
ret = str(obj)
elif isinstance(obj, float):
ret = str(round(obj, 3))
elif isinstance(obj, str):
ret = repr(obj)
elif isinstance(obj, list):
ret = '[' + ','.join(map(safe_str, obj)) + ']'
elif isinstance(obj, tuple):
ret = '(' + ','.join(map(safe_str, obj)) + ')'
elif isinstance(obj, dict):
ret = '{' + ','.join(sorted(
safe_str(key) + ':' + safe_str(val)
for key, val in obj.items()
)) + '}'
else:
        raise TypeError(f"I don't know how to safely stringify {obj} ({type(obj)})")
return urllib.parse.quote(ret, safe='')
def pathify(obj: Union[str, PathLike]) -> PathLike:
if isinstance(obj, str):
return Path(obj)
else:
return obj
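# A minimal usage sketch (not part of the module above). It assumes this
# module is importable as-is; the function names and the /tmp cache directory
# below are made-up examples.
if __name__ == '__main__':
    @Cache.decor(MemoryStore.create())
    def slow_square(x: int) -> int:
        print('computing', x)
        return x * x
    assert slow_square(3) == 9  # miss: computed and stored in the MemoryStore
    assert slow_square(3) == 9  # hit: served from the MemoryStore
    @Cache.decor(FileStore.create('/tmp/example_cache'))
    def slow_cube(x: int) -> int:
        return x ** 3
    # Persists to /tmp/example_cache/slow_cube_cache.pickle between runs.
    assert slow_cube(2) == 8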
|
|
import sys, cv2, bisect
import numpy as np
import skimage
from scipy import stats
from skimage import exposure, filters, morphology, util
from datetime import datetime
#===============================================================================
class Graph():
def __init__(self, img=None):
self.seed = None
self.node = {}
self.img = img
self.visited = set()
def set_img(self, img):
self.img = img
def set_arch(self, node_A, node_B):
if node_A.has_out_arch():
node = node_A.get_out_arch()
node.remove_in_arch(node_A)
node_A.set_out_arch(node_B)
node_B.set_in_arch(node_A)
self.node[node_A.pixel] = node_A
self.node[node_B.pixel] = node_B
#self._mark_node(node_B.pixel)
def has_arch(self, pixel_A, pixel_B):
node_B = self.node[pixel_B]
if self.node[pixel_A].has_out_arch(node_B):
return True
return False
def add_node(self, node):
self.node[node.pixel] = node
def get_node(self, pixel):
return self.node[pixel]
def has_node(self, pixel):
if pixel in self.node:
return True
return False
def add_visited(self, pixel):
self.visited.add(pixel)
def is_visited(self, pixel):
if pixel in self.visited:
return True
return False
def has_8_neighbor(self, pixel):
adj_list = [(-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1)]
for i in adj_list:
p = (pixel[0] + i[0], pixel[1] + i[1])
if p in self.node:
return self.node[p]
return False
def has_4_neighbor(self, pixel):
adj_list = [(-1,0), (0,1), (1,0), (0,-1)]
for i in adj_list:
p = (pixel[0] + i[0], pixel[1] + i[1])
if p in self.node:
return self.node[p]
return False
def _mark_node(self, pixel):
self.img[pixel] = (0,210,0)
#===============================================================================
class Node():
def __init__(self, pixel, cost=sys.maxsize):
self.in_arch = set()
self.out_arch = None
self.pixel = pixel
self.cost = cost
def has_in_arch(self):
if self.in_arch:
return True
return False
def has_out_arch(self):
if self.out_arch:
return True
return False
def get_in_arch(self):
if len(self.in_arch) > 0:
return list(self.in_arch)[0]
def get_out_arch(self):
return self.out_arch
def get_in_degree(self):
return len(self.in_arch)
def set_in_arch(self, node):
self.in_arch.add(node)
def set_out_arch(self, node):
self.out_arch = node
def remove_in_arch(self, node):
self.in_arch.discard(node)
def y(self):
return self.pixel[0]
def x(self):
return self.pixel[1]
def __lt__(self, other):
        return self.cost < other.cost
#===============================================================================
class PriorityQueue:
def __init__(self):
self.queue = []
self.nodes = {}
def put(self, item, priority):
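        # If the item is already queued, locate its current [priority, item]
        # entry by binary search and remove it before re-inserting it with the
        # new priority (a simple decrease-key); self.nodes maps each queued
        # item to its current priority.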
if item in self.nodes:
pos = bisect.bisect_right(self.queue, [self.nodes[item], item])
del self.queue[pos-1]
bisect.insort_right(self.queue, [priority, item])
self.nodes[item] = priority
def pop(self):
if self.queue:
item = self.queue.pop(0)[1]
#print("queue:", len(self.queue))
#print("nodes:", len(self.nodes))
if item in self.nodes:
del self.nodes[item]
return item
raise KeyError('pop from an empty priority queue')
#===============================================================================
MAX_INT = sys.maxsize
#Main program
#~~~~~~~~~~~~
def main():
seeds = [(99,148), (165,139), (205,129), (246,123), (315,111), (379,103), (450,90)]
sinks = [(88,483), (160,466), (228,441), (291,427), (352,408), (403,390), (462,372)]
color = [(0,128,255),(0,255,128),(0,255,255),(0,0,255),(128,0,255),(255,255,0),(255,51,153)]
img = open_img(sys.argv)
cv2.imshow("teste", img)
cv2.waitKey(0)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
p2, p98 = np.percentile(img, (4, 98))
norm = skimage.exposure.rescale_intensity(gray, in_range=(p2, p98))
# sobel = filters.sobel(norm)
# sobel = skimage.img_as_ubyte(sobel)
# cv2.imshow("teste", sobel)
# cv2.waitKey(0)
#
# ret, thresh = cv2.threshold(sobel, 0, 255, cv2.THRESH_OTSU)
# cv2.imshow("teste", thresh)
# cv2.waitKey(0)
# paths = morphology.binary_closing(thresh, morphology.disk(3))
# paths = skimage.img_as_ubyte(paths)
# paths = 255-paths
paths = norm
cv2.imshow("teste", paths)
cv2.waitKey(0)
#paths += gray
#
# cv2.imshow("teste", paths)
# cv2.waitKey(0)
# paths = gray & paths
# cv2.imshow("teste", paths)
# cv2.waitKey(0)
#sys.exit()
neighborhood = get_neighborhood(4)
#for i in range(len(seeds)):
i = 6
G = Graph()
H = Graph()
Q = PriorityQueue()
pos = seeds[i]
G.set_img(img)
n = Node(pos, 0)
G.add_node(n)
Q.put(n, 0)
sink_count = len(sinks)
counter = 0
while sink_count != 0:
counter += 1
lowest = Q.pop()
G.add_visited(lowest.pixel)
ift(paths, lowest, neighborhood, G, Q)
#ift(paths, lowest, neighborhood)
if lowest.pixel in sinks:
sink_count -= 1
#if counter % 200 == 0:
#cv2.imwrite("img_anim_" + str(counter) + ".jpg", img)
build_path_graph(img, sinks, G, H)
find_correct_path(img, seeds[i], sinks, color[i], H)
print("round!")
cv2.imshow("teste", img)
k = cv2.waitKey(0)
if k & 0xFF == ord('q'):
sys.exit()
cv2.destroyAllWindows()
# Get a window centered on pixel p of size (2*k + 1)^2
#-----------------------------------------------------
def get_window(img, p, k):
lin = p[0]
col = p[1]
return img[lin-k:lin+k+1, col-k:col+k+1]
# Get local adjacency
#---------------------
def get_neighborhood(neighborhood):
if neighborhood == 4:
return [(-1,0), (0,1), (1,0), (0,-1)]
if neighborhood == 8:
return [(-1,-1), (-1,0), (-1,1), (0,1), (1,1), (1,0), (1,-1), (0,-1)]
#Image-Forest Transform
#---------------------------------
def ift(img, node, neighborhood, G, Q):
for i in neighborhood:
pixel = (node.y() + i[0], node.x() + i[1])
if not is_valid_pixel(img, pixel):
continue
if not G.is_visited(pixel):
n = Node(pixel) if not G.has_node(pixel) else G.get_node(pixel)
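            # Arc weight: the mean gray level of the two end pixels raised to
            # the 5th power, so paths through dark pixels accumulate a much
            # lower cost than paths through bright ones.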
cost = node.cost + ((int(img[node.pixel]) + int(img[pixel]))/2)**5
#cost = abs(img[pixel] - img[node.pixel]) + img[pixel]
#print("node:", node.pixel, "V:", n.pixel, "custo:", cost, "custo_V:", n.cost)
if cost < n.cost:
n.cost = cost
G.set_arch(n, node)
Q.put(n, cost)
#print("=====================")
#-------------------------
def is_valid_pixel(img, pixel):
y = pixel[0]
x = pixel[1]
if y >= 0 and y < img.shape[0]:
if x >= 0 and x < img.shape[1]:
return True
return False
#Shows a path backwards, stating from sink
#-----------------------------------------
def view_path(img, sink, G):
x_points = []
y_points = []
node = G.get_node(sink)
while (node.has_out_arch()):
x_points.append(node.x())
y_points.append(node.y())
img[node.pixel] = (0,255,255)
node = node.get_out_arch()
return x_points, y_points
#---------------------------------------
def build_path_graph(img, sinks, G, H):
#counter = 0
for s in sinks:
node = G.get_node(s)
while (node.has_out_arch()):
p1 = node.pixel
p2 = node.get_out_arch().pixel
img[p1] = (0,255,215)
n1 = Node(p1) if not H.has_node(p1) else H.get_node(p1)
n2 = Node(p2) if not H.has_node(p2) else H.get_node(p2)
H.set_arch(n1, n2)
node = node.get_out_arch()
#counter += 1
#if counter % 25 == 0:
# cv2.imshow("teste", img)
# cv2.waitKey(0)
#cv2.imwrite("img_anim_" + str(counter) + ".jpg", img)
#--------------------------------------
def find_correct_path(img, seed, sinks, color, H):
node = H.get_node(seed)
counter = 0
while (node.pixel not in sinks):
best_node = node.get_in_arch()
if node.get_in_degree() > 1:
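            # Junction: more than one branch feeds into this node. Keep the
            # branch whose direction is most nearly parallel (or anti-parallel)
            # to the outgoing path, i.e. the one with the largest |dot product|.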
cv2.circle(img, (node.x(), node.y()), 5, (0,255,0))
best_cost = 0
for n in node.in_arch:
vec1, vec2 = get_vectors(node, n, 80)
cost = np.dot(vec1, vec2)
#print("cost_found:", cost)
if abs(cost) > best_cost:
best_cost = abs(cost)
best_node = n
img[node.pixel] = color
counter += 1
if counter % 10 == 0:
# cv2.imshow("teste", img)
# cv2.waitKey(0)
cv2.imwrite("img_anim_" + str(counter) + ".jpg", img)
node = best_node
#-------------------
def get_pixel_list(node, length, direction):
l = 0
x_list = []
y_list = []
while l < length:
x_list.append(node.x())
y_list.append(node.y())
node = node.get_in_arch() if direction == "in" else node.get_out_arch()
l += 1
return x_list, y_list
#-----------------
def get_vectors(node, pathway, length):
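    # Returns two unit vectors used by find_correct_path() for its straightness
    # test at a junction: vec1 points along the already-traced outgoing path
    # from `node`, vec2 along the candidate incoming branch `pathway`; the
    # caller compares them with a dot product.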
    # hack: step a few nodes further down the outgoing path before sampling
    # its direction
for i in range(5):
node = node.get_out_arch()
if not node:
break
l = 0
last_pixel = node.pixel
while l < length/2:
first_pixel = node.pixel
node = node.get_out_arch()
if not node:
break
l += 1
vec1 = get_unit_vector(last_pixel, first_pixel)
l = 0
while l < length/2:
last_pixel = pathway.pixel
pathway = pathway.get_in_arch()
if not pathway:
break
l += 1
vec2 = get_unit_vector(last_pixel, first_pixel)
return vec1, vec2
#--------------------
def get_unit_vector(last_pixel, first_pixel):
vec = [last_pixel[1] - first_pixel[1], last_pixel[0] - first_pixel[0]]
norm = np.sqrt(vec[0]**2 + vec[1]**2)
return [vec[0]/norm, vec[1]/norm]
#----------------
def get_vector(node, length, direction):
l = 0
first_pixel = node.pixel
last_pixel = None
while l < length:
last_pixel = node.pixel
node = node.get_in_arch() if direction == "in" else node.get_out_arch()
l += 1
vector = [last_pixel[1]-first_pixel[1], last_pixel[0]-first_pixel[0]]
norm = np.sqrt(vector[0]**2 + vector[1]**2)
return [vector[0]/norm, vector[1]/norm]
# n = H.get_node(node.pixel)
# print(n.get_degree())
# if n.get_degree() > 1:
# cv2.circle(img, (n.pixel[1], n.pixel[0]), 5, (0,255,0))
#--------------------------
def line(x, a, b):
return a * x + b
# Open an image
#--------------
def open_img(argv):
if len(argv) > 2:
print("Usage: <this_program> <your_image>")
sys.exit()
elif len(argv) == 1:
img_name = input("Type in the name of the image: ")
return cv2.imread(img_name)
else:
return cv2.imread(argv[1])
#===============================================================================
if __name__ == "__main__":
main()
|
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test some utilities for working with JSON and PyMongo."""
import datetime
import json
import re
import sys
import uuid
from typing import Any, List, MutableMapping
sys.path[0:0] = [""]
from test import IntegrationTest, unittest
from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, json_util
from bson.binary import (
ALL_UUID_REPRESENTATIONS,
MD5_SUBTYPE,
STANDARD,
USER_DEFINED_SUBTYPE,
Binary,
UuidRepresentation,
)
from bson.code import Code
from bson.dbref import DBRef
from bson.int64 import Int64
from bson.json_util import (
LEGACY_JSON_OPTIONS,
DatetimeRepresentation,
JSONMode,
JSONOptions,
)
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.objectid import ObjectId
from bson.regex import Regex
from bson.timestamp import Timestamp
from bson.tz_util import FixedOffset, utc
STRICT_JSON_OPTIONS = JSONOptions(
strict_number_long=True,
datetime_representation=DatetimeRepresentation.ISO8601,
strict_uuid=True,
json_mode=JSONMode.LEGACY,
)
class TestJsonUtil(unittest.TestCase):
def round_tripped(self, doc, **kwargs):
return json_util.loads(json_util.dumps(doc, **kwargs), **kwargs)
def round_trip(self, doc, **kwargs):
self.assertEqual(doc, self.round_tripped(doc, **kwargs))
def test_basic(self):
self.round_trip({"hello": "world"})
def test_json_options_with_options(self):
opts = JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY
)
self.assertEqual(opts.datetime_representation, DatetimeRepresentation.NUMBERLONG)
opts2 = opts.with_options(
datetime_representation=DatetimeRepresentation.ISO8601, json_mode=JSONMode.LEGACY
)
self.assertEqual(opts2.datetime_representation, DatetimeRepresentation.ISO8601)
opts = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY)
self.assertEqual(opts.strict_number_long, True)
opts2 = opts.with_options(strict_number_long=False)
self.assertEqual(opts2.strict_number_long, False)
opts = json_util.CANONICAL_JSON_OPTIONS
self.assertNotEqual(opts.uuid_representation, UuidRepresentation.JAVA_LEGACY)
opts2 = opts.with_options(uuid_representation=UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts2.uuid_representation, UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts2.document_class, dict)
opts3 = opts2.with_options(document_class=SON)
self.assertEqual(opts3.uuid_representation, UuidRepresentation.JAVA_LEGACY)
self.assertEqual(opts3.document_class, SON)
def test_objectid(self):
self.round_trip({"id": ObjectId()})
def test_dbref(self):
self.round_trip({"ref": DBRef("foo", 5)})
self.round_trip({"ref": DBRef("foo", 5, "db")})
self.round_trip({"ref": DBRef("foo", ObjectId())})
# Check order.
self.assertEqual(
'{"$ref": "collection", "$id": 1, "$db": "db"}',
json_util.dumps(DBRef("collection", 1, "db")),
)
def test_datetime(self):
tz_aware_opts = json_util.DEFAULT_JSON_OPTIONS.with_options(tz_aware=True)
# only millis, not micros
self.round_trip(
{"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000, utc)},
json_options=tz_aware_opts,
)
self.round_trip({"date": datetime.datetime(2009, 12, 9, 15, 49, 45, 191000)})
for jsn in [
'{"dt": { "$date" : "1970-01-01T00:00:00.000+0000"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00.000000+0000"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00.000+00:00"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00.000000+00:00"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00.000000+00"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00Z"}}',
'{"dt": {"$date": "1970-01-01T00:00:00.000"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00"}}',
'{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}',
'{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}',
'{"dt": { "$date" : "1969-12-31T16:00:00.000000-0800"}}',
'{"dt": { "$date" : "1969-12-31T16:00:00.000-08:00"}}',
'{"dt": { "$date" : "1969-12-31T16:00:00.000000-08:00"}}',
'{"dt": { "$date" : "1969-12-31T16:00:00.000000-08"}}',
'{"dt": { "$date" : "1970-01-01T01:00:00.000+0100"}}',
'{"dt": { "$date" : "1970-01-01T01:00:00.000000+0100"}}',
'{"dt": { "$date" : "1970-01-01T01:00:00.000+01:00"}}',
'{"dt": { "$date" : "1970-01-01T01:00:00.000000+01:00"}}',
'{"dt": { "$date" : "1970-01-01T01:00:00.000000+01"}}',
]:
self.assertEqual(EPOCH_AWARE, json_util.loads(jsn, json_options=tz_aware_opts)["dt"])
self.assertEqual(EPOCH_NAIVE, json_util.loads(jsn)["dt"])
dtm = datetime.datetime(1, 1, 1, 1, 1, 1, 0, utc)
jsn = '{"dt": {"$date": -62135593139000}}'
self.assertEqual(dtm, json_util.loads(jsn, json_options=tz_aware_opts)["dt"])
jsn = '{"dt": {"$date": {"$numberLong": "-62135593139000"}}}'
self.assertEqual(dtm, json_util.loads(jsn, json_options=tz_aware_opts)["dt"])
# Test dumps format
pre_epoch = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000, utc)}
post_epoch = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc)}
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}', json_util.dumps(pre_epoch)
)
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}', json_util.dumps(post_epoch)
)
self.assertEqual(
'{"dt": {"$date": -62135593138990}}',
json_util.dumps(pre_epoch, json_options=LEGACY_JSON_OPTIONS),
)
self.assertEqual(
'{"dt": {"$date": 63075661010}}',
json_util.dumps(post_epoch, json_options=LEGACY_JSON_OPTIONS),
)
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=STRICT_JSON_OPTIONS),
)
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch, json_options=STRICT_JSON_OPTIONS),
)
number_long_options = JSONOptions(
datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY
)
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "63075661010"}}}',
json_util.dumps(post_epoch, json_options=number_long_options),
)
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch, json_options=number_long_options),
)
# ISO8601 mode assumes naive datetimes are UTC
pre_epoch_naive = {"dt": datetime.datetime(1, 1, 1, 1, 1, 1, 10000)}
post_epoch_naive = {"dt": datetime.datetime(1972, 1, 1, 1, 1, 1, 10000)}
self.assertEqual(
'{"dt": {"$date": {"$numberLong": "-62135593138990"}}}',
json_util.dumps(pre_epoch_naive, json_options=STRICT_JSON_OPTIONS),
)
self.assertEqual(
'{"dt": {"$date": "1972-01-01T01:01:01.010Z"}}',
json_util.dumps(post_epoch_naive, json_options=STRICT_JSON_OPTIONS),
)
# Test tz_aware and tzinfo options
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}', json_options=tz_aware_opts
)["dt"],
)
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000, utc),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=JSONOptions(tz_aware=True, tzinfo=utc),
)["dt"],
)
self.assertEqual(
datetime.datetime(1972, 1, 1, 1, 1, 1, 10000),
json_util.loads(
'{"dt": {"$date": "1972-01-01T01:01:01.010+0000"}}',
json_options=JSONOptions(tz_aware=False),
)["dt"],
)
self.round_trip(pre_epoch_naive, json_options=JSONOptions(tz_aware=False))
# Test a non-utc timezone
pacific = FixedOffset(-8 * 60, "US/Pacific")
aware_datetime = {"dt": datetime.datetime(2002, 10, 27, 6, 0, 0, 10000, pacific)}
self.assertEqual(
'{"dt": {"$date": "2002-10-27T06:00:00.010-0800"}}',
json_util.dumps(aware_datetime, json_options=STRICT_JSON_OPTIONS),
)
self.round_trip(
aware_datetime,
json_options=JSONOptions(json_mode=JSONMode.LEGACY, tz_aware=True, tzinfo=pacific),
)
self.round_trip(
aware_datetime,
json_options=JSONOptions(
datetime_representation=DatetimeRepresentation.ISO8601,
json_mode=JSONMode.LEGACY,
tz_aware=True,
tzinfo=pacific,
),
)
def test_regex_object_hook(self):
# Extended JSON format regular expression.
pat = "a*b"
json_re = '{"$regex": "%s", "$options": "u"}' % pat
loaded = json_util.object_hook(json.loads(json_re))
self.assertTrue(isinstance(loaded, Regex))
self.assertEqual(pat, loaded.pattern)
self.assertEqual(re.U, loaded.flags)
def test_regex(self):
for regex_instance in (re.compile("a*b", re.IGNORECASE), Regex("a*b", re.IGNORECASE)):
res = self.round_tripped({"r": regex_instance})["r"]
self.assertEqual("a*b", res.pattern)
res = self.round_tripped({"r": Regex("a*b", re.IGNORECASE)})["r"]
self.assertEqual("a*b", res.pattern)
self.assertEqual(re.IGNORECASE, res.flags)
unicode_options = re.I | re.M | re.S | re.U | re.X
regex = re.compile("a*b", unicode_options)
res = self.round_tripped({"r": regex})["r"]
self.assertEqual(unicode_options, res.flags)
# Some tools may not add $options if no flags are set.
res = json_util.loads('{"r": {"$regex": "a*b"}}')["r"]
self.assertEqual(0, res.flags)
self.assertEqual(
Regex(".*", "ilm"), json_util.loads('{"r": {"$regex": ".*", "$options": "ilm"}}')["r"]
)
# Check order.
self.assertEqual(
'{"$regularExpression": {"pattern": ".*", "options": "mx"}}',
json_util.dumps(Regex(".*", re.M | re.X)),
)
self.assertEqual(
'{"$regularExpression": {"pattern": ".*", "options": "mx"}}',
json_util.dumps(re.compile(b".*", re.M | re.X)),
)
self.assertEqual(
'{"$regex": ".*", "$options": "mx"}',
json_util.dumps(Regex(".*", re.M | re.X), json_options=LEGACY_JSON_OPTIONS),
)
def test_regex_validation(self):
non_str_types = [10, {}, []]
docs = [{"$regex": i} for i in non_str_types]
for doc in docs:
self.assertEqual(doc, json_util.loads(json.dumps(doc)))
doc = {"$regex": ""}
self.assertIsInstance(json_util.loads(json.dumps(doc)), Regex)
def test_minkey(self):
self.round_trip({"m": MinKey()})
def test_maxkey(self):
self.round_trip({"m": MaxKey()})
def test_timestamp(self):
dct = {"ts": Timestamp(4, 13)}
res = json_util.dumps(dct, default=json_util.default)
rtdct = json_util.loads(res)
self.assertEqual(dct, rtdct)
self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res)
def test_uuid_default(self):
# Cannot directly encode native UUIDs with the default
# uuid_representation.
doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")}
with self.assertRaisesRegex(ValueError, "cannot encode native uuid"):
json_util.dumps(doc)
legacy_jsn = '{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}'
expected = {"uuid": Binary(b"\xf4z\xc1\x0bX\xccCr\xa5g\x0e\x02\xb2\xc3\xd4y", 4)}
self.assertEqual(json_util.loads(legacy_jsn), expected)
def test_uuid(self):
doc = {"uuid": uuid.UUID("f47ac10b-58cc-4372-a567-0e02b2c3d479")}
uuid_legacy_opts = LEGACY_JSON_OPTIONS.with_options(
uuid_representation=UuidRepresentation.PYTHON_LEGACY
)
self.round_trip(doc, json_options=uuid_legacy_opts)
self.assertEqual(
'{"uuid": {"$uuid": "f47ac10b58cc4372a5670e02b2c3d479"}}',
json_util.dumps(doc, json_options=LEGACY_JSON_OPTIONS),
)
self.assertEqual(
'{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}',
json_util.dumps(
doc,
json_options=STRICT_JSON_OPTIONS.with_options(
uuid_representation=UuidRepresentation.PYTHON_LEGACY
),
),
)
self.assertEqual(
'{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_util.dumps(
doc,
json_options=JSONOptions(
strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=STANDARD
),
),
)
self.assertEqual(
doc,
json_util.loads(
'{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "03"}}',
json_options=uuid_legacy_opts,
),
)
for uuid_representation in set(ALL_UUID_REPRESENTATIONS) - {UuidRepresentation.UNSPECIFIED}:
options = JSONOptions(
strict_uuid=True, json_mode=JSONMode.LEGACY, uuid_representation=uuid_representation
)
self.round_trip(doc, json_options=options)
# Ignore UUID representation when decoding BSON binary subtype 4.
self.assertEqual(
doc,
json_util.loads(
'{"uuid": {"$binary": "9HrBC1jMQ3KlZw4CssPUeQ==", "$type": "04"}}',
json_options=options,
),
)
def test_uuid_uuid_rep_unspecified(self):
_uuid = uuid.uuid4()
options = JSONOptions(
strict_uuid=True,
json_mode=JSONMode.LEGACY,
uuid_representation=UuidRepresentation.UNSPECIFIED,
)
# Cannot directly encode native UUIDs with UNSPECIFIED.
doc = {"uuid": _uuid}
with self.assertRaises(ValueError):
json_util.dumps(doc, json_options=options)
# All UUID subtypes are decoded as Binary with UNSPECIFIED.
# subtype 3
doc = {"uuid": Binary(_uuid.bytes, subtype=3)}
ext_json_str = json_util.dumps(doc)
self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options))
# subtype 4
doc = {"uuid": Binary(_uuid.bytes, subtype=4)}
ext_json_str = json_util.dumps(doc)
self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options))
# $uuid-encoded fields
doc = {"uuid": Binary(_uuid.bytes, subtype=4)}
ext_json_str = json_util.dumps({"uuid": _uuid}, json_options=LEGACY_JSON_OPTIONS)
self.assertEqual(doc, json_util.loads(ext_json_str, json_options=options))
def test_binary(self):
bin_type_dict = {"bin": b"\x00\x01\x02\x03\x04"}
md5_type_dict = {
"md5": Binary(b" n7\x18\xaf\t/\xd1\xd1/\x80\xca\xe7q\xcc\xac", MD5_SUBTYPE)
}
custom_type_dict = {"custom": Binary(b"hello", USER_DEFINED_SUBTYPE)}
self.round_trip(bin_type_dict)
self.round_trip(md5_type_dict)
self.round_trip(custom_type_dict)
# Binary with subtype 0 is decoded into bytes in Python 3.
bin = json_util.loads('{"bin": {"$binary": "AAECAwQ=", "$type": "00"}}')["bin"]
self.assertEqual(type(bin), bytes)
# PYTHON-443 ensure old type formats are supported
json_bin_dump = json_util.dumps(bin_type_dict, json_options=LEGACY_JSON_OPTIONS)
self.assertIn('"$type": "00"', json_bin_dump)
self.assertEqual(
bin_type_dict, json_util.loads('{"bin": {"$type": 0, "$binary": "AAECAwQ="}}')
)
json_bin_dump = json_util.dumps(md5_type_dict, json_options=LEGACY_JSON_OPTIONS)
# Check order.
self.assertEqual(
'{"md5": {"$binary": "IG43GK8JL9HRL4DK53HMrA==", "$type": "05"}}', json_bin_dump
)
self.assertEqual(
md5_type_dict,
json_util.loads('{"md5": {"$type": 5, "$binary": "IG43GK8JL9HRL4DK53HMrA=="}}'),
)
json_bin_dump = json_util.dumps(custom_type_dict, json_options=LEGACY_JSON_OPTIONS)
self.assertIn('"$type": "80"', json_bin_dump)
self.assertEqual(
custom_type_dict,
json_util.loads('{"custom": {"$type": 128, "$binary": "aGVsbG8="}}'),
)
# Handle mongoexport where subtype >= 128
self.assertEqual(
128,
json_util.loads('{"custom": {"$type": "ffffff80", "$binary": "aGVsbG8="}}')[
"custom"
].subtype,
)
self.assertEqual(
255,
json_util.loads('{"custom": {"$type": "ffffffff", "$binary": "aGVsbG8="}}')[
"custom"
].subtype,
)
def test_code(self):
self.round_trip({"code": Code("function x() { return 1; }")})
code = Code("return z", z=2)
res = json_util.dumps(code)
self.assertEqual(code, json_util.loads(res))
# Check order.
self.assertEqual('{"$code": "return z", "$scope": {"z": 2}}', res)
no_scope = Code("function() {}")
self.assertEqual('{"$code": "function() {}"}', json_util.dumps(no_scope))
def test_undefined(self):
jsn = '{"name": {"$undefined": true}}'
self.assertIsNone(json_util.loads(jsn)["name"])
def test_numberlong(self):
jsn = '{"weight": {"$numberLong": "65535"}}'
self.assertEqual(json_util.loads(jsn)["weight"], Int64(65535))
self.assertEqual(json_util.dumps({"weight": Int64(65535)}), '{"weight": 65535}')
json_options = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY)
self.assertEqual(json_util.dumps({"weight": Int64(65535)}, json_options=json_options), jsn)
def test_loads_document_class(self):
# document_class dict should always work
self.assertEqual(
{"foo": "bar"},
json_util.loads('{"foo": "bar"}', json_options=JSONOptions(document_class=dict)),
)
self.assertEqual(
SON([("foo", "bar"), ("b", 1)]),
json_util.loads('{"foo": "bar", "b": 1}', json_options=JSONOptions(document_class=SON)),
)
class TestJsonUtilRoundtrip(IntegrationTest):
def test_cursor(self):
db = self.db
db.drop_collection("test")
docs: List[MutableMapping[str, Any]] = [
{"foo": [1, 2]},
{"bar": {"hello": "world"}},
{"code": Code("function x() { return 1; }")},
{"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)},
{"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}},
]
db.test.insert_many(docs)
reloaded_docs = json_util.loads(json_util.dumps(db.test.find()))
for doc in docs:
self.assertTrue(doc in reloaded_docs)
if __name__ == "__main__":
unittest.main()
|
|
"""Zookeeper Partitioner Implementation
:Maintainer: None
:Status: Unknown
:class:`SetPartitioner` implements a partitioning scheme using
Zookeeper for dividing up resources amongst members of a party.
This is useful when there is a set of resources that should only be
accessed by a single process at a time that multiple processes
across a cluster might want to divide up.
Example Use-Case
----------------
- Multiple workers across a cluster need to divide up a list of queues
so that no two workers own the same queue.
"""
import logging
import os
import socket
from functools import partial
from kazoo.exceptions import KazooException
from kazoo.protocol.states import KazooState
from kazoo.recipe.watchers import PatientChildrenWatch
log = logging.getLogger(__name__)
class PartitionState(object):
"""High level partition state values
.. attribute:: ALLOCATING
The set needs to be partitioned, and may require an existing
partition set to be released before acquiring a new partition
of the set.
.. attribute:: ACQUIRED
The set has been partitioned and acquired.
.. attribute:: RELEASE
The set needs to be repartitioned, and the current partitions
must be released before a new allocation can be made.
.. attribute:: FAILURE
The set partition has failed. This occurs when the maximum
time to partition the set is exceeded or the Zookeeper session
is lost. The partitioner is unusable after this state and must
be recreated.
"""
ALLOCATING = "ALLOCATING"
ACQUIRED = "ACQUIRED"
RELEASE = "RELEASE"
FAILURE = "FAILURE"
class SetPartitioner(object):
"""Partitions a set amongst members of a party
This class will partition a set amongst members of a party such
that each member will be given zero or more items of the set and
each set item will be given to a single member. When new members
enter or leave the party, the set will be re-partitioned amongst
the members.
When the :class:`SetPartitioner` enters the
:attr:`~PartitionState.FAILURE` state, it is unrecoverable
and a new :class:`SetPartitioner` should be created.
Example:
.. code-block:: python
from kazoo.client import KazooClient
client = KazooClient()
qp = client.SetPartitioner(
path='/work_queues', set=('queue-1', 'queue-2', 'queue-3'))
while 1:
if qp.failed:
raise Exception("Lost or unable to acquire partition")
elif qp.release:
qp.release_set()
elif qp.acquired:
for partition in qp:
                pass  # Do something with each partition
elif qp.allocating:
qp.wait_for_acquire()
**State Transitions**
When created, the :class:`SetPartitioner` enters the
:attr:`PartitionState.ALLOCATING` state.
:attr:`~PartitionState.ALLOCATING` ->
:attr:`~PartitionState.ACQUIRED`
Set was partitioned successfully, the partition list assigned
is accessible via list/iter methods or calling list() on the
:class:`SetPartitioner` instance.
:attr:`~PartitionState.ALLOCATING` ->
:attr:`~PartitionState.FAILURE`
Allocating the set failed either due to a Zookeeper session
expiration, or failure to acquire the items of the set within
the timeout period.
:attr:`~PartitionState.ACQUIRED` ->
:attr:`~PartitionState.RELEASE`
The members of the party have changed, and the set needs to be
repartitioned. :meth:`SetPartitioner.release` should be called
as soon as possible.
:attr:`~PartitionState.ACQUIRED` ->
:attr:`~PartitionState.FAILURE`
The current partition was lost due to a Zookeeper session
expiration.
:attr:`~PartitionState.RELEASE` ->
:attr:`~PartitionState.ALLOCATING`
The current partition was released and is being re-allocated.
"""
def __init__(self, client, path, set, partition_func=None,
identifier=None, time_boundary=30):
"""Create a :class:`~SetPartitioner` instance
:param client: A :class:`~kazoo.client.KazooClient` instance.
:param path: The partition path to use.
:param set: The set of items to partition.
:param partition_func: A function to use to decide how to
partition the set.
:param identifier: An identifier to use for this member of the
party when participating. Defaults to the
hostname + process id.
:param time_boundary: How long the party members must be stable
before allocation can complete.
"""
self.state = PartitionState.ALLOCATING
self._client = client
self._path = path
self._set = set
self._partition_set = []
self._partition_func = partition_func or self._partitioner
self._identifier = identifier or '%s-%s' % (
socket.getfqdn(), os.getpid())
self._locks = []
self._lock_path = '/'.join([path, 'locks'])
self._party_path = '/'.join([path, 'party'])
self._time_boundary = time_boundary
self._acquire_event = client.handler.event_object()
# Create basic path nodes
client.ensure_path(path)
client.ensure_path(self._lock_path)
client.ensure_path(self._party_path)
# Join the party
self._party = client.ShallowParty(self._party_path,
identifier=self._identifier)
self._party.join()
self._was_allocated = False
self._state_change = client.handler.rlock_object()
client.add_listener(self._establish_sessionwatch)
# Now watch the party and set the callback on the async result
# so we know when we're ready
self._children_updated = False
        self._child_watching(self._allocate_transition, asynchronous=True)
def __iter__(self):
"""Return the partitions in this partition set"""
for partition in self._partition_set:
yield partition
@property
def failed(self):
"""Corresponds to the :attr:`PartitionState.FAILURE` state"""
return self.state == PartitionState.FAILURE
@property
def release(self):
"""Corresponds to the :attr:`PartitionState.RELEASE` state"""
return self.state == PartitionState.RELEASE
@property
def allocating(self):
"""Corresponds to the :attr:`PartitionState.ALLOCATING`
state"""
return self.state == PartitionState.ALLOCATING
@property
def acquired(self):
"""Corresponds to the :attr:`PartitionState.ACQUIRED` state"""
return self.state == PartitionState.ACQUIRED
def wait_for_acquire(self, timeout=30):
"""Wait for the set to be partitioned and acquired
:param timeout: How long to wait before returning.
:type timeout: int
"""
self._acquire_event.wait(timeout)
def release_set(self):
"""Call to release the set
This method begins the step of allocating once the set has
been released.
"""
self._release_locks()
if self._locks: # pragma: nocover
# This shouldn't happen, it means we couldn't release our
# locks, abort
self._fail_out()
return
else:
with self._state_change:
if self.failed:
return
self.state = PartitionState.ALLOCATING
        self._child_watching(self._allocate_transition, asynchronous=True)
def finish(self):
"""Call to release the set and leave the party"""
self._release_locks()
self._fail_out()
def _fail_out(self):
with self._state_change:
self.state = PartitionState.FAILURE
if self._party.participating:
try:
self._party.leave()
except KazooException: # pragma: nocover
pass
def _allocate_transition(self, result):
"""Called when in allocating mode, and the children settled"""
# Did we get an exception waiting for children to settle?
if result.exception: # pragma: nocover
self._fail_out()
return
children, async_result = result.get()
self._children_updated = False
# Add a callback when children change on the async_result
def updated(result):
with self._state_change:
if self.acquired:
self.state = PartitionState.RELEASE
self._children_updated = True
async_result.rawlink(updated)
# Split up the set
self._partition_set = self._partition_func(
self._identifier, list(self._party), self._set)
# Proceed to acquire locks for the working set as needed
for member in self._partition_set:
if self._children_updated or self.failed:
# Still haven't settled down, release locks acquired
# so far and go back
return self._abort_lock_acquisition()
lock = self._client.Lock(self._lock_path + '/' +
str(member))
try:
lock.acquire()
except KazooException: # pragma: nocover
return self.finish()
self._locks.append(lock)
# All locks acquired! Time for state transition, make sure
# we didn't inadvertently get lost thus far
with self._state_change:
if self.failed: # pragma: nocover
return self.finish()
self.state = PartitionState.ACQUIRED
self._acquire_event.set()
def _release_locks(self):
"""Attempt to completely remove all the locks"""
self._acquire_event.clear()
for lock in self._locks[:]:
try:
lock.release()
except KazooException: # pragma: nocover
# We proceed to remove as many as possible, and leave
# the ones we couldn't remove
pass
else:
self._locks.remove(lock)
def _abort_lock_acquisition(self):
"""Called during lock acquisition if a party change occurs"""
self._partition_set = []
self._release_locks()
if self._locks:
# This shouldn't happen, it means we couldn't release our
# locks, abort
self._fail_out()
return
return self._child_watching(self._allocate_transition)
    def _child_watching(self, func=None, asynchronous=False):
"""Called when children are being watched to stabilize
This actually returns immediately, child watcher spins up a
new thread/greenlet and waits for it to stabilize before
any callbacks might run.
"""
watcher = PatientChildrenWatch(self._client, self._party_path,
self._time_boundary)
asy = watcher.start()
if func is not None:
# We spin up the function in a separate thread/greenlet
            # to ensure that the rawlinks it might use won't be
# blocked
            if asynchronous:
func = partial(self._client.handler.spawn, func)
asy.rawlink(func)
return asy
def _establish_sessionwatch(self, state):
"""Register ourself to listen for session events, we shut down
if we become lost"""
with self._state_change:
# Handle network partition: If connection gets suspended,
# change state to ALLOCATING if we had already ACQUIRED.
# This way the caller does not process the members since we
            # could eventually lose the session and get repartitioned. If we got
# connected after a suspension it means we've not lost the
# session and still have our members. Hence, restore to ACQUIRED.
if state == KazooState.SUSPENDED:
if self.state == PartitionState.ACQUIRED:
self._was_allocated = True
self.state = PartitionState.ALLOCATING
elif state == KazooState.CONNECTED:
if self._was_allocated:
self._was_allocated = False
self.state = PartitionState.ACQUIRED
if state == KazooState.LOST:
self._client.handler.spawn(self._fail_out)
return True
def _partitioner(self, identifier, members, partitions):
# Ensure consistent order of partitions/members
all_partitions = sorted(partitions)
workers = sorted(members)
i = workers.index(identifier)
# Now return the partition list starting at our location and
# skipping the other workers
return all_partitions[i::len(workers)]
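# Illustrative sketch only (not part of kazoo): the default partitioner above
# hands out the sorted partition list round-robin, keyed by the worker's index
# in the sorted member list. The worker and queue names below are made up.
if __name__ == '__main__':
    def default_partitioner(identifier, members, partitions):
        all_partitions = sorted(partitions)
        workers = sorted(members)
        i = workers.index(identifier)
        return all_partitions[i::len(workers)]
    members = ['worker-a', 'worker-b', 'worker-c']
    queues = ['queue-%d' % n for n in range(1, 7)]
    for member in members:
        print(member, default_partitioner(member, members, queues))
    # worker-a ['queue-1', 'queue-4']
    # worker-b ['queue-2', 'queue-5']
    # worker-c ['queue-3', 'queue-6']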
|
|
from autosar.writer.writer_base import ElementWriter
import autosar.portinterface
class XMLPortInterfaceWriter(ElementWriter):
def __init__(self,version, patch):
super().__init__(version, patch)
if self.version >= 3.0 and self.version < 4.0:
self.switcher = {
'SoftwareAddressMethod': self.writeSoftwareAddressMethodXML,
'SenderReceiverInterface': self.writeSenderReceiverInterfaceXML,
'ParameterInterface': self.writeCalPrmInterfaceXML,
'ClientServerInterface': self.writeClientServerInterfaceXML,
}
elif self.version >= 4.0:
self.switcher = {
'ClientServerInterface': self.writeClientServerInterfaceXML,
'ModeSwitchInterface': self.writeModeSwitchInterfaceXML,
'SenderReceiverInterface': self.writeSenderReceiverInterfaceXML,
'ParameterInterface': self.writeParameterInterfaceXML,
'SoftwareAddressMethod': self.writeSoftwareAddressMethodXML,
'NvDataInterface': self.writeNvDataInterfaceXML,
}
else:
            self.switcher = {}
def getSupportedXML(self):
return self.switcher.keys()
def getSupportedCode(self):
return []
def writeElementXML(self, elem):
xmlWriteFunc = self.switcher.get(type(elem).__name__)
if xmlWriteFunc is not None:
return xmlWriteFunc(elem)
else:
return None
def writeElementCode(self, elem, localvars):
raise NotImplementedError('writeElementCode')
def writeSenderReceiverInterfaceXML(self, portInterface):
assert(isinstance(portInterface,autosar.portinterface.SenderReceiverInterface))
ws = portInterface.rootWS()
lines=[]
lines.append('<SENDER-RECEIVER-INTERFACE>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%portInterface.name,1))
descLines = self.writeDescXML(portInterface)
if descLines is not None:
lines.extend(self.indent(descLines,1))
if portInterface.adminData is not None:
lines.extend(self.indent(self.writeAdminDataXML(portInterface.adminData),1))
lines.append(self.indent('<IS-SERVICE>%s</IS-SERVICE>'%self.toBooleanStr(portInterface.isService),1))
if (self.version >= 4.0) and (portInterface.serviceKind is not None):
lines.append(self.indent('<SERVICE-KIND>%s</SERVICE-KIND>'%portInterface.serviceKind,1))
if len(portInterface.dataElements)>0:
lines.append(self.indent('<DATA-ELEMENTS>',1))
for elem in portInterface.dataElements:
lines.extend(self.indent(self.writeDataElementXML(elem),2))
lines.append(self.indent('</DATA-ELEMENTS>',1))
else:
lines.append(self.indent('<DATA-ELEMENTS/>',1))
if len(portInterface.modeGroups) > 0:
lines.append(self.indent('<MODE-GROUPS>',1))
for group in portInterface.modeGroups:
lines.extend(self.indent(self.writeModeGroupXML(group),2))
lines.append(self.indent('</MODE-GROUPS>',1))
if len(portInterface.invalidationPolicies)>0:
lines.append(self.indent('<INVALIDATION-POLICYS>',1))
for invalidationPolicy in portInterface.invalidationPolicies:
lines.extend(self.indent(self.writeInvalidationPolicyXML(ws, invalidationPolicy),2))
lines.append(self.indent('</INVALIDATION-POLICYS>',1))
lines.append('</SENDER-RECEIVER-INTERFACE>')
return lines
def writeCalPrmInterfaceXML(self, portInterface):
assert(isinstance(portInterface,autosar.portinterface.ParameterInterface))
lines=[]
lines.append('<CALPRM-INTERFACE>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%portInterface.name,1))
lines.append(self.indent('<IS-SERVICE>%s</IS-SERVICE>'%self.toBooleanStr(portInterface.isService),1))
if len(portInterface.elements)>0:
lines.append(self.indent('<CALPRM-ELEMENTS>',1))
for elem in portInterface.elements:
lines.extend(self.indent(self.writeCalParamElementXML(elem),2))
lines.append(self.indent('</CALPRM-ELEMENTS>',1))
else:
lines.append(self.indent('<CALPRM-ELEMENTS/>',1))
lines.append('</CALPRM-INTERFACE>')
return lines
def writeCalParamElementXML(self,elem):
assert(isinstance(elem,autosar.element.ParameterDataPrototype))
lines=[]
lines.append('<CALPRM-ELEMENT-PROTOTYPE>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%elem.name,1))
ws = elem.rootWS()
if elem.adminData is not None:
lines.extend(self.indent(self.writeAdminDataXML(elem.adminData),1))
if elem.swAddressMethodRef is not None:
lines.append(self.indent('<SW-DATA-DEF-PROPS>',1))
swAddrMethod = ws.find(elem.swAddressMethodRef)
if (swAddrMethod is None):
raise ValueError("invalid reference: '%s'"%elem.swAddressMethodRef)
else:
lines.append(self.indent('<SW-ADDR-METHOD-REF DEST="%s">%s</SW-ADDR-METHOD-REF>'%(swAddrMethod.tag(self.version),swAddrMethod.ref),2))
lines.append(self.indent('</SW-DATA-DEF-PROPS>',1))
typeElem = ws.find(elem.typeRef, role="DataType")
if (typeElem is None):
raise ValueError("invalid type reference: '%s'"%elem.typeRef)
else:
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(typeElem.tag(self.version),typeElem.ref),1))
lines.append('</CALPRM-ELEMENT-PROTOTYPE>')
return lines
def writeParameterInterfaceXML(self, portInterface):
assert(isinstance(portInterface,autosar.portinterface.ParameterInterface))
lines=[]
ws = portInterface.rootWS()
assert(ws is not None)
lines.append('<%s>'%portInterface.tag(self.version))
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%portInterface.name,1))
if portInterface.adminData is not None:
lines.extend(self.indent(self.writeAdminDataXML(portInterface.adminData),1))
if len(portInterface.parameters)>0:
lines.append(self.indent('<PARAMETERS>',1))
for elem in portInterface.parameters:
lines.extend(self.indent(self._writeParameterElement(elem, ws),2))
lines.append(self.indent('</PARAMETERS>',1))
else:
lines.append(self.indent('<PARAMETERS/>',1))
lines.append('</%s>'%portInterface.tag(self.version))
return lines
def _writeParameterElement(self, parameter, ws):
assert(isinstance(parameter, autosar.element.ParameterDataPrototype))
lines=[]
lines.append('<%s>'%parameter.tag(self.version))
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%parameter.name,1))
lines.append(self.indent('<SW-DATA-DEF-PROPS>',1))
if parameter.swCalibrationAccess is None:
access = 'NOT-ACCESSIBLE'
else:
access = parameter.swCalibrationAccess
variants = [autosar.base.SwDataDefPropsConditional(swAddressMethodRef=parameter.swAddressMethodRef, swCalibrationAccess=access)]
lines.extend(self.indent(self.writeSwDataDefPropsVariantsXML(ws, variants),2))
lines.append(self.indent('</SW-DATA-DEF-PROPS>',1))
typeElem = ws.find(parameter.typeRef, role="DataType")
if (typeElem is None):
raise ValueError("invalid type reference: '%s'"%parameter.typeRef)
else:
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(typeElem.tag(self.version),typeElem.ref),1))
lines.append('</%s>'%parameter.tag(self.version))
return lines
def writeClientServerInterfaceXML(self, portInterface):
assert(isinstance(portInterface,autosar.portinterface.ClientServerInterface))
lines=[]
lines.append('<CLIENT-SERVER-INTERFACE>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%portInterface.name,1))
descLines = self.writeDescXML(portInterface)
if descLines is not None:
lines.extend(self.indent(descLines,1))
if portInterface.adminData is not None:
lines.extend(self.indent(self.writeAdminDataXML(portInterface.adminData),1))
lines.append(self.indent('<IS-SERVICE>%s</IS-SERVICE>'%self.toBooleanStr(portInterface.isService),1))
if (portInterface.serviceKind is not None) and (self.version >= 4.0):
lines.append(self.indent('<SERVICE-KIND>%s</SERVICE-KIND>'%portInterface.serviceKind,1))
if len(portInterface.operations)>0:
lines.append(self.indent('<OPERATIONS>',1))
for operation in portInterface.operations:
lines.extend(self.indent(self.writeOperationXML(operation),2))
lines.append(self.indent('</OPERATIONS>',1))
else:
lines.append(self.indent('<OPERATIONS/>',1))
if len(portInterface.applicationErrors)>0:
lines.append(self.indent('<POSSIBLE-ERRORS>',1))
for applicationError in portInterface.applicationErrors:
lines.extend(self.indent(self.writeApplicationErrorXML(applicationError),2))
lines.append(self.indent('</POSSIBLE-ERRORS>',1))
lines.append('</CLIENT-SERVER-INTERFACE>')
return lines
def writeOperationXML(self,operation):
assert(isinstance(operation,autosar.portinterface.Operation))
ws = operation.rootWS()
assert(ws is not None)
lines=[]
lines.append('<%s>'%operation.tag(self.version))
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%operation.name,1))
descLines = self.writeDescXML(operation)
if descLines is not None:
lines.extend(self.indent(descLines,1))
if len(operation.arguments)>0:
lines.append(self.indent('<ARGUMENTS>',1))
for argument in operation.arguments:
lines.extend(self.indent(self.writeArgumentXML(ws, argument),2))
lines.append(self.indent('</ARGUMENTS>',1))
if len(operation.errorRefs)>0:
lines.append(self.indent('<POSSIBLE-ERROR-REFS>',1))
for errorRef in operation.errorRefs:
errorElem = ws.find(errorRef)
if (errorElem is None):
raise ValueError("invalid error reference: '%s'"%errorRef)
else:
lines.append(self.indent('<POSSIBLE-ERROR-REF DEST="%s">%s</POSSIBLE-ERROR-REF>'%(errorElem.tag(self.version),errorElem.ref),2))
lines.append(self.indent('</POSSIBLE-ERROR-REFS>',1))
lines.append('</%s>'%operation.tag(self.version))
return lines
def writeArgumentXML(self, ws, argument):
assert(isinstance(argument, autosar.portinterface.Argument))
lines=[]
lines.append('<%s>'%argument.tag(self.version))
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%argument.name,1))
if self.version >= 4.0:
descLines = self.writeDescXML(argument)
if descLines is not None:
lines.extend(self.indent(descLines,1))
lines.append(self.indent('<SW-DATA-DEF-PROPS>',1))
if argument.swCalibrationAccess is None:
tmp = 'NOT-ACCESSIBLE'
else:
tmp = argument.swCalibrationAccess
variants = [autosar.base.SwDataDefPropsConditional(swCalibrationAccess=tmp)]
lines.extend(self.indent(self.writeSwDataDefPropsVariantsXML(ws, variants),2))
lines.append(self.indent('</SW-DATA-DEF-PROPS>',1))
typeElem = ws.find(argument.typeRef, role="DataType")
if (typeElem is None):
raise ValueError("invalid type reference: '%s'"%argument.typeRef)
else:
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(typeElem.tag(self.version),typeElem.ref),1))
lines.append(self.indent('<DIRECTION>%s</DIRECTION>'%argument.direction,1))
if self.version >= 4.0:
if argument.serverArgumentImplPolicy is not None:
policy = argument.serverArgumentImplPolicy
else:
policy = 'USE-ARGUMENT-TYPE'
lines.append(self.indent('<SERVER-ARGUMENT-IMPL-POLICY>%s</SERVER-ARGUMENT-IMPL-POLICY>' % (policy), 1))
lines.append('</%s>'%argument.tag(self.version))
return lines
def writeApplicationErrorXML(self,applicationError):
assert(isinstance(applicationError, autosar.portinterface.ApplicationError))
lines=[]
lines.append('<%s>'%applicationError.tag(self.version))
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%applicationError.name,1))
lines.append(self.indent('<ERROR-CODE>%d</ERROR-CODE>'%applicationError.errorCode,1))
lines.append('</%s>'%applicationError.tag(self.version))
return lines
def writeModeGroupXML(self,modeGroup):
assert(isinstance(modeGroup,autosar.mode.ModeGroup))
lines=[]
ws = modeGroup.rootWS()
assert(ws is not None)
lines.append('<%s>'%modeGroup.tag(self.version))
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%modeGroup.name,1))
typeElem = ws.find(modeGroup.typeRef)
if (typeElem is None):
raise ValueError("invalid type reference: '%s'"%modeGroup.typeRef)
else:
lines.append(self.indent('<TYPE-TREF DEST="%s">%s</TYPE-TREF>'%(typeElem.tag(self.version),typeElem.ref),1))
lines.append('</%s>'%modeGroup.tag(self.version))
return lines
def writeSoftwareAddressMethodXML(self, addressMethod):
assert(isinstance(addressMethod,autosar.element.SoftwareAddressMethod))
lines=[]
lines.append('<%s>'%addressMethod.tag(self.version))
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%addressMethod.name,1))
lines.append('</%s>'%addressMethod.tag(self.version))
return lines
def writeModeSwitchInterfaceXML(self, portInterface):
assert(isinstance(portInterface, autosar.portinterface.ModeSwitchInterface))
lines=[]
lines.append('<%s>'%portInterface.tag(self.version))
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%portInterface.name,1))
if portInterface.adminData is not None:
lines.extend(self.indent(self.writeAdminDataXML(portInterface.adminData),1))
lines.append(self.indent('<IS-SERVICE>%s</IS-SERVICE>'%self.toBooleanStr(portInterface.isService),1))
lines.extend(self.indent(self.writeModeGroupXML(portInterface.modeGroup),1))
lines.append('</%s>'%portInterface.tag(self.version))
return lines
def writeInvalidationPolicyXML(self, ws, invalidationPolicy):
assert(isinstance(invalidationPolicy, autosar.portinterface.InvalidationPolicy))
lines=[]
lines.append('<%s>'%invalidationPolicy.tag(self.version))
dataElement = ws.find(invalidationPolicy.dataElementRef, role="PortInterface")
if (dataElement is None):
raise ValueError("invalid type reference: '%s'"%invalidationPolicy.dataElementRef)
else:
lines.append(self.indent('<DATA-ELEMENT-REF DEST="%s">%s</DATA-ELEMENT-REF>'%(dataElement.tag(self.version), dataElement.ref),1))
lines.append(self.indent('<HANDLE-INVALID>%s</HANDLE-INVALID>'%invalidationPolicy.handleInvalid,1))
lines.append('</%s>'%invalidationPolicy.tag(self.version))
return lines
def writeNvDataInterfaceXML(self, portInterface):
assert(isinstance(portInterface,autosar.portinterface.NvDataInterface))
ws = portInterface.rootWS()
lines=[]
lines.append('<NV-DATA-INTERFACE>')
lines.append(self.indent('<SHORT-NAME>%s</SHORT-NAME>'%portInterface.name,1))
if portInterface.adminData is not None:
lines.extend(self.indent(self.writeAdminDataXML(portInterface.adminData),1))
lines.append(self.indent('<IS-SERVICE>%s</IS-SERVICE>'%self.toBooleanStr(portInterface.isService),1))
if (self.version >= 4.0) and (portInterface.serviceKind is not None):
lines.append(self.indent('<SERVICE-KIND>%s</SERVICE-KIND>'%portInterface.serviceKind,1))
if len(portInterface.nvDatas)>0:
lines.append(self.indent('<NV-DATAS>',1))
for elem in portInterface.nvDatas:
lines.extend(self.indent(self.writeDataElementXML(elem),2))
lines.append(self.indent('</NV-DATAS>',1))
else:
lines.append(self.indent('<NV-DATAS/>',1))
lines.append('</NV-DATA-INTERFACE>')
return lines
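# Hedged usage sketch (not part of the original module): writeElementXML
# dispatches on type(elem).__name__ via self.switcher, so a caller only needs
# a writer instance and a port-interface element. The workspace setup below is
# illustrative; the path and the 4.2 version/patch numbers are assumptions.
#
#     import autosar
#     ws = autosar.workspace(version="4.2.2")          # hypothetical workspace
#     portInterface = ws.find("/PortInterfaces/SomeInterface")
#     writer = XMLPortInterfaceWriter(4.2, 2)
#     lines = writer.writeElementXML(portInterface)    # list of XML lines, or None
#     if lines is not None:
#         print("\n".join(lines))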
class CodePortInterfaceWriter(ElementWriter):
def __init__(self,version, patch):
super().__init__(version, patch)
if self.version >= 3.0 and self.version < 4.0:
self.switcher = {
'SoftwareAddressMethod': self.writeSoftwareAddressMethodCode,
'ModeDeclarationGroup': self.writeModeDeclarationGroupCode,
'SenderReceiverInterface': self.writeSenderReceiverInterfaceCode,
'ParameterInterface': self.writeParameterInterfaceCode,
'ClientServerInterface': self.writeClientServerInterfaceCode,
}
elif self.version >= 4.0:
self.switcher = {
}
else:
self.switcher = {}
def getSupportedXML(self):
return []
def getSupportedCode(self):
return self.switcher.keys()
def writeElementXML(self, elem):
raise NotImplementedError('writeElementXML')
def writeElementCode(self, elem, localvars):
codeWriteFunc = self.switcher.get(type(elem).__name__)
if codeWriteFunc is not None:
return codeWriteFunc(elem, localvars)
else:
return None
def writeSenderReceiverInterfaceCode(self, portInterface, localvars):
assert(isinstance(portInterface,autosar.portinterface.SenderReceiverInterface))
lines=[]
params=['"%s"'%portInterface.name]
if len(portInterface.dataElements)>1:
raise NotImplementedError('more than one data element in an interface not yet supported')
elif len(portInterface.dataElements)==1:
params.append(self.writeDataElementCode(portInterface.dataElements[0], localvars))
if portInterface.modeGroups is not None:
if len(portInterface.modeGroups)>1:
raise NotImplementedError("more then one modegroup not yet supported")
elif len(portInterface.modeGroups)==1:
params.append('modeGroups='+self.writeModeGroupCode(portInterface.modeGroups[0], localvars))
if portInterface.isService:
params.append('isService=True')
if portInterface.adminData is not None:
param = self.writeAdminDataCode(portInterface.adminData, localvars)
assert(len(param)>0)
params.append('adminData='+param)
lines.append('package.createSenderReceiverInterface(%s)'%(', '.join(params)))
return lines
def writeDataElementCode(self, elem, localvars):
ws = elem.rootWS()
assert(ws is not None)
dataType = ws.find(elem.typeRef, role="DataType")
if dataType is None:
raise ValueError('invalid reference: '+elem.typeRef)
#name
params=[repr(elem.name)]
#typeRef
if ws.roles['DataType'] is not None:
params.append(repr(dataType.name)) #use name only
else:
params.append(repr(dataType.ref)) #use full reference
if elem.isQueued:
params.append('True')
if elem.swAddressMethodRef is not None:
params.append('softwareAddressMethodRef="%s"'%elem.swAddressMethodRef)
if elem.adminData is not None:
param = self.writeAdminDataCode(elem.adminData, localvars)
assert(len(param)>0)
params.append('adminData='+param)
return 'autosar.DataElement(%s)'%(', '.join(params))
def writeParameterCode(self, elem, localvars):
ws = elem.rootWS()
assert(ws is not None)
dataType = ws.find(elem.typeRef, role="DataType")
if dataType is None:
raise ValueError('invalid reference: '+elem.typeRef)
#name
params=[repr(elem.name)]
#typeRef
if ws.roles['DataType'] is not None:
params.append(repr(dataType.name)) #use name only
else:
params.append(repr(dataType.ref)) #use full reference
if elem.swAddressMethodRef is not None:
params.append('swAddressMethodRef="%s"'%elem.swAddressMethodRef)
if elem.swCalibrationAccess is not None:
params.append('swCalibrationAccess="%s"'%elem.swCalibrationAccess)
if elem.adminData is not None:
param = self.writeAdminDataCode(elem.adminData, localvars)
assert(len(param)>0)
params.append('adminData='+param)
return 'autosar.Parameter(%s)'%(', '.join(params))
def writeModeGroupCode(self, modeGroup, localvars):
ws = modeGroup.rootWS()
assert(ws is not None)
dataType = ws.find(modeGroup.typeRef, role="ModeDclrGroup")
if dataType is None:
raise ValueError('invalid reference: '+modeGroup.typeRef)
params=['"%s"'%modeGroup.name, '"%s"'%dataType.ref]
return 'autosar.ModeGroup(%s)'%(', '.join(params))
def writeParameterInterfaceCode(self, portInterface, localvars):
assert(isinstance(portInterface,autosar.portinterface.ParameterInterface))
lines=[]
params=['"%s"'%portInterface.name]
if len(portInterface.elements)==1:
code=self.writeParameterCode(portInterface.elements[0], localvars)
if (portInterface.elements[0].adminData is not None) or len(portInterface.elements[0].swAddrMethodRefList)>0:
#this is going to be a long line, create separate dataElement variable
lines.append('parameter=%s'%code)
params.append('parameter')
else:
params.append(code)
elif len(portInterface.elements)>1:
raise NotImplementedError('more than one data element in an interface not yet supported')
if portInterface.isService:
params.append('isService=True')
if portInterface.adminData is not None:
param = self.writeAdminDataCode(portInterface.adminData, localvars)
assert(len(param)>0)
params.append('adminData='+param)
lines.append('package.createParameterInterface(%s)'%(', '.join(params)))
return lines
def writeClientServerInterfaceCode(self, portInterface, localvars):
lines=[]
ws = portInterface.rootWS()
assert(ws is not None)
params=['"%s"'%portInterface.name]
params2=[]
for operation in portInterface.operations:
params2.append('"%s"'%operation.name)
if len(params2)>3:
lines.extend(self.writeListCode("operationsList",params2))
params.append('operationsList')
else:
params.append('['+', '.join(params2)+']')
params2=[]
for error in portInterface.applicationErrors:
params2.append('autosar.ApplicationError("%s", %d)'%(error.name,error.errorCode))
if len(params2)>1:
lines.extend(self.writeListCode("errorsList",params2))
params.append('errorsList')
elif len(params2)==1:
params.append(params2[0])
if portInterface.isService:
params.append('isService=True')
if portInterface.adminData is not None:
param = self.writeAdminDataCode(portInterface.adminData, localvars)
assert(len(param)>0)
params.append('adminData='+param)
lines.append('portInterface=package.createClientServerInterface(%s)'%(', '.join(params)))
localvars['portInterface']=portInterface
for operation in portInterface.operations:
for argument in operation.arguments:
methodLookup={"IN": "createInArgument", "OUT": "createOutArgument", "INOUT": "createInOutArgument"}
dataType = ws.find(argument.typeRef)
if dataType is None:
raise ValueError("invalid reference: "+argument.typeRef)
lines.append('portInterface["%s"].%s("%s", "%s")'%(operation.name, methodLookup[argument.direction], argument.name, dataType.name))
params=[]
if len(operation.errorRefs)>0:
for ref in operation.errorRefs:
error = ws.find(ref)
if error is None:
raise ValueError("invalid reference: "+ref)
params.append('"%s"'%error.name)
lines.append('portInterface["%s"].possibleErrors = %s'%(operation.name, ', '.join(params)))
desc,descAttr=self.writeDescCode(operation)
if desc is not None:
lines.append('portInterface["%s"].desc = "%s"'%(operation.name, desc))
if descAttr is not None:
lines.append('portInterface["%s"].descAttr = "%s"'%(operation.name, descAttr))
return lines
def writeSoftwareAddressMethodCode(self, method, localvars):
lines=[]
lines.append('%s.createSoftwareAddressMethod("%s")'%('package', method.name))
return lines
def writeModeDeclarationGroupCode(self, declarationGroup, localvars):
lines=[]
params=['"%s"'%declarationGroup.name]
params2=[]
for item in declarationGroup.modeDeclarations:
params2.append('"%s"'%item.name)
if len(params2)>6:
lines.extend(self.writeListCode("modeDeclarationsList",params2))
params.append('modeDeclarationsList')
else:
params.append('['+', '.join(params2)+']')
assert(declarationGroup.initialModeRef is not None)
tmp=autosar.base.splitRef(declarationGroup.initialModeRef)
params.append('"%s"'%tmp[-1])
if declarationGroup.adminData is not None:
param = self.writeAdminDataCode(declarationGroup.adminData, localvars)
assert(len(param)>0)
params.append('adminData='+param)
lines.append('package.createModeDeclarationGroup(%s)'%(', '.join(params)))
return lines
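# Hedged usage sketch (not part of the original module): writeElementCode
# returns Python statements, as strings, that would recreate the element
# through the package factory API (e.g. package.createSenderReceiverInterface).
# The element and localvars below are assumptions for illustration only.
#
#     writer = CodePortInterfaceWriter(3.0, 2)         # only the 3.x branch has code writers
#     localvars = {"ws": ws, "package": package}       # hypothetical context
#     code_lines = writer.writeElementCode(portInterface, localvars)
#     if code_lines is not None:
#         print("\n".join(code_lines))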
|
|
# Copyright (c) Facebook, Inc. All Rights Reserved
"""
Processors for all downstream (ds) tasks.
"""
import json
import os
import pickle
import random
import math
import numpy as np
import torch
from collections import defaultdict
from .processor import (
MetaProcessor,
VideoProcessor,
TextProcessor,
Aligner,
MMAttentionMask2DProcessor,
)
from .how2processor import TextGenerationProcessor
# ------------- A General Aligner for all downstream tasks-----------------
class DSAligner(Aligner):
"""
Downstream (DS) aligner shared by all datasets.
"""
def __call__(self, video_id, video_feature, text_feature, wps=0.7):
# random sample a starting sec for video.
video_start = 0
video_end = min(len(video_feature), self.max_video_len)
# the whole sequence is a single clip.
video_clips = {"start": [video_start], "end": [video_end]}
text_feature = {
"cap": [text_feature],
"start": [video_start],
"end": [len(text_feature) / wps],
}
text_clip_indexs = [0]
vfeats, vmasks = self._build_video_seq(
video_feature, video_clips
)
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats,
"vmasks": vmasks,
"video_id": video_id,
}
class NLGTextProcessor(TextProcessor):
"""
Also return the original text as ref.
"""
def __call__(self, text_id):
return super().__call__(text_id), text_id
class DSNLGAligner(DSAligner):
"""extend with the capability of 2d mask for generation."""
def __init__(self, config):
super().__init__(config)
self.attnmasker = MMAttentionMask2DProcessor()
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
self.bert_name, use_fast=self.use_fast,
bos_token="[CLS]", eos_token="[SEP]"
)
self.tokenizer = tokenizer
self.bos_token_id = tokenizer.bos_token_id
self.eos_token_id = tokenizer.eos_token_id
self.textgen = TextGenerationProcessor(tokenizer)
def __call__(self, video_id, video_feature, text_feature):
output = super().__call__(video_id, video_feature, text_feature[0])
if self.split == "test":
# output.update({"ref": text_feature[1]})
output.update({"ref": self.tokenizer.decode(
output["caps"], skip_special_tokens=True)})
text_label = output["caps"]
cmasks = torch.BoolTensor([1] * text_label.size(0))
caps = torch.LongTensor([
self.cls_token_id,
self.sep_token_id,
self.bos_token_id])
else:
caps, text_label = self.textgen(output["caps"])
cmasks = output["cmasks"]
attention_mask = self.attnmasker(
output["vmasks"], cmasks, "textgen")
output.update({
"caps": caps,
"cmasks": cmasks,
"text_label": text_label,
"attention_mask": attention_mask,
})
return output
# -------------------- MSRVTT ------------------------
class MSRVTTMetaProcessor(MetaProcessor):
"""MSRVTT dataset.
reference: `howto100m/msrvtt_dataloader.py`
"""
def __init__(self, config):
super().__init__(config)
import pandas as pd
data = pd.read_csv(self._get_split_path(config))
# TODO: add a text1ka flag.
if config.split == "train" \
and config.full_test_path is not None \
and config.jsfusion_path is not None:
# add testing videos from full_test_path not used by jsfusion.
additional_data = pd.read_csv(config.full_test_path)
jsfusion_data = pd.read_csv(config.jsfusion_path)
for video_id in additional_data["video_id"]:
if video_id not in jsfusion_data["video_id"].values:
data = data.append(
{"video_id": video_id}, ignore_index=True)
if config.dup is not None and config.split == "train":
data = data.append([data] * (config.dup - 1), ignore_index=True)
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
"""slightly modify with if condition to combine train/test."""
vid, sentence = None, None
vid = self.data["video_id"].values[idx]
if "sentence" in self.data: # for testing.
sentence = self.data["sentence"].values[idx]
else: # for training.
sentence = vid
return vid, sentence
class MSRVTTTextProcessor(TextProcessor):
"""MSRVTT dataset.
reference: `msrvtt_dataloader.py` `MSRVTT_TrainDataLoader`.
TODO (huxu): add max_words.
"""
def __init__(self, config):
super().__init__(config)
self.sentences = None
if config.json_path is not None and config.split == "train":
with open(config.json_path) as fd:
self.data = json.load(fd)
self.sentences = defaultdict(list)
for s in self.data["sentences"]:
self.sentences[s["video_id"]].append(s["caption"])
def __call__(self, text_id):
if self.sentences is not None:
rind = random.randint(0, len(self.sentences[text_id]) - 1)
sentence = self.sentences[text_id][rind]
else:
sentence = text_id
caption = self.tokenizer(sentence, add_special_tokens=False)
return caption["input_ids"]
class MSRVTTNLGTextProcessor(MSRVTTTextProcessor):
"""TODO: change dsaligner and merge to avoid any NLG text processor."""
def __call__(self, text_id):
if self.sentences is not None:
rind = random.randint(0, len(self.sentences[text_id]) - 1)
sentence = self.sentences[text_id][rind]
else:
sentence = text_id
caption = self.tokenizer(sentence, add_special_tokens=False)
return caption["input_ids"], sentence
class MSRVTTQAMetaProcessor(MetaProcessor):
"""MSRVTT-QA: retrieval-based multi-choice QA from JSFusion dataset.
For simplicity, we use the train retrieval model.
reference: `https://github.com/yj-yu/lsmdc`
"""
def __init__(self, config):
super().__init__(config)
import pandas as pd
csv_data = pd.read_csv(self._get_split_path(config), sep="\t")
data = []
for video_id, a1, a2, a3, a4, a5, answer in zip(
csv_data["vid_key"].values,
csv_data["a1"].values,
csv_data["a2"].values,
csv_data["a3"].values,
csv_data["a4"].values,
csv_data["a5"].values,
csv_data["answer"].values):
video_id = video_id.replace("msr", "video")
data.append((video_id, (answer, [a1, a2, a3, a4, a5])))
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class MSRVTTQATextProcessor(TextProcessor):
"""MSRVTT-QA dataset.
text_ans is of format `(answer, [a1, a2, a3, a4, a5])`.
"""
def __call__(self, text_ans):
for ans_idx, ans in enumerate(text_ans[1]):
if isinstance(ans, str):
text_ans[1][ans_idx] = self.tokenizer(ans, add_special_tokens=False)["input_ids"]
return text_ans
class MSRVTTQAAligner(DSAligner):
"""MSRVTT dataset.
similar to sampling in how2.
__call__ is invoked once per answer candidate.
"""
def __call__(self, video_id, video_feature, text_feature, wps=0.7):
caps = []
cmasks = []
answer = text_feature[0]
for ans_idx, _text_feature in enumerate(text_feature[1]):
output = super().__call__(
video_id, video_feature, _text_feature, wps)
caps.append(output["caps"])
cmasks.append(output["cmasks"])
output.update({
"caps": torch.stack(caps),
"cmasks": torch.stack(cmasks),
"answers": torch.LongTensor([answer]),
})
return output
# -------------------- Youcook -----------------------
class YoucookMetaProcessor(MetaProcessor):
"""Youcook dataset.
reference: `howto100m/youcook_dataloader.py`
note that the data can differ from the original because
(1) some videos already in HowTo100M are removed, and
(2) stop words are removed from the captions.
TODO (huxu): make a flag to load the original caption
(see youcookii_annotations_trainval.json).
The max_video_len can be 264 and the text can be up to 64 tokens.
In practice we may not need sequences that long; see projects/task/youcook.yaml
"""
def __init__(self, config):
super().__init__(config)
vfeat_dir = config.vfeat_dir
print(self._get_split_path(config))
with open(self._get_split_path(config), "rb") as fd:
data = pickle.load(fd)
all_valid_video_ids = set(
[os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)]
)
recs = []
video_ids = set()
valid_video_ids = set()
for rec in data: # filter videos not available.
udl_idx = rec["id"].rindex("_")
video_id = rec["id"][:udl_idx]
video_ids.add(video_id)
if video_id in all_valid_video_ids:
valid_video_ids.add(video_id)
recs.append(rec)
print("total video_ids in .pkl", len(video_ids))
print("valid video_ids in .pkl", len(valid_video_ids))
print("please verify {train,val}_list.txt")
data = recs
self.data = data
with open(config.trainval_annotation) as fd:
self.youcook_annotation = json.load(fd)["database"]
if config.use_annotation_text is True:
print("using text in annotation.")
self.use_annotation_caption = True
else:
self.use_annotation_caption = False
def __getitem__(self, idx):
def _get_video_and_caption(rec):
vid = rec["id"]
udl_idx = vid.rindex("_")
video_id, clip_id = vid[:udl_idx], int(vid[udl_idx + 1:])
clip = self.youcook_annotation[video_id]["annotations"][clip_id]
start, end = clip["segment"]
if self.use_annotation_caption:
caption = clip["sentence"]
else:
caption = rec["caption"]
return (video_id, start, end), caption
rec = self.data[idx]
video_info, text_info = _get_video_and_caption(rec)
return video_info, text_info
class YoucookVideoProcessor(VideoProcessor):
"""video_fn is a tuple of (video_id, start, end) now."""
def __call__(self, video_fn):
video_id, start, end = video_fn
feat = np.load(os.path.join(self.vfeat_dir, video_id + ".npy"))
return feat[start:end]
class YoucookNLGMetaProcessor(MetaProcessor):
"""NLG uses the original split:
`train_list.txt` and `val_list.txt`
"""
def __init__(self, config):
super().__init__(config)
vfeat_dir = config.vfeat_dir
print(self._get_split_path(config))
with open(self._get_split_path(config)) as fd:
video_ids = [
line.strip().split("/")[1] for line in fd.readlines()]
print("total video_ids in train/val_list.txt", len(video_ids))
all_valid_video_ids = set(
[os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)]
)
video_ids = [
video_id for video_id in video_ids
if video_id in all_valid_video_ids]
print("valid video_ids in train/val_list.txt", len(video_ids))
with open(config.trainval_annotation) as fd:
self.youcook_annotation = json.load(fd)["database"]
data = []
for video_id in video_ids:
for clip in self.youcook_annotation[video_id]["annotations"]:
start, end = clip["segment"]
caption = clip["sentence"]
data.append(((video_id, start, end), caption))
self.data = data
def __getitem__(self, idx):
return self.data[idx]
# --------------------- CrossTask -------------------------
class CrossTaskMetaProcessor(MetaProcessor):
def __init__(self, config):
super().__init__(config)
np.random.seed(0) # deterministic random split.
task_vids = self._get_vids(
config.train_csv_path,
config.vfeat_dir,
config.annotation_path)
val_vids = self._get_vids(
config.val_csv_path,
config.vfeat_dir,
config.annotation_path)
# filter out those task and vids appear in val_vids.
task_vids = {
task: [
vid for vid in vids
if task not in val_vids or vid not in val_vids[task]]
for task, vids in task_vids.items()}
primary_info = self._read_task_info(config.primary_path)
test_tasks = set(primary_info['steps'].keys())
# if args.use_related:
related_info = self._read_task_info(config.related_path)
task_steps = {**primary_info['steps'], **related_info['steps']}
n_steps = {**primary_info['n_steps'], **related_info['n_steps']}
# else:
# task_steps = primary_info['steps']
# n_steps = primary_info['n_steps']
all_tasks = set(n_steps.keys())
# filter and keep task in primary or related.
task_vids = {
task: vids for task, vids in task_vids.items()
if task in all_tasks}
# vocab-by-step matrix (A) and vocab (M)
# (huxu): we do not use BoW.
# A, M = self._get_A(task_steps, share="words")
train_vids, test_vids = self._random_split(
task_vids, test_tasks, config.n_train)
print("train_num_videos", sum(len(vids) for vids in train_vids.values()))
print("test_num_videos", sum(len(vids) for vids in test_vids.values()))
# added by huxu to automatically determine the split.
split_map = {
"train": train_vids,
"valid": test_vids,
"test": test_vids
}
task_vids = split_map[config.split]
self.vids = []
for task, vids in task_vids.items():
self.vids.extend([(task, vid) for vid in vids])
self.task_steps = task_steps
self.n_steps = n_steps
def __getitem__(self, idx):
task, vid = self.vids[idx]
n_steps = self.n_steps[task]
steps = self.task_steps[task]
assert len(steps) == n_steps
return (task, vid, steps, n_steps), (task, vid, steps, n_steps)
def __len__(self):
return len(self.vids)
def _random_split(self, task_vids, test_tasks, n_train):
train_vids = {}
test_vids = {}
for task, vids in task_vids.items():
if task in test_tasks and len(vids) > n_train:
train_vids[task] = np.random.choice(
vids, n_train, replace=False).tolist()
test_vids[task] = [
vid for vid in vids if vid not in train_vids[task]]
else:
train_vids[task] = vids
return train_vids, test_vids
def _get_vids(self, path, vfeat_dir, annotation_path):
"""refactored from
https://github.com/DmZhukov/CrossTask/blob/master/data.py
changes: add `vfeat_dir` to check if the video is available.
add `annotation_path` to check if the video is available.
"""
task_vids = {}
with open(path, 'r') as f:
for line in f:
task, vid, url = line.strip().split(',')
# double check the video is available.
if not os.path.exists(
os.path.join(vfeat_dir, vid + ".npy")):
continue
# double check the annotation is available.
if not os.path.exists(os.path.join(
annotation_path,
task + "_" + vid + ".csv")):
continue
if task not in task_vids:
task_vids[task] = []
task_vids[task].append(vid)
return task_vids
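# Hedged example (values invented) of the csv rows parsed by _get_vids above:
# one "task,video_id,url" triple per line, e.g.
#
#     23521,4eWzsx1vAi8,https://www.youtube.com/watch?v=4eWzsx1vAi8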
def _read_task_info(self, path):
titles = {}
urls = {}
n_steps = {}
steps = {}
with open(path, 'r') as f:
idx = f.readline()
while idx != '':
idx = idx.strip()
titles[idx] = f.readline().strip()
urls[idx] = f.readline().strip()
n_steps[idx] = int(f.readline().strip())
steps[idx] = f.readline().strip().split(',')
next(f)
idx = f.readline()
return {
'title': titles,
'url': urls,
'n_steps': n_steps,
'steps': steps
}
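# Hedged sketch of the record layout _read_task_info above expects (inferred
# from its readline sequence; the values are invented): each task occupies
# five lines -- id, title, url, number of steps, comma-separated steps --
# followed by one blank separator line.
#
#     23521
#     Make Jello Shots
#     https://www.youtube.com/watch?v=example
#     6
#     pour water,add gelatin,add alcohol,stir mixture,pour mixture,refrigerate
#     <blank line>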
def _get_A(self, task_steps, share="words"):
raise ValueError("running get_A is not allowed for BERT.")
"""Step-to-component matrices."""
if share == 'words':
# share words
task_step_comps = {
task: [step.split(' ') for step in steps]
for task, steps in task_steps.items()}
elif share == 'task_words':
# share words within same task
task_step_comps = {
task: [[task+'_'+tok for tok in step.split(' ')] for step in steps]
for task, steps in task_steps.items()}
elif share == 'steps':
# share whole step descriptions
task_step_comps = {
task: [[step] for step in steps] for task, steps in task_steps.items()}
else:
# no sharing
task_step_comps = {
task: [[task+'_'+step] for step in steps]
for task, steps in task_steps.items()}
# BERT tokenizer here?
vocab = []
for task, steps in task_step_comps.items():
for step in steps:
vocab.extend(step)
vocab = {comp: m for m, comp in enumerate(set(vocab))}
M = len(vocab)
A = {}
for task, steps in task_step_comps.items():
K = len(steps)
a = torch.zeros(M, K)
for k, step in enumerate(steps):
a[[vocab[comp] for comp in step], k] = 1
a /= a.sum(dim=0)
A[task] = a
return A, M
class CrossTaskVideoProcessor(VideoProcessor):
def __call__(self, video_fn):
task, vid, steps, n_steps = video_fn
video_fn = os.path.join(self.vfeat_dir, vid + ".npy")
feat = np.load(video_fn)
return feat
class CrossTaskTextProcessor(TextProcessor):
def __call__(self, text_id):
task, vid, steps, n_steps = text_id
step_ids = []
for step_str in steps:
step_ids.append(
self.tokenizer(step_str, add_special_tokens=False)["input_ids"]
)
return step_ids
class CrossTaskAligner(Aligner):
"""
TODO: the formulation of this task is not yet clear; finish this later.
"""
def __init__(self, config):
super().__init__(config)
self.annotation_path = config.annotation_path
self.sliding_window = config.sliding_window
self.sliding_window_size = config.sliding_window_size
def __call__(self, video_id, video_feature, text_feature):
task, vid, steps, n_steps = video_id
annot_path = os.path.join(
self.annotation_path, task + '_' + vid + '.csv')
video_len = len(video_feature)
labels = torch.from_numpy(self._read_assignment(
video_len, n_steps, annot_path)).float()
vfeats, vmasks, targets = [], [], []
# sliding window on video features and targets.
for window_start in range(0, video_len, self.sliding_window):
video_start = 0
video_end = min(video_len - window_start, self.sliding_window_size)
video_clip = {"start": [video_start], "end": [video_end]}
vfeat, vmask = self._build_video_seq(
video_feature[window_start: window_start + video_end],
video_clip
)
target = labels[window_start: window_start + video_end]
assert len(vfeat) >= len(target), "{},{}".format(len(vfeat), len(target))
# TODO: randomly drop all zero targets for training ?
# if self.split == "train" and target.sum() == 0:
# continue
vfeats.append(vfeat)
vmasks.append(vmask)
targets.append(target)
if (video_len - window_start) <= self.sliding_window_size:
break
vfeats = torch.stack(vfeats)
vmasks = torch.stack(vmasks)
targets = torch.cat(targets, dim=0)
caps, cmasks = [], []
for step in text_feature:
step_text_feature = {"start": [0], "end": [1], "cap": [step]}
step_text_clip_index = [0]
cap, cmask = self._build_text_seq(
step_text_feature, step_text_clip_index
)
caps.append(cap)
cmasks.append(cmask)
caps = torch.stack(caps)
cmasks = torch.stack(cmasks)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats, # X for original code.
"vmasks": vmasks,
"targets": targets,
"video_id": vid,
"task": task,
"video_len": video_len # for later checking.
}
def _read_assignment(self, T, K, path):
"""
refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py
How to interpret constraints on the loss that is going to be minimized:
lambd is a big number;
self.lambd * C is a big number for all valid positions (csv stores invalids)
def forward(self, O, Y, C):
return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum()
This will load the csv file and fill-in the step col from start to end rows.
"""
Y = np.zeros([T, K], dtype=np.uint8)
with open(path, 'r') as f:
for line in f:
step, start, end = line.strip().split(',')
start = int(math.floor(float(start)))
end = int(math.ceil(float(end)))
step = int(step) - 1
Y[start:end, step] = 1
return Y
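# Hedged illustration (inferred from the parsing above; values invented): each
# annotation csv row is "step,start,end" with 1-based step ids and times in
# seconds, and Y[t, k] == 1 marks step k as active at second t.
#
#     # contents of <task>_<vid>.csv
#     #   1,0.0,4.2
#     #   2,4.2,9.8
#     # _read_assignment(T=12, K=2, path) returns a 12x2 matrix with
#     # Y[0:5, 0] == 1 and Y[4:10, 1] == 1 (floor/ceil rounding applied).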
# --------------------- COIN -------------------------
class MetaTextBinarizer(Aligner):
def __call__(self, text_feature):
text_feature = {
"cap": [text_feature],
"start": [0.],
"end": [100.],
}
text_clip_indexs = [0]
caps, cmasks = self._build_text_seq(
text_feature, text_clip_indexs
)
return {"caps": caps, "cmasks": cmasks}
class COINActionSegmentationMetaProcessor(MetaProcessor):
split_map = {
"train": "training",
"valid": "testing",
"test": "testing",
}
def __init__(self, config):
super().__init__(config)
with open(self._get_split_path(config)) as fr:
database = json.load(fr)["database"]
id2label = {}
data = []
# filter the data by split.
for video_id, rec in database.items():
# always use testing to determine label_set
if rec["subset"] == "testing":
for segment in rec["annotation"]:
id2label[int(segment["id"])] = segment["label"]
# text_labels is used for ZS setting
self.text_labels = ["none"] * len(id2label)
for label_id in id2label:
self.text_labels[label_id-1] = id2label[label_id]
id2label[0] = "O"
print("num of labels", len(id2label))
for video_id, rec in database.items():
if not os.path.isfile(os.path.join(config.vfeat_dir, video_id + ".npy")):
continue
if rec["subset"] == COINActionSegmentationMetaProcessor.split_map[self.split]:
starts, ends, labels = [], [], []
for segment in rec["annotation"]:
start, end = segment["segment"]
label = int(segment["id"])
starts.append(start)
ends.append(end)
labels.append(label)
data.append(
(video_id, {"start": starts, "end": ends, "label": labels}))
self.data = data
def meta_text_labels(self, config):
from transformers import default_data_collator
from ..utils import get_local_rank
text_processor = TextProcessor(config)
binarizer = MetaTextBinarizer(config)
# TODO: add prompts to .yaml.
text_labels = [label for label in self.text_labels]
if get_local_rank() == 0:
print(text_labels)
outputs = []
for text_label in text_labels:
text_feature = text_processor(text_label)
outputs.append(binarizer(text_feature))
return default_data_collator(outputs)
def __getitem__(self, idx):
return self.data[idx]
class COINActionSegmentationTextProcessor(TextProcessor):
def __call__(self, text_label):
return text_label
class COINActionSegmentationAligner(Aligner):
def __init__(self, config):
super().__init__(config)
self.sliding_window = config.sliding_window
self.sliding_window_size = config.sliding_window_size
def __call__(self, video_id, video_feature, text_feature):
starts, ends, label_ids = text_feature["start"], text_feature["end"], text_feature["label"]
# sliding window.
video_len = len(video_feature)
vfeats, vmasks, targets = [], [], []
# sliding window on video features and targets.
for window_start in range(0, video_len, self.sliding_window):
video_start = 0
video_end = min(video_len - window_start, self.sliding_window_size)
video_clip = {"start": [video_start], "end": [video_end]}
vfeat, vmask = self._build_video_seq(
video_feature[window_start: window_start + video_end],
video_clip
)
# covers video length only.
target = torch.full_like(vmask, -100, dtype=torch.long)
target[vmask] = 0
for start, end, label_id in zip(starts, ends, label_ids):
if (window_start < end) and (start < (window_start + video_end)):
start_offset = max(0, math.floor(start) - window_start)
end_offset = min(video_end, math.ceil(end) - window_start)
target[start_offset:end_offset] = label_id
vfeats.append(vfeat)
vmasks.append(vmask)
targets.append(target)
if (video_len - window_start) <= self.sliding_window_size:
break
vfeats = torch.stack(vfeats)
vmasks = torch.stack(vmasks)
targets = torch.stack(targets)
video_targets = torch.full((video_len,), 0)
for start, end, label_id in zip(starts, ends, label_ids):
start_offset = max(0, math.floor(start))
end_offset = min(video_len, math.ceil(end))
video_targets[start_offset:end_offset] = label_id
caps = torch.LongTensor(
[[self.cls_token_id, self.sep_token_id,
self.pad_token_id, self.sep_token_id]],
).repeat(vfeats.size(0), 1)
cmasks = torch.BoolTensor(
[[0, 1, 0, 1]] # pad are valid for attention.
).repeat(vfeats.size(0), 1)
return {
"caps": caps,
"cmasks": cmasks,
"vfeats": vfeats, # X for original code.
"vmasks": vmasks,
"targets": targets,
"video_id": video_id,
"video_len": video_len, # for later checking.
"video_targets": video_targets
}
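# Hedged worked example (values invented) of the windowed target construction
# above: with video_len=12, sliding_window=8, sliding_window_size=8 and one
# annotation (start=2.0, end=6.0, label_id=3), the first window covers frames
# 0..7 and gets target[2:6] = 3; the second window starts at frame 8, no
# longer overlaps the annotation, so its valid positions stay 0; padded
# positions beyond the window's video frames keep the ignore index -100.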
class DiDeMoMetaProcessor(MetaProcessor):
"""reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
"""
def __init__(self, config):
super().__init__(config)
assert "test" in self._get_split_path(config), "DiDeMo only supports zero-shot testing for now."
with open(self._get_split_path(config)) as data_file:
json_data = json.load(data_file)
data = []
for record in json_data:
data.append((record["video"], record["description"]))
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class DiDeMoTextProcessor(TextProcessor):
"""reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py
"""
def __call__(self, text):
return self.tokenizer(text, add_special_tokens=False)["input_ids"]
class DiDeMoAligner(DSAligner):
"""
check video length.
"""
def __call__(self, video_id, video_feature, text_feature):
# print(video_feature.shape[0])
return super().__call__(video_id, video_feature, text_feature)
|
|
from sqlalchemy.orm import subqueryload, joinedload
from sqlalchemy.orm.exc import NoResultFound
from dataactcore.models.baseInterface import BaseInterface
from dataactvalidator.models.validationModels import TASLookup, Rule, RuleType, FileColumn, FileType, FieldType, MultiFieldRule, MultiFieldRuleType, RuleTiming
from dataactvalidator.filestreaming.fieldCleaner import FieldCleaner
from dataactcore.config import CONFIG_DB
class ValidatorValidationInterface(BaseInterface):
""" Manages all interaction with the validation database """
dbConfig = CONFIG_DB
dbName = dbConfig['validator_db_name']
Session = None
engine = None
session = None
def __init__(self):
self.dbName = self.dbConfig['validator_db_name']
super(ValidatorValidationInterface, self).__init__()
@classmethod
def getCredDict(cls):
""" Return db credentials. """
credDict = {
'username': CONFIG_DB['username'],
'password': CONFIG_DB['password'],
'host': CONFIG_DB['host'],
'port': CONFIG_DB['port'],
'dbBaseName': CONFIG_DB['base_db_name']
}
return credDict
@staticmethod
def getDbName():
""" Return database name"""
return ValidatorValidationInterface.dbName
def getSession(self):
return self.session
def deleteTAS(self) :
"""
Removes all entries from the TAS table
"""
queryResult = self.session.query(TASLookup).delete(synchronize_session='fetch')
self.session.commit()
def addTAS(self,ata,aid,bpoa,epoa,availability,main,sub):
"""
Add a TAS to the validation database if it does not exist.
This method can be slow.
Args:
ata -- allocation transfer agency
aid -- agency identifier
bpoa -- beginning period of availability
epoa -- ending period of availability
availability -- availability type code
main -- main account code
sub -- sub account code
"""
queryResult = self.session.query(TASLookup).\
filter(TASLookup.allocation_transfer_agency == ata).\
filter(TASLookup.agency_identifier == aid).\
filter(TASLookup.beginning_period_of_availability == bpoa).\
filter(TASLookup.ending_period_of_availability == epoa).\
filter(TASLookup.availability_type_code == availability).\
filter(TASLookup.main_account_code == main).\
filter(TASLookup.sub_account_code == sub).all()
if ( len(queryResult) == 0) :
tas = TASLookup()
tas.allocation_transfer_agency =ata
tas.agency_identifier=aid
tas.beginning_period_of_availability = bpoa
tas.ending_period_of_availability = epoa
tas.availability_type_code = availability
tas.main_account_code = main
tas.sub_account_code = sub
self.session.add(tas)
self.session.commit()
return True
return False
def addColumnByFileType(self,fileType,fieldName,required,field_type):
"""
Adds a new column to the schema
Args:
fileType -- One of the set of valid types of files (e.g. Award, AwardFinancial)
fieldName -- The name of the schema column
required -- marks whether data in the column is always required
field_type -- sets the type of data allowed in the column
Returns:
ID of new column
"""
fileId = self.getFileId(fileType)
if(fileId is None) :
raise ValueError("Filetype does not exist")
newColumn = FileColumn()
newColumn.required = False
newColumn.name = fieldName
newColumn.file_id = fileId
field_type = field_type.upper()
types = self.getDataTypes()
#Allow for other names
if(field_type == "STR") :
field_type = "STRING"
elif(field_type == "FLOAT") :
field_type = "DECIMAL"
elif(field_type == "BOOL"):
field_type = "BOOLEAN"
#Check types
if field_type in types :
newColumn.field_types_id = types[field_type]
else :
raise ValueError("".join(["Type ",field_type," is not vaild for ",str(fieldName)]))
#Check Required
required = required.upper()
if( required in ["TRUE","FALSE"]) :
if( required == "TRUE") :
newColumn.required = True
else :
raise ValueError("".join(["Required is not boolean for ",str(fieldName)]))
# Save
self.session.add(newColumn)
self.session.commit()
return newColumn.file_column_id
def getDataTypes(self) :
""""
Returns a dictionary of data types that contains the id of the types
"""
dataTypes = {}
queryResult = self.session.query(FieldType).all()
for column in queryResult :
dataTypes[column.name] = column.field_type_id
return dataTypes
def removeColumnsByFileType(self,fileType) :
"""
Removes the schema for a file
Args:
fileType -- One of the set of valid types of files (e.g. Award, AwardFinancial)
"""
fileId = self.getFileId(fileType)
if(fileId is None) :
raise ValueError("Filetype does not exist")
queryResult = self.session.query(FileColumn).filter(FileColumn.file_id == fileId).delete(synchronize_session='fetch')
self.session.commit()
def removeRulesByFileType(self,fileType) :
"""
Removes the rules for a file
Args:
fileType -- One of the set of valid types of files (e.g. Award, AwardFinancial)
"""
fileId = self.getFileId(fileType)
if(fileId is None) :
raise ValueError("Filetype does not exist")
# Get set of file columns for this file
columns = self.session.query(FileColumn).filter(FileColumn.file_id == fileId).all()
# Get list of ids for file columns
columnIds = []
for column in columns:
columnIds.append(column.file_column_id)
if(len(columnIds) > 0):
# Delete all rules for those columns
self.session.query(Rule).filter(Rule.file_column_id.in_(columnIds)).delete(synchronize_session="fetch")
# Delete multi field rules
self.session.query(MultiFieldRule).filter(MultiFieldRule.file_id == fileId).delete(synchronize_session="fetch")
self.session.commit()
#raise Exception("Check table, rules removed for file " + str(fileId))
def getFieldsByFileList(self, fileType):
""" Returns a list of valid field names that can appear in this type of file
Args:
fileType -- One of the set of valid types of files (e.g. Award, AwardFinancial)
Returns:
list of names
"""
fileId = self.getFileId(fileType)
returnList = []
if(fileId is None) :
raise ValueError("Filetype does not exist")
queryResult = self.session.query(FileColumn).filter(FileColumn.file_id == fileId).all()
for result in queryResult:
result.name = FieldCleaner.cleanString(result.name) # Standardize field names
return queryResult
def getFieldsByFile(self, fileType):
""" Returns a dict of valid field names that can appear in this type of file
Args:
fileType -- One of the set of valid types of files (e.g. Award, AwardFinancial)
Returns:
dict with field names as keys and values are ORM object FileColumn
"""
returnDict = {}
fileId = self.getFileId(fileType)
if(fileId is None) :
raise ValueError("File type does not exist")
queryResult = self.session.query(FileColumn).options(subqueryload("field_type")).filter(FileColumn.file_id == fileId).all()
for column in queryResult :
returnDict[FieldCleaner.cleanString(column.name)] = column
return returnDict
def getFileId(self, filename) :
""" Retrieves ID for specified file type
Args:
filename: Type of file to get ID for
Returns:
ID if file type found, or None if file type is not found
"""
query = self.session.query(FileType).filter(FileType.name== filename)
return self.runUniqueQuery(query,"No ID for specified file type","Conflicting IDs for specified file type").file_id
def getRulesByFile(self, fileType) :
"""
Arguments:
fileType -- One of the set of valid types of files (e.g. Award, AwardFinancial)
returns a list of rules
"""
fileId = self.getFileId(fileType)
if(fileId is None) :
raise ValueError("Filetype does not exist")
rules = self.session.query(Rule).options(joinedload("rule_type")).options(joinedload("file_column")).filter(FileColumn.file_id == fileId).all()
return rules
def addRule(self, columnId, ruleTypeText, ruleText, description, rule_timing = 1, rule_label = None):
"""
Args:
columnId: ID of column to add rule for
ruleTypeText: Specifies which type of rule by one of the names in the rule_type table
ruleText: Usually a number to compare to, e.g. length or value to be equal to
Returns:
True if successful
"""
if rule_timing is None or rule_timing == "":
# Use default value if timing is unspecified
rule_timing = 1
newRule = Rule(file_column_id = columnId, rule_type_id = self.getRuleType(ruleTypeText), rule_text_1 = ruleText,
description = description, rule_timing_id = rule_timing, rule_label = rule_label)
self.session.add(newRule)
self.session.commit()
return True
def addMultiFieldRule(self,fileId, ruleTypeText, ruleTextOne, ruleTextTwo, description, ruleLabel = None, ruleTiming = 1):
"""
Args:
fileId: Which file this rule applies to
ruleTypeText: type for this rule
ruleTextOne: definition of rule
ruleTextTwo: definition of rule
description: readable explanation of rule
Returns:
True if successful
"""
newRule = MultiFieldRule(file_id = fileId, multi_field_rule_type_id = self.getMultiFieldRuleType(ruleTypeText),
rule_text_1 = ruleTextOne, rule_text_2 = ruleTextTwo, description = description,
rule_label = ruleLabel, rule_timing_id = ruleTiming)
self.session.add(newRule)
self.session.commit()
return True
def getMultiFieldRuleByLabel(self, label):
""" Find multi field rule by label """
ruleQuery = self.session.query(MultiFieldRule).filter(MultiFieldRule.rule_label == label)
return self.runUniqueQuery(ruleQuery,"Rule label not found", "Multiple rules match specified label")
def getMultiFieldRulesByFile(self, fileType):
"""
Args:
fileType: Which type of file to get rules for
Returns:
list of MultiFieldRule objects
"""
fileId = self.getFileId(fileType)
return self.session.query(MultiFieldRule).filter(MultiFieldRule.file_id == fileId).filter(MultiFieldRule.rule_timing_id == self.getRuleTimingIdByName("file_validation")).all()
def getMultiFieldRulesByTiming(self, timing):
"""
Args:
timing: Name of the rule timing to get rules for
Returns:
list of MultiFieldRule objects
"""
timingId = self.getRuleTimingIdByName(timing)
return self.session.query(MultiFieldRule).filter(MultiFieldRule.rule_timing_id == timingId).all()
def getColumnId(self, fieldName, fileType):
""" Find file column given field name and file type
Args:
fieldName: Field to search for
fileType: Which type of file this field is associated with
Returns:
ID for file column if found, otherwise raises exception
"""
fileId = self.getFileId(fileType)
column = self.session.query(FileColumn).filter(FileColumn.name == fieldName.lower()).filter(FileColumn.file_id == fileId)
return self.runUniqueQuery(column,"No field found with that name for that file type", "Multiple fields with that name for that file type").file_column_id
def getColumnLength(self,fieldName, fileId):
""" If there is a length rule for this field, return the max length. Otherwise, return None. """
columnId = self.getColumnId(fieldName,fileId)
# Get length rules for this column
query = self.session.query(Rule).filter(Rule.file_column_id == columnId).filter(Rule.rule_type_id == 6)
try:
rule = self.runUniqueQuery(query,False,"Multiple length rules for this column")
except NoResultFound as e:
# No length rule for this column
return None
return int(float(rule.rule_text_1)) # Going through float in case of decimal value
def getRuleType(self,typeName):
""" Get rule ID for specified rule type
Arguments:
typeName - name of rule type (string)
Returns:
ID for rule type (int)
"""
return self.getIdFromDict(RuleType,"TYPE_DICT","name",typeName.upper(),"rule_type_id")
def getMultiFieldRuleType(self,typeName):
""" Get rule ID for specified multi-field rule type
Arguments:
typeName - name of rule type (string)
Returns:
ID for rule type (int)
"""
return self.getIdFromDict(MultiFieldRuleType,"TYPE_DICT","name",typeName.upper(),"multi_field_rule_type_id")
def populateFile(self,column):
""" Populate file object in the ORM for the specified FileColumn object
Arguments:
column - FileColumn object to get File object for
"""
column.file = self.session.query(FileType).filter(FileType.file_id == column.file_id)[0]
def getRuleTimingIdByName(self,timingName):
""" Get rule ID for specified multi-field rule type
Arguments:
typeName - name of rule type (string)
Returns:
ID for rule type (int)
"""
return self.getIdFromDict(RuleTiming,"TIMING_DICT","name",timingName.lower(),"rule_timing_id")
def getRuleByLabel(self,label):
query = self.session.query(Rule).options(joinedload("file_column")).filter(Rule.rule_label == label)
return self.runUniqueQuery(query,"No rule with that label","Multiple rules have that label")
def getFieldTypeById(self, id):
return self.getNameFromDict(FieldType,"TYPE_DICT","name",id,"field_type_id")
def getFieldNameByColId(self, id):
int(id) # Raise appropriate error if id is not an int
query = self.session.query(FileColumn).filter(FileColumn.file_column_id == id)
column = self.runUniqueQuery(query,"No column found with that ID", "Multiple columns found with that ID")
return column.name
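# Hedged usage sketch (not part of the original module; assumes a configured
# validator database, an existing "award" file type, and a "LENGTH" rule type):
#
#     interface = ValidatorValidationInterface()
#     col_id = interface.addColumnByFileType("award", "fain", "TRUE", "STR")
#     interface.addRule(col_id, "LENGTH", "30", "FAIN is at most 30 characters")
#     print(interface.getFieldsByFile("award"))    # dict: cleaned name -> FileColumn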
|
|
'''
Adapter tests
=============
'''
import unittest
from kivy.uix.listview import SelectableView
from kivy.uix.listview import ListItemButton
from kivy.uix.listview import ListItemLabel
from kivy.uix.listview import CompositeListItem
from kivy.uix.label import Label
from kivy.adapters.models import SelectableDataItem
from kivy.adapters.adapter import Adapter
from kivy.adapters.simplelistadapter import SimpleListAdapter
from kivy.adapters.listadapter import ListAdapter
from kivy.adapters.dictadapter import DictAdapter
from kivy.properties import BooleanProperty
from kivy.properties import StringProperty
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.compat import string_types
from nose.tools import raises
# The following integers_dict and fruit categories / fruit data dictionaries
# are from kivy/examples/widgets/lists/fixtures.py, and the classes are from
# examples there.
# ----------------------------------------------------------------------------
# A dictionary of dicts, with only the minimum required is_selected attribute,
# for use with examples using a simple list of integers in a list view.
integers_dict = \
{str(i): {'text': str(i), 'is_selected': False} for i in range(100)}
# ----------------------------------------------------------------------------
# A dataset of fruit category and fruit data for use in examples.
#
# Data from http://www.fda.gov/Food/LabelingNutrition/\
# FoodLabelingGuidanceRegulatoryInformation/\
# InformationforRestaurantsRetailEstablishments/\
# ucm063482.htm
#
# Available items for import are:
#
# fruit_categories
# fruit_data_attributes
# fruit_data_attribute_units
# fruit_data_list_of_dicts
# fruit_data
#
fruit_categories = \
{'Melons': {'name': 'Melons',
'fruits': ['Cantaloupe', 'Honeydew', 'Watermelon'],
'is_selected': False},
'Tree Fruits': {'name': 'Tree Fruits',
'fruits': ['Apple', 'Avocado', 'Banana', 'Nectarine',
'Peach', 'Pear', 'Pineapple', 'Plum',
'Cherry'],
'is_selected': False},
'Citrus Fruits': {'name': 'Citrus Fruits',
'fruits': ['Grapefruit', 'Lemon', 'Lime', 'Orange',
'Tangerine'],
'is_selected': False},
'Other Fruits': {'name': 'Other Fruits',
'fruits': ['Grape', 'Kiwifruit',
'Strawberry'],
'is_selected': False}}
fruit_data_list_of_dicts = \
[{'name':'Apple',
'Serving Size': '1 large (242 g/8 oz)',
'data': [130, 0, 0, 0, 0, 0, 260, 7, 34, 11, 5, 20, 25, 1, 2, 8, 2, 2],
'is_selected': False},
{'name':'Avocado',
'Serving Size': '1/5 medium (30 g/1.1 oz)',
'data': [50, 35, 4.5, 7, 0, 0, 140, 4, 3, 1, 1, 4, 0, 1, 0, 4, 0, 2],
'is_selected': False},
{'name':'Banana',
'Serving Size': '1 medium (126 g/4.5 oz)',
'data': [110, 0, 0, 0, 0, 0, 450, 13, 30, 10, 3, 12, 19, 1, 2, 15, 0, 2],
'is_selected': False},
{'name':'Cantaloupe',
'Serving Size': '1/4 medium (134 g/4.8 oz)',
'data': [50, 0, 0, 0, 20, 1, 240, 7, 12, 4, 1, 4, 11, 1, 120, 80, 2, 2],
'is_selected': False},
{'name':'Grapefruit',
'Serving Size': '1/2 medium (154 g/5.5 oz)',
'data': [60, 0, 0, 0, 0, 0, 160, 5, 15, 5, 2, 8, 11, 1, 35, 100, 4, 0],
'is_selected': False},
{'name':'Grape',
'Serving Size': '3/4 cup (126 g/4.5 oz)',
'data': [90, 0, 0, 0, 15, 1, 240, 7, 23, 8, 1, 4, 20, 0, 0, 2, 2, 0],
'is_selected': False},
{'name':'Honeydew',
'Serving Size': '1/10 medium melon (134 g/4.8 oz)',
'data': [50, 0, 0, 0, 30, 1, 210, 6, 12, 4, 1, 4, 11, 1, 2, 45, 2, 2],
'is_selected': False},
{'name':'Kiwifruit',
'Serving Size': '2 medium (148 g/5.3 oz)',
'data': [90, 10, 1, 2, 0, 0, 450, 13, 20, 7, 4, 16, 13, 1, 2, 240, 4, 2],
'is_selected': False},
{'name':'Lemon',
'Serving Size': '1 medium (58 g/2.1 oz)',
'data': [15, 0, 0, 0, 0, 0, 75, 2, 5, 2, 2, 8, 2, 0, 0, 40, 2, 0],
'is_selected': False},
{'name':'Lime',
'Serving Size': '1 medium (67 g/2.4 oz)',
'data': [20, 0, 0, 0, 0, 0, 75, 2, 7, 2, 2, 8, 0, 0, 0, 35, 0, 0],
'is_selected': False},
{'name':'Nectarine',
'Serving Size': '1 medium (140 g/5.0 oz)',
'data': [60, 5, 0.5, 1, 0, 0, 250, 7, 15, 5, 2, 8, 11, 1, 8, 15, 0, 2],
'is_selected': False},
{'name':'Orange',
'Serving Size': '1 medium (154 g/5.5 oz)',
'data': [80, 0, 0, 0, 0, 0, 250, 7, 19, 6, 3, 12, 14, 1, 2, 130, 6, 0],
'is_selected': False},
{'name':'Peach',
'Serving Size': '1 medium (147 g/5.3 oz)',
'data': [60, 0, 0.5, 1, 0, 0, 230, 7, 15, 5, 2, 8, 13, 1, 6, 15, 0, 2],
'is_selected': False},
{'name':'Pear',
'Serving Size': '1 medium (166 g/5.9 oz)',
'data': [100, 0, 0, 0, 0, 0, 190, 5, 26, 9, 6, 24, 16, 1, 0, 10, 2, 0],
'is_selected': False},
{'name':'Pineapple',
'Serving Size': '2 slices, 3" diameter, 3/4" thick (112 g/4 oz)',
'data': [50, 0, 0, 0, 10, 0, 120, 3, 13, 4, 1, 4, 10, 1, 2, 50, 2, 2],
'is_selected': False},
{'name':'Plum',
'Serving Size': '2 medium (151 g/5.4 oz)',
'data': [70, 0, 0, 0, 0, 0, 230, 7, 19, 6, 2, 8, 16, 1, 8, 10, 0, 2],
'is_selected': False},
{'name':'Strawberry',
'Serving Size': '8 medium (147 g/5.3 oz)',
'data': [50, 0, 0, 0, 0, 0, 170, 5, 11, 4, 2, 8, 8, 1, 0, 160, 2, 2],
'is_selected': False},
{'name':'Cherry',
'Serving Size': '21 cherries; 1 cup (140 g/5.0 oz)',
'data': [100, 0, 0, 0, 0, 0, 350, 10, 26, 9, 1, 4, 16, 1, 2, 15, 2, 2],
'is_selected': False},
{'name':'Tangerine',
'Serving Size': '1 medium (109 g/3.9 oz)',
'data': [50, 0, 0, 0, 0, 0, 160, 5, 13, 4, 2, 8, 9, 1, 6, 45, 4, 0],
'is_selected': False},
{'name':'Watermelon',
'Serving Size': '1/18 medium melon; 2 cups diced pieces (280 g/10.0 oz)',
'data': [80, 0, 0, 0, 0, 0, 270, 8, 21, 7, 1, 4, 20, 1, 30, 25, 2, 4],
'is_selected': False}]
fruit_data_attributes = ['(gram weight/ ounce weight)',
'Calories',
'Calories from Fat',
'Total Fat',
'Sodium',
'Potassium',
'Total Carbo-hydrate',
'Dietary Fiber',
'Sugars',
'Protein',
'Vitamin A',
'Vitamin C',
'Calcium',
'Iron']
fruit_data_attribute_units = ['(g)',
'(%DV)',
'(mg)',
'(%DV)',
'(mg)',
'(%DV)',
'(g)',
'(%DV)',
'(g)(%DV)',
'(g)',
'(g)',
'(%DV)',
'(%DV)',
'(%DV)',
'(%DV)']
attributes_and_units = \
dict(list(zip(fruit_data_attributes, fruit_data_attribute_units)))
fruit_data = {}
for fruit_record in fruit_data_list_of_dicts:
fruit_data[fruit_record['name']] = {}
fruit_data[fruit_record['name']] = \
dict({'name': fruit_record['name'],
'Serving Size': fruit_record['Serving Size'],
'is_selected': fruit_record['is_selected']},
**dict(list(zip(list(attributes_and_units.keys()),
fruit_record['data']))))
class CategoryItem(SelectableDataItem):
def __init__(self, **kwargs):
super(CategoryItem, self).__init__(**kwargs)
self.name = kwargs.get('name', '')
self.fruits = kwargs.get('fruits', [])
self.is_selected = kwargs.get('is_selected', False)
class FruitItem(SelectableDataItem):
def __init__(self, **kwargs):
super(FruitItem, self).__init__(**kwargs)
self.name = kwargs.get('name', '')
self.serving_size = kwargs.get('Serving Size', '')
self.data = kwargs.get('data', [])
self.is_selected = kwargs.get('is_selected', False)
def reset_to_defaults(db_dict):
for key in db_dict:
db_dict[key]['is_selected'] = False
category_data_items = \
[CategoryItem(**fruit_categories[c]) for c in sorted(fruit_categories)]
fruit_data_items = \
[FruitItem(**fruit_dict) for fruit_dict in fruit_data_list_of_dicts]
class FruitsListAdapter(ListAdapter):
def __init__(self, **kwargs):
kwargs['args_converter'] = \
lambda row_index, selectable: {'text': selectable.name,
'size_hint_y': None,
'height': 25}
super(FruitsListAdapter, self).__init__(**kwargs)
def fruit_category_changed(self, fruit_categories_adapter, *args):
if len(fruit_categories_adapter.selection) == 0:
self.data = []
return
category = \
fruit_categories[str(fruit_categories_adapter.selection[0])]
self.data = \
[f for f in fruit_data_items if f.name in category['fruits']]
# [TODO] Needed if setup.py run normally, after merge to master?
Factory.register('SelectableView', cls=SelectableView)
Factory.register('ListItemButton', cls=ListItemButton)
Builder.load_string('''
[CustomListItem@SelectableView+BoxLayout]:
index: ctx.index
size_hint_y: ctx.size_hint_y
height: ctx.height
is_selected: ctx.is_selected
ListItemButton:
index: ctx.index
text: ctx.text
is_selected: ctx.is_selected
''')
Builder.load_string('''
[CustomSimpleListItem@SelectableView+BoxLayout]:
size_hint_y: ctx.size_hint_y
height: ctx.height
ListItemButton:
text: ctx.text
''')
class AdaptersTestCase(unittest.TestCase):
def setUp(self):
self.args_converter = lambda row_index, rec: {'text': rec['name'],
'size_hint_y': None,
'height': 25}
self.integers_dict = \
{str(i): {'text': str(i), 'is_selected': False} for i in range(100)}
# The third of the five cls_dicts items has no kwargs and no text, so
# rec['text'] will be set for it. Likewise, the fourth item has kwargs,
# but no 'text' key/value, so it should receive the same treatment.
self.composite_args_converter = \
lambda row_index, rec: \
{'text': rec['text'],
'size_hint_y': None,
'height': 25,
'cls_dicts': [{'cls': ListItemButton,
'kwargs': {'text': rec['text']}},
{'cls': ListItemLabel,
'kwargs': {'text': "#-{0}".format(rec['text']),
'is_representing_cls': True}},
{'cls': ListItemButton},
{'cls': ListItemButton,
'kwargs': {'some key': 'some value'}},
{'cls': ListItemButton,
'kwargs': {'text': rec['text']}}]}
reset_to_defaults(fruit_data)
@raises(Exception)
def test_instantiating_an_adapter_with_neither_cls_nor_template_raises(self):
def dummy_converter():
pass
fruit_categories_list_adapter = \
Adapter(data='cat',
args_converter=dummy_converter)
def test_instantiating_an_adapter_with_neither_cls_nor_template(self):
def dummy_converter():
pass
with self.assertRaises(Exception) as cm:
fruit_categories_list_adapter = \
Adapter(data='cat',
args_converter=dummy_converter)
msg = 'adapter: a cls or template must be defined'
self.assertEqual(str(cm.exception), msg)
with self.assertRaises(Exception) as cm:
fruit_categories_list_adapter = \
Adapter(data='cat',
args_converter=dummy_converter,
cls=None)
msg = 'adapter: a cls or template must be defined'
self.assertEqual(str(cm.exception), msg)
with self.assertRaises(Exception) as cm:
fruit_categories_list_adapter = \
Adapter(data='cat',
args_converter=dummy_converter,
template=None)
msg = 'adapter: a cls or template must be defined'
self.assertEqual(str(cm.exception), msg)
with self.assertRaises(Exception) as cm:
fruit_categories_list_adapter = \
Adapter(data='cat',
args_converter=dummy_converter,
cls=None,
template=None)
msg = 'adapter: cannot use cls and template at the same time'
self.assertEqual(str(cm.exception), msg)
def test_instantiating_an_adapter_with_no_data(self):
# with no data
with self.assertRaises(Exception) as cm:
adapter = Adapter()
msg = 'adapter: input must include data argument'
self.assertEqual(str(cm.exception), msg)
def test_instantiating_an_adapter_with_both_cls_and_template(self):
from kivy.adapters.args_converters import list_item_args_converter
with self.assertRaises(Exception) as cm:
adapter = Adapter(data='cat',
args_converter=list_item_args_converter,
template='CustomListItem',
cls=ListItemButton)
msg = 'adapter: cannot use cls and template at the same time'
self.assertEqual(str(cm.exception), msg)
def test_instantiating_adapter(self):
from kivy.adapters.args_converters import list_item_args_converter
def dummy_converter():
pass
class Adapter_1(Adapter):
def __init__(self, **kwargs):
kwargs['args_converter'] = dummy_converter
super(Adapter_1, self).__init__(**kwargs)
kwargs = {}
kwargs['data'] = 'cat'
kwargs['args_converter'] = dummy_converter
kwargs['cls'] = ListItemButton
my_adapter = Adapter(**kwargs)
self.assertEqual(my_adapter.args_converter, dummy_converter)
my_adapter = Adapter_1(**kwargs)
self.assertEqual(my_adapter.args_converter, dummy_converter)
kwargs_2 = {}
kwargs_2['data'] = 'cat'
kwargs_2['cls'] = ListItemButton
adapter_2 = Adapter(**kwargs_2)
self.assertEqual(adapter_2.args_converter, list_item_args_converter)
adapter = Adapter(data='cat', cls=Label)
self.assertEqual(adapter.get_data_item(), 'cat')
adapter = Adapter(data=None, cls=Label)
self.assertEqual(adapter.get_data_item(), None)
def test_instantiating_adapter_bind_triggers_to_view(self):
class PetListener(object):
def __init__(self, pet):
self.current_pet = pet
def callback(self, *args):
self.current_pet = args[1]
pet_listener = PetListener('cat')
adapter = Adapter(data='cat', cls=Label)
adapter.bind_triggers_to_view(pet_listener.callback)
self.assertEqual(pet_listener.current_pet, 'cat')
adapter.data = 'dog'
self.assertEqual(pet_listener.current_pet, 'dog')
def test_simple_list_adapter_for_exceptions(self):
# with no data
with self.assertRaises(Exception) as cm:
simple_list_adapter = SimpleListAdapter()
msg = 'list adapter: input must include data argument'
self.assertEqual(str(cm.exception), msg)
# with data of wrong type
with self.assertRaises(Exception) as cm:
simple_list_adapter = SimpleListAdapter(data=dict)
msg = 'list adapter: data must be a tuple or list'
self.assertEqual(str(cm.exception), msg)
def test_simple_list_adapter_for_inherited_list(self):
# Test for issue 1396 : list, tuple and inheritance
class ExtendedList(list):
pass
class ExtendedTuple(tuple):
pass
# Equivalent to asserting that no exception is raised.
simple_list_adapter = SimpleListAdapter(data=ExtendedList(),
template='CustomSimpleListItem')
simple_list_adapter = SimpleListAdapter(data=ExtendedTuple(),
template='CustomSimpleListItem')
def test_simple_list_adapter_with_template(self):
list_item_args_converter = \
lambda row_index, obj: {'text': str(obj),
'size_hint_y': None,
'height': 25}
simple_list_adapter = \
SimpleListAdapter(data=['cat', 'dog'],
args_converter=list_item_args_converter,
template='CustomSimpleListItem')
view = simple_list_adapter.get_view(0)
self.assertEqual(view.__class__.__name__, 'CustomSimpleListItem')
# For coverage of __repr__:
self.assertEqual(type(str(view)), str)
def test_simple_list_adapter_methods(self):
simple_list_adapter = SimpleListAdapter(data=['cat', 'dog'],
cls=Label)
self.assertEqual(simple_list_adapter.get_count(), 2)
self.assertEqual(simple_list_adapter.get_data_item(0), 'cat')
self.assertEqual(simple_list_adapter.get_data_item(1), 'dog')
self.assertIsNone(simple_list_adapter.get_data_item(-1))
self.assertIsNone(simple_list_adapter.get_data_item(2))
view = simple_list_adapter.get_view(0)
self.assertTrue(isinstance(view, Label))
self.assertIsNone(simple_list_adapter.get_view(-1))
self.assertIsNone(simple_list_adapter.get_view(2))
def test_instantiating_list_adapter(self):
str_args_converter = lambda row_index, rec: {'text': rec,
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=['cat', 'dog'],
args_converter=str_args_converter,
cls=ListItemButton)
self.assertEqual([obj for obj in list_adapter.data],
['cat', 'dog'])
self.assertEqual(list_adapter.get_count(), 2)
self.assertEqual(list_adapter.cls, ListItemButton)
self.assertEqual(list_adapter.args_converter, str_args_converter)
self.assertEqual(list_adapter.template, None)
cat_data_item = list_adapter.get_data_item(0)
self.assertEqual(cat_data_item, 'cat')
self.assertTrue(isinstance(cat_data_item, string_types))
view = list_adapter.get_view(0)
self.assertTrue(isinstance(view, ListItemButton))
view = list_adapter.create_view(0)
self.assertTrue(isinstance(view, ListItemButton))
view = list_adapter.create_view(-1)
self.assertIsNone(view)
view = list_adapter.create_view(100)
self.assertIsNone(view)
def test_list_adapter_selection_mode_single(self):
fruit_data_items[0].is_selected = True
list_item_args_converter = \
lambda row_index, selectable: {'text': selectable.name,
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=list_item_args_converter,
selection_mode='single',
propagate_selection_to_data=True,
allow_empty_selection=False,
cls=ListItemButton)
self.assertEqual(sorted([obj.name for obj in list_adapter.data]),
['Apple', 'Avocado', 'Banana', 'Cantaloupe', 'Cherry', 'Grape',
'Grapefruit', 'Honeydew', 'Kiwifruit', 'Lemon', 'Lime',
'Nectarine', 'Orange', 'Peach', 'Pear', 'Pineapple', 'Plum',
'Strawberry', 'Tangerine', 'Watermelon'])
self.assertEqual(list_adapter.cls, ListItemButton)
self.assertEqual(list_adapter.args_converter,
list_item_args_converter)
self.assertEqual(list_adapter.template, None)
apple_data_item = list_adapter.get_data_item(0)
self.assertTrue(isinstance(apple_data_item, FruitItem))
self.assertTrue(isinstance(apple_data_item, SelectableDataItem))
self.assertTrue(apple_data_item.is_selected)
view = list_adapter.get_view(0)
self.assertTrue(isinstance(view, ListItemButton))
self.assertTrue(view.is_selected)
def test_list_adapter_with_dict_data(self):
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters_dicts = \
[{'text': l, 'is_selected': False} for l in alphabet]
letters_dicts[0]['is_selected'] = True
list_item_args_converter = lambda row_index, rec: {'text': rec['text'],
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=letters_dicts,
args_converter=list_item_args_converter,
selection_mode='single',
propagate_selection_to_data=True,
allow_empty_selection=False,
cls=ListItemButton)
self.assertEqual(list_adapter.cls, ListItemButton)
self.assertEqual(list_adapter.args_converter,
list_item_args_converter)
self.assertEqual(list_adapter.template, None)
apple_data_item = list_adapter.get_data_item(0)
self.assertTrue(isinstance(apple_data_item, dict))
self.assertTrue(apple_data_item['is_selected'])
view = list_adapter.get_view(0)
self.assertTrue(isinstance(view, ListItemButton))
self.assertTrue(view.is_selected)
def test_list_adapter_with_custom_data_item_class(self):
class DataItem(object):
def __init__(self, text='', is_selected=False):
self.text = text
self.is_selected = is_selected
data_items = []
data_items.append(DataItem(text='cat'))
data_items.append(DataItem(text='dog'))
data_items.append(DataItem(text='frog'))
list_item_args_converter = lambda row_index, obj: {'text': obj.text,
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=data_items,
args_converter=list_item_args_converter,
selection_mode='single',
propagate_selection_to_data=True,
allow_empty_selection=False,
cls=ListItemButton)
data_item = list_adapter.get_data_item(0)
self.assertTrue(isinstance(data_item, DataItem))
self.assertTrue(data_item.is_selected)
view = list_adapter.get_view(0)
self.assertTrue(isinstance(view, ListItemButton))
self.assertTrue(view.is_selected)
def test_list_adapter_with_widget_as_data_item_class(self):
# Use a widget as data item.
class DataItem(Label):
is_selected = BooleanProperty(True)
text = StringProperty('')
class DataItemWithMethod(DataItem):
_is_selected = BooleanProperty(True)
def is_selected(self):
return self._is_selected
class BadDataItem(Label):
text = StringProperty('')
data_items = []
data_items.append(DataItem(text='cat'))
data_items.append(DataItemWithMethod(text='dog'))
data_items.append(BadDataItem(text='frog'))
list_item_args_converter = lambda row_index, obj: {'text': obj.text,
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=data_items,
args_converter=list_item_args_converter,
selection_mode='single',
propagate_selection_to_data=True,
allow_empty_selection=False,
cls=ListItemButton)
self.assertEqual(list_adapter.cls, ListItemButton)
self.assertEqual(list_adapter.args_converter,
list_item_args_converter)
self.assertEqual(list_adapter.template, None)
data_item = list_adapter.get_data_item(0)
self.assertTrue(isinstance(data_item, DataItem))
self.assertTrue(data_item.is_selected)
view = list_adapter.get_view(0)
self.assertTrue(isinstance(view, ListItemButton))
self.assertTrue(view.is_selected)
view = list_adapter.get_view(1)
self.assertTrue(isinstance(view, ListItemButton))
self.assertTrue(view.is_selected)
with self.assertRaises(Exception) as cm:
view = list_adapter.get_view(2)
msg = "ListAdapter: unselectable data item for 2"
self.assertEqual(str(cm.exception), msg)
def test_instantiating_list_adapter_no_args_converter(self):
list_adapter = \
ListAdapter(data=['cat', 'dog'],
cls=ListItemButton)
self.assertEqual(list_adapter.get_count(), 2)
self.assertEqual(list_adapter.cls, ListItemButton)
self.assertIsNotNone(list_adapter.args_converter)
self.assertEqual(list_adapter.template, None)
cat_data_item = list_adapter.get_data_item(0)
self.assertEqual(cat_data_item, 'cat')
self.assertTrue(isinstance(cat_data_item, string_types))
view = list_adapter.get_view(0)
self.assertTrue(isinstance(view, ListItemButton))
view = list_adapter.create_view(0)
self.assertTrue(isinstance(view, ListItemButton))
view = list_adapter.create_view(-1)
self.assertIsNone(view)
view = list_adapter.create_view(100)
self.assertIsNone(view)
def test_list_adapter_selection_mode_none(self):
list_item_args_converter = \
lambda row_index, selectable: {'text': selectable.name,
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=list_item_args_converter,
selection_mode='none',
allow_empty_selection=True,
cls=ListItemButton)
self.assertEqual(sorted([obj.name for obj in list_adapter.data]),
['Apple', 'Avocado', 'Banana', 'Cantaloupe', 'Cherry', 'Grape',
'Grapefruit', 'Honeydew', 'Kiwifruit', 'Lemon', 'Lime',
'Nectarine', 'Orange', 'Peach', 'Pear', 'Pineapple', 'Plum',
'Strawberry', 'Tangerine', 'Watermelon'])
self.assertEqual(list_adapter.cls, ListItemButton)
self.assertEqual(list_adapter.args_converter, list_item_args_converter)
self.assertEqual(list_adapter.template, None)
apple_data_item = list_adapter.get_data_item(0)
self.assertTrue(isinstance(apple_data_item, FruitItem))
def test_list_adapter_selection_mode_multiple_select_list(self):
list_item_args_converter = \
lambda row_index, selectable: {'text': selectable.name,
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=fruit_data_items,
args_converter=list_item_args_converter,
selection_mode='multiple',
allow_empty_selection=True,
cls=ListItemButton)
views = []
views.append(list_adapter.get_view(0))
views.append(list_adapter.get_view(1))
views.append(list_adapter.get_view(2))
self.assertEqual(len(views), 3)
list_adapter.select_list(views)
self.assertEqual(len(list_adapter.selection), 3)
views = []
views.append(list_adapter.get_view(3))
views.append(list_adapter.get_view(4))
views.append(list_adapter.get_view(5))
self.assertEqual(len(views), 3)
list_adapter.select_list(views)
self.assertEqual(len(list_adapter.selection), 6)
views = []
views.append(list_adapter.get_view(0))
views.append(list_adapter.get_view(1))
views.append(list_adapter.get_view(2))
self.assertEqual(len(views), 3)
list_adapter.select_list(views, extend=False)
self.assertEqual(len(list_adapter.selection), 3)
list_adapter.deselect_list(views)
self.assertEqual(len(list_adapter.selection), 0)
def test_list_adapter_with_dicts_as_data(self):
bare_minimum_dicts = \
[{'text': str(i), 'is_selected': False} for i in range(100)]
args_converter = lambda row_index, rec: {'text': rec['text'],
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=bare_minimum_dicts,
args_converter=args_converter,
selection_mode='none',
allow_empty_selection=True,
cls=ListItemButton)
self.assertEqual([rec['text'] for rec in list_adapter.data],
[str(i) for i in range(100)])
self.assertEqual(list_adapter.cls, ListItemButton)
self.assertEqual(list_adapter.args_converter, args_converter)
data_item = list_adapter.get_data_item(0)
self.assertIsInstance(data_item, dict)
# Utility calls for coverage:
self.assertEqual(list_adapter.get_count(), 100)
# Bad index:
self.assertIsNone(list_adapter.get_data_item(-1))
self.assertIsNone(list_adapter.get_data_item(101))
def test_list_adapter_with_dicts_as_data_multiple_selection(self):
bare_minimum_dicts = \
[{'text': str(i), 'is_selected': False} for i in range(100)]
args_converter = lambda row_index, rec: {'text': rec['text'],
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=bare_minimum_dicts,
args_converter=args_converter,
selection_mode='multiple',
allow_empty_selection=False,
cls=ListItemButton)
self.assertEqual([rec['text'] for rec in list_adapter.data],
[str(i) for i in range(100)])
self.assertEqual(list_adapter.cls, ListItemButton)
self.assertEqual(list_adapter.args_converter, args_converter)
for i in range(50):
list_adapter.handle_selection(list_adapter.get_view(i))
self.assertEqual(len(list_adapter.selection), 50)
# This is for code coverage:
list_adapter.selection_mode = 'none'
list_adapter.handle_selection(list_adapter.get_view(25))
list_adapter.selection_mode = 'single'
list_adapter.handle_selection(list_adapter.get_view(24))
list_adapter.handle_selection(list_adapter.get_view(24))
def test_list_adapter_bindings(self):
list_item_args_converter = \
lambda row_index, selectable: {'text': selectable.name,
'size_hint_y': None,
'height': 25}
fruit_categories_list_adapter = \
ListAdapter(data=category_data_items,
args_converter=list_item_args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
first_category_fruits = \
fruit_categories[list(fruit_categories.keys())[0]]['fruits']
first_category_fruit_data_items = \
[f for f in fruit_data_items if f.name in first_category_fruits]
fruits_list_adapter = \
FruitsListAdapter(data=first_category_fruit_data_items,
args_converter=list_item_args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
fruit_categories_list_adapter.bind(
on_selection_change=fruits_list_adapter.fruit_category_changed)
def test_instantiating_list_adapters_with_both_cls_and_template(self):
list_item_args_converter = \
lambda row_index, rec: {'text': rec['text'],
'is_selected': rec['is_selected'],
'size_hint_y': None,
'height': 25}
# First, for a plain Adapter:
with self.assertRaises(Exception) as cm:
fruit_categories_list_adapter = \
Adapter(data='cat',
args_converter=list_item_args_converter,
template='CustomListItem',
cls=ListItemButton)
msg = 'adapter: cannot use cls and template at the same time'
self.assertEqual(str(cm.exception), msg)
# And now for a ListAdapter:
with self.assertRaises(Exception) as cm:
fruit_categories_list_adapter = \
ListAdapter(data=category_data_items,
args_converter=list_item_args_converter,
selection_mode='single',
allow_empty_selection=False,
template='CustomListItem',
cls=ListItemButton)
msg = 'adapter: cannot use cls and template at the same time'
self.assertEqual(str(cm.exception), msg)
def test_view_from_list_adapter(self):
# First with a class.
list_item_args_converter = \
lambda row_index, selectable: {'text': selectable.name,
'size_hint_y': None,
'height': 25}
fruit_categories_list_adapter = \
ListAdapter(data=category_data_items,
args_converter=list_item_args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
view = fruit_categories_list_adapter.get_view(0)
self.assertTrue(isinstance(view, ListItemButton))
# Now with a template.
list_item_args_converter = \
lambda row_index, item: {'text': item.name,
'is_selected': item.is_selected,
'size_hint_y': None,
'height': 25}
fruit_categories_list_adapter = \
ListAdapter(data=category_data_items,
args_converter=list_item_args_converter,
selection_mode='single',
allow_empty_selection=False,
template='CustomListItem')
view = fruit_categories_list_adapter.get_view(0)
self.assertEqual(view.__class__.__name__, 'CustomListItem')
second_view = fruit_categories_list_adapter.get_view(1)
fruit_categories_list_adapter.handle_selection(second_view)
self.assertEqual(len(fruit_categories_list_adapter.selection), 1)
def test_list_adapter_operations_trimming(self):
alphabet = [l for l in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
list_item_args_converter = \
lambda row_index, letter: {'text': letter,
'size_hint_y': None,
'height': 25}
# trim right of sel
alphabet_adapter = ListAdapter(
data=alphabet,
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
a_view = alphabet_adapter.get_view(0)
self.assertEqual(a_view.text, 'A')
alphabet_adapter.handle_selection(a_view)
self.assertEqual(len(alphabet_adapter.selection), 1)
self.assertTrue(a_view.is_selected)
alphabet_adapter.trim_right_of_sel()
self.assertEqual(len(alphabet_adapter.data), 1)
# trim left of sel
alphabet_adapter = ListAdapter(
data=alphabet,
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
z_view = alphabet_adapter.get_view(25)
self.assertEqual(z_view.text, 'Z')
alphabet_adapter.handle_selection(z_view)
self.assertEqual(len(alphabet_adapter.selection), 1)
self.assertTrue(z_view.is_selected)
alphabet_adapter.trim_left_of_sel()
self.assertEqual(len(alphabet_adapter.data), 1)
# trim to sel
alphabet_adapter = ListAdapter(
data=alphabet,
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
g_view = alphabet_adapter.get_view(6)
self.assertEqual(g_view.text, 'G')
alphabet_adapter.handle_selection(g_view)
m_view = alphabet_adapter.get_view(12)
self.assertEqual(m_view.text, 'M')
alphabet_adapter.handle_selection(m_view)
alphabet_adapter.trim_to_sel()
self.assertEqual(len(alphabet_adapter.data), 7)
# cut to sel
alphabet_adapter = ListAdapter(
data=alphabet,
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
g_view = alphabet_adapter.get_view(6)
self.assertEqual(g_view.text, 'G')
alphabet_adapter.handle_selection(g_view)
m_view = alphabet_adapter.get_view(12)
self.assertEqual(m_view.text, 'M')
alphabet_adapter.handle_selection(m_view)
alphabet_adapter.cut_to_sel()
self.assertEqual(len(alphabet_adapter.data), 2)
def test_list_adapter_reset_data(self):
class PetListener(object):
def __init__(self, pet):
self.current_pet = pet
# This should happen as a result of data changing.
def callback(self, *args):
self.current_pet = args[1]
pet_listener = PetListener('cat')
list_item_args_converter = \
lambda row_index, rec: {'text': rec['text'],
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(
data=['cat'],
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
list_adapter.bind_triggers_to_view(pet_listener.callback)
self.assertEqual(pet_listener.current_pet, 'cat')
dog_data = ['dog']
list_adapter.data = dog_data
self.assertEqual(list_adapter.data, ['dog'])
self.assertEqual(pet_listener.current_pet, dog_data)
# Now just change an item.
list_adapter.data[0] = 'cat'
self.assertEqual(list_adapter.data, ['cat'])
self.assertEqual(pet_listener.current_pet, ['cat'])
def test_dict_adapter_composite(self):
item_strings = ["{0}".format(index) for index in range(100)]
# And now the dict adapter, constructed with the item_strings as the
# sorted_keys, the integers_dict (whose records already carry the required
# is_selected boolean) as the data, and the composite args_converter above
# that will operate on each item in the data to produce list item view
# instances from the CompositeListItem class.
dict_adapter = DictAdapter(sorted_keys=item_strings,
data=self.integers_dict,
args_converter=self.composite_args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=CompositeListItem)
self.assertEqual(len(dict_adapter.selection), 1)
view = dict_adapter.get_view(1)
dict_adapter.handle_selection(view)
self.assertEqual(len(dict_adapter.selection), 1)
# test that sorted_keys is built, if not provided.
def test_dict_adapter_no_sorted_keys(self):
dict_adapter = DictAdapter(data=self.integers_dict,
args_converter=self.composite_args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=CompositeListItem)
self.assertEqual(len(dict_adapter.sorted_keys), 100)
self.assertEqual(len(dict_adapter.selection), 1)
view = dict_adapter.get_view(1)
dict_adapter.handle_selection(view)
self.assertEqual(len(dict_adapter.selection), 1)
def test_dict_adapter_bad_sorted_keys(self):
with self.assertRaises(Exception) as cm:
dict_adapter = DictAdapter(
sorted_keys={},
data=self.integers_dict,
args_converter=self.composite_args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=CompositeListItem)
msg = 'DictAdapter: sorted_keys must be tuple or list'
self.assertEqual(str(cm.exception), msg)
def test_instantiating_dict_adapter_bind_triggers_to_view(self):
class PetListener(object):
def __init__(self, pets):
self.current_pets = pets
def callback(self, *args):
self.current_pets = args[1]
pet_listener = PetListener(['cat'])
list_item_args_converter = \
lambda row_index, rec: {'text': rec['text'],
'size_hint_y': None,
'height': 25}
dict_adapter = DictAdapter(sorted_keys=['cat'],
data={'cat': {'text': 'cat', 'is_selected': False},
'dog': {'text': 'dog', 'is_selected': False}},
args_converter=list_item_args_converter,
propagate_selection_to_data=True,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
dict_adapter.bind_triggers_to_view(pet_listener.callback)
self.assertEqual(pet_listener.current_pets, ['cat'])
dict_adapter.sorted_keys = ['dog']
self.assertEqual(pet_listener.current_pets, ['dog'])
def test_dict_adapter_reset_data(self):
class PetListener(object):
def __init__(self, pet):
self.current_pet = pet
# This can happen as a result of sorted_keys changing,
# or data changing.
def callback(self, *args):
self.current_pet = args[1]
pet_listener = PetListener('cat')
list_item_args_converter = \
lambda row_index, rec: {'text': rec['text'],
'size_hint_y': None,
'height': 25}
dict_adapter = DictAdapter(
sorted_keys=['cat'],
data={'cat': {'text': 'cat', 'is_selected': False}},
args_converter=list_item_args_converter,
propagate_selection_to_data=True,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
dict_adapter.bind_triggers_to_view(pet_listener.callback)
self.assertEqual(pet_listener.current_pet, 'cat')
dog_data = {'dog': {'text': 'dog', 'is_selected': False}}
dict_adapter.data = dog_data
self.assertEqual(dict_adapter.sorted_keys, ['dog'])
self.assertEqual(pet_listener.current_pet, dog_data)
cat_dog_data = {'cat': {'text': 'cat', 'is_selected': False},
'dog': {'text': 'dog', 'is_selected': False}}
dict_adapter.data = cat_dog_data
# new data added, sorted_keys are updated with new entries
self.assertIn(dict_adapter.sorted_keys,
(['dog', 'cat'], ['cat', 'dog']))
dict_adapter.sorted_keys = ['cat']
self.assertEqual(pet_listener.current_pet, ['cat'])
# Make some utility calls for coverage:
# 1, because get_count() does len(self.sorted_keys).
self.assertEqual(dict_adapter.get_count(), 1)
# Bad index:
self.assertIsNone(dict_adapter.get_data_item(-1))
self.assertIsNone(dict_adapter.get_data_item(2))
def test_dict_adapter_selection_mode_single_without_propagation(self):
list_item_args_converter = \
lambda row_index, rec: {'text': rec['name'],
'size_hint_y': None,
'height': 25}
dict_adapter = DictAdapter(sorted_keys=sorted(fruit_data.keys()),
data=fruit_data,
args_converter=list_item_args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
self.assertEqual(sorted(dict_adapter.data),
['Apple', 'Avocado', 'Banana', 'Cantaloupe', 'Cherry', 'Grape',
'Grapefruit', 'Honeydew', 'Kiwifruit', 'Lemon', 'Lime',
'Nectarine', 'Orange', 'Peach', 'Pear', 'Pineapple', 'Plum',
'Strawberry', 'Tangerine', 'Watermelon'])
self.assertEqual(dict_adapter.cls, ListItemButton)
self.assertEqual(dict_adapter.args_converter, list_item_args_converter)
self.assertEqual(dict_adapter.template, None)
apple_data_item = dict_adapter.get_data_item(0)
self.assertTrue(isinstance(apple_data_item, dict))
self.assertEqual(apple_data_item['name'], 'Apple')
apple_view = dict_adapter.get_view(0)
self.assertTrue(isinstance(apple_view, ListItemButton))
self.assertEqual(len(dict_adapter.selection), 1)
self.assertTrue(apple_view.is_selected)
self.assertFalse(apple_data_item['is_selected'])
def test_dict_adapter_selection_mode_single_with_propagation(self):
list_item_args_converter = \
lambda row_index, rec: {'text': rec['name'],
'size_hint_y': None,
'height': 25}
dict_adapter = DictAdapter(sorted_keys=sorted(fruit_data.keys()),
data=fruit_data,
args_converter=list_item_args_converter,
propagate_selection_to_data=True,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
self.assertEqual(sorted(dict_adapter.data),
['Apple', 'Avocado', 'Banana', 'Cantaloupe', 'Cherry', 'Grape',
'Grapefruit', 'Honeydew', 'Kiwifruit', 'Lemon', 'Lime',
'Nectarine', 'Orange', 'Peach', 'Pear', 'Pineapple', 'Plum',
'Strawberry', 'Tangerine', 'Watermelon'])
self.assertEqual(dict_adapter.cls, ListItemButton)
self.assertEqual(dict_adapter.args_converter, list_item_args_converter)
self.assertEqual(dict_adapter.template, None)
apple_data_item = dict_adapter.get_data_item(0)
self.assertTrue(isinstance(apple_data_item, dict))
self.assertEqual(apple_data_item['name'], 'Apple')
apple_view = dict_adapter.get_view(0)
self.assertTrue(isinstance(apple_view, ListItemButton))
self.assertEqual(len(dict_adapter.selection), 1)
self.assertTrue(apple_view.is_selected)
self.assertTrue(apple_data_item['is_selected'])
def test_dict_adapter_sorted_keys(self):
list_item_args_converter = \
lambda row_index, rec: {'text': rec['name'],
'size_hint_y': None,
'height': 25}
dict_adapter = DictAdapter(sorted_keys=sorted(fruit_data.keys()),
data=fruit_data,
args_converter=list_item_args_converter,
propagate_selection_to_data=True,
selection_mode='single',
allow_empty_selection=False,
cls=ListItemButton)
self.assertEqual(sorted(dict_adapter.data),
['Apple', 'Avocado', 'Banana', 'Cantaloupe', 'Cherry', 'Grape',
'Grapefruit', 'Honeydew', 'Kiwifruit', 'Lemon', 'Lime',
'Nectarine', 'Orange', 'Peach', 'Pear', 'Pineapple', 'Plum',
'Strawberry', 'Tangerine', 'Watermelon'])
apple_view = dict_adapter.get_view(0)
self.assertEqual(apple_view.text, 'Apple')
avocado_view = dict_adapter.get_view(1)
self.assertEqual(avocado_view.text, 'Avocado')
banana_view = dict_adapter.get_view(2)
self.assertEqual(banana_view.text, 'Banana')
dict_adapter.sorted_keys = ['Lemon', 'Pear', 'Tangerine']
self.assertEqual(len(dict_adapter.sorted_keys), 3)
self.assertEqual(sorted(dict_adapter.data),
['Apple', 'Avocado', 'Banana', 'Cantaloupe', 'Cherry', 'Grape',
'Grapefruit', 'Honeydew', 'Kiwifruit', 'Lemon', 'Lime',
'Nectarine', 'Orange', 'Peach', 'Pear', 'Pineapple', 'Plum',
'Strawberry', 'Tangerine', 'Watermelon'])
lemon_view = dict_adapter.get_view(0)
self.assertEqual(lemon_view.text, 'Lemon')
pear_view = dict_adapter.get_view(1)
self.assertEqual(pear_view.text, 'Pear')
tangerine_view = dict_adapter.get_view(2)
self.assertEqual(tangerine_view.text, 'Tangerine')
def test_dict_adapter_operations_trimming(self):
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters_dict = \
{l: {'text': l, 'is_selected': False} for l in alphabet}
list_item_args_converter = \
lambda row_index, rec: {'text': rec['text'],
'size_hint_y': None,
'height': 25}
letters = [l for l in alphabet]
def sorted_keys_ok(letters_dict_adapter):
sorted_keys_ok = True
for key in letters_dict_adapter.sorted_keys:
if key not in letters_dict_adapter.data:
sorted_keys_ok = False
break
return sorted_keys_ok
# trim right of sel
letters_dict_adapter = DictAdapter(
sorted_keys=letters[:],
data=letters_dict,
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
a_view = letters_dict_adapter.get_view(0)
self.assertEqual(a_view.text, 'A')
letters_dict_adapter.handle_selection(a_view)
self.assertEqual(len(letters_dict_adapter.selection), 1)
self.assertTrue(a_view.is_selected)
letters_dict_adapter.trim_right_of_sel()
self.assertEqual(len(letters_dict_adapter.data), 1)
self.assertTrue(sorted_keys_ok(letters_dict_adapter))
# trim left of sel
letters_dict_adapter = DictAdapter(
sorted_keys=letters[:],
data=letters_dict,
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
z_view = letters_dict_adapter.get_view(25)
self.assertEqual(z_view.text, 'Z')
letters_dict_adapter.handle_selection(z_view)
self.assertEqual(len(letters_dict_adapter.selection), 1)
self.assertTrue(z_view.is_selected)
letters_dict_adapter.trim_left_of_sel()
self.assertEqual(len(letters_dict_adapter.data), 1)
self.assertTrue(sorted_keys_ok(letters_dict_adapter))
# trim to sel
letters_dict_adapter = DictAdapter(
sorted_keys=letters[:],
data=letters_dict,
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
g_view = letters_dict_adapter.get_view(6)
self.assertEqual(g_view.text, 'G')
letters_dict_adapter.handle_selection(g_view)
m_view = letters_dict_adapter.get_view(12)
self.assertEqual(m_view.text, 'M')
letters_dict_adapter.handle_selection(m_view)
letters_dict_adapter.trim_to_sel()
self.assertEqual(len(letters_dict_adapter.data), 7)
self.assertTrue(sorted_keys_ok(letters_dict_adapter))
# cut to sel
letters_dict_adapter = DictAdapter(
sorted_keys=letters[:],
data=letters_dict,
args_converter=list_item_args_converter,
selection_mode='multiple',
selection_limit=1000,
allow_empty_selection=True,
cls=ListItemButton)
g_view = letters_dict_adapter.get_view(6)
self.assertEqual(g_view.text, 'G')
letters_dict_adapter.handle_selection(g_view)
m_view = letters_dict_adapter.get_view(12)
self.assertEqual(m_view.text, 'M')
letters_dict_adapter.handle_selection(m_view)
letters_dict_adapter.cut_to_sel()
self.assertEqual(len(letters_dict_adapter.data), 2)
self.assertTrue(sorted_keys_ok(letters_dict_adapter))
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaagroup(base_resource) :
""" Configuration for AAA group resource. """
def __init__(self) :
self._groupname = ""
self._loggedin = False
self.___count = 0
@property
def groupname(self) :
"""Name for the group. Must begin with a letter, number, or the underscore character (_), and must consist only of letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore characters. Cannot be changed after the group is added.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or
single quotation marks (for example, "my aaa group" or 'my aaa
group).<br/>Minimum length = 1.
"""
try :
return self._groupname
except Exception as e:
raise e
@groupname.setter
def groupname(self, groupname) :
"""Name for the group. Must begin with a letter, number, or the underscore character (_), and must consist only of letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore characters. Cannot be changed after the group is added.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or
single quotation marks (for example, "my aaa group" or 'my aaa
group).<br/>Minimum length = 1
"""
try :
self._groupname = groupname
except Exception as e:
raise e
@property
def loggedin(self) :
"""Display only the group members who are currently logged in.
"""
try :
return self._loggedin
except Exception as e:
raise e
@loggedin.setter
def loggedin(self, loggedin) :
"""Display only the group members who are currently logged in.
"""
try :
self._loggedin = loggedin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(aaagroup_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.aaagroup
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.groupname) :
return str(self.groupname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add aaagroup.
"""
try :
if type(resource) is not list :
addresource = aaagroup()
addresource.groupname = resource.groupname
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ aaagroup() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].groupname = resource[i].groupname
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete aaagroup.
"""
try :
if type(resource) is not list :
deleteresource = aaagroup()
if type(resource) != type(deleteresource):
deleteresource.groupname = resource
else :
deleteresource.groupname = resource.groupname
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ aaagroup() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].groupname = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ aaagroup() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].groupname = resource[i].groupname
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the aaagroup resources that are configured on netscaler.
"""
try :
if not name :
obj = aaagroup()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = aaagroup()
obj.groupname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [aaagroup() for _ in range(len(name))]
obj = [aaagroup() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = aaagroup()
obj[i].groupname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
""" Use this API to fetch all the aaagroup resources that are configured on netscaler.
# This uses aaagroup_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = aaagroup()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of aaagroup resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaagroup()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
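# Illustrative usage sketch for get_filtered (the "client" object and the
# filter value below are hypothetical examples; any authenticated
# nitro_service instance and valid filter string can be used):
#   groups = aaagroup.get_filtered(client, "groupname:admins")
#   for g in groups:
#       print(g.groupname)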
@classmethod
def count(cls, client) :
""" Use this API to count the aaagroup resources configured on NetScaler.
"""
try :
obj = aaagroup()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of aaagroup resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = aaagroup()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class aaagroup_response(base_response) :
def __init__(self, length=1) :
self.aaagroup = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.aaagroup = [aaagroup() for _ in range(length)]
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from flask import session
from flask_split import ab_test, finished
from flask_split.core import _get_session
from flask_split.models import Alternative, Experiment
from flexmock import flexmock
from pytest import raises
from redis import ConnectionError, Redis
from . import TestCase
class TestExtension(TestCase):
def test_provides_defaults_for_settings(self):
assert self.app.config['SPLIT_IGNORE_IP_ADDRESSES'] == []
assert self.app.config['SPLIT_ROBOT_REGEX'].strip() == r"""
(?i)\b(
Baidu|
Gigabot|
Googlebot|
libwww-perl|
lwp-trivial|
msnbot|
SiteUptime|
Slurp|
WordPress|
ZIBB|
ZyBorg
)\b
""".strip()
assert self.app.config['SPLIT_DB_FAILOVER'] is False
assert self.app.config['SPLIT_ALLOW_MULTIPLE_EXPERIMENTS'] is False
def test_ab_test_assigns_random_alternative_to_a_new_user(self):
ab_test('link_color', 'blue', 'red')
assert _get_session()['link_color'] in ['red', 'blue']
def test_ab_test_increments_participation_counter_for_new_user(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
red = Alternative(self.redis, 'red', 'link_color')
blue = Alternative(self.redis, 'blue', 'link_color')
previous_red_count = red.participant_count
previous_blue_count = blue.participant_count
ab_test('link_color', 'blue', 'red')
new_red_count = red.participant_count
new_blue_count = blue.participant_count
assert (new_red_count + new_blue_count ==
previous_red_count + previous_blue_count + 1)
def test_ab_test_returns_the_given_alternative_for_an_existing_user(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
alternative = ab_test('link_color', 'blue', 'red')
repeat_alternative = ab_test('link_color', 'blue', 'red')
assert alternative == repeat_alternative
def test_ab_test_always_returns_the_winner_if_one_is_present(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
experiment.winner = "orange"
assert ab_test('link_color', 'blue', 'red') == 'orange'
def test_ab_test_allows_the_share_of_visitors_see_an_alternative(self):
ab_test('link_color', ('blue', 0.8), ('red', 20))
assert _get_session()['link_color'] in ['red', 'blue']
def test_ab_test_only_lets_user_participate_in_one_experiment(self):
ab_test('link_color', 'blue', 'red')
ab_test('button_size', 'small', 'big')
assert _get_session()['button_size'] == 'small'
big = Alternative(self.redis, 'big', 'button_size')
assert big.participant_count == 0
small = Alternative(self.redis, 'small', 'button_size')
assert small.participant_count == 0
def test_can_participate_in_many_experiments_with_allow_multiple_experiments(self):
self.app.config['SPLIT_ALLOW_MULTIPLE_EXPERIMENTS'] = True
link_color = ab_test('link_color', 'blue', 'red')
button_size = ab_test('button_size', 'small', 'big')
assert _get_session()['button_size'] == button_size
button_size_alt = Alternative(self.redis, button_size, 'button_size')
assert button_size_alt.participant_count == 1
def test_finished_increments_completed_alternative_counter(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
alternative = Alternative(self.redis, alternative_name, 'link_color')
previous_completion_count = alternative.completed_count
finished('link_color')
new_completion_count = alternative.completed_count
assert new_completion_count == previous_completion_count + 1
def test_finished_clears_out_users_participation_from_their_session(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {"link_color": alternative_name}
finished('link_color')
assert session['split'] == {}
def test_finished_clears_test_session_when_version_is_greater_than_0(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
experiment.increment_version()
alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {"link_color:1": alternative_name}
finished('link_color')
assert session['split'] == {}
def test_finished_dont_clear_out_the_users_session_if_reset_is_false(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {"link_color": alternative_name}
finished('link_color', reset=False)
assert session['split'] == {
"link_color": alternative_name,
}
assert session['split_finished'] == set(['link_color'])
def test_finished_does_nothing_if_experiment_was_not_started_by_the_user(self):
session['split'] = None
finished('some_experiment_not_started_by_the_user')
def test_finished_dont_incr_completed_twice_if_no_reset(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
finished('link_color', reset=False)
finished('link_color', reset=False)
alternative = Alternative(self.redis, alternative_name, 'link_color')
completion_count = alternative.completed_count
assert completion_count == 1
def test_finished_dont_incr_completed_twice_if_ver_gt_0_and_no_reset(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
experiment.increment_version()
alternative_name = ab_test('link_color', 'blue', 'red')
finished('link_color', reset=False)
alternative_name = ab_test('link_color', 'blue', 'red')
finished('link_color', reset=False)
alternative = Alternative(self.redis, alternative_name, 'link_color')
completion_count = alternative.completed_count
assert completion_count == 1
def test_conversions_return_conversion_rates_for_alternatives(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
alternative = Alternative(self.redis, alternative_name, 'link_color')
assert alternative.conversion_rate == 0.0
finished('link_color')
assert alternative.conversion_rate == 1.0
class TestExtensionWhenUserIsARobot(TestCase):
def make_test_request_context(self):
return self.app.test_request_context(
headers={
'User-Agent': 'Googlebot/2.1 (+http://www.google.com/bot.html)'
}
)
def test_ab_test_return_the_control(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
alternative = ab_test('link_color', 'blue', 'red')
assert alternative == experiment.control.name
def test_ab_test_does_not_increment_the_participation_count(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
red = Alternative(self.redis, 'red', 'link_color')
blue = Alternative(self.redis, 'blue', 'link_color')
previous_red_count = red.participant_count
previous_blue_count = blue.participant_count
ab_test('link_color', 'blue', 'red')
new_red_count = red.participant_count
new_blue_count = blue.participant_count
assert (new_red_count + new_blue_count ==
previous_red_count + previous_blue_count)
def test_finished_does_not_increment_the_completed_count(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
alternative = Alternative(self.redis, alternative_name, 'link_color')
previous_completion_count = alternative.completed_count
finished('link_color')
new_completion_count = alternative.completed_count
assert new_completion_count == previous_completion_count
class TestExtensionWhenIPAddressIsIgnored(TestCase):
def setup_method(self, method):
super(TestExtensionWhenIPAddressIsIgnored, self).setup_method(method)
self.app.config['SPLIT_IGNORE_IP_ADDRESSES'] = ['81.19.48.130']
def make_test_request_context(self):
return self.app.test_request_context(environ_overrides={
'REMOTE_ADDR': '81.19.48.130'
})
def test_ab_test_return_the_control(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
alternative = ab_test('link_color', 'blue', 'red')
assert alternative == experiment.control.name
def test_ab_test_does_not_increment_the_participation_count(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
red = Alternative(self.redis, 'red', 'link_color')
blue = Alternative(self.redis, 'blue', 'link_color')
previous_red_count = red.participant_count
previous_blue_count = blue.participant_count
ab_test('link_color', 'blue', 'red')
new_red_count = red.participant_count
new_blue_count = blue.participant_count
assert (new_red_count + new_blue_count ==
previous_red_count + previous_blue_count)
def test_finished_does_not_increment_the_completed_count(self):
Experiment.find_or_create(self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
alternative = Alternative(self.redis, alternative_name, 'link_color')
previous_completion_count = alternative.completed_count
finished('link_color')
new_completion_count = alternative.completed_count
assert new_completion_count == previous_completion_count
class TestVersionedExperiments(TestCase):
def test_uses_version_zero_if_no_version_is_present(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
assert experiment.version == 0
assert session['split'] == {'link_color': alternative_name}
def test_saves_the_version_of_the_experiment_to_the_session(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
experiment.reset()
assert experiment.version == 1
alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {'link_color:1': alternative_name}
def test_loads_the_experiment_even_if_the_version_is_not_0(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
experiment.reset()
assert experiment.version == 1
alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {'link_color:1': alternative_name}
return_alternative_name = ab_test('link_color', 'blue', 'red')
assert return_alternative_name == alternative_name
def test_resets_users_session_on_an_older_version_of_the_experiment(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {'link_color': alternative_name}
alternative = Alternative(self.redis, alternative_name, 'link_color')
assert alternative.participant_count == 1
experiment.reset()
assert experiment.version == 1
alternative = Alternative(self.redis, alternative_name, 'link_color')
assert alternative.participant_count == 0
new_alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split']['link_color:1'] == new_alternative_name
new_alternative = Alternative(
self.redis, new_alternative_name, 'link_color')
assert new_alternative.participant_count == 1
def test_cleans_up_old_versions_of_experiments_from_the_session(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {'link_color': alternative_name}
alternative = Alternative(self.redis, alternative_name, 'link_color')
assert alternative.participant_count == 1
experiment.reset()
assert experiment.version == 1
alternative = Alternative(self.redis, alternative_name, 'link_color')
assert alternative.participant_count == 0
new_alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {'link_color:1': new_alternative_name}
def test_only_counts_completion_of_users_on_the_current_version(self):
experiment = Experiment.find_or_create(
self.redis, 'link_color', 'blue', 'red')
alternative_name = ab_test('link_color', 'blue', 'red')
assert session['split'] == {'link_color': alternative_name}
alternative = Alternative(self.redis, alternative_name, 'link_color')
experiment.reset()
assert experiment.version == 1
finished('link_color')
alternative = Alternative(self.redis, alternative_name, 'link_color')
assert alternative.completed_count == 0
class TestExtensionWhenRedisNotAvailable(TestCase):
def test_ab_test_raises_an_exception_without_db_failover(self):
self.app.config['SPLIT_DB_FAILOVER'] = False
(flexmock(Redis)
.should_receive('execute_command')
.and_raise(ConnectionError))
with raises(ConnectionError):
ab_test('link_color', 'blue', 'red')
def test_finished_raises_an_exception_without_db_failover(self):
self.app.config['SPLIT_DB_FAILOVER'] = False
(flexmock(Redis)
.should_receive('execute_command')
.and_raise(ConnectionError))
with raises(ConnectionError):
finished('link_color')
def test_ab_test_does_not_raise_an_exception_with_db_failover(self):
self.app.config['SPLIT_DB_FAILOVER'] = True
(flexmock(Redis)
.should_receive('execute_command')
.and_raise(ConnectionError))
ab_test('link_color', 'blue', 'red')
def test_ab_test_always_uses_first_alternative_with_db_failover(self):
self.app.config['SPLIT_DB_FAILOVER'] = True
(flexmock(Redis)
.should_receive('execute_command')
.and_raise(ConnectionError))
assert ab_test('link_color', 'blue', 'red') == 'blue'
assert ab_test('link_color', ('blue', 0.01), ('red', 0.2)) == 'blue'
assert ab_test('link_color', ('blue', 0.8), ('red', 20)) == 'blue'
def test_finished_does_not_raise_an_exception_with_db_failover(self):
self.app.config['SPLIT_DB_FAILOVER'] = True
(flexmock(Redis)
.should_receive('execute_command')
.and_raise(ConnectionError))
finished('link_color')
|
|
# -*- coding: utf-8 -*-
import os
import time
from mlpatches import base
_stash = base._stash
def _get_status(exitcode, killer=0):
"""
calculates the exit status for a command.
see the documentation of os.wait for info about this.
"""
return (exitcode * 256) + killer
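# Illustrative sketch (not part of the original patch): the os.wait-style
# encoding places the exit code in the high byte and the killing signal in
# the low byte, e.g.
#   _get_status(0)            # -> 0    clean exit
#   _get_status(1)            # -> 256  exit code 1
#   _get_status(0, killer=9)  # -> 9    killed by SIGKILL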
class VoidIO(object):
"""no-op I/O"""
def __init__(self):
pass
def write(self, *args):
pass
def writelines(self, *args):
pass
def read(self, *args):
return ""
def readline(self, *args):
return ""
def readlines(self, *args):
return []
def close(self):
pass
def flush(self):
pass
class _PipeEndpoint(object):
"""this class represents a pipe endpoint."""
def __init__(self, root, pipe):
self.__root = root
self.__pipe = pipe
def __getattr__(self, name):
"""return attribute name of the pipe."""
return getattr(self.__pipe, name)
def __hasattr__(self, name):
"""checks wether the pioe has a attribute called name."""
return hasattr(self.__pipe, name)
def __repr__(self):
"""returns the representation of the pipe."""
return repr(self.__pipe)
def __del__(self):
"""called on deletion."""
self.close()
def close(self):
"""closes the pipe."""
try:
os.close(self.__pipe.fileno())
except (OSError, IOError):
pass
ec = self.__root.get_exit_code(wait=True)
if ec / 256 == 0:
return None # see os.popen
else:
return ec
class _PopenCmd(object):
"""This class handles the command processing."""
# TODO: replace state mechanics with single bool and threading.Lock()
STATE_INIT = "INIT"
STATE_RUNNING = "RUNNING"
STATE_FINISHED = "FINISHED"
def __init__(self, cmd, mode, bufsize, shared_eo=False):
self.cmd = cmd
self.mode = mode
self.bufsize = bufsize
self.fds = []
self.worker = None
self.state = self.STATE_INIT
self.shared_eo = shared_eo
self.chinr, self.chinw = self.create_pipe(wbuf=bufsize)
self.choutr, self.choutw = self.create_pipe(rbuf=bufsize)
if shared_eo:
self.cherrr, self.cherrw = self.choutr, self.choutw
else:
self.cherrr, self.cherrw = self.create_pipe(rbuf=bufsize)
def get_pipes(self):
"""returns the pipes."""
if not self.shared_eo:
return (_PipeEndpoint(self, self.chinw), _PipeEndpoint(self, self.choutr), _PipeEndpoint(self, self.cherrr))
else:
return (_PipeEndpoint(self, self.chinw), _PipeEndpoint(self, self.choutr))
def close_fds(self):
"""close all fds."""
for fd in self.fds:
try:
os.close(fd)
except OSError:
pass
def create_pipe(self, rbuf=0, wbuf=0):
"""creates a pipe. returns (readpipe, writepipe)"""
rfd, wfd = os.pipe()
self.fds += [rfd, wfd]
rf, wf = os.fdopen(rfd, "rb", rbuf), os.fdopen(wfd, "wb", wbuf)
return rf, wf
def run(self):
"""runs the command."""
self.state = self.STATE_RUNNING
self.worker = _stash.runtime.run(
input_=self.cmd,
persistent_level=2,
is_background=False,
add_to_history=False,
final_ins=self.chinr,
final_outs=self.choutw,
final_errs=self.cherrw
)
if not self.worker.is_alive():
# the command may already have finished before run() returns
self.state = self.STATE_FINISHED
def get_exit_code(self, wait=True):
"""returns the exitcode.
If wait is False and the worker has not finished yet, return None."""
if self.state != self.STATE_INIT:
if self.worker is None:
# temp fix for pipes for fast commands
if not wait:
return 0
while self.worker is None:
time.sleep(0.01)
if wait and self.worker.is_alive():
self.worker.join()
self.state = self.STATE_FINISHED
elif self.worker.status() != self.worker.STOPPED:
return None
es = self.worker.state.return_value
return _get_status(es, self.worker.killer)
raise RuntimeError("get_exit_code() called before run()!")
def popen(patch, cmd, mode="r", bufsize=0):
"""Open a pipe to or from command. The return value is an open file object connected to the pipe, which can be read or written depending on whether mode is 'r' (default) or 'w'. The bufsize argument has the same meaning as the corresponding argument to the built-in open() function. The exit status of the command (encoded in the format specified for wait()) is available as the return value of the close() method of the file object, except that when the exit status is zero (termination without errors), None is returned."""
cmd = _PopenCmd(cmd, mode, bufsize, shared_eo=False)
pipes = cmd.get_pipes()
cmd.run()
if mode == "r":
return pipes[1]
elif mode == "w":
return pipes[0]
def popen2(patch, cmd, mode="r", bufsize=0):
"""Execute cmd as a sub-process and return the file objects (child_stdin, child_stdout)."""
cmd = _PopenCmd(cmd, mode, bufsize, shared_eo=False)
pipes = cmd.get_pipes()
cmd.run()
return pipes[0], pipes[1]
def popen3(patch, cmd, mode="r", bufsize=0):
"""Execute cmd as a sub-process and return the file objects (child_stdin, child_stdout, child_stderr)."""
cmd = _PopenCmd(cmd, mode, bufsize, shared_eo=False)
pipes = cmd.get_pipes()
cmd.run()
return pipes[0], pipes[1], pipes[2]
def popen4(patch, cmd, mode="r", bufsize=0):
"""Execute cmd as a sub-process and return the file objects (child_stdin, child_stdout_and_stderr)."""
cmd = _PopenCmd(cmd, mode, bufsize, shared_eo=True)
pipes = cmd.get_pipes()
cmd.run()
return pipes[0], pipes[1]
def system(patch, command):
"""Execute the command (a string) in a subshell. This is implemented by calling the Standard C function system(), and has the same limitations. Changes to sys.stdin, etc. are not reflected in the environment of the executed command.
On Unix, the return value is the exit status of the process encoded in the format specified for wait(). Note that POSIX does not specify the meaning of the return value of the C system() function, so the return value of the Python function is system-dependent.
On Windows, the return value is that returned by the system shell after running command, given by the Windows environment variable COMSPEC: on command.com systems (Windows 95, 98 and ME) this is always 0; on cmd.exe systems (Windows NT, 2000 and XP) this is the exit status of the command run; on systems using a non-native shell, consult your shell documentation.
The subprocess module provides more powerful facilities for spawning new processes and retrieving their results; using that module is preferable to using this function. See the Replacing Older Functions with the subprocess Module section in the subprocess documentation for some helpful recipes."""
io = VoidIO()
worker = _stash.runtime.run(
input_=command,
persistent_level=2,
is_background=False,
add_to_history=False,
final_ins=io,
final_outs=io,
final_errs=io,
)
worker.join() # wait for completion
es = worker.state.return_value
return _get_status(es, worker.killer)
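# Minimal usage sketch (illustrative only; in practice these functions are
# installed by the mlpatches machinery, which supplies the `patch` argument):
#   f = popen(None, "echo hello")        # read-mode pipe to the command
#   output = f.read()
#   status = f.close()                   # None on success, as with os.popen
#   rc = system(None, "ls > /dev/null")  # os.wait-style exit status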
|
|
from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import datetime
import functools
import sys
import textwrap
import time
import platform
import weakref
from tornado.concurrent import return_future, Future
from tornado.escape import url_escape
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, asynchronous, HTTPError
from tornado import gen
try:
from concurrent import futures
except ImportError:
futures = None
skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 not available')
skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
'Not CPython implementation')
class GenEngineTest(AsyncTestCase):
def setUp(self):
super(GenEngineTest, self).setUp()
self.named_contexts = []
def named_context(self, name):
@contextlib.contextmanager
def context():
self.named_contexts.append(name)
try:
yield
finally:
self.assertEqual(self.named_contexts.pop(), name)
return context
def run_gen(self, f):
f()
return self.wait()
def delay_callback(self, iterations, callback, arg):
"""Runs callback(arg) after a number of IOLoop iterations."""
if iterations == 0:
callback(arg)
else:
self.io_loop.add_callback(functools.partial(
self.delay_callback, iterations - 1, callback, arg))
@return_future
def async_future(self, result, callback):
self.io_loop.add_callback(callback, result)
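# Note (illustrative): @return_future converts the callback-style helper above
# into one that returns a Future, so later tests can simply write
#   result = yield self.async_future(42)   # -> 42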
def test_no_yield(self):
@gen.engine
def f():
self.stop()
self.run_gen(f)
def test_inline_cb(self):
@gen.engine
def f():
(yield gen.Callback("k1"))()
res = yield gen.Wait("k1")
self.assertTrue(res is None)
self.stop()
self.run_gen(f)
def test_ioloop_cb(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.stop()
self.run_gen(f)
def test_exception_phase1(self):
@gen.engine
def f():
1 / 0
self.assertRaises(ZeroDivisionError, self.run_gen, f)
def test_exception_phase2(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
1 / 0
self.assertRaises(ZeroDivisionError, self.run_gen, f)
def test_exception_in_task_phase1(self):
def fail_task(callback):
1 / 0
@gen.engine
def f():
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.stop()
self.run_gen(f)
def test_exception_in_task_phase2(self):
# This is the case that requires the use of stack_context in gen.engine
def fail_task(callback):
self.io_loop.add_callback(lambda: 1 / 0)
@gen.engine
def f():
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.stop()
self.run_gen(f)
def test_with_arg(self):
@gen.engine
def f():
(yield gen.Callback("k1"))(42)
res = yield gen.Wait("k1")
self.assertEqual(42, res)
self.stop()
self.run_gen(f)
def test_with_arg_tuple(self):
@gen.engine
def f():
(yield gen.Callback((1, 2)))((3, 4))
res = yield gen.Wait((1, 2))
self.assertEqual((3, 4), res)
self.stop()
self.run_gen(f)
def test_key_reuse(self):
@gen.engine
def f():
yield gen.Callback("k1")
yield gen.Callback("k1")
self.stop()
self.assertRaises(gen.KeyReuseError, self.run_gen, f)
def test_key_reuse_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
yield gen.Callback((1, 2))
self.stop()
self.assertRaises(gen.KeyReuseError, self.run_gen, f)
def test_key_mismatch(self):
@gen.engine
def f():
yield gen.Callback("k1")
yield gen.Wait("k2")
self.stop()
self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
def test_key_mismatch_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
yield gen.Wait((2, 3))
self.stop()
self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
def test_leaked_callback(self):
@gen.engine
def f():
yield gen.Callback("k1")
self.stop()
self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
def test_leaked_callback_tuple(self):
@gen.engine
def f():
yield gen.Callback((1, 2))
self.stop()
self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
def test_parallel_callback(self):
@gen.engine
def f():
for k in range(3):
self.io_loop.add_callback((yield gen.Callback(k)))
yield gen.Wait(1)
self.io_loop.add_callback((yield gen.Callback(3)))
yield gen.Wait(0)
yield gen.Wait(3)
yield gen.Wait(2)
self.stop()
self.run_gen(f)
def test_bogus_yield(self):
@gen.engine
def f():
yield 42
self.assertRaises(gen.BadYieldError, self.run_gen, f)
def test_bogus_yield_tuple(self):
@gen.engine
def f():
yield (1, 2)
self.assertRaises(gen.BadYieldError, self.run_gen, f)
def test_reuse(self):
@gen.engine
def f():
self.io_loop.add_callback((yield gen.Callback(0)))
yield gen.Wait(0)
self.stop()
self.run_gen(f)
self.run_gen(f)
def test_task(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
self.stop()
self.run_gen(f)
def test_wait_all(self):
@gen.engine
def f():
(yield gen.Callback("k1"))("v1")
(yield gen.Callback("k2"))("v2")
results = yield gen.WaitAll(["k1", "k2"])
self.assertEqual(results, ["v1", "v2"])
self.stop()
self.run_gen(f)
def test_exception_in_yield(self):
@gen.engine
def f():
try:
yield gen.Wait("k1")
raise Exception("did not get expected exception")
except gen.UnknownKeyError:
pass
self.stop()
self.run_gen(f)
def test_resume_after_exception_in_yield(self):
@gen.engine
def f():
try:
yield gen.Wait("k1")
raise Exception("did not get expected exception")
except gen.UnknownKeyError:
pass
(yield gen.Callback("k2"))("v2")
self.assertEqual((yield gen.Wait("k2")), "v2")
self.stop()
self.run_gen(f)
def test_orphaned_callback(self):
@gen.engine
def f():
self.orphaned_callback = yield gen.Callback(1)
try:
self.run_gen(f)
raise Exception("did not get expected exception")
except gen.LeakedCallbackError:
pass
self.orphaned_callback()
def test_multi(self):
@gen.engine
def f():
(yield gen.Callback("k1"))("v1")
(yield gen.Callback("k2"))("v2")
results = yield [gen.Wait("k1"), gen.Wait("k2")]
self.assertEqual(results, ["v1", "v2"])
self.stop()
self.run_gen(f)
def test_multi_dict(self):
@gen.engine
def f():
(yield gen.Callback("k1"))("v1")
(yield gen.Callback("k2"))("v2")
results = yield dict(foo=gen.Wait("k1"), bar=gen.Wait("k2"))
self.assertEqual(results, dict(foo="v1", bar="v2"))
self.stop()
self.run_gen(f)
def test_multi_delayed(self):
@gen.engine
def f():
# callbacks run at different times
responses = yield [
gen.Task(self.delay_callback, 3, arg="v1"),
gen.Task(self.delay_callback, 1, arg="v2"),
]
self.assertEqual(responses, ["v1", "v2"])
self.stop()
self.run_gen(f)
def test_multi_dict_delayed(self):
@gen.engine
def f():
# callbacks run at different times
responses = yield dict(
foo=gen.Task(self.delay_callback, 3, arg="v1"),
bar=gen.Task(self.delay_callback, 1, arg="v2"),
)
self.assertEqual(responses, dict(foo="v1", bar="v2"))
self.stop()
self.run_gen(f)
@skipOnTravis
@gen_test
def test_multi_performance(self):
# Yielding a list used to have quadratic performance; make
# sure a large list stays reasonable. On my laptop a list of
# 2000 used to take 1.8s, now it takes 0.12.
start = time.time()
yield [gen.Task(self.io_loop.add_callback) for i in range(2000)]
end = time.time()
self.assertLess(end - start, 1.0)
@gen_test
def test_multi_empty(self):
# Empty lists or dicts should return the same type.
x = yield []
self.assertTrue(isinstance(x, list))
y = yield {}
self.assertTrue(isinstance(y, dict))
@gen_test
def test_future(self):
result = yield self.async_future(1)
self.assertEqual(result, 1)
@gen_test
def test_multi_future(self):
results = yield [self.async_future(1), self.async_future(2)]
self.assertEqual(results, [1, 2])
@gen_test
def test_multi_dict_future(self):
results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
self.assertEqual(results, dict(foo=1, bar=2))
def test_arguments(self):
@gen.engine
def f():
(yield gen.Callback("noargs"))()
self.assertEqual((yield gen.Wait("noargs")), None)
(yield gen.Callback("1arg"))(42)
self.assertEqual((yield gen.Wait("1arg")), 42)
(yield gen.Callback("kwargs"))(value=42)
result = yield gen.Wait("kwargs")
self.assertTrue(isinstance(result, gen.Arguments))
self.assertEqual(((), dict(value=42)), result)
self.assertEqual(dict(value=42), result.kwargs)
(yield gen.Callback("2args"))(42, 43)
result = yield gen.Wait("2args")
self.assertTrue(isinstance(result, gen.Arguments))
self.assertEqual(((42, 43), {}), result)
self.assertEqual((42, 43), result.args)
def task_func(callback):
callback(None, error="foo")
result = yield gen.Task(task_func)
self.assertTrue(isinstance(result, gen.Arguments))
self.assertEqual(((None,), dict(error="foo")), result)
self.stop()
self.run_gen(f)
def test_stack_context_leak(self):
# regression test: repeated invocations of a gen-based
# function should not result in accumulated stack_contexts
def _stack_depth():
head = stack_context._state.contexts[1]
length = 0
while head is not None:
length += 1
head = head.old_contexts[1]
return length
@gen.engine
def inner(callback):
yield gen.Task(self.io_loop.add_callback)
callback()
@gen.engine
def outer():
for i in range(10):
yield gen.Task(inner)
stack_increase = _stack_depth() - initial_stack_depth
self.assertTrue(stack_increase <= 2)
self.stop()
initial_stack_depth = _stack_depth()
self.run_gen(outer)
def test_stack_context_leak_exception(self):
# same as previous, but with a function that exits with an exception
@gen.engine
def inner(callback):
yield gen.Task(self.io_loop.add_callback)
1 / 0
@gen.engine
def outer():
for i in range(10):
try:
yield gen.Task(inner)
except ZeroDivisionError:
pass
stack_increase = len(stack_context._state.contexts) - initial_stack_depth
self.assertTrue(stack_increase <= 2)
self.stop()
initial_stack_depth = len(stack_context._state.contexts)
self.run_gen(outer)
def function_with_stack_context(self, callback):
# Technically this function should stack_context.wrap its callback
# upon entry. However, it is very common for this step to be
# omitted.
def step2():
self.assertEqual(self.named_contexts, ['a'])
self.io_loop.add_callback(callback)
with stack_context.StackContext(self.named_context('a')):
self.io_loop.add_callback(step2)
@gen_test
def test_wait_transfer_stack_context(self):
# Wait should not pick up contexts from where callback was invoked,
# even if that function improperly fails to wrap its callback.
cb = yield gen.Callback('k1')
self.function_with_stack_context(cb)
self.assertEqual(self.named_contexts, [])
yield gen.Wait('k1')
self.assertEqual(self.named_contexts, [])
@gen_test
def test_task_transfer_stack_context(self):
yield gen.Task(self.function_with_stack_context)
self.assertEqual(self.named_contexts, [])
def test_raise_after_stop(self):
# This pattern will be used in the following tests so make sure
# the exception propagates as expected.
@gen.engine
def f():
self.stop()
1 / 0
with self.assertRaises(ZeroDivisionError):
self.run_gen(f)
def test_sync_raise_return(self):
# gen.Return is allowed in @gen.engine, but it may not be used
# to return a value.
@gen.engine
def f():
self.stop(42)
raise gen.Return()
result = self.run_gen(f)
self.assertEqual(result, 42)
def test_async_raise_return(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
self.stop(42)
raise gen.Return()
result = self.run_gen(f)
self.assertEqual(result, 42)
def test_sync_raise_return_value(self):
@gen.engine
def f():
raise gen.Return(42)
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_sync_raise_return_value_tuple(self):
@gen.engine
def f():
raise gen.Return((1, 2))
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_async_raise_return_value(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_async_raise_return_value_tuple(self):
@gen.engine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return((1, 2))
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_return_value(self):
# It is an error to apply @gen.engine to a function that returns
# a value.
@gen.engine
def f():
return 42
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
def test_return_value_tuple(self):
# It is an error to apply @gen.engine to a function that returns
# a value.
@gen.engine
def f():
return (1, 2)
with self.assertRaises(gen.ReturnValueIgnoredError):
self.run_gen(f)
@skipNotCPython
def test_task_refcounting(self):
# On CPython, tasks and their arguments should be released immediately
# without waiting for garbage collection.
@gen.engine
def f():
class Foo(object):
pass
arg = Foo()
self.arg_ref = weakref.ref(arg)
task = gen.Task(self.io_loop.add_callback, arg=arg)
self.task_ref = weakref.ref(task)
yield task
self.stop()
self.run_gen(f)
self.assertIs(self.arg_ref(), None)
self.assertIs(self.task_ref(), None)
class GenCoroutineTest(AsyncTestCase):
def setUp(self):
# Stray StopIteration exceptions can lead to tests exiting prematurely,
# so we need explicit checks here to make sure the tests run all
# the way through.
self.finished = False
super(GenCoroutineTest, self).setUp()
def tearDown(self):
super(GenCoroutineTest, self).tearDown()
assert self.finished
@gen_test
def test_sync_gen_return(self):
@gen.coroutine
def f():
raise gen.Return(42)
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_async_gen_return(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_sync_return(self):
@gen.coroutine
def f():
return 42
result = yield f()
self.assertEqual(result, 42)
self.finished = True
@skipBefore33
@gen_test
def test_async_return(self):
# It is a compile-time error to return a value in a generator
# before Python 3.3, so we must test this with exec.
# Flatten the real global and local namespace into our fake globals:
# it's all global from the perspective of f().
global_namespace = dict(globals(), **locals())
local_namespace = {}
exec(textwrap.dedent("""
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
return 42
"""), global_namespace, local_namespace)
result = yield local_namespace['f']()
self.assertEqual(result, 42)
self.finished = True
@skipBefore33
@gen_test
def test_async_early_return(self):
# A yield statement exists but is not executed, which means
# this function "returns" via an exception. This exception
# doesn't happen before the exception handling is set up.
global_namespace = dict(globals(), **locals())
local_namespace = {}
exec(textwrap.dedent("""
@gen.coroutine
def f():
if True:
return 42
yield gen.Task(self.io_loop.add_callback)
"""), global_namespace, local_namespace)
result = yield local_namespace['f']()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_sync_return_no_value(self):
@gen.coroutine
def f():
return
result = yield f()
self.assertEqual(result, None)
self.finished = True
@gen_test
def test_async_return_no_value(self):
# Without a return value we don't need python 3.3.
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
return
result = yield f()
self.assertEqual(result, None)
self.finished = True
@gen_test
def test_sync_raise(self):
@gen.coroutine
def f():
1 / 0
# The exception is raised when the future is yielded
# (or equivalently when its result method is called),
# not when the function itself is called.
future = f()
with self.assertRaises(ZeroDivisionError):
yield future
self.finished = True
@gen_test
def test_async_raise(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
1 / 0
future = f()
with self.assertRaises(ZeroDivisionError):
yield future
self.finished = True
@gen_test
def test_pass_callback(self):
@gen.coroutine
def f():
raise gen.Return(42)
result = yield gen.Task(f)
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_replace_yieldpoint_exception(self):
# Test exception handling: a coroutine can catch one exception
# raised by a yield point and raise a different one.
@gen.coroutine
def f1():
1 / 0
@gen.coroutine
def f2():
try:
yield f1()
except ZeroDivisionError:
raise KeyError()
future = f2()
with self.assertRaises(KeyError):
yield future
self.finished = True
@gen_test
def test_swallow_yieldpoint_exception(self):
# Test exception handling: a coroutine can catch an exception
# raised by a yield point and not raise a different one.
@gen.coroutine
def f1():
1 / 0
@gen.coroutine
def f2():
try:
yield f1()
except ZeroDivisionError:
raise gen.Return(42)
result = yield f2()
self.assertEqual(result, 42)
self.finished = True
@gen_test
def test_replace_context_exception(self):
# Test exception handling: exceptions thrown into the stack context
# can be caught and replaced.
# Note that this test and the following are for behavior that is
# not really supported any more: coroutines no longer create a
# stack context automatically; but one is created after the first
# yield point.
@gen.coroutine
def f2():
yield gen.Task(self.io_loop.add_callback)
self.io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(self.io_loop.add_timeout,
self.io_loop.time() + 10)
except ZeroDivisionError:
raise KeyError()
future = f2()
with self.assertRaises(KeyError):
yield future
self.finished = True
@gen_test
def test_swallow_context_exception(self):
# Test exception handling: exceptions thrown into the stack context
# can be caught and ignored.
@gen.coroutine
def f2():
yield gen.Task(self.io_loop.add_callback)
self.io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(self.io_loop.add_timeout,
self.io_loop.time() + 10)
except ZeroDivisionError:
raise gen.Return(42)
result = yield f2()
self.assertEqual(result, 42)
self.finished = True
class GenSequenceHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.finish("3")
class GenCoroutineSequenceHandler(RequestHandler):
@gen.coroutine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.finish("3")
class GenCoroutineUnfinishedSequenceHandler(RequestHandler):
@asynchronous
@gen.coroutine
def get(self):
self.io_loop = self.request.connection.stream.io_loop
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
self.write("1")
self.io_loop.add_callback((yield gen.Callback("k2")))
yield gen.Wait("k2")
self.write("2")
# reuse an old key
self.io_loop.add_callback((yield gen.Callback("k1")))
yield gen.Wait("k1")
# just write, don't finish
self.write("3")
class GenTaskHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
io_loop = self.request.connection.stream.io_loop
client = AsyncHTTPClient(io_loop=io_loop)
response = yield gen.Task(client.fetch, self.get_argument('url'))
response.rethrow()
self.finish(b"got response: " + response.body)
class GenExceptionHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
# This test depends on the order of the two decorators.
io_loop = self.request.connection.stream.io_loop
yield gen.Task(io_loop.add_callback)
raise Exception("oops")
class GenCoroutineExceptionHandler(RequestHandler):
@gen.coroutine
def get(self):
# This test depends on the order of the two decorators.
io_loop = self.request.connection.stream.io_loop
yield gen.Task(io_loop.add_callback)
raise Exception("oops")
class GenYieldExceptionHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
io_loop = self.request.connection.stream.io_loop
# Test the interaction of the two stack_contexts.
def fail_task(callback):
io_loop.add_callback(lambda: 1 / 0)
try:
yield gen.Task(fail_task)
raise Exception("did not get expected exception")
except ZeroDivisionError:
self.finish('ok')
class UndecoratedCoroutinesHandler(RequestHandler):
@gen.coroutine
def prepare(self):
self.chunks = []
yield gen.Task(IOLoop.current().add_callback)
self.chunks.append('1')
@gen.coroutine
def get(self):
self.chunks.append('2')
yield gen.Task(IOLoop.current().add_callback)
self.chunks.append('3')
yield gen.Task(IOLoop.current().add_callback)
self.write(''.join(self.chunks))
class AsyncPrepareErrorHandler(RequestHandler):
@gen.coroutine
def prepare(self):
yield gen.Task(IOLoop.current().add_callback)
raise HTTPError(403)
def get(self):
self.finish('ok')
class GenWebTest(AsyncHTTPTestCase):
def get_app(self):
return Application([
('/sequence', GenSequenceHandler),
('/coroutine_sequence', GenCoroutineSequenceHandler),
('/coroutine_unfinished_sequence',
GenCoroutineUnfinishedSequenceHandler),
('/task', GenTaskHandler),
('/exception', GenExceptionHandler),
('/coroutine_exception', GenCoroutineExceptionHandler),
('/yield_exception', GenYieldExceptionHandler),
('/undecorated_coroutine', UndecoratedCoroutinesHandler),
('/async_prepare_error', AsyncPrepareErrorHandler),
])
def test_sequence_handler(self):
response = self.fetch('/sequence')
self.assertEqual(response.body, b"123")
def test_coroutine_sequence_handler(self):
response = self.fetch('/coroutine_sequence')
self.assertEqual(response.body, b"123")
def test_coroutine_unfinished_sequence_handler(self):
response = self.fetch('/coroutine_unfinished_sequence')
self.assertEqual(response.body, b"123")
def test_task_handler(self):
response = self.fetch('/task?url=%s' % url_escape(self.get_url('/sequence')))
self.assertEqual(response.body, b"got response: 123")
def test_exception_handler(self):
# Make sure we get an error and not a timeout
with ExpectLog(app_log, "Uncaught exception GET /exception"):
response = self.fetch('/exception')
self.assertEqual(500, response.code)
def test_coroutine_exception_handler(self):
# Make sure we get an error and not a timeout
with ExpectLog(app_log, "Uncaught exception GET /coroutine_exception"):
response = self.fetch('/coroutine_exception')
self.assertEqual(500, response.code)
def test_yield_exception_handler(self):
response = self.fetch('/yield_exception')
self.assertEqual(response.body, b'ok')
def test_undecorated_coroutines(self):
response = self.fetch('/undecorated_coroutine')
self.assertEqual(response.body, b'123')
def test_async_prepare_error_handler(self):
response = self.fetch('/async_prepare_error')
self.assertEqual(response.code, 403)
class WithTimeoutTest(AsyncTestCase):
@gen_test
def test_timeout(self):
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(datetime.timedelta(seconds=0.1),
Future())
@gen_test
def test_completes_before_timeout(self):
future = Future()
self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
lambda: future.set_result('asdf'))
result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
future)
self.assertEqual(result, 'asdf')
@gen_test
def test_fails_before_timeout(self):
future = Future()
self.io_loop.add_timeout(
datetime.timedelta(seconds=0.1),
lambda: future.set_exception(ZeroDivisionError))
with self.assertRaises(ZeroDivisionError):
yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
@gen_test
def test_already_resolved(self):
future = Future()
future.set_result('asdf')
result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
future)
self.assertEqual(result, 'asdf')
@unittest.skipIf(futures is None, 'futures module not present')
@gen_test
def test_timeout_concurrent_future(self):
with futures.ThreadPoolExecutor(1) as executor:
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(self.io_loop.time(),
executor.submit(time.sleep, 0.1))
@unittest.skipIf(futures is None, 'futures module not present')
@gen_test
def test_completed_concurrent_future(self):
with futures.ThreadPoolExecutor(1) as executor:
yield gen.with_timeout(datetime.timedelta(seconds=3600),
executor.submit(lambda: None))
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding:utf-8 -*-
# Created by Hans-Thomas on 2011-05-28.
#=============================================================================
# db.py --- Tests database
#=============================================================================
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import functools
import hashlib
import inspect
import os
import re
import shutil
import sqlite3
from contextlib import contextmanager
import six
from six.moves import zip
from .status import Status, ok
try:
import pytz
except ImportError:
have_pytz = False
else:
have_pytz = True
class DatabaseError(Exception):
pass
class DoesNotExist(DatabaseError):
pass
class RunDoesNotExist(DoesNotExist):
pass
class ResultDoesNotExist(DoesNotExist):
pass
class TestDoesNotExist(DoesNotExist):
pass
def with_cursor(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
cursor = kwargs.pop('cursor', None)
if cursor is None:
with self.transaction() as cursor:
return method(self, cursor, *args, **kwargs)
else:
return method(self, cursor, *args, **kwargs)
return wrapper
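# Illustrative note (not part of the original module): with_cursor lets a
# Database method either open its own transaction or reuse a caller's cursor
# (the test name below is hypothetical):
#   db.get_test('test_foo (SuiteBar)')                    # own transaction
#   with db.transaction() as cur:
#       db.get_test('test_foo (SuiteBar)', cursor=cur)    # reuses the cursor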
class Database(object):
_TABLES = dict(
run='''run(
id INTEGER PRIMARY KEY AUTOINCREMENT,
started TIMESTAMP,
finished TIMESTAMP,
wasSuccessful BOOLEAN,
testsRun INTEGER,
full BOOLEAN,
errors INTEGER,
failures INTEGER,
skipped INTEGER,
expectedFailures INTEGER,
unexpectedSuccesses INTEGER
)''',
test='''test(
name VARCHAR PRIMARY KEY,
test VARCHAR,
suite VARCHAR,
hash VARCHAR,
runs INTEGER,
average_time TIMEDELTA
)''',
result='''result(
name VARCHAR REFERENCES test(name),
started TIMESTAMP,
finished TIMESTAMP,
run_id INTEGER REFERENCES run(id),
status VARCHAR
)''',
version='''version(
id VARCHAR PRIMARY KEY
)''',
)
name_re = re.compile(r'(?P<test>[^\s]+)\s+\((?P<suite>[^)]+)\)')
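# For illustration: name_re parses unittest's str(test) format, e.g. the
# (hypothetical) name 'test_login (tests.test_auth.AuthTestCase)' yields
#   {'test': 'test_login', 'suite': 'tests.test_auth.AuthTestCase'}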
version = '1'
def __init__(self, path=None, basedir=None, name='.autocheck.db'):
if path is None:
basedir = os.getcwd() if basedir is None else basedir
self.path = os.path.join(basedir, name)
else:
self.path = path
self.connection = None
self.current_run_id = None
def _connect(self):
self.connection = sqlite3.connect(
self.path,
detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
)
self.connection.row_factory = sqlite3.Row
def connect(self):
self._connect()
current_version = self.current_version()
if current_version != self.version:
self.migrate(current_version)
def close(self):
self.connection.close()
self.connection = None
@property
def is_connected(self):
return self.connection is not None
def ensure_connection(self):
if not self.is_connected:
self.connect()
self.setup()
@contextmanager
def transaction(self):
self.ensure_connection()
try:
yield self.connection.cursor()
except:
self.connection.rollback()
raise
else:
self.connection.commit()
@with_cursor
def current_version(self, cursor):
self.create_table('version', cursor=cursor)
try:
cursor.execute('SELECT id FROM version ORDER BY id DESC LIMIT 1')
except sqlite3.OperationalError:
return '0'
row = cursor.fetchone()
if row is None:
cursor.execute(
'INSERT INTO version(id) VALUES (?)', [self.version])
return self.version
return row[0]
def migrate(self, current_version):
print('migrating {} -> {} ...'.format(
current_version, self.version), end='')
self.close()
new_path = '{}.bak-{}-{}'.format(self.path,
current_version, self.version)
os.rename(self.path, new_path)
shutil.copy2(new_path, self.path)
self._connect()
if current_version == '0':
self.migrate_0_1()
print('done.')
@with_cursor
def migrate_0_1(self, cursor):
self.create_table('version', cursor=cursor)
cursor.execute('INSERT INTO version(id) VALUES (?)', [self.version])
cursor.execute('ALTER TABLE test ADD COLUMN test VARCHAR')
cursor.execute('ALTER TABLE test ADD COLUMN suite VARCHAR')
cursor.execute('SELECT name FROM test')
for row in cursor.fetchall():
name = row[0]
data = self.name_re.match(name).groupdict()
data['name'] = name
cursor.execute(
'UPDATE test SET test=:test, suite=:suite WHERE name=:name',
data)
@with_cursor
def setup(self, cursor):
cursor.execute('PRAGMA foreign_keys = ON')
for name in self._TABLES:
self.create_table(name, cursor=cursor)
@with_cursor
def create_table(self, cursor, name):
cursor.execute('CREATE TABLE IF NOT EXISTS %s' % self._TABLES[name])
@with_cursor
def add_run(self, cursor):
cursor.execute('INSERT INTO run(started) VALUES (?)', [self.utcnow()])
self.current_run_id = cursor.lastrowid
@property
def total_runs(self):
with self.transaction() as cursor:
cursor.execute('SELECT count(*) FROM run')
count = cursor.fetchone()[0]
return count
@with_cursor
def get_run(self, cursor, run_id=None):
if run_id is None:
run_id = self.current_run_id
if run_id is None:
raise RunDoesNotExist()
cursor.execute('SELECT * FROM run WHERE id = ?', [run_id])
run = cursor.fetchone()
if run is None:
raise RunDoesNotExist(run_id)
return Row(run)
@with_cursor
def get_test(self, cursor, name):
cursor.execute('SELECT * FROM test WHERE name=?', (name,))
test = cursor.fetchone()
if test is None:
raise TestDoesNotExist(name)
return Row(test)
@with_cursor
def get_or_create_test(self, cursor, test_object):
name = str(test_object)
data = self.name_re.match(name).groupdict()
try:
test = self.get_test(name, cursor=cursor)
except TestDoesNotExist:
cursor.execute(
'INSERT INTO test(name,test,suite,hash,runs,average_time) '
'VALUES (?,?,?,?,?,?)',
(name, data['test'], data['suite'],
source_hash(test_object), 0, 0))
test = self.get_test(name, cursor=cursor)
return test
@with_cursor
def add_result(self, cursor, test_object, started, finished, status):
test = self.get_or_create_test(test_object, cursor=cursor)
cursor.execute(
'INSERT INTO result(run_id,name,started,finished,status) '
'VALUES (?,?,?,?,?)',
(self.current_run_id, test['name'], started, finished, status))
runs = test['runs'] + 1
average_time = test['average_time']
if status == ok.key:
if average_time is None:
average_time = finished - started
else:
total_time = average_time * (runs - 1) + (finished - started)
average_time = total_time / runs
cursor.execute(
'UPDATE test SET runs=?,hash=?,average_time=? WHERE name=?',
(runs, source_hash(test_object), average_time, str(test_object)))
@with_cursor
def add_results(self, cursor, results):
for test_object, started, finished, status in results:
self.add_result(test_object, self.to_utc(
started), self.to_utc(finished), status, cursor=cursor)
@with_cursor
def get_last_result(self, cursor, name):
cursor.execute(
'SELECT * FROM result WHERE name=? ORDER BY finished DESC LIMIT 1',
(name,))
result = cursor.fetchone()
if result is None:
raise ResultDoesNotExist(name)
return Row(result)
@with_cursor
def total_runs_by_test_name(self, cursor, name):
return self.get_test(name, cursor=cursor)['runs']
@with_cursor
def get_result_count(self, cursor, run_id, status=None):
if status is None:
cursor.execute(
'SELECT count(*) FROM result WHERE run_id=?', [run_id])
else:
cursor.execute(
'SELECT count(*) FROM result WHERE status=? AND run_id=?',
(status, run_id))
return cursor.fetchone()[0]
@with_cursor
def get_result_counts(self, cursor, run_id):
for status in Status.ordered:
if status.name != ok.name:
result_count = self.get_result_count(
run_id, status.key, cursor=cursor)
yield status.name_plural, result_count
@with_cursor
def finish_run(self, cursor, full):
run_id = self.current_run_id
data = dict(self.get_result_counts(run_id, cursor=cursor))
data.update(
run_id=run_id,
finished=self.utcnow(),
wasSuccessful=data['errors'] == data['failures'] == 0,
testsRun=self.get_result_count(run_id, cursor=cursor),
full=full,
)
cursor.execute('''UPDATE run SET
finished=:finished,
wasSuccessful=:wasSuccessful,
testsRun=:testsRun,
full=:full,
errors=:errors,
failures=:failures,
skipped=:skipped,
expectedFailures=:expectedFailures,
unexpectedSuccesses=:unexpectedSuccesses WHERE id=:run_id''', data)
run = self.get_run(run_id, cursor=cursor)
self.clean_history(cursor=cursor)
return run
@with_cursor
def get_last_run_id(self, cursor, where=''):
cursor.execute(
'SELECT id FROM run %s ORDER BY finished DESC LIMIT 1' % where)
run_id = cursor.fetchone()
if run_id is not None:
return run_id[0]
@with_cursor
def get_last_successful_run_id(self, cursor):
return self.get_last_run_id('WHERE wasSuccessful=1', cursor=cursor)
@with_cursor
def get_last_successful_full_run_id(self, cursor):
return self.get_last_run_id(
'WHERE wasSuccessful=1 AND full=1', cursor=cursor)
@with_cursor
def get_last_run_ids(self, cursor):
return (
self.get_last_run_id(cursor=cursor),
self.get_last_successful_run_id(cursor=cursor),
self.get_last_successful_full_run_id(cursor=cursor),
)
@with_cursor
def collect_results_after(self, cursor, run_id, status=ok.key,
exclude=True):
cursor.execute('''SELECT DISTINCT name FROM result WHERE run_id IN (
SELECT id FROM run WHERE started>(
SELECT started FROM run WHERE id=?
)) AND status%s?''' % ('=', '!=')[bool(exclude)], (run_id, status))
for row in cursor.fetchall():
yield row[0]
@with_cursor
def failures(self, cursor):
last, successful, full = self.get_last_run_ids(cursor=cursor)
if last != successful:
return set(self.collect_results_after(full, cursor=cursor))
else:
return set()
@with_cursor
def source_has_changed(self, cursor, test_object):
new_hash = source_hash(test_object)
try:
old_hash = self.get_test(str(test_object), cursor=cursor)['hash']
except TestDoesNotExist:
return True
else:
return old_hash != new_hash
@with_cursor
def collect_changes(self, cursor, tests):
for test_object in tests:
if self.source_has_changed(test_object, cursor=cursor):
yield str(test_object)
@with_cursor
def candidates(self, cursor, tests):
failures = self.failures(cursor=cursor)
changes = set(self.collect_changes(tests, cursor=cursor))
return failures | changes
@with_cursor
def should_run_again(self, cursor):
last, successful, full = self.get_last_run_ids(cursor=cursor)
if last is None:
return False
elif last != successful:
return False
elif last == full:
return False
else:
return True
@with_cursor
def clean_history(self, cursor):
run_id = self.get_last_successful_full_run_id(cursor=cursor)
cursor.execute(
'DELETE FROM result WHERE finished'
' < (SELECT started FROM run WHERE id=?)',
(run_id,))
cursor.execute(
'DELETE FROM run WHERE'
' (SELECT count(*) FROM result WHERE run_id=run.id)=0')
@with_cursor
def stats(self, cursor, suite=None):
if suite is None:
cursor.execute('SELECT * FROM test ORDER BY average_time')
else:
cursor.execute(
'SELECT * FROM test WHERE suite=? ORDER BY average_time',
[suite])
for row in cursor.fetchall():
data = dict(zip(row.keys(), row))
data['time'] = data['average_time'].total_seconds()
yield data
@with_cursor
def stats_grouped(self, cursor):
cursor.execute(
'SELECT suite, total(average_time) as time FROM test '
'GROUP BY suite ORDER BY time')
for row in cursor.fetchall():
suite = dict(zip(row.keys(), row))
suite['tests'] = list(
self.stats(cursor=cursor, suite=suite['suite']))
yield suite
def to_utc(self, datetime):
return pytz.utc.localize(datetime) if have_pytz else datetime
def utcnow(self):
return self.to_utc(datetime.datetime.utcnow())
class Row(object):
def __init__(self, row):
self._row = row
def __getitem__(self, key):
return self._row[str(key)]
def source_hash(test_object):
try:
test_method = getattr(test_object, test_object._testMethodName)
except AttributeError:
return ''
try:
source = test_method.__self__.getsource()
except AttributeError:
source = inspect.getsource(test_method)
if six.PY3:
source = source.encode('utf-8')
return hashlib.sha1(source).hexdigest()
def timedelta_to_float(delta):
if hasattr(delta, 'total_seconds'):
return delta.total_seconds()
else:
seconds = delta.seconds + delta.days * 24. * 3600.
return (delta.microseconds + (seconds * 10**6)) / 10**6
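# Worked example (illustrative): datetime.timedelta(days=1, seconds=2,
# microseconds=500000) yields 1*24*3600 + 2 + 0.5 = 86402.5 on either branch.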
def adapt_timedelta(delta):
return str(timedelta_to_float(delta))
def convert_timedelta(s):
return datetime.timedelta(seconds=float(s))
sqlite3.register_adapter(datetime.timedelta, adapt_timedelta)
sqlite3.register_converter(str('timedelta'), convert_timedelta)
def convert_boolean(s):
return bool(int(s))
sqlite3.register_converter(str('boolean'), convert_boolean)
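# Illustrative round-trip sketch (not part of the original module): with the
# adapters/converters registered above and detect_types=PARSE_DECLTYPES (as
# used in Database._connect), TIMEDELTA and BOOLEAN columns convert back
# automatically:
#   conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
#   conn.execute('CREATE TABLE t (d TIMEDELTA, b BOOLEAN)')
#   conn.execute('INSERT INTO t VALUES (?, ?)',
#                (datetime.timedelta(seconds=1.5), True))
#   conn.execute('SELECT d, b FROM t').fetchone()
#   # -> (datetime.timedelta(0, 1, 500000), True)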
#.............................................................................
# db.py
|
|
"""SCons.SConf
Autoconf-like configuration support.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/SConf.py 5023 2010/06/14 22:05:46 scons"
import SCons.compat
import io
import os
import re
import sys
import traceback
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Job
import SCons.Node.FS
import SCons.Taskmaster
import SCons.Util
import SCons.Warnings
import SCons.Conftest
from SCons.Debug import Trace
# Turn off the Conftest error logging
SCons.Conftest.LogInputFiles = 0
SCons.Conftest.LogErrorMessages = 0
# Set
build_type = None
build_types = ['clean', 'help']
def SetBuildType(type):
global build_type
build_type = type
# to be set, if we are in dry-run mode
dryrun = 0
AUTO=0 # use SCons dependency scanning for up-to-date checks
FORCE=1 # force all tests to be rebuilt
CACHE=2 # force all tests to be taken from cache (raise an error, if necessary)
cache_mode = AUTO
def SetCacheMode(mode):
"""Set the Configure cache mode. mode must be one of "auto", "force",
or "cache"."""
global cache_mode
if mode == "auto":
cache_mode = AUTO
elif mode == "force":
cache_mode = FORCE
elif mode == "cache":
cache_mode = CACHE
else:
raise ValueError("SCons.SConf.SetCacheMode: Unknown mode " + mode)
progress_display = SCons.Util.display # will be overwritten by SCons.Script
def SetProgressDisplay(display):
"""Set the progress display to use (called from SCons.Script)"""
global progress_display
progress_display = display
SConfFS = None
_ac_build_counter = 0 # incremented, whenever TryBuild is called
_ac_config_logs = {} # all config.log files created in this build
_ac_config_hs = {} # all config.h files created in this build
sconf_global = None # current sconf object
def _createConfigH(target, source, env):
t = open(str(target[0]), "w")
defname = re.sub('[^A-Za-z0-9_]', '_', str(target[0]).upper())
t.write("""#ifndef %(DEFNAME)s_SEEN
#define %(DEFNAME)s_SEEN
""" % {'DEFNAME' : defname})
t.write(source[0].get_contents())
t.write("""
#endif /* %(DEFNAME)s_SEEN */
""" % {'DEFNAME' : defname})
t.close()
def _stringConfigH(target, source, env):
return "scons: Configure: creating " + str(target[0])
def CreateConfigHBuilder(env):
"""Called just before the building targets phase begins."""
if len(_ac_config_hs) == 0:
return
action = SCons.Action.Action(_createConfigH,
_stringConfigH)
sconfigHBld = SCons.Builder.Builder(action=action)
env.Append( BUILDERS={'SConfigHBuilder':sconfigHBld} )
for k in _ac_config_hs.keys():
env.SConfigHBuilder(k, env.Value(_ac_config_hs[k]))
class SConfWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(SConfWarning)
# some error definitions
class SConfError(SCons.Errors.UserError):
def __init__(self,msg):
SCons.Errors.UserError.__init__(self,msg)
class ConfigureDryRunError(SConfError):
"""Raised when a file or directory needs to be updated during a Configure
process, but the user requested a dry-run"""
def __init__(self,target):
if not isinstance(target, SCons.Node.FS.File):
msg = 'Cannot create configure directory "%s" within a dry-run.' % str(target)
else:
msg = 'Cannot update configure test "%s" within a dry-run.' % str(target)
SConfError.__init__(self,msg)
class ConfigureCacheError(SConfError):
"""Raised when a use explicitely requested the cache feature, but the test
is run the first time."""
def __init__(self,target):
SConfError.__init__(self, '"%s" is not yet built and cache is forced.' % str(target))
# define actions for building text files
def _createSource( target, source, env ):
fd = open(str(target[0]), "w")
fd.write(source[0].get_contents())
fd.close()
def _stringSource( target, source, env ):
return (str(target[0]) + ' <-\n |' +
source[0].get_contents().replace( '\n', "\n |" ) )
class SConfBuildInfo(SCons.Node.FS.FileBuildInfo):
"""
Special build info for targets of configure tests. Additional members
are result (did the builder succeed last time?) and string, which
contains messages of the original build phase.
"""
result = None # -> 0/None -> no error, != 0 error
string = None # the stdout / stderr output when building the target
def set_build_result(self, result, string):
self.result = result
self.string = string
class Streamer(object):
"""
'Sniffer' for a file-like writable object. Similar to the unix tool tee.
"""
def __init__(self, orig):
self.orig = orig
self.s = io.StringIO()
def write(self, str):
if self.orig:
self.orig.write(str)
self.s.write(str)
def writelines(self, lines):
for l in lines:
self.write(l + '\n')
def getvalue(self):
"""
Return everything written to orig since the Streamer was created.
"""
return self.s.getvalue()
def flush(self):
if self.orig:
self.orig.flush()
self.s.flush()
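# Illustrative sketch (not in the original source): Streamer tees writes to
# the wrapped stream while recording a copy for later inspection.
#   s = Streamer(sys.stdout)
#   s.write(u"checking for gcc...\n")   # printed AND captured
#   s.getvalue()                        # -> u"checking for gcc...\n"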
class SConfBuildTask(SCons.Taskmaster.AlwaysTask):
"""
This is almost the same as SCons.Script.BuildTask. Handles SConfErrors
correctly and knows about the current cache_mode.
"""
def display(self, message):
if sconf_global.logstream:
sconf_global.logstream.write("scons: Configure: " + message + "\n")
def display_cached_string(self, bi):
"""
Logs the original builder messages, given the SConfBuildInfo instance
bi.
"""
if not isinstance(bi, SConfBuildInfo):
SCons.Warnings.warn(SConfWarning,
"The stored build information has an unexpected class: %s" % bi.__class__)
else:
self.display("The original builder output was:\n" +
(" |" + str(bi.string)).replace("\n", "\n |"))
def failed(self):
# check, if the reason was a ConfigureDryRunError or a
# ConfigureCacheError and if yes, reraise the exception
exc_type = self.exc_info()[0]
if issubclass(exc_type, SConfError):
raise
elif issubclass(exc_type, SCons.Errors.BuildError):
# we ignore Build Errors (occurs when a test doesn't pass)
# Clear the exception to prevent the contained traceback
# from building a reference cycle.
self.exc_clear()
else:
self.display('Caught exception while building "%s":\n' %
self.targets[0])
try:
excepthook = sys.excepthook
except AttributeError:
# Earlier versions of Python don't have sys.excepthook...
def excepthook(type, value, tb):
traceback.print_tb(tb)
print type, value
excepthook(*self.exc_info())
return SCons.Taskmaster.Task.failed(self)
def collect_node_states(self):
# returns (is_up_to_date, cached_error, cachable)
# where is_up_to_date is 1, if the node(s) are up_to_date
# cached_error is 1, if the node(s) are up_to_date, but the
# build will fail
# cachable is 0, if some nodes are not in our cache
T = 0
changed = False
cached_error = False
cachable = True
for t in self.targets:
if T: Trace('%s' % (t))
bi = t.get_stored_info().binfo
if isinstance(bi, SConfBuildInfo):
if T: Trace(': SConfBuildInfo')
if cache_mode == CACHE:
t.set_state(SCons.Node.up_to_date)
if T: Trace(': set_state(up_to_date)')
else:
if T: Trace(': get_state() %s' % t.get_state())
if T: Trace(': changed() %s' % t.changed())
if (t.get_state() != SCons.Node.up_to_date and t.changed()):
changed = True
if T: Trace(': changed %s' % changed)
cached_error = cached_error or bi.result
else:
if T: Trace(': else')
# the node hasn't been built in a SConf context or doesn't
# exist
cachable = False
changed = ( t.get_state() != SCons.Node.up_to_date )
if T: Trace(': changed %s' % changed)
if T: Trace('\n')
return (not changed, cached_error, cachable)
def execute(self):
if not self.targets[0].has_builder():
return
sconf = sconf_global
is_up_to_date, cached_error, cachable = self.collect_node_states()
if cache_mode == CACHE and not cachable:
raise ConfigureCacheError(self.targets[0])
elif cache_mode == FORCE:
is_up_to_date = 0
if cached_error and is_up_to_date:
self.display("Building \"%s\" failed in a previous run and all "
"its sources are up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
raise SCons.Errors.BuildError # will be 'caught' in self.failed
elif is_up_to_date:
self.display("\"%s\" is up to date." % str(self.targets[0]))
binfo = self.targets[0].get_stored_info().binfo
self.display_cached_string(binfo)
elif dryrun:
raise ConfigureDryRunError(self.targets[0])
else:
# note stdout and stderr are the same here
s = sys.stdout = sys.stderr = Streamer(sys.stdout)
try:
env = self.targets[0].get_build_env()
if cache_mode == FORCE:
# Set up the Decider() to force rebuilds by saying
# that every source has changed. Note that we still
# call the environment's underlying source decider so
# that the correct .sconsign info will get calculated
# and keep the build state consistent.
def force_build(dependency, target, prev_ni,
env_decider=env.decide_source):
env_decider(dependency, target, prev_ni)
return True
if env.decide_source.func_code is not force_build.func_code:
env.Decider(force_build)
env['PSTDOUT'] = env['PSTDERR'] = s
try:
sconf.cached = 0
self.targets[0].build()
finally:
sys.stdout = sys.stderr = env['PSTDOUT'] = \
env['PSTDERR'] = sconf.logstream
except KeyboardInterrupt:
raise
except SystemExit:
exc_value = sys.exc_info()[1]
raise SCons.Errors.ExplicitExit(self.targets[0],exc_value.code)
except Exception, e:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(1, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
raise e
else:
for t in self.targets:
binfo = t.get_binfo()
binfo.__class__ = SConfBuildInfo
binfo.set_build_result(0, s.getvalue())
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = binfo
#sconsign_entry.ninfo = self.get_ninfo()
# We'd like to do this as follows:
# t.store_info(binfo)
# However, we need to store it as an SConfBuildInfo
# object, and store_info() will turn it into a
# regular FileNodeInfo if the target is itself a
# regular File.
sconsign = t.dir.sconsign()
sconsign.set_entry(t.name, sconsign_entry)
sconsign.merge()
class SConfBase(object):
"""This is simply a class to represent a configure context. After
creating a SConf object, you can call any tests. After you have finished your
tests, be sure to call the Finish() method, which returns the modified
environment.
Some words about caching: In most cases, it is not necessary to cache
test results explicitly. Instead, we use the scons dependency checking
mechanism. For example, if one wants to compile a test program
(SConf.TryLink), the compiler is only called if the program dependencies
have changed. However, if the program could not be compiled in a former
SConf run, we need to explicitly cache this error.
"""
def __init__(self, env, custom_tests = {}, conf_dir='$CONFIGUREDIR',
log_file='$CONFIGURELOG', config_h = None, _depth = 0):
"""Constructor. Pass additional tests in the custom_tests-dictinary,
e.g. custom_tests={'CheckPrivate':MyPrivateTest}, where MyPrivateTest
defines a custom test.
Note also the conf_dir and log_file arguments (you may want to
build tests in the VariantDir, not in the SourceDir)
"""
global SConfFS
if not SConfFS:
SConfFS = SCons.Node.FS.default_fs or \
SCons.Node.FS.FS(env.fs.pathTop)
if sconf_global is not None:
raise SCons.Errors.UserError
self.env = env
if log_file is not None:
log_file = SConfFS.File(env.subst(log_file))
self.logfile = log_file
self.logstream = None
self.lastTarget = None
self.depth = _depth
self.cached = 0 # will be set, if all test results are cached
# add default tests
default_tests = {
'CheckCC' : CheckCC,
'CheckCXX' : CheckCXX,
'CheckSHCC' : CheckSHCC,
'CheckSHCXX' : CheckSHCXX,
'CheckFunc' : CheckFunc,
'CheckType' : CheckType,
'CheckTypeSize' : CheckTypeSize,
'CheckDeclaration' : CheckDeclaration,
'CheckHeader' : CheckHeader,
'CheckCHeader' : CheckCHeader,
'CheckCXXHeader' : CheckCXXHeader,
'CheckLib' : CheckLib,
'CheckLibWithHeader' : CheckLibWithHeader,
}
self.AddTests(default_tests)
self.AddTests(custom_tests)
self.confdir = SConfFS.Dir(env.subst(conf_dir))
if config_h is not None:
config_h = SConfFS.File(config_h)
self.config_h = config_h
self._startup()
def Finish(self):
"""Call this method after finished with your tests:
env = sconf.Finish()
"""
self._shutdown()
return self.env
def Define(self, name, value = None, comment = None):
"""
Define a pre processor symbol name, with the optional given value in the
current config header.
        If value is None (default), then "#define name" is written. If value is
        not None, then "#define name value" is written.
        comment is a string which will be put as a C comment in the
        header, to explain the meaning of the value (appropriate C comments
        /* and */ will be put automatically)."""
lines = []
if comment:
comment_str = "/* %s */" % comment
lines.append(comment_str)
if value is not None:
define_str = "#define %s %s" % (name, value)
else:
define_str = "#define %s" % name
lines.append(define_str)
lines.append('')
self.config_h_text = self.config_h_text + '\n'.join(lines)
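    # For illustration, a call such as
    #   sconf.Define('HAVE_FOO_H', 1, 'Define to 1 if you have <foo.h>.')
    # appends the following to the pending config header text:
    #   /* Define to 1 if you have <foo.h>. */
    #   #define HAVE_FOO_H 1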
def BuildNodes(self, nodes):
"""
Tries to build the given nodes immediately. Returns 1 on success,
0 on error.
"""
if self.logstream is not None:
# override stdout / stderr to write in log file
oldStdout = sys.stdout
sys.stdout = self.logstream
oldStderr = sys.stderr
sys.stderr = self.logstream
# the engine assumes the current path is the SConstruct directory ...
old_fs_dir = SConfFS.getcwd()
old_os_dir = os.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=1)
# Because we take responsibility here for writing out our
# own .sconsign info (see SConfBuildTask.execute(), above),
# we override the store_info() method with a null place-holder
# so we really control how it gets written.
for n in nodes:
n.store_info = n.do_not_store_info
ret = 1
try:
# ToDo: use user options for calc
save_max_drift = SConfFS.get_max_drift()
SConfFS.set_max_drift(0)
tm = SCons.Taskmaster.Taskmaster(nodes, SConfBuildTask)
# we don't want to build tests in parallel
jobs = SCons.Job.Jobs(1, tm )
jobs.run()
for n in nodes:
state = n.get_state()
if (state != SCons.Node.executed and
state != SCons.Node.up_to_date):
# the node could not be built. we return 0 in this case
ret = 0
finally:
SConfFS.set_max_drift(save_max_drift)
os.chdir(old_os_dir)
SConfFS.chdir(old_fs_dir, change_os_dir=0)
if self.logstream is not None:
# restore stdout / stderr
sys.stdout = oldStdout
sys.stderr = oldStderr
return ret
def pspawn_wrapper(self, sh, escape, cmd, args, env):
"""Wrapper function for handling piped spawns.
This looks to the calling interface (in Action.py) like a "normal"
spawn, but associates the call with the PSPAWN variable from
the construction environment and with the streams to which we
want the output logged. This gets slid into the construction
environment as the SPAWN variable so Action.py doesn't have to
know or care whether it's spawning a piped command or not.
"""
return self.pspawn(sh, escape, cmd, args, env, self.logstream, self.logstream)
def TryBuild(self, builder, text = None, extension = ""):
"""Low level TryBuild implementation. Normally you don't need to
call that - you can use TryCompile / TryLink / TryRun instead
"""
global _ac_build_counter
# Make sure we have a PSPAWN value, and save the current
# SPAWN value.
try:
self.pspawn = self.env['PSPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing PSPAWN construction variable.')
try:
save_spawn = self.env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
nodesToBeBuilt = []
f = "conftest_" + str(_ac_build_counter)
pref = self.env.subst( builder.builder.prefix )
suff = self.env.subst( builder.builder.suffix )
target = self.confdir.File(pref + f + suff)
try:
# Slide our wrapper into the construction environment as
# the SPAWN function.
self.env['SPAWN'] = self.pspawn_wrapper
sourcetext = self.env.Value(text)
if text is not None:
textFile = self.confdir.File(f + extension)
textFileNode = self.env.SConfSourceBuilder(target=textFile,
source=sourcetext)
nodesToBeBuilt.extend(textFileNode)
source = textFileNode
else:
source = None
nodes = builder(target = target, source = source)
if not SCons.Util.is_List(nodes):
nodes = [nodes]
nodesToBeBuilt.extend(nodes)
result = self.BuildNodes(nodesToBeBuilt)
finally:
self.env['SPAWN'] = save_spawn
_ac_build_counter = _ac_build_counter + 1
if result:
self.lastTarget = nodes[0]
else:
self.lastTarget = None
return result
def TryAction(self, action, text = None, extension = ""):
"""Tries to execute the given action with optional source file
        contents <text> and optional source file extension <extension>.
Returns the status (0 : failed, 1 : ok) and the contents of the
output file.
"""
builder = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS = {'SConfActionBuilder' : builder} )
ok = self.TryBuild(self.env.SConfActionBuilder, text, extension)
del self.env['BUILDERS']['SConfActionBuilder']
if ok:
outputStr = self.lastTarget.get_contents()
return (1, outputStr)
return (0, "")
def TryCompile( self, text, extension):
"""Compiles the program given in text to an env.Object, using extension
as file extension (e.g. '.c'). Returns 1, if compilation was
successful, 0 otherwise. The target is saved in self.lastTarget (for
further processing).
"""
return self.TryBuild(self.env.Object, text, extension)
def TryLink( self, text, extension ):
"""Compiles the program given in text to an executable env.Program,
using extension as file extension (e.g. '.c'). Returns 1, if
compilation was successful, 0 otherwise. The target is saved in
self.lastTarget (for further processing).
"""
return self.TryBuild(self.env.Program, text, extension )
def TryRun(self, text, extension ):
"""Compiles and runs the program given in text, using extension
as file extension (e.g. '.c'). Returns (1, outputStr) on success,
(0, '') otherwise. The target (a file containing the program's stdout)
is saved in self.lastTarget (for further processing).
"""
ok = self.TryLink(text, extension)
if( ok ):
prog = self.lastTarget
pname = prog.path
output = self.confdir.File(os.path.basename(pname)+'.out')
node = self.env.Command(output, prog, [ [ pname, ">", "${TARGET}"] ])
ok = self.BuildNodes(node)
if ok:
outputStr = output.get_contents()
return( 1, outputStr)
return (0, "")
class TestWrapper(object):
"""A wrapper around Tests (to ensure sanity)"""
def __init__(self, test, sconf):
self.test = test
self.sconf = sconf
def __call__(self, *args, **kw):
if not self.sconf.active:
raise SCons.Errors.UserError
context = CheckContext(self.sconf)
ret = self.test(context, *args, **kw)
if self.sconf.config_h is not None:
self.sconf.config_h_text = self.sconf.config_h_text + context.config_h
context.Result("error: no result")
return ret
def AddTest(self, test_name, test_instance):
"""Adds test_class to this SConf instance. It can be called with
self.test_name(...)"""
setattr(self, test_name, SConfBase.TestWrapper(test_instance, self))
def AddTests(self, tests):
"""Adds all the tests given in the tests dictionary to this SConf
instance
"""
for name in tests.keys():
self.AddTest(name, tests[name])
def _createDir( self, node ):
dirName = str(node)
if dryrun:
if not os.path.isdir( dirName ):
raise ConfigureDryRunError(dirName)
else:
if not os.path.isdir( dirName ):
os.makedirs( dirName )
node._exists = 1
def _startup(self):
"""Private method. Set up logstream, and set the environment
variables necessary for a piped build
"""
global _ac_config_logs
global sconf_global
global SConfFS
self.lastEnvFs = self.env.fs
self.env.fs = SConfFS
self._createDir(self.confdir)
self.confdir.up().add_ignore( [self.confdir] )
if self.logfile is not None and not dryrun:
# truncate logfile, if SConf.Configure is called for the first time
# in a build
if self.logfile in _ac_config_logs:
log_mode = "a"
else:
_ac_config_logs[self.logfile] = None
log_mode = "w"
fp = open(str(self.logfile), log_mode)
self.logstream = SCons.Util.Unbuffered(fp)
            # logfile may stay in a build directory, so we tell
            # the build system not to override it with a file of the same
            # name that may already exist in the source directory
self.logfile.dir.add_ignore( [self.logfile] )
tb = traceback.extract_stack()[-3-self.depth]
old_fs_dir = SConfFS.getcwd()
SConfFS.chdir(SConfFS.Top, change_os_dir=0)
self.logstream.write('file %s,line %d:\n\tConfigure(confdir = %s)\n' %
(tb[0], tb[1], str(self.confdir)) )
SConfFS.chdir(old_fs_dir)
else:
self.logstream = None
# we use a special builder to create source files from TEXT
action = SCons.Action.Action(_createSource,
_stringSource)
sconfSrcBld = SCons.Builder.Builder(action=action)
self.env.Append( BUILDERS={'SConfSourceBuilder':sconfSrcBld} )
self.config_h_text = _ac_config_hs.get(self.config_h, "")
self.active = 1
# only one SConf instance should be active at a time ...
sconf_global = self
def _shutdown(self):
"""Private method. Reset to non-piped spawn"""
global sconf_global, _ac_config_hs
if not self.active:
raise SCons.Errors.UserError("Finish may be called only once!")
if self.logstream is not None and not dryrun:
self.logstream.write("\n")
self.logstream.close()
self.logstream = None
# remove the SConfSourceBuilder from the environment
blds = self.env['BUILDERS']
del blds['SConfSourceBuilder']
self.env.Replace( BUILDERS=blds )
self.active = 0
sconf_global = None
if not self.config_h is None:
_ac_config_hs[self.config_h] = self.config_h_text
self.env.fs = self.lastEnvFs
class CheckContext(object):
"""Provides a context for configure tests. Defines how a test writes to the
screen and log file.
A typical test is just a callable with an instance of CheckContext as
first argument:
def CheckCustom(context, ...)
context.Message('Checking my weird test ... ')
ret = myWeirdTestFunction(...)
context.Result(ret)
Often, myWeirdTestFunction will be one of
context.TryCompile/context.TryLink/context.TryRun. The results of
    those are cached, since they are only rebuilt if the dependencies have
    changed.
"""
def __init__(self, sconf):
"""Constructor. Pass the corresponding SConf instance."""
self.sconf = sconf
self.did_show_result = 0
# for Conftest.py:
self.vardict = {}
self.havedict = {}
self.headerfilename = None
self.config_h = "" # config_h text will be stored here
        # we don't regenerate the config.h file after each test. That means
        # that tests won't be able to include the config.h file, and so
        # they can't do an #ifdef HAVE_XXX_H. This shouldn't be a major
        # issue, though. If it turns out that we need to include config.h
        # in tests, we must ensure that the dependencies are worked out
        # correctly. Note that we can't use Conftest.py's support for config.h,
        # because we would need to specify a builder for the config.h file ...
def Message(self, text):
"""Inform about what we are doing right now, e.g.
'Checking for SOMETHING ... '
"""
self.Display(text)
self.sconf.cached = 1
self.did_show_result = 0
def Result(self, res):
"""Inform about the result of the test. res may be an integer or a
string. In case of an integer, the written text will be 'yes' or 'no'.
The result is only displayed when self.did_show_result is not set.
"""
if isinstance(res, (int, bool)):
if res:
text = "yes"
else:
text = "no"
elif isinstance(res, str):
text = res
else:
raise TypeError("Expected string, int or bool, got " + str(type(res)))
if self.did_show_result == 0:
# Didn't show result yet, do it now.
self.Display(text + "\n")
self.did_show_result = 1
def TryBuild(self, *args, **kw):
return self.sconf.TryBuild(*args, **kw)
def TryAction(self, *args, **kw):
return self.sconf.TryAction(*args, **kw)
def TryCompile(self, *args, **kw):
return self.sconf.TryCompile(*args, **kw)
def TryLink(self, *args, **kw):
return self.sconf.TryLink(*args, **kw)
def TryRun(self, *args, **kw):
return self.sconf.TryRun(*args, **kw)
def __getattr__( self, attr ):
if( attr == 'env' ):
return self.sconf.env
elif( attr == 'lastTarget' ):
return self.sconf.lastTarget
else:
raise AttributeError("CheckContext instance has no attribute '%s'" % attr)
#### Stuff used by Conftest.py (look there for explanations).
def BuildProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Program, text, ext)
def CompileProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.Object, text, ext)
def CompileSharedObject(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $SHCC, $CPPFLAGS, etc.
return not self.TryBuild(self.env.SharedObject, text, ext)
def RunProg(self, text, ext):
self.sconf.cached = 1
# TODO: should use self.vardict for $CC, $CPPFLAGS, etc.
st, out = self.TryRun(text, ext)
return not st, out
def AppendLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Append(LIBS = lib_name_list)
return oldLIBS
def PrependLIBS(self, lib_name_list):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Prepend(LIBS = lib_name_list)
return oldLIBS
def SetLIBS(self, val):
oldLIBS = self.env.get( 'LIBS', [] )
self.env.Replace(LIBS = val)
return oldLIBS
def Display(self, msg):
if self.sconf.cached:
            # We assume that Display is called twice for each test here:
            # once for the "Checking for ..." message and once for the result.
            # The self.sconf.cached flag can only be set between those calls.
msg = "(cached) " + msg
self.sconf.cached = 0
progress_display(msg, append_newline=0)
self.Log("scons: Configure: " + msg + "\n")
def Log(self, msg):
if self.sconf.logstream is not None:
self.sconf.logstream.write(msg)
#### End of stuff used by Conftest.py.
def SConf(*args, **kw):
if kw.get(build_type, True):
kw['_depth'] = kw.get('_depth', 0) + 1
for bt in build_types:
try:
del kw[bt]
except KeyError:
pass
return SConfBase(*args, **kw)
else:
return SCons.Util.Null()
def CheckFunc(context, function_name, header = None, language = None):
res = SCons.Conftest.CheckFunc(context, function_name, header = header, language = language)
context.did_show_result = 1
return not res
def CheckType(context, type_name, includes = "", language = None):
res = SCons.Conftest.CheckType(context, type_name,
header = includes, language = language)
context.did_show_result = 1
return not res
def CheckTypeSize(context, type_name, includes = "", language = None, expect = None):
res = SCons.Conftest.CheckTypeSize(context, type_name,
header = includes, language = language,
expect = expect)
context.did_show_result = 1
return res
def CheckDeclaration(context, declaration, includes = "", language = None):
res = SCons.Conftest.CheckDeclaration(context, declaration,
includes = includes,
language = language)
context.did_show_result = 1
return not res
def createIncludesFromHeaders(headers, leaveLast, include_quotes = '""'):
# used by CheckHeader and CheckLibWithHeader to produce C - #include
# statements from the specified header (list)
if not SCons.Util.is_List(headers):
headers = [headers]
l = []
if leaveLast:
lastHeader = headers[-1]
headers = headers[:-1]
else:
lastHeader = None
for s in headers:
l.append("#include %s%s%s\n"
% (include_quotes[0], s, include_quotes[1]))
return ''.join(l), lastHeader
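# For illustration, with the default quotes this helper turns a header list
# into #include lines, optionally holding back the last header:
#   createIncludesFromHeaders(['stdio.h', 'stdlib.h'], 1)
#     -> ('#include "stdio.h"\n', 'stdlib.h')
#   createIncludesFromHeaders('stdio.h', 0, '<>')
#     -> ('#include <stdio.h>\n', None)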
def CheckHeader(context, header, include_quotes = '<>', language = None):
"""
A test for a C or C++ header file.
"""
prog_prefix, hdr_to_check = \
createIncludesFromHeaders(header, 1, include_quotes)
res = SCons.Conftest.CheckHeader(context, hdr_to_check, prog_prefix,
language = language,
include_quotes = include_quotes)
context.did_show_result = 1
return not res
def CheckCC(context):
res = SCons.Conftest.CheckCC(context)
context.did_show_result = 1
return not res
def CheckCXX(context):
res = SCons.Conftest.CheckCXX(context)
context.did_show_result = 1
return not res
def CheckSHCC(context):
res = SCons.Conftest.CheckSHCC(context)
context.did_show_result = 1
return not res
def CheckSHCXX(context):
res = SCons.Conftest.CheckSHCXX(context)
context.did_show_result = 1
return not res
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCHeader(context, header, include_quotes = '""'):
"""
A test for a C header file.
"""
return CheckHeader(context, header, include_quotes, language = "C")
# Bram: Make this function obsolete? CheckHeader() is more generic.
def CheckCXXHeader(context, header, include_quotes = '""'):
"""
A test for a C++ header file.
"""
return CheckHeader(context, header, include_quotes, language = "C++")
def CheckLib(context, library = None, symbol = "main",
header = None, language = None, autoadd = 1):
"""
A test for a library. See also CheckLibWithHeader.
Note that library may also be None to test whether the given symbol
compiles without flags.
"""
if library == []:
library = [None]
if not SCons.Util.is_List(library):
library = [library]
# ToDo: accept path for the library
res = SCons.Conftest.CheckLib(context, library, symbol, header = header,
language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
# XXX
# Bram: Can only include one header and can't use #ifdef HAVE_HEADER_H.
def CheckLibWithHeader(context, libs, header, language,
call = None, autoadd = 1):
# ToDo: accept path for library. Support system header files.
"""
Another (more sophisticated) test for a library.
    Checks if library and header are available for language (may be 'C'
    or 'CXX'). call may be a valid expression _with_ a trailing ';'.
As in CheckLib, we support library=None, to test if the call compiles
without extra link flags.
"""
prog_prefix, dummy = \
createIncludesFromHeaders(header, 0)
if libs == []:
libs = [None]
if not SCons.Util.is_List(libs):
libs = [libs]
res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
call = call, language = language, autoadd = autoadd)
context.did_show_result = 1
return not res
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
"""Support for the Lovelace UI."""
import logging
import voluptuous as vol
from homeassistant.components import frontend
from homeassistant.config import async_hass_config_yaml, async_process_component_config
from homeassistant.const import CONF_FILENAME, CONF_MODE, CONF_RESOURCES
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import collection, config_validation as cv
from homeassistant.helpers.service import async_register_admin_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType
from homeassistant.loader import async_get_integration
from . import dashboard, resources, websocket
from .const import (
CONF_ICON,
CONF_REQUIRE_ADMIN,
CONF_SHOW_IN_SIDEBAR,
CONF_TITLE,
CONF_URL_PATH,
DASHBOARD_BASE_CREATE_FIELDS,
DEFAULT_ICON,
DOMAIN,
MODE_STORAGE,
MODE_YAML,
RESOURCE_CREATE_FIELDS,
RESOURCE_RELOAD_SERVICE_SCHEMA,
RESOURCE_SCHEMA,
RESOURCE_UPDATE_FIELDS,
SERVICE_RELOAD_RESOURCES,
STORAGE_DASHBOARD_CREATE_FIELDS,
STORAGE_DASHBOARD_UPDATE_FIELDS,
url_slug,
)
from .system_health import system_health_info # NOQA
_LOGGER = logging.getLogger(__name__)
CONF_DASHBOARDS = "dashboards"
YAML_DASHBOARD_SCHEMA = vol.Schema(
{
**DASHBOARD_BASE_CREATE_FIELDS,
vol.Required(CONF_MODE): MODE_YAML,
vol.Required(CONF_FILENAME): cv.path,
}
)
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN, default={}): vol.Schema(
{
vol.Optional(CONF_MODE, default=MODE_STORAGE): vol.All(
vol.Lower, vol.In([MODE_YAML, MODE_STORAGE])
),
vol.Optional(CONF_DASHBOARDS): cv.schema_with_slug_keys(
YAML_DASHBOARD_SCHEMA,
slug_validator=url_slug,
),
vol.Optional(CONF_RESOURCES): [RESOURCE_SCHEMA],
}
)
},
extra=vol.ALLOW_EXTRA,
)
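# A sketch of the structure CONFIG_SCHEMA accepts, written as the already
# parsed dict (normally this comes from configuration.yaml; the dashboard
# slug, title and filename below are illustrative, and the full set of
# dashboard fields is defined by DASHBOARD_BASE_CREATE_FIELDS):
#
#   {
#       "lovelace": {
#           "mode": "yaml",
#           "dashboards": {
#               "example-dashboard": {
#                   "mode": "yaml",
#                   "title": "Example",
#                   "filename": "example-dashboard.yaml",
#               },
#           },
#       },
#   }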
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the Lovelace commands."""
mode = config[DOMAIN][CONF_MODE]
yaml_resources = config[DOMAIN].get(CONF_RESOURCES)
frontend.async_register_built_in_panel(hass, DOMAIN, config={"mode": mode})
async def reload_resources_service_handler(service_call: ServiceCallType) -> None:
"""Reload yaml resources."""
try:
conf = await async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(err)
return
integration = await async_get_integration(hass, DOMAIN)
config = await async_process_component_config(hass, conf, integration)
resource_collection = await create_yaml_resource_col(
hass, config[DOMAIN].get(CONF_RESOURCES)
)
hass.data[DOMAIN]["resources"] = resource_collection
if mode == MODE_YAML:
default_config = dashboard.LovelaceYAML(hass, None, None)
resource_collection = await create_yaml_resource_col(hass, yaml_resources)
async_register_admin_service(
hass,
DOMAIN,
SERVICE_RELOAD_RESOURCES,
reload_resources_service_handler,
schema=RESOURCE_RELOAD_SERVICE_SCHEMA,
)
else:
default_config = dashboard.LovelaceStorage(hass, None)
if yaml_resources is not None:
_LOGGER.warning(
"Lovelace is running in storage mode. Define resources via user interface"
)
resource_collection = resources.ResourceStorageCollection(hass, default_config)
collection.StorageCollectionWebsocket(
resource_collection,
"lovelace/resources",
"resource",
RESOURCE_CREATE_FIELDS,
RESOURCE_UPDATE_FIELDS,
).async_setup(hass, create_list=False)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_config
)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_save_config
)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_delete_config
)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_resources
)
hass.components.websocket_api.async_register_command(
websocket.websocket_lovelace_dashboards
)
hass.data[DOMAIN] = {
# We store a dictionary mapping url_path: config. None is the default.
"dashboards": {None: default_config},
"resources": resource_collection,
"yaml_dashboards": config[DOMAIN].get(CONF_DASHBOARDS, {}),
}
if hass.config.safe_mode:
return True
async def storage_dashboard_changed(change_type, item_id, item):
"""Handle a storage dashboard change."""
url_path = item[CONF_URL_PATH]
if change_type == collection.CHANGE_REMOVED:
frontend.async_remove_panel(hass, url_path)
await hass.data[DOMAIN]["dashboards"].pop(url_path).async_delete()
return
if change_type == collection.CHANGE_ADDED:
existing = hass.data[DOMAIN]["dashboards"].get(url_path)
if existing:
_LOGGER.warning(
"Cannot register panel at %s, it is already defined in %s",
url_path,
existing,
)
return
hass.data[DOMAIN]["dashboards"][url_path] = dashboard.LovelaceStorage(
hass, item
)
update = False
else:
hass.data[DOMAIN]["dashboards"][url_path].config = item
update = True
try:
_register_panel(hass, url_path, MODE_STORAGE, item, update)
except ValueError:
_LOGGER.warning("Failed to %s panel %s from storage", change_type, url_path)
# Process YAML dashboards
for url_path, dashboard_conf in hass.data[DOMAIN]["yaml_dashboards"].items():
# For now always mode=yaml
config = dashboard.LovelaceYAML(hass, url_path, dashboard_conf)
hass.data[DOMAIN]["dashboards"][url_path] = config
try:
_register_panel(hass, url_path, MODE_YAML, dashboard_conf, False)
except ValueError:
_LOGGER.warning("Panel url path %s is not unique", url_path)
# Process storage dashboards
dashboards_collection = dashboard.DashboardsCollection(hass)
dashboards_collection.async_add_listener(storage_dashboard_changed)
await dashboards_collection.async_load()
collection.StorageCollectionWebsocket(
dashboards_collection,
"lovelace/dashboards",
"dashboard",
STORAGE_DASHBOARD_CREATE_FIELDS,
STORAGE_DASHBOARD_UPDATE_FIELDS,
).async_setup(hass, create_list=False)
return True
async def create_yaml_resource_col(hass, yaml_resources):
"""Create yaml resources collection."""
if yaml_resources is None:
default_config = dashboard.LovelaceYAML(hass, None, None)
try:
ll_conf = await default_config.async_load(False)
except HomeAssistantError:
pass
else:
if CONF_RESOURCES in ll_conf:
_LOGGER.warning(
"Resources need to be specified in your configuration.yaml. Please see the docs"
)
yaml_resources = ll_conf[CONF_RESOURCES]
return resources.ResourceYAMLCollection(yaml_resources or [])
@callback
def _register_panel(hass, url_path, mode, config, update):
"""Register a panel."""
kwargs = {
"frontend_url_path": url_path,
"require_admin": config[CONF_REQUIRE_ADMIN],
"config": {"mode": mode},
"update": update,
}
if config[CONF_SHOW_IN_SIDEBAR]:
kwargs["sidebar_title"] = config[CONF_TITLE]
kwargs["sidebar_icon"] = config.get(CONF_ICON, DEFAULT_ICON)
frontend.async_register_built_in_panel(hass, DOMAIN, **kwargs)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""PyLNP main library."""
from __future__ import print_function, unicode_literals, absolute_import
import sys
from tkgui.tkgui import TkGui
import distutils.dir_util as dir_util
import fnmatch
import glob
import os
import re
import shutil
import subprocess
import tempfile
import time
from datetime import datetime
import errorlog
from threading import Thread
from settings import DFConfiguration
from json_config import JSONConfiguration
try: # Python 2
# pylint:disable=import-error
from urllib2 import urlopen, URLError, Request
except ImportError: # Python 3
# pylint:disable=import-error, no-name-in-module
from urllib.request import urlopen, Request
from urllib.error import URLError
BASEDIR = '.'
VERSION = '0.5.1'
class PyLNP(object):
"""
PyLNP library class.
Acts as an abstraction layer between the UI and the Dwarf Fortress
instance.
"""
def __init__(self):
"""Constructor for the PyLNP library."""
self.bundle = ''
if hasattr(sys, 'frozen'):
os.chdir(os.path.dirname(sys.executable))
if sys.platform == 'win32':
self.bundle = 'win'
elif sys.platform.startswith('linux'):
self.bundle = 'linux'
elif sys.platform == 'darwin':
self.bundle = 'osx'
# OS X bundles start in different directory
os.chdir('../../..')
else:
os.chdir(os.path.dirname(os.path.abspath(__file__)))
errorlog.start()
self.lnp_dir = self.identify_folder_name(BASEDIR, 'LNP')
if not os.path.isdir(self.lnp_dir):
print('WARNING: LNP folder is missing!', file=sys.stderr)
self.keybinds_dir = self.identify_folder_name(self.lnp_dir, 'Keybinds')
self.graphics_dir = self.identify_folder_name(self.lnp_dir, 'Graphics')
self.utils_dir = self.identify_folder_name(self.lnp_dir, 'Utilities')
self.colors_dir = self.identify_folder_name(self.lnp_dir, 'Colors')
self.embarks_dir = self.identify_folder_name(self.lnp_dir, 'Embarks')
self.folders = []
self.df_dir = ''
self.settings = None
self.init_dir = ''
self.save_dir = ''
self.autorun = []
self.running = {}
config_file = 'PyLNP.json'
if os.access(os.path.join(self.lnp_dir, 'PyLNP.json'), os.F_OK):
config_file = os.path.join(self.lnp_dir, 'PyLNP.json')
self.config = JSONConfiguration(config_file)
self.userconfig = JSONConfiguration('PyLNP.user')
self.load_autorun()
self.find_df_folder()
self.new_version = None
self.ui = TkGui(self)
self.check_update()
self.ui.start()
@staticmethod
def identify_folder_name(base, name):
"""
Allows folder names to be lowercase on case-sensitive systems.
Returns "base/name" where name is lowercase if the lower case version
exists and the standard case version does not.
Params:
base
The path containing the desired folder.
name
The standard case name of the desired folder.
"""
normal = os.path.join(base, name)
lower = os.path.join(base, name.lower())
if os.path.isdir(lower) and not os.path.isdir(normal):
return lower
return normal
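    # For example, on a case-sensitive filesystem where only 'LNP/keybinds'
    # exists, identify_folder_name('LNP', 'Keybinds') returns 'LNP/keybinds';
    # if 'LNP/Keybinds' exists (or neither folder does), the standard-case
    # path is returned unchanged.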
def load_params(self):
"""Loads settings from the selected Dwarf Fortress instance."""
try:
self.settings.read_settings()
except IOError:
sys.excepthook(*sys.exc_info())
msg = ("Failed to read settings, "
"{0} not really a DF dir?").format(self.df_dir)
raise IOError(msg)
def save_params(self):
"""Saves settings to the selected Dwarf Fortress instance."""
self.settings.write_settings()
def save_config(self):
"""Saves LNP configuration."""
self.userconfig.save_data()
def restore_defaults(self):
"""Copy default settings into the selected Dwarf Fortress instance."""
shutil.copy(
os.path.join(self.lnp_dir, 'Defaults', 'init.txt'),
os.path.join(self.init_dir, 'init.txt')
)
shutil.copy(
os.path.join(self.lnp_dir, 'Defaults', 'd_init.txt'),
os.path.join(self.init_dir, 'd_init.txt')
)
self.load_params()
def run_df(self, force=False):
"""Launches Dwarf Fortress."""
result = None
if sys.platform == 'win32':
result = self.run_program(
os.path.join(self.df_dir, 'Dwarf Fortress.exe'), force, True)
else:
# Linux/OSX: Run DFHack if available
if os.path.isfile(os.path.join(self.df_dir, 'dfhack')):
result = self.run_program(
os.path.join(self.df_dir, 'dfhack'), force, True, True)
if result == False:
raise Exception('Failed to launch a new terminal.')
else:
result = self.run_program(os.path.join(self.df_dir, 'df'))
for prog in self.autorun:
if os.access(os.path.join(self.utils_dir, prog), os.F_OK):
self.run_program(os.path.join(self.utils_dir, prog))
if self.userconfig.get_bool('autoClose'):
sys.exit()
return result
def run_program(self, path, force=False, is_df=False, spawn_terminal=False):
"""
Launches an external program.
Params:
path
The path of the program to launch.
spawn_terminal
Whether or not to spawn a new terminal for this app.
Used only for DFHack.
"""
try:
path = os.path.abspath(path)
workdir = os.path.dirname(path)
run_args = path
nonchild = False
if spawn_terminal:
if sys.platform.startswith('linux'):
script = 'xdg-terminal'
if self.bundle == "linux":
script = os.path.join(sys._MEIPASS, script)
if force or self.check_program_not_running(path, True):
retcode = subprocess.call(
[os.path.abspath(script), path],
cwd=os.path.dirname(path))
return retcode == 0
self.ui.on_program_running(path, is_df)
return None
elif sys.platform == 'darwin':
nonchild = True
run_args = ['open', '-a', 'Terminal.app', path]
elif path.endswith('.jar'): # Explicitly launch JAR files with Java
run_args = ['java', '-jar', os.path.basename(path)]
elif path.endswith('.app'): # OS X application bundle
nonchild = True
run_args = ['open', path]
workdir = path
if force or self.check_program_not_running(path, nonchild):
self.running[path] = subprocess.Popen(run_args, cwd=workdir)
return True
self.ui.on_program_running(path, is_df)
return None
except OSError:
sys.excepthook(*sys.exc_info())
return False
def check_program_not_running(self, path, nonchild=False):
"""
Returns True if a program is not currently running.
Params:
path
The path of the program.
nonchild
If set to True, attempts to check for the process among all
running processes, not just known child processes. Used for
DFHack on Linux and OS X; currently unsupported for Windows.
"""
if nonchild:
ps = subprocess.Popen('ps axww', shell=True, stdout=subprocess.PIPE)
s = ps.stdout.read()
ps.wait()
return path not in s
else:
if path not in self.running:
return True
else:
self.running[path].poll()
return self.running[path].returncode is not None
def open_folder_idx(self, i):
"""Opens the folder specified by index i, as listed in PyLNP.json."""
open_folder(os.path.join(
BASEDIR, self.config['folders'][i][1].replace(
'<df>', self.df_dir)))
def open_savegames(self):
"""Opens the save game folder."""
open_folder(self.save_dir)
def open_utils(self):
"""Opens the utilities folder."""
open_folder(self.utils_dir)
def open_graphics(self):
"""Opens the graphics pack folder."""
open_folder(self.graphics_dir)
@staticmethod
def open_main_folder():
"""Opens the folder containing the program."""
open_folder('.')
def open_lnp_folder(self):
"""Opens the folder containing data for the LNP."""
open_folder(self.lnp_dir)
def open_df_folder(self):
"""Opens the Dwarf Fortress folder."""
open_folder(self.df_dir)
def open_init_folder(self):
"""Opens the init folder in the selected Dwarf Fortress instance."""
open_folder(self.init_dir)
def open_link_idx(self, i):
"""Opens the link specified by index i, as listed in PyLNP.json."""
self.open_url(self.config['links'][i][1])
@staticmethod
def open_url(url):
"""Launches a web browser to the Dwarf Fortress webpage."""
import webbrowser
webbrowser.open(url)
def find_df_folder(self):
"""Locates all suitable Dwarf Fortress installations (folders starting
with "Dwarf Fortress" or "df")"""
self.folders = folders = tuple([
o for o in
glob.glob(os.path.join(BASEDIR, 'Dwarf Fortress*')) +
glob.glob(os.path.join(BASEDIR, 'df*')) if os.path.isdir(o)
])
self.df_dir = ''
if len(folders) == 1:
self.set_df_folder(folders[0])
def set_df_folder(self, path):
"""
Selects the Dwarf Fortress instance to operate on.
:param path: The path of the Dwarf Fortress instance to use.
"""
self.df_dir = os.path.abspath(path)
self.init_dir = os.path.join(self.df_dir, 'data', 'init')
self.save_dir = os.path.join(self.df_dir, 'data', 'save')
self.settings = DFConfiguration(self.df_dir)
self.install_extras()
self.load_params()
self.read_hacks()
@staticmethod
def get_text_files(directory):
"""
Returns a list of .txt files in <directory>.
Excludes all filenames beginning with "readme" (case-insensitive).
Params:
directory
The directory to search.
"""
temp = glob.glob(os.path.join(directory, '*.txt'))
result = []
for f in temp:
if not os.path.basename(f).lower().startswith('readme'):
result.append(f)
return result
def read_keybinds(self):
"""Returns a list of keybinding files."""
return tuple([
os.path.basename(o) for o in self.get_text_files(self.keybinds_dir)
])
def read_graphics(self):
"""Returns a list of graphics directories."""
packs = [
os.path.basename(o) for o in
glob.glob(os.path.join(self.graphics_dir, '*')) if
os.path.isdir(o)]
result = []
for p in packs:
font = self.settings.read_value(os.path.join(
self.graphics_dir, p, 'data', 'init', 'init.txt'), 'FONT')
graphics = self.settings.read_value(
os.path.join(self.graphics_dir, p, 'data', 'init', 'init.txt'),
'GRAPHICS_FONT')
result.append((p, font, graphics))
return tuple(result)
def current_pack(self):
"""
Returns the currently installed graphics pack.
If the pack cannot be identified, returns "FONT/GRAPHICS_FONT".
"""
packs = self.read_graphics()
for p in packs:
if (self.settings.FONT == p[1] and
self.settings.GRAPHICS_FONT == p[2]):
return p[0]
return str(self.settings.FONT)+'/'+str(self.settings.GRAPHICS_FONT)
@staticmethod
def read_utility_lists(path):
"""
Reads a list of filenames from a utility list (e.g. include.txt).
:param path: The file to read.
"""
result = []
try:
util_file = open(path)
for line in util_file:
for match in re.findall(r'\[(.+)\]', line):
result.append(match)
except IOError:
pass
return result
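    # For illustration, each line of such a list file names one utility in
    # square brackets; a line like "[SomeTool.exe]" (an illustrative name)
    # contributes 'SomeTool.exe' to the returned list, while lines without
    # brackets are ignored.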
def read_utilities(self):
"""Returns a list of utility programs."""
exclusions = self.read_utility_lists(os.path.join(
self.utils_dir, 'exclude.txt'))
# Allow for an include list of filenames that will be treated as valid
# utilities. Useful for e.g. Linux, where executables rarely have
# extensions.
inclusions = self.read_utility_lists(os.path.join(
self.utils_dir, 'include.txt'))
progs = []
patterns = ['*.jar'] # Java applications
if sys.platform in ['windows', 'win32']:
patterns.append('*.exe') # Windows executables
patterns.append('*.bat') # Batch files
else:
patterns.append('*.sh') # Shell scripts for Linux and OS X
for root, dirnames, filenames in os.walk(self.utils_dir):
if sys.platform == 'darwin':
for dirname in dirnames:
if fnmatch.fnmatch(dirname, '*.app'):
# OS X application bundles are really directories
progs.append(os.path.relpath(
os.path.join(root, dirname),
os.path.join(self.utils_dir)))
for filename in filenames:
if ((
any(fnmatch.fnmatch(filename, p) for p in patterns) or
filename in inclusions) and
filename not in exclusions):
progs.append(os.path.relpath(
os.path.join(root, filename),
os.path.join(self.utils_dir)))
return progs
def read_embarks(self):
"""Returns a list of embark profiles."""
return tuple([
os.path.basename(o) for o in self.get_text_files(self.embarks_dir)])
def toggle_autoclose(self):
"""Toggle automatic closing of the UI when launching DF."""
self.userconfig['autoClose'] = not self.userconfig.get_bool('autoClose')
self.userconfig.save_data()
def toggle_autorun(self, item):
"""
Toggles autorun for the specified item.
Params:
item
The item to toggle autorun for.
"""
if item in self.autorun:
self.autorun.remove(item)
else:
self.autorun.append(item)
self.save_autorun()
def load_autorun(self):
"""Loads autorun settings."""
self.autorun = []
try:
for line in open(os.path.join(self.utils_dir, 'autorun.txt')):
self.autorun.append(line)
except IOError:
pass
def save_autorun(self):
"""Saves autorun settings."""
autofile = open(os.path.join(self.utils_dir, 'autorun.txt'), 'w')
autofile.write("\n".join(self.autorun))
autofile.close()
def cycle_option(self, field):
"""
Cycles an option field between its possible values.
:param field: The field to cycle.
"""
self.settings.cycle_item(field)
self.save_params()
def set_option(self, field, value):
"""
Sets a field to a specific value.
Params:
field
The field to set.
value
The new value for the field.
"""
self.settings.set_value(field, value)
self.save_params()
def load_keybinds(self, filename):
"""
Overwrites Dwarf Fortress keybindings from a file.
Params:
filename
The keybindings file to use.
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
target = os.path.join(self.init_dir, 'interface.txt')
shutil.copyfile(os.path.join(self.keybinds_dir, filename), target)
def keybind_exists(self, filename):
"""
Returns whether or not a keybindings file already exists.
Params:
filename
The filename to check.
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
return os.access(os.path.join(self.keybinds_dir, filename), os.F_OK)
def save_keybinds(self, filename):
"""
Save current keybindings to a file.
Params:
filename
The name of the new keybindings file.
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
filename = os.path.join(self.keybinds_dir, filename)
shutil.copyfile(os.path.join(self.init_dir, 'interface.txt'), filename)
self.read_keybinds()
def delete_keybinds(self, filename):
"""
Deletes a keybindings file.
Params:
filename
The filename to delete.
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
os.remove(os.path.join(self.keybinds_dir, filename))
def install_graphics(self, pack):
"""
Installs the graphics pack located in LNP/Graphics/<pack>.
Params:
pack
The name of the pack to install.
Returns:
True if successful,
          False if an exception occurred,
None if required files are missing (raw/graphics, data/init)
"""
gfx_dir = os.path.join(self.graphics_dir, pack)
if (os.path.isdir(gfx_dir) and
os.path.isdir(os.path.join(gfx_dir, 'raw', 'graphics')) and
os.path.isdir(os.path.join(gfx_dir, 'data', 'init'))):
try:
# Delete old graphics
if os.path.isdir(os.path.join(self.df_dir, 'raw', 'graphics')):
dir_util.remove_tree(
os.path.join(self.df_dir, 'raw', 'graphics'))
# Copy new raws
dir_util.copy_tree(
os.path.join(gfx_dir, 'raw'),
os.path.join(self.df_dir, 'raw'))
if os.path.isdir(os.path.join(self.df_dir, 'data', 'art')):
dir_util.remove_tree(
os.path.join(self.df_dir, 'data', 'art'))
dir_util.copy_tree(
os.path.join(gfx_dir, 'data', 'art'),
os.path.join(self.df_dir, 'data', 'art'))
self.patch_inits(gfx_dir)
shutil.copyfile(
os.path.join(gfx_dir, 'data', 'init', 'colors.txt'),
os.path.join(self.df_dir, 'data', 'init', 'colors.txt'))
try: # TwbT support
os.remove(os.path.join(
self.df_dir, 'data', 'init', 'overrides.txt'))
except:
pass
try: # TwbT support
shutil.copyfile(
os.path.join(gfx_dir, 'data', 'init', 'overrides.txt'),
os.path.join(
self.df_dir, 'data', 'init', 'overrides.txt'))
except:
pass
except Exception:
sys.excepthook(*sys.exc_info())
return False
else:
return True
else:
return None
self.load_params()
def patch_inits(self, gfx_dir):
"""
Installs init files from a graphics pack by selectively changing
specific fields. All settings outside of the mentioned fields are
preserved.
TODO: Consider if there's a better option than listing all fields
explicitly...
"""
d_init_fields = [
'WOUND_COLOR_NONE', 'WOUND_COLOR_MINOR',
'WOUND_COLOR_INHIBITED', 'WOUND_COLOR_FUNCTION_LOSS',
'WOUND_COLOR_BROKEN', 'WOUND_COLOR_MISSING', 'SKY', 'CHASM',
'PILLAR_TILE',
# Tracks
'TRACK_N', 'TRACK_S', 'TRACK_E', 'TRACK_W', 'TRACK_NS',
'TRACK_NE', 'TRACK_NW', 'TRACK_SE', 'TRACK_SW', 'TRACK_EW',
'TRACK_NSE', 'TRACK_NSW', 'TRACK_NEW', 'TRACK_SEW',
'TRACK_NSEW', 'TRACK_RAMP_N', 'TRACK_RAMP_S', 'TRACK_RAMP_E',
'TRACK_RAMP_W', 'TRACK_RAMP_NS', 'TRACK_RAMP_NE',
'TRACK_RAMP_NW', 'TRACK_RAMP_SE', 'TRACK_RAMP_SW',
'TRACK_RAMP_EW', 'TRACK_RAMP_NSE', 'TRACK_RAMP_NSW',
'TRACK_RAMP_NEW', 'TRACK_RAMP_SEW', 'TRACK_RAMP_NSEW',
# Trees
'TREE_ROOT_SLOPING', 'TREE_TRUNK_SLOPING',
'TREE_ROOT_SLOPING_DEAD', 'TREE_TRUNK_SLOPING_DEAD',
'TREE_ROOTS', 'TREE_ROOTS_DEAD', 'TREE_BRANCHES',
'TREE_BRANCHES_DEAD', 'TREE_SMOOTH_BRANCHES',
'TREE_SMOOTH_BRANCHES_DEAD', 'TREE_TRUNK_PILLAR',
'TREE_TRUNK_PILLAR_DEAD', 'TREE_CAP_PILLAR',
'TREE_CAP_PILLAR_DEAD', 'TREE_TRUNK_N', 'TREE_TRUNK_S',
'TREE_TRUNK_N_DEAD', 'TREE_TRUNK_S_DEAD', 'TREE_TRUNK_EW',
'TREE_TRUNK_EW_DEAD', 'TREE_CAP_WALL_N', 'TREE_CAP_WALL_S',
'TREE_CAP_WALL_N_DEAD', 'TREE_CAP_WALL_S_DEAD', 'TREE_TRUNK_E',
'TREE_TRUNK_W', 'TREE_TRUNK_E_DEAD', 'TREE_TRUNK_W_DEAD',
'TREE_TRUNK_NS', 'TREE_TRUNK_NS_DEAD', 'TREE_CAP_WALL_E',
'TREE_CAP_WALL_W', 'TREE_CAP_WALL_E_DEAD',
'TREE_CAP_WALL_W_DEAD', 'TREE_TRUNK_NW', 'TREE_CAP_WALL_NW',
'TREE_TRUNK_NW_DEAD', 'TREE_CAP_WALL_NW_DEAD', 'TREE_TRUNK_NE',
'TREE_CAP_WALL_NE', 'TREE_TRUNK_NE_DEAD',
'TREE_CAP_WALL_NE_DEAD', 'TREE_TRUNK_SW', 'TREE_CAP_WALL_SW',
'TREE_TRUNK_SW_DEAD', 'TREE_CAP_WALL_SW_DEAD', 'TREE_TRUNK_SE',
'TREE_CAP_WALL_SE', 'TREE_TRUNK_SE_DEAD',
'TREE_CAP_WALL_SE_DEAD', 'TREE_TRUNK_NSE',
'TREE_TRUNK_NSE_DEAD', 'TREE_TRUNK_NSW', 'TREE_TRUNK_NSW_DEAD',
'TREE_TRUNK_NEW', 'TREE_TRUNK_NEW_DEAD', 'TREE_TRUNK_SEW',
'TREE_TRUNK_SEW_DEAD', 'TREE_TRUNK_NSEW',
'TREE_TRUNK_NSEW_DEAD', 'TREE_TRUNK_BRANCH_N',
'TREE_TRUNK_BRANCH_N_DEAD', 'TREE_TRUNK_BRANCH_S',
'TREE_TRUNK_BRANCH_S_DEAD', 'TREE_TRUNK_BRANCH_E',
'TREE_TRUNK_BRANCH_E_DEAD', 'TREE_TRUNK_BRANCH_W',
'TREE_TRUNK_BRANCH_W_DEAD', 'TREE_BRANCH_NS',
'TREE_BRANCH_NS_DEAD', 'TREE_BRANCH_EW', 'TREE_BRANCH_EW_DEAD',
'TREE_BRANCH_NW', 'TREE_BRANCH_NW_DEAD', 'TREE_BRANCH_NE',
'TREE_BRANCH_NE_DEAD', 'TREE_BRANCH_SW', 'TREE_BRANCH_SW_DEAD',
'TREE_BRANCH_SE', 'TREE_BRANCH_SE_DEAD', 'TREE_BRANCH_NSE',
'TREE_BRANCH_NSE_DEAD', 'TREE_BRANCH_NSW',
'TREE_BRANCH_NSW_DEAD', 'TREE_BRANCH_NEW',
'TREE_BRANCH_NEW_DEAD', 'TREE_BRANCH_SEW',
'TREE_BRANCH_SEW_DEAD', 'TREE_BRANCH_NSEW',
'TREE_BRANCH_NSEW_DEAD', 'TREE_TWIGS', 'TREE_TWIGS_DEAD',
'TREE_CAP_RAMP', 'TREE_CAP_RAMP_DEAD', 'TREE_CAP_FLOOR1',
'TREE_CAP_FLOOR2', 'TREE_CAP_FLOOR1_DEAD',
'TREE_CAP_FLOOR2_DEAD', 'TREE_CAP_FLOOR3', 'TREE_CAP_FLOOR4',
'TREE_CAP_FLOOR3_DEAD', 'TREE_CAP_FLOOR4_DEAD',
'TREE_TRUNK_INTERIOR', 'TREE_TRUNK_INTERIOR_DEAD']
init_fields = [
'FONT', 'FULLFONT', 'GRAPHICS', 'GRAPHICS_FONT',
'GRAPHICS_FULLFONT', 'TRUETYPE']
self.settings.read_file(
os.path.join(gfx_dir, 'data', 'init', 'init.txt'), init_fields,
False)
self.settings.read_file(
os.path.join(gfx_dir, 'data', 'init', 'd_init.txt'), d_init_fields,
False)
self.save_params()
def update_savegames(self):
"""Update save games with current raws."""
saves = [
o for o in glob.glob(os.path.join(self.save_dir, '*'))
if os.path.isdir(o) and not o.endswith('current')]
count = 0
if saves:
for save in saves:
count = count + 1
# Delete old graphics
if os.path.isdir(os.path.join(save, 'raw', 'graphics')):
dir_util.remove_tree(os.path.join(save, 'raw', 'graphics'))
# Copy new raws
dir_util.copy_tree(
os.path.join(self.df_dir, 'raw'),
os.path.join(save, 'raw'))
return count
def simplify_graphics(self):
"""Removes unnecessary files from all graphics packs."""
for pack in self.read_graphics():
self.simplify_pack(pack)
def simplify_pack(self, pack):
"""
Removes unnecessary files from LNP/Graphics/<pack>.
Params:
pack
The pack to simplify.
Returns:
The number of files removed if successful
False if an exception occurred
None if folder is empty
"""
pack = os.path.join(self.graphics_dir, pack)
files_before = sum(len(f) for (_, _, f) in os.walk(pack))
if files_before == 0:
return None
tmp = tempfile.mkdtemp()
try:
dir_util.copy_tree(pack, tmp)
if os.path.isdir(pack):
dir_util.remove_tree(pack)
os.makedirs(pack)
os.makedirs(os.path.join(pack, 'data', 'art'))
os.makedirs(os.path.join(pack, 'raw', 'graphics'))
os.makedirs(os.path.join(pack, 'raw', 'objects'))
os.makedirs(os.path.join(pack, 'data', 'init'))
dir_util.copy_tree(
os.path.join(tmp, 'data', 'art'),
os.path.join(pack, 'data', 'art'))
dir_util.copy_tree(
os.path.join(tmp, 'raw', 'graphics'),
os.path.join(pack, 'raw', 'graphics'))
dir_util.copy_tree(
os.path.join(tmp, 'raw', 'objects'),
os.path.join(pack, 'raw', 'objects'))
shutil.copyfile(
os.path.join(tmp, 'data', 'init', 'colors.txt'),
os.path.join(pack, 'data', 'init', 'colors.txt'))
shutil.copyfile(
os.path.join(tmp, 'data', 'init', 'init.txt'),
os.path.join(pack, 'data', 'init', 'init.txt'))
shutil.copyfile(
os.path.join(tmp, 'data', 'init', 'd_init.txt'),
os.path.join(pack, 'data', 'init', 'd_init.txt'))
shutil.copyfile(
os.path.join(tmp, 'data', 'init', 'overrides.txt'),
os.path.join(pack, 'data', 'init', 'overrides.txt'))
except IOError:
sys.excepthook(*sys.exc_info())
retval = False
else:
files_after = sum(len(f) for (_, _, f) in os.walk(pack))
retval = files_after - files_before
if os.path.isdir(tmp):
dir_util.remove_tree(tmp)
return retval
def install_extras(self):
"""
Installs extra utilities to the Dwarf Fortress folder, if this has not
yet been done.
"""
extras_dir = os.path.join(self.lnp_dir, 'Extras')
if not os.path.isdir(extras_dir):
return
install_file = os.path.join(self.df_dir, 'PyLNP{0}.txt'.format(VERSION))
if not os.access(install_file, os.F_OK):
dir_util.copy_tree(extras_dir, self.df_dir)
textfile = open(install_file, 'w')
textfile.write(
'PyLNP V{0} extras installed!\nTime: {1}'.format(
VERSION, datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
textfile.close()
def updates_configured(self):
"""Returns True if update checking have been configured."""
return self.config.get_string('updates/checkURL') != ''
def check_update(self):
"""Checks for updates using the URL specified in PyLNP.json."""
if not self.updates_configured():
return
if self.userconfig.get_number('updateDays') == -1:
return
if self.userconfig.get_number('nextUpdate') < time.time():
t = Thread(target=self.perform_update_check)
t.daemon = True
t.start()
def perform_update_check(self):
"""Performs the actual update check. Runs in a thread."""
try:
req = Request(
self.config.get_string('updates/checkURL'),
headers={'User-Agent':'PyLNP'})
version_text = urlopen(req, timeout=3).read()
# Note: versionRegex must capture the version number in a group
new_version = re.search(
self.config.get_string('updates/versionRegex'),
version_text).group(1)
if new_version != self.config.get_string('updates/packVersion'):
self.new_version = new_version
self.ui.on_update_available()
except URLError as ex:
print(
"Error checking for updates: " + str(ex.reason),
file=sys.stderr)
except:
pass
def next_update(self, days):
"""Sets the next update check to occur in <days> days."""
self.userconfig['nextUpdate'] = (time.time() + days * 24 * 60 * 60)
self.userconfig['updateDays'] = days
self.save_config()
def start_update(self):
"""Launches a webbrowser to the specified update URL."""
self.open_url(self.config.get_string('updates/downloadURL'))
def read_colors(self):
"""Returns a list of color schemes."""
return tuple([
os.path.splitext(os.path.basename(p))[0] for p in
self.get_text_files(self.colors_dir)])
def get_colors(self, colorscheme=None):
"""
Returns RGB tuples for all 16 colors in <colorscheme>.txt, or
data/init/colors.txt if no scheme is provided."""
result = []
f = os.path.join(self.df_dir, 'data', 'init', 'colors.txt')
if colorscheme is not None:
f = os.path.join(self.lnp_dir, 'colors', colorscheme+'.txt')
for c in [
'BLACK', 'BLUE', 'GREEN', 'CYAN', 'RED', 'MAGENTA', 'BROWN',
'LGRAY', 'DGRAY', 'LBLUE', 'LGREEN', 'LCYAN', 'LRED',
'LMAGENTA', 'YELLOW', 'WHITE']:
result.append((
int(self.settings.read_value(f, c+'_R')),
int(self.settings.read_value(f, c+'_G')),
int(self.settings.read_value(f, c+'_B'))))
return result
def load_colors(self, filename):
"""
Replaces the current DF color scheme.
Params:
filename
The name of the new colorscheme to install (filename without
extension).
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
shutil.copyfile(
os.path.join(self.lnp_dir, 'colors', filename),
os.path.join(self.init_dir, 'colors.txt'))
def save_colors(self, filename):
"""
        Save the current color scheme to a file.
        Params:
          filename
            The name of the new color scheme file.
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
filename = os.path.join(self.colors_dir, filename)
shutil.copyfile(os.path.join(self.init_dir, 'colors.txt'), filename)
self.read_colors()
def color_exists(self, filename):
"""
Returns whether or not a color scheme already exists.
Params:
filename
The filename to check.
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
return os.access(os.path.join(self.colors_dir, filename), os.F_OK)
def delete_colors(self, filename):
"""
Deletes a color scheme file.
Params:
filename
The filename to delete.
"""
if not filename.endswith('.txt'):
filename = filename + '.txt'
os.remove(os.path.join(self.colors_dir, filename))
def read_hacks(self):
"""Reads which hacks are enabled."""
try:
f = open(os.path.join(self.df_dir, 'PyLNP_dfhack_onload.init'))
hacklines = f.readlines()
for h in self.get_hacks().values():
h['enabled'] = h['command']+'\n' in hacklines
f.close()
except IOError:
for h in self.get_hacks().values():
h['enabled'] = False
def get_hacks(self):
"""Returns dict of available hacks."""
return self.config.get_dict('dfhack')
def get_hack(self, title):
"""
Returns the hack titled <title>, or None if this does not exist.
Params:
title
The title of the hack.
"""
try:
return self.get_hacks()[title]
except KeyError:
return None
def toggle_hack(self, name):
"""
Toggles the hack <name>.
Params:
name
The name of the hack to toggle.
"""
self.get_hack(name)['enabled'] = not self.get_hack(name)['enabled']
self.rebuild_hacks()
def rebuild_hacks(self):
"""Rebuilds PyLNP_dfhack_onload.init with the enabled hacks."""
f = open(os.path.join(self.df_dir, 'PyLNP_dfhack_onload.init'), 'w')
f.write('# Generated by PyLNP\n\n')
for k, h in self.get_hacks().items():
if h['enabled']:
f.write('# '+str(k)+'\n')
f.write('# '+str(h['tooltip'])+'\n')
f.write(h['command']+'\n\n')
f.flush()
f.close()
def install_embarks(self, files):
"""
Installs a list of embark profiles.
Params:
files
List of files to install.
"""
out = open(os.path.join(self.init_dir, 'embark_profiles.txt'), 'w')
for f in files:
embark = open(os.path.join(self.embarks_dir, f))
out.write(embark.read()+"\n\n")
out.flush()
out.close()
def open_folder(path):
"""
Opens a folder in the system file manager.
Params:
path
The folder path to open.
"""
# http://stackoverflow.com/q/6631299
path = os.path.normpath(path)
try:
if sys.platform == 'darwin':
subprocess.check_call(['open', '--', path])
elif sys.platform.startswith('linux'):
subprocess.check_call(['xdg-open', path])
elif sys.platform in ['windows', 'win32']:
subprocess.check_call(['explorer', path])
except Exception:
pass
if __name__ == "__main__":
PyLNP()
# vim:expandtab
|
|
import re
from mysql import connector
from config import get_config as _get_config
from flask import g
# In ascending access level order
ACCESS_LEVELS = ['public', 'user', 'admin']
class Context(object):
def __init__(self, cnx, access_level, current_username):
self.cnx = cnx
self.access_level = access_level
self.current_username = current_username
def inject_context(min_level='public'):
def _provides_context(f):
def wrapper(*args, **kwargs):
access_level = 'public'
current_username = None
if g and g.get('user', None):
current_username = g.user['UserName']
if g.user['Status'] == 'admin':
access_level = 'admin'
else:
access_level = 'user'
if ACCESS_LEVELS.index(access_level) < ACCESS_LEVELS.index(min_level):
raise Exception("INSUFFICIENT_DB_PERMISSION")
config = _get_config(access_level)
cnx = connector.connect(**config)
context = Context(cnx, access_level, current_username)
if access_level == 'admin':
return f(context, *args, **kwargs)
return f(context, *args)
return wrapper
return _provides_context
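# A usage sketch (the function name and query are illustrative): a handler
# decorated this way receives a ready Context as its first argument, and the
# caller does not pass it explicitly.
#
#   @inject_context(min_level='user')
#   def count_prints(context):
#       cursor = context.cnx.cursor()
#       cursor.execute(
#           "SELECT COUNT(*) FROM prints WHERE UserName = %(UserName)s",
#           {'UserName': context.current_username})
#       (count,) = cursor.fetchone()
#       cursor.close()
#       context.cnx.close()
#       return count
#
#   total = count_prints()  # raises if the current user lacks 'user' access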
def _get_short_file_name(filename):
    match = re.match(r'^(.+)_.+_.+\.stl$', filename)
if match:
groups = match.groups()
return str(groups[0]) + '.stl'
return ''
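# Assuming stored filenames follow '<original>_<user>_<unique>.stl' (an
# assumption inferred from the pattern above), for example:
#   _get_short_file_name('bracket_alice_1234.stl')  ->  'bracket.stl'
#   _get_short_file_name('bracket.stl')             ->  ''  (no match)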
def _make_user_dict(raw_user):
return {
'FirstName': raw_user[0],
'LastName': raw_user[1],
'UserEmail': raw_user[2],
'UserName': raw_user[3],
'UserPass': raw_user[4],
'Gender': raw_user[5],
'Birthdate': raw_user[6],
'Status': raw_user[7]
}
def _make_print_dict(raw_print):
return {
'PrintId': raw_print[0],
'UserName': raw_print[1],
'CreationTimestamp': raw_print[2],
'LastUpdateTimestamp': raw_print[3],
'FileName': raw_print[4],
'ShortFileName': _get_short_file_name(raw_print[4]),
'Status': raw_print[5],
'FileSize': raw_print[6],
}
def _make_full_print_dict(raw_print):
return {
'PrintId': raw_print[0],
'UserName': raw_print[1],
'CreationTimestamp': raw_print[2],
'LastUpdateTimestamp': raw_print[3],
'FileName': raw_print[4],
'ShortFileName': _get_short_file_name(raw_print[4]),
'Status': raw_print[5],
'FileSize': raw_print[6],
'Material': raw_print[7],
'Cost': raw_print[8],
'Divisions': raw_print[9],
}
def _make_chat_dict(raw_chat):
return {
'MessageId': raw_chat[0],
'UserNameFrom': raw_chat[1],
'UserNameTo': raw_chat[2],
'MessageTimestamp': raw_chat[3],
'Message': raw_chat[4],
}
@inject_context()
def insert_user(context, user):
cnx = context.cnx
query = ("INSERT INTO users(FirstName, LastName, UserEmail, UserName, UserPass, Gender, Birthdate, Status) "
"values(%(FirstName)s, %(LastName)s, %(UserEmail)s, %(UserName)s, "
"%(UserPass)s, %(Gender)s, %(Birthdate)s, %(Status)s)")
cursor = cnx.cursor()
success = True
try:
cursor.execute(query, user)
cnx.commit()
except connector.Error:
success = False
cursor.close()
cnx.close()
return success
@inject_context()
def get_user(context, user_name):
cnx = context.cnx
query = ("SELECT FirstName, LastName, UserEmail, UserName, UserPass, Gender, Birthdate, Status "
"FROM users WHERE UserName = %(UserName)s LIMIT 1")
cursor = cnx.cursor()
cursor.execute(query, {'UserName': user_name})
raw_user = cursor.fetchone()
cursor.close()
cnx.close()
if raw_user is not None:
return _make_user_dict(raw_user)
return None
@inject_context()
def get_user_by_email(context, user_email):
cnx = context.cnx
query = ("SELECT FirstName, LastName, UserEmail, UserName, UserPass, Gender, Birthdate, Status "
"FROM users WHERE UserEmail = %(UserEmail)s LIMIT 1")
cursor = cnx.cursor()
cursor.execute(query, {'UserEmail': user_email})
raw_user = cursor.fetchone()
cursor.close()
cnx.close()
if raw_user is not None:
return _make_user_dict(raw_user)
return None
@inject_context(min_level='admin')
def get_all_users(context):
cnx = context.cnx
query = ("SELECT FirstName, LastName, UserEmail, UserName, UserPass, Gender, Birthdate, Status "
"FROM users order by UserName")
cursor = cnx.cursor()
cursor.execute(query)
users = []
for raw_user in cursor:
user = _make_user_dict(raw_user)
del user['UserPass']
users.append(user)
cursor.close()
cnx.close()
return users
@inject_context(min_level='admin')
def update_user_status(context, user_name, new_status):
cnx = context.cnx
query = ("UPDATE users SET Status = %(NewStatus)s "
"WHERE UserName = %(UserName)s LIMIT 1")
cursor = cnx.cursor()
try:
cursor.execute(query, {'UserName': user_name, 'NewStatus': new_status})
cnx.commit()
success = cursor.rowcount == 1
except connector.Error:
success = False
cursor.close()
cnx.close()
return success
@inject_context()
def update_user_password(context, user_name, new_password):
cnx = context.cnx
query = ("UPDATE users SET UserPass = %(NewPassword)s "
"WHERE UserName = %(UserName)s LIMIT 1")
cursor = cnx.cursor()
try:
cursor.execute(query, {'UserName': user_name, 'NewPassword': new_password})
cnx.commit()
success = cursor.rowcount == 1
except connector.Error:
success = False
cursor.close()
cnx.close()
return success
@inject_context(min_level='user')
def insert_print(context, file_print):
cnx = context.cnx
file_print['UserName'] = context.current_username
query = ("INSERT INTO prints("
"UserName, CreationTimestamp, FileName, Status, FileSize) "
"values("
"%(UserName)s, %(CreationTimestamp)s, %(FileName)s, %(Status)s, %(FileSize)s)")
cursor = cnx.cursor()
try:
cursor.execute(query, file_print)
cnx.commit()
success = True
except connector.Error as e:
print e
success = False
cursor.close()
cnx.close()
return success
@inject_context(min_level='user')
def update_print_status(context, print_id, new_status, ignore_username=False):
cnx = context.cnx
query_admin = ("UPDATE prints SET Status = %(NewStatus)s "
"WHERE PrintId = %(PrintId)s LIMIT 1")
query_user = ("UPDATE prints SET Status = %(NewStatus)s "
"WHERE PrintId = %(PrintId)s and WHERE UserName = %(UserName)s LIMIT 1")
query = query_admin if ignore_username else query_user
cursor = cnx.cursor()
try:
cursor.execute(query, {'PrintId': print_id, 'NewStatus': new_status, 'UserName': context.current_username})
cnx.commit()
success = cursor.rowcount == 1
except connector.Error:
success = False
cursor.close()
cnx.close()
return success
@inject_context(min_level='user')
def get_print(context, print_id):
cnx = context.cnx
query = ("SELECT PrintId, UserName, CreationTimestamp, LastUpdateTimestamp, FileName, Status, FileSize "
"FROM prints WHERE PrintId = %(PrintId)s and UserName = %(UserName)s LIMIT 1")
cursor = cnx.cursor()
cursor.execute(query, {'PrintId': print_id, 'UserName': context.current_username})
raw_print = cursor.fetchone()
cursor.close()
cnx.close()
if raw_print is not None:
return _make_print_dict(raw_print)
return None
@inject_context(min_level='user')
def get_all_prints(context, all_users=False):
cnx = context.cnx
query = ("SELECT PrintId, UserName, CreationTimestamp, LastUpdateTimestamp, FileName, Status, FileSize "
"FROM prints " + ("WHERE UserName = %(UserName)s " if not all_users else "") +
"ORDER BY LastUpdateTimestamp DESC limit 10")
cursor = cnx.cursor()
cursor.execute(query, {'UserName': context.current_username})
prints = []
for raw_print in cursor:
prints.append(_make_print_dict(raw_print))
cursor.close()
cnx.close()
return prints
@inject_context(min_level='user')
def insert_new_chat_message(context, message, timestamp, username_to='group:admin'):
cnx = context.cnx
query = ("INSERT INTO chat("
"UserNameFrom, UserNameTo, MessageTimestamp, Message) values("
"%(UserNameFrom)s, %(UserNameTo)s, %(MessageTimestamp)s, %(Message)s)")
cursor = cnx.cursor()
try:
cursor.execute(query,
{'UserNameFrom': context.current_username,
'UserNameTo': username_to,
'MessageTimestamp': timestamp,
'Message': message})
cnx.commit()
success = cursor.rowcount == 1
except connector.Error:
success = False
cursor.close()
cnx.close()
return success
@inject_context(min_level='user')
def get_user_messages(context, username=None):
username = context.current_username if username is None else username
cnx = context.cnx
query = ("SELECT MessageId, UserNameFrom, UserNameTo, MessageTimestamp, Message "
"FROM chat WHERE UserNameTo = %(UserName)s or UserNameFrom = %(UserName)s "
"ORDER BY MessageTimestamp DESC limit 30")
cursor = cnx.cursor()
cursor.execute(query, {'UserName': username})
chats = []
for raw_chat in cursor:
chats.append(_make_chat_dict(raw_chat))
cursor.close()
cnx.close()
return chats
def manager_get_all_prints(status):
query = ("SELECT PrintId, UserName, CreationTimestamp, LastUpdateTimestamp, FileName, Status, FileSize, Material, Cost, Divisions "
"FROM prints WHERE Status = %(Status)s "
"ORDER BY LastUpdateTimestamp ASC")
config = _get_config('admin')
cnx = connector.connect(**config)
cursor = cnx.cursor()
cursor.execute(query, {'Status': status})
prints = []
for raw_print in cursor:
prints.append(_make_full_print_dict(raw_print))
cursor.close()
cnx.close()
return prints
def manager_update_print(p):
config = _get_config('admin')
cnx = connector.connect(**config)
query = (
"UPDATE prints SET "
"Status = %(Status)s, "
"Material = %(Material)s, "
"Divisions = %(Divisions)s, "
"Cost = %(Cost)s "
"WHERE PrintId = %(PrintId)s LIMIT 1")
cursor = cnx.cursor()
try:
cursor.execute(query, p)
cnx.commit()
success = cursor.rowcount == 1
except connector.Error:
success = False
cursor.close()
cnx.close()
return success
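# Illustrative sketch only (not part of the original module): one way the manager
# helpers above might be driven by a polling worker. The status values 'Queued' and
# 'Printing' are assumptions; the real status vocabulary is defined by the web
# application, not by this module.
def _example_manager_pass():
    for queued in manager_get_all_prints('Queued'):
        queued['Status'] = 'Printing'
        # _make_full_print_dict already carries the Material/Cost/Divisions keys
        # that manager_update_print substitutes into its UPDATE statement.
        manager_update_print(queued)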
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import os
import sys
import tempfile
import unittest
import pytest
from numpy.testing import assert_array_equal
import numpy as np
import pyarrow as pa
from pyarrow.feather import (read_feather, write_feather,
read_table, FeatherReader, FeatherDataset)
from pyarrow.lib import FeatherWriter
try:
from pandas.util.testing import assert_frame_equal
import pandas as pd
except ImportError:
pass
# TODO(wesm): The Feather tests currently are tangled with pandas
# dependency. We should isolate the pandas-depending parts and mark those with
# pytest.mark.pandas
pytestmark = pytest.mark.pandas
def random_path(prefix='feather_'):
return tempfile.mktemp(prefix=prefix)
class TestFeatherReader(unittest.TestCase):
def setUp(self):
self.test_files = []
def tearDown(self):
for path in self.test_files:
try:
os.remove(path)
except os.error:
pass
def test_file_not_exist(self):
with pytest.raises(pa.ArrowIOError):
FeatherReader('test_invalid_file')
def _get_null_counts(self, path, columns=None):
reader = FeatherReader(path)
counts = []
for i in range(reader.num_columns):
col = reader.get_column(i)
name = reader.get_column_name(i)
if columns is None or name in columns:
counts.append(col.null_count)
return counts
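# Example (illustrative): for a two-column Feather file whose second column has three
# null entries, self._get_null_counts(path) returns [0, 3].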
def _check_pandas_roundtrip(self, df, expected=None, path=None,
columns=None, null_counts=None,
use_threads=False):
if path is None:
path = random_path()
self.test_files.append(path)
write_feather(df, path)
if not os.path.exists(path):
raise Exception('file not written')
result = read_feather(path, columns, use_threads=use_threads)
if expected is None:
expected = df
assert_frame_equal(result, expected)
if null_counts is None:
null_counts = np.zeros(len(expected.columns))
np.testing.assert_array_equal(self._get_null_counts(path, columns),
null_counts)
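# Example (illustrative): self._check_pandas_roundtrip(pd.DataFrame({'a': [1.0, 2.0]}))
# writes the frame to a fresh temporary path, reads it back with read_feather, and
# asserts that both the frame and the per-column null counts survive the round trip.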
def _assert_error_on_write(self, df, exc, path=None):
# check that we are raising the exception
# on writing
if path is None:
path = random_path()
self.test_files.append(path)
def f():
write_feather(df, path)
pytest.raises(exc, f)
def test_dataset(self):
num_values = (100, 100)
num_files = 5
paths = [random_path() for i in range(num_files)]
df = pd.DataFrame(np.random.randn(*num_values),
columns=['col_' + str(i)
for i in range(num_values[1])])
self.test_files.extend(paths)
for index, path in enumerate(paths):
rows = (index * (num_values[0] // num_files),
(index + 1) * (num_values[0] // num_files))
writer = FeatherWriter()
writer.open(path)
for col in range(num_values[1]):
writer.write_array(df.columns[col],
df.iloc[rows[0]:rows[1], col])
writer.close()
data = FeatherDataset(paths).read_pandas()
assert_frame_equal(data, df)
def test_num_columns_attr(self):
df0 = pd.DataFrame({})
df1 = pd.DataFrame({
'foo': [1, 2, 3, 4, 5]
})
df2 = pd.DataFrame({
'foo': [1, 2, 3, 4, 5],
'bar': [1, 2, 3, 4, 5]
})
for df, ncols in zip([df0, df1, df2], [0, 1, 2]):
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_columns == ncols
def test_num_rows_attr(self):
df = pd.DataFrame({'foo': [1, 2, 3, 4, 5]})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == len(df)
df = pd.DataFrame({})
path = random_path()
self.test_files.append(path)
write_feather(df, path)
reader = FeatherReader(path)
assert reader.num_rows == 0
def test_float_no_nulls(self):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_read_table(self):
num_values = (100, 100)
path = random_path()
self.test_files.append(path)
writer = FeatherWriter()
writer.open(path)
values = np.random.randint(0, 100, size=num_values)
for i in range(100):
writer.write_array('col_' + str(i), values[:, i])
writer.close()
data = pd.DataFrame(values,
columns=['col_' + str(i) for i in range(100)])
table = pa.Table.from_pandas(data)
result = read_table(path)
assert_frame_equal(table.to_pandas(), result.to_pandas())
def test_float_nulls(self):
num_values = 100
path = random_path()
self.test_files.append(path)
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
null_counts = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
writer.write_array(name, values, null_mask)
values[null_mask] = np.nan
expected_cols.append(values)
null_counts.append(null_mask.sum())
writer.close()
ex_frame = pd.DataFrame(dict(zip(dtypes, expected_cols)),
columns=dtypes)
result = read_feather(path)
assert_frame_equal(result, ex_frame)
assert_array_equal(self._get_null_counts(path), null_counts)
def test_integer_no_nulls(self):
data = {}
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_platform_numpy_integers(self):
data = {}
numpy_dtypes = ['longlong']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
path = random_path()
self.test_files.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
writer = FeatherWriter()
writer.open(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
writer.write_array(name, values, null_mask)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
writer.close()
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
self._check_pandas_roundtrip(df)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
path = random_path()
self.test_files.append(path)
num_values = 100
np.random.seed(0)
writer = FeatherWriter()
writer.open(path)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
writer.write_array('bools', values, mask)
expected = values.astype(object)
expected[mask] = None
writer.close()
ex_frame = pd.DataFrame({'bools': expected})
result = read_feather(path)
assert_frame_equal(result, ex_frame)
def test_buffer_bounds_error(self):
# ARROW-1676
path = random_path()
self.test_files.append(path)
for i in range(16, 256):
values = pa.array([None] + list(range(i)), type=pa.float64())
writer = FeatherWriter()
writer.open(path)
writer.write_array('arr', values)
writer.close()
result = read_feather(path)
expected = pd.DataFrame({'arr': values.to_pandas()})
assert_frame_equal(result, expected)
self._check_pandas_roundtrip(expected, null_counts=[1])
def test_boolean_object_nulls(self):
repeats = 100
arr = np.array([False, None, True] * repeats, dtype=object)
df = pd.DataFrame({'bools': arr})
self._check_pandas_roundtrip(df, null_counts=[1 * repeats])
def test_delete_partial_file_on_error(self):
if sys.platform == 'win32':
pytest.skip('Windows hangs on to file handle for some reason')
class CustomClass(object):
pass
# strings will fail
df = pd.DataFrame(
{
'numbers': range(5),
'strings': [b'foo', None, u'bar', CustomClass(), np.nan]},
columns=['numbers', 'strings'])
path = random_path()
try:
write_feather(df, path)
except Exception:
pass
assert not os.path.exists(path)
def test_strings(self):
repeats = 1000
# Mixed bytes, unicode, strings coerced to binary
values = [b'foo', None, u'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
ex_values = [b'foo', None, b'bar', b'qux', np.nan]
expected = pd.DataFrame({'strings': ex_values * repeats})
self._check_pandas_roundtrip(df, expected, null_counts=[2 * repeats])
# embedded nulls are ok
values = ['foo', None, 'bar', 'qux', None]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
self._check_pandas_roundtrip(df, expected, null_counts=[2 * repeats])
values = ['foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
self._check_pandas_roundtrip(df, expected, null_counts=[2 * repeats])
def test_empty_strings(self):
df = pd.DataFrame({'strings': [''] * 10})
self._check_pandas_roundtrip(df)
def test_all_none(self):
df = pd.DataFrame({'all_none': [None] * 10})
self._check_pandas_roundtrip(df, null_counts=[10])
def test_all_null_category(self):
# ARROW-1188
df = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)})
df = df.assign(B=df.B.astype("category"))
self._check_pandas_roundtrip(df, null_counts=[0, 3])
def test_multithreaded_read(self):
data = {'c{0}'.format(i): [''] * 10
for i in range(100)}
df = pd.DataFrame(data)
self._check_pandas_roundtrip(df, use_threads=True)
def test_nan_as_null(self):
# Create a nan that is not numpy.nan
values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
df = pd.DataFrame({'strings': values})
self._check_pandas_roundtrip(df)
def test_category(self):
repeats = 1000
values = ['foo', None, u'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
df['strings'] = df['strings'].astype('category')
values = ['foo', None, 'bar', 'qux', None]
expected = pd.DataFrame({'strings': pd.Categorical(values * repeats)})
self._check_pandas_roundtrip(df, expected,
null_counts=[2 * repeats])
def test_timestamp(self):
df = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)})
df['with_tz'] = (df.naive.dt.tz_localize('utc')
.dt.tz_convert('America/Los_Angeles'))
self._check_pandas_roundtrip(df)
def test_timestamp_with_nulls(self):
df = pd.DataFrame({'test': [pd.datetime(2016, 1, 1),
None,
pd.datetime(2016, 1, 3)]})
df['with_tz'] = df.test.dt.tz_localize('utc')
self._check_pandas_roundtrip(df, null_counts=[1, 1])
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta_with_nulls(self):
df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
None,
pd.Timedelta('3 day')]})
self._check_pandas_roundtrip(df, null_counts=[1, 1])
def test_out_of_float64_timestamp_with_nulls(self):
df = pd.DataFrame(
{'test': pd.DatetimeIndex([1451606400000000001,
None, 14516064000030405])})
df['with_tz'] = df.test.dt.tz_localize('utc')
self._check_pandas_roundtrip(df, null_counts=[1, 1])
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3, 4],
1: [True, False, True, False]})
expected = df.rename(columns=str)
self._check_pandas_roundtrip(df, expected)
@pytest.mark.skipif(not os.path.supports_unicode_filenames,
reason='unicode filenames not supported')
def test_unicode_filename(self):
# GH #209
name = (b'Besa_Kavaj\xc3\xab.feather').decode('utf-8')
df = pd.DataFrame({'foo': [1, 2, 3, 4]})
self._check_pandas_roundtrip(df, path=random_path(prefix=name))
def test_read_columns(self):
data = {'foo': [1, 2, 3, 4],
'boo': [5, 6, 7, 8],
'woo': [1, 3, 5, 7]}
columns = list(data.keys())[1:3]
df = pd.DataFrame(data)
expected = pd.DataFrame({c: data[c] for c in columns})
self._check_pandas_roundtrip(df, expected, columns=columns)
def test_overwritten_file(self):
path = random_path()
self.test_files.append(path)
num_values = 100
np.random.seed(0)
values = np.random.randint(0, 10, size=num_values)
write_feather(pd.DataFrame({'ints': values}), path)
df = pd.DataFrame({'ints': values[0: num_values//2]})
self._check_pandas_roundtrip(df, path=path)
def test_filelike_objects(self):
from io import BytesIO
buf = BytesIO()
# the copy makes it non-strided
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=['a', 'b', 'c']).copy()
write_feather(df, buf)
buf.seek(0)
result = read_feather(buf)
assert_frame_equal(result, df)
def test_sparse_dataframe(self):
# GH #221
data = {'A': [0, 1, 2],
'B': [1, 0, 1]}
df = pd.DataFrame(data).to_sparse(fill_value=1)
expected = df.to_dense()
self._check_pandas_roundtrip(df, expected)
def test_duplicate_columns(self):
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
self._assert_error_on_write(df, ValueError)
def test_unsupported(self):
# https://github.com/wesm/feather/issues/240
# serializing actual python objects
# period
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
self._assert_error_on_write(df, ValueError)
# non-strings
df = pd.DataFrame({'a': ['a', 1, 2.0]})
self._assert_error_on_write(df, TypeError)
@pytest.mark.slow
def test_large_dataframe(self):
df = pd.DataFrame({'A': np.arange(400000000)})
self._check_pandas_roundtrip(df)
@pytest.mark.large_memory
def test_chunked_binary_error_message():
# ARROW-3058: As Feather does not yet support chunked columns, we at least
# make sure it's clear to the user what is going on
# 2^31 + 1 bytes
values = [b'x'] + [
b'x' * (1 << 20)
] * 2 * (1 << 10)
df = pd.DataFrame({'byte_col': values})
with pytest.raises(ValueError, match="'byte_col' exceeds 2GB maximum "
"capacity of a Feather binary column. This restriction "
"may be lifted in the future"):
write_feather(df, io.BytesIO())
|
|
""" Copyright 2015 Kris Steinhoff, The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ConfigParser, json, logging, os, requests, sys, time, urlparse
class BoxApiError(Exception):
pass
class BoxApi(object):
def __init__(self, config_dir=None, **config):
self._logger = logging.getLogger("box.api")
if self._logger.getEffectiveLevel() <= logging.DEBUG:
# turn on HTTP logging if log level is DEBUG
import httplib
httplib.HTTPConnection.debuglevel = 1
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(self._logger.getEffectiveLevel())
requests_log.propagate = True
if config_dir is not None:
self._config_dir = config_dir
else:
self._config_dir = os.path.dirname(os.path.realpath(__file__))
self._settings_file = os.path.join(self._config_dir, "settings.conf")
self._auth_file = os.path.join(self._config_dir, "auth.conf")
config_files = (self._settings_file, self._auth_file)
self._logger.debug("config_files: %s" % str(config_files))
config_from_file = ConfigParser.RawConfigParser()
config_from_file.read(config_files)
# TODO config validation:
self._config = dict(config_from_file.items("box") + config.items())
self._session = requests.Session()
try:
self._auth = dict(config_from_file.items("box auth"))
self._session.headers.update({"Authorization": "Bearer {access}".format(access=self.get_access_token())})
except:
msg = "initialization: could not set auth"
self._logger.error(msg)
self._session.headers.update({"Accept": "application/json"})
def v1query(self, action, params=None):
"""A convenience method to make using the legacy V1 API easier to use."""
from xml.dom import minidom
class BoxApiV1Response(object):
def __init__(self, resp):
self._dom = minidom.parseString(resp.text)
def find(self, node):
return self._dom.getElementsByTagName(node)[0].firstChild.nodeValue
if params is None:
params = {}
params['action'] = action
params['api_key'] = self._config["client_id"]
params['auth_token'] = self._auth["v1auth.token"]
resp = requests.get("https://www.box.net/api/1.0/rest", params=params)
return BoxApiV1Response(resp)
def request(self, method, url, append_url=True, retry_on=(429,), max_attempts=10, **kwargs):
if append_url:
url = "https://api.box.com/2.0/"+ url.strip("/")
self._logger.debug("URL: %s" % url)
self._logger.debug("method: %s" % method)
attempt_count = 0
sleep_time = 0
# retry the request until a good status code is received or we exhausted our attempt limit:
while attempt_count < max_attempts:
attempt_count += 1
if attempt_count > 1:
# set the sleep time to an exponentially increasing value in case we're being throttled:
sleep_time = 2 ** attempt_count
self._logger.warn("Response code \"%d\" from \"%s\". Sleeping for %d seconds before retrying." % (r.status_code, r.url, sleep_time))
time.sleep(sleep_time)
r = self._session.request(method, url, **kwargs)
if r.status_code == 401:
if self.refresh_tokens():
self._logger.info("Response code \"%d \"from \"%s\". Tokens refreshed, retrying." % (r.status_code, r.url))
attempt_count -= 1
elif r.status_code not in retry_on:
return r
msg = "Response code \"%d \"from \"%s\". Giving up after %d attempts." % (r.status_code, r.url, attempt_count)
self._logger.error(msg)
raise BoxApiError(msg)
def get_access_token(self):
expires = time.strptime(self._auth["access.token.expires"], "%Y-%m-%dT%H:%M:%S")
if time.mktime(expires) - time.mktime(time.localtime()) < 60:
self.refresh_tokens()
return self._auth["access.token"]
def refresh_tokens(self):
self._logger.info("refreshing tokens")
tokens_resp = requests.request("POST", "https://www.box.com/api/oauth2/token", data={"grant_type": "refresh_token", "refresh_token": self._auth["refresh.token"], "client_id": self._config["client_id"], "client_secret": self._config["client_secret"]})
tokens = json.loads(tokens_resp.text)
if tokens_resp.status_code == requests.codes.ok:
self.save_tokens(tokens)
return True
else:
msg = "Refreshing tokens failed (\"%s\").\n" % tokens["error_description"]
self._logger.error(msg)
raise BoxApiError(msg)
def save_tokens(self, tokens=None, v1token=None):
# Create new config section
auth_config = ConfigParser.RawConfigParser()
auth_config.add_section("box auth")
# populate with existing auth values
if hasattr(self, "_auth"):
for k in sorted(self._auth.keys()):
auth_config.set("box auth", k, self._auth[k])
if v1token:
user = json.loads(requests.request("GET", "https://api.box.com/2.0/users/me", params={"fields": "login,role"}, headers={"Authorization": "BoxAuth api_key={api_key}&auth_token={auth_token}".format(api_key=self._config["client_id"], auth_token=v1token)}).text)
auth_config.set("box auth", "v1auth.token.user.role", user["role"])
auth_config.set("box auth", "v1auth.token.user.login", user["login"])
auth_config.set("box auth", "v1auth.token", v1token)
if tokens:
self._session.headers.update({"Authorization": "Bearer {access}".format(access=tokens["access_token"])})
user = json.loads(self.request("GET", "/users/me", params={"fields": "login,role"}).text)
auth_config.set("box auth", "user.role", user["role"])
auth_config.set("box auth", "user.login", user["login"])
auth_config.set("box auth", "access.token", tokens["access_token"])
auth_config.set("box auth", "access.token.expires", time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(time.time() + tokens["expires_in"])))
auth_config.set("box auth", "refresh.token", tokens["refresh_token"])
auth_config.set("box auth", "refresh.token.expires", time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(time.time() + 1209600)))
self._auth = dict(auth_config.items("box auth"))
save_to = self._auth_file
save_to_fp = open(save_to, "w")
save_to_fp.write("# This file is maintained by the box python module. DO NOT MODIFY.\n\n")
auth_config.write(save_to_fp)
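# Illustrative usage sketch (assumes settings.conf and auth.conf already contain valid
# "box" and "box auth" sections; folder id "0" is the Box root folder):
#
#     api = BoxApi()
#     resp = api.request("GET", "/folders/0/items", params={"limit": 100})
#     for entry in resp.json().get("entries", []):
#         print entry["type"], entry["name"]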
if __name__ == "__main__":
def get_oauth2_tokens(api):
import BaseHTTPServer, random, webbrowser
http_server_address = ("127.0.0.1", 8000)
global oauth_info
oauth_info = {}
oauth_info["code"] = ""
oauth_info["state"] = str(random.randint(876546, 34567898765))
auth_url = "https://www.box.com/api/oauth2/authorize?response_type=code&client_id={client_id}&redirect_uri=http://{redirect_server}:{redirect_port}/&state={state}".format(client_id=api._config["client_id"], state=oauth_info["state"], redirect_server=http_server_address[0], redirect_port=http_server_address[1])
print "Authenticate here: {url}".format(url=auth_url)
webbrowser.open(auth_url)
class auth_code_handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
global oauth_info
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
qs = urlparse.parse_qs(self.path)
try:
if oauth_info["state"] != qs["/?state"][0]: # "/?" prefix is a hack, TODO: fix it maybe
self.wfile.write("ERROR: state parameter mismatch.")
print "ERROR: state parameter mismatch"
print "state parameter mismatch"
sys.exit(1)
oauth_info["code"] = qs["code"][0]
except:
self.wfile.write("ERROR: parameters (\"state\" and \"code\") not found")
print "ERROR: parameters (\"state\" and \"code\") not found"
sys.exit(1)
self.wfile.write("return to {script_name}\n".format(script_name=sys.argv[0]))
def log_message(self, format, *args):
# suppress log messages to stderr
return
httpd = BaseHTTPServer.HTTPServer(http_server_address, auth_code_handler)
httpd.handle_request()
tokens = json.loads(requests.post("https://www.box.com/api/oauth2/token", data={"grant_type": "authorization_code", "code": oauth_info["code"], "client_id": api._config["client_id"], "client_secret": api._config["client_secret"]}).text)
api.save_tokens(tokens) # TODO make this optional and add param to save_tokens, to specify path
print "SUCCESS: auth tokens saved."
def get_auth_token(api):
from xml.dom import minidom
import webbrowser
get_ticket = requests.get("https://www.box.net/api/1.0/rest?action=get_ticket&api_key={api_key}".format(api_key=api._config["client_id"]))
ticket_dom = minidom.parseString(get_ticket.text)
ticket = ticket_dom.getElementsByTagName('ticket')[0].firstChild.nodeValue
auth_url = "https://www.box.com/api/1.0/auth/{ticket}".format(ticket=ticket)
print "Authenticate here: {url}".format(url=auth_url)
webbrowser.open(auth_url)
raw_input("Then press enter...")
print ""
get_token = requests.get("https://www.box.net/api/1.0/rest?action=get_auth_token&api_key={api_key}&ticket={ticket}".format(api_key=api._config["client_id"], ticket=ticket))
token_dom = minidom.parseString(get_token.text)
auth_token = token_dom.getElementsByTagName("auth_token")[0].firstChild.nodeValue
api.save_tokens(v1token=auth_token) # TODO make this optional and add param to save_tokens, to specify path
print "SUCCESS: auth tokens saved."
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [ auth | v1auth | access_token | get ] [options]")
parser.add_option("-v", action="count", default=0, dest="verbosity")
(options, args) = parser.parse_args()
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
logging.getLogger().setLevel(logging.WARN - int(options.verbosity) * 10)
api = BoxApi()
if len(args) >= 1:
if args[0] == "auth":
get_oauth2_tokens(api)
elif args[0] == "v1auth":
get_auth_token(api)
elif args[0] == "access_token":
print api._auth["access.token"]
elif args[0] == "get":
r = api.request("GET", args[1])
print json.dumps(r.json(), indent=True)
elif args[0] == "refresh":
api.refresh_tokens()
else:
parser.print_help()
sys.exit(1)
else:
parser.print_help()
sys.exit(1)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
_VERSION = '1.3.0'
REQUIRED_PACKAGES = [
'numpy >= 1.11.0',
'six >= 1.10.0',
'protobuf >= 3.3.0',
'tensorflow-tensorboard >= 0.1.0, < 0.2.0',
]
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
# mock comes with unittest.mock for python3, need to install for python2
REQUIRED_PACKAGES.append('mock >= 2.0.0')
# weakref.finalize was introduced in Python 3.4
if sys.version_info < (3, 4):
REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1')
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:main',
]
# pylint: enable=line-too-long
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
class InstallCommand(InstallCommandBase):
"""Override the dir where the headers go."""
def finalize_options(self):
ret = InstallCommandBase.finalize_options(self)
self.install_headers = os.path.join(self.install_purelib,
'tensorflow', 'include')
return ret
class InstallHeaders(Command):
"""Override how headers are copied.
The install_headers that comes with setuptools copies all files to
the same directory. But we need the files to be in a specific directory
hierarchy for -I <include_dir> to work correctly.
"""
description = 'install C/C++ header files'
user_options = [('install-dir=', 'd',
'directory to install header files to'),
('force', 'f',
'force installation (overwrite existing files)'),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def mkdir_and_copy_file(self, header):
install_dir = os.path.join(self.install_dir, os.path.dirname(header))
# Get rid of some extra intervening directories so we can have fewer
# directories for -I
install_dir = re.sub('/google/protobuf/src', '', install_dir)
# Copy eigen code into tensorflow/include.
# A symlink would do, but the wheel file that gets created ignores
# symlink within the directory hierarchy.
# NOTE(keveman): Figure out how to customize bdist_wheel package so
# we can do the symlink.
if 'external/eigen_archive/' in install_dir:
extra_dir = install_dir.replace('external/eigen_archive', '')
if not os.path.exists(extra_dir):
self.mkpath(extra_dir)
self.copy_file(header, extra_dir)
if not os.path.exists(install_dir):
self.mkpath(install_dir)
return self.copy_file(header, install_dir)
def run(self):
hdrs = self.distribution.headers
if not hdrs:
return
self.mkpath(self.install_dir)
for header in hdrs:
(out, _) = self.mkdir_and_copy_file(header)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
def find_files(pattern, root):
"""Return all the files matching pattern below root dir."""
for path, _, files in os.walk(root):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
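# Example (illustrative): list(find_files('*.h', 'tensorflow/core')) yields header
# paths such as 'tensorflow/core/framework/op_kernel.h', relative to the directory
# this setup.py is invoked from.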
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
matches += ['../' + x for x in find_files('*', '_solib_k8') if '.py' not in x]
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (list(find_files('*.h', 'tensorflow/core')) +
list(find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf/src')) +
list(find_files('*', 'third_party/eigen3')) +
list(find_files('*', 'external/eigen_archive')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description='TensorFlow helps the tensors flow',
long_description='',
url='http://tensorflow.org/',
author='Google Inc.',
author_email='opensource@google.com',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',)
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains classes to wrap Python VTK to make nice molecular plots.
"""
import itertools
import math
import os
import subprocess
import time
import numpy as np
try:
import vtk
from vtk import vtkInteractorStyleTrackballCamera
except ImportError:
# VTK not present. vtkInteractorStyleTrackballCamera is set to object to avoid errors in unittests.
vtk = None
vtkInteractorStyleTrackballCamera = object
from monty.dev import requires
from monty.serialization import loadfn
from pymatgen.core.periodic_table import Species
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.util.coord import in_coord_list
module_dir = os.path.dirname(os.path.abspath(__file__))
EL_COLORS = loadfn(os.path.join(module_dir, "ElementColorSchemes.yaml"))
class StructureVis:
"""
Provides Structure object visualization using VTK.
"""
@requires(vtk, "Visualization requires the installation of VTK with Python bindings.")
def __init__(
self,
element_color_mapping=None,
show_unit_cell=True,
show_bonds=False,
show_polyhedron=True,
poly_radii_tol_factor=0.5,
excluded_bonding_elements=None,
):
"""
Constructs a Structure Visualization.
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol"s color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
show_bonds: Set to True to show bonds. Defaults to False.
show_polyhedron: Set to True to show polyhedrons. Defaults to
True.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
Useful keyboard shortcuts implemented.
h : Show help
A/a : Increase/decrease cell by one unit vector in a-direction
B/b : Increase/decrease cell by one unit vector in b-direction
C/c : Increase/decrease cell by one unit vector in c-direction
# : Toggle showing of polyhedrons
- : Toggle showing of bonds
[ : Decrease poly_radii_tol_factor by 0.05
] : Increase poly_radii_tol_factor by 0.05
r : Reset camera direction
o : Orthogonalize structure
Up/Down : Rotate view along Up direction by 90 clock/anticlockwise
Left/right : Rotate view along camera direction by 90
clock/anticlockwise
"""
# create a rendering window and renderer
self.ren = vtk.vtkRenderer()
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.AddRenderer(self.ren)
self.ren.SetBackground(1, 1, 1)
self.title = "Structure Visualizer"
# create a renderwindowinteractor
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.ren_win)
self.mapper_map = {}
self.structure = None
if element_color_mapping:
self.el_color_mapping = element_color_mapping
else:
self.el_color_mapping = EL_COLORS["VESTA"]
self.show_unit_cell = show_unit_cell
self.show_bonds = show_bonds
self.show_polyhedron = show_polyhedron
self.poly_radii_tol_factor = poly_radii_tol_factor
self.excluded_bonding_elements = excluded_bonding_elements if excluded_bonding_elements else []
self.show_help = True
self.supercell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.redraw()
style = StructureInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.ren.parent = self
def rotate_view(self, axis_ind=0, angle=0):
"""
Rotate the camera view.
Args:
axis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.
angle: Angle to rotate by. Defaults to 0.
"""
camera = self.ren.GetActiveCamera()
if axis_ind == 0:
camera.Roll(angle)
elif axis_ind == 1:
camera.Azimuth(angle)
else:
camera.Pitch(angle)
self.ren_win.Render()
def write_image(self, filename="image.png", magnification=1, image_format="png"):
"""
Save render window to an image.
Arguments:
filename:
filename to save to. Defaults to image.png.
magnification:
magnification. Use it to render high res images.
image_format:
choose between jpeg, png. Png is the default.
"""
render_large = vtk.vtkRenderLargeImage()
render_large.SetInput(self.ren)
if image_format == "jpeg":
writer = vtk.vtkJPEGWriter()
writer.SetQuality(80)
else:
writer = vtk.vtkPNGWriter()
render_large.SetMagnification(magnification)
writer.SetFileName(filename)
writer.SetInputConnection(render_large.GetOutputPort())
self.ren_win.Render()
writer.Write()
del render_large
def redraw(self, reset_camera=False):
"""
Redraw the render window.
Args:
reset_camera: Set to True to reset the camera to a
pre-determined default for each structure. Defaults to False.
"""
self.ren.RemoveAllViewProps()
self.picker = None
self.add_picker_fixed()
self.helptxt_mapper = vtk.vtkTextMapper()
tprops = self.helptxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 0)
if self.structure is not None:
self.set_structure(self.structure, reset_camera)
self.ren_win.Render()
def orthongonalize_structure(self):
"""
Orthogonalize the structure.
"""
if self.structure is not None:
self.set_structure(self.structure.copy(sanitize=True))
self.ren_win.Render()
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = [
"h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a, b or c unit vector",
"# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds",
"r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by 90 clockwise/anticlockwise",
"s: Save view to image.png",
"o: Orthogonalize structure",
]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def set_structure(self, structure, reset_camera=True, to_unit_cell=True):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
to_unit_cell: Whether or not to map sites back into the unit cell.
"""
self.ren.RemoveAllViewProps()
has_lattice = hasattr(structure, "lattice")
if has_lattice:
s = Structure.from_sites(structure, to_unit_cell=to_unit_cell)
s.make_supercell(self.supercell, to_unit_cell=to_unit_cell)
else:
s = structure
inc_coords = []
for site in s:
self.add_site(site)
inc_coords.append(site.coords)
count = 0
labels = ["a", "b", "c"]
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
if has_lattice:
matrix = s.lattice.matrix
if self.show_unit_cell and has_lattice:
# matrix = s.lattice.matrix
self.add_text([0, 0, 0], "o")
for vec in matrix:
self.add_line((0, 0, 0), vec, colors[count])
self.add_text(vec, labels[count], colors[count])
count += 1
for (vec1, vec2) in itertools.permutations(matrix, 2):
self.add_line(vec1, vec1 + vec2)
for (vec1, vec2, vec3) in itertools.permutations(matrix, 3):
self.add_line(vec1 + vec2, vec1 + vec2 + vec3)
if self.show_bonds or self.show_polyhedron:
elements = sorted(s.composition.elements, key=lambda a: a.X)
anion = elements[-1]
def contains_anion(site):
for sp in site.species.keys():
if sp.symbol == anion.symbol:
return True
return False
anion_radius = anion.average_ionic_radius
for site in s:
exclude = False
max_radius = 0
color = np.array([0, 0, 0])
for sp, occu in site.species.items():
if sp.symbol in self.excluded_bonding_elements or sp == anion:
exclude = True
break
max_radius = max(max_radius, sp.average_ionic_radius)
color = color + occu * np.array(self.el_color_mapping.get(sp.symbol, [0, 0, 0]))
if not exclude:
max_radius = (1 + self.poly_radii_tol_factor) * (max_radius + anion_radius)
nn = structure.get_neighbors(site, float(max_radius))
nn_sites = []
for neighbor in nn:
if contains_anion(neighbor):
nn_sites.append(neighbor)
if not in_coord_list(inc_coords, neighbor.coords):
self.add_site(neighbor)
if self.show_bonds:
self.add_bonds(nn_sites, site)
if self.show_polyhedron:
color = [i / 255 for i in color]
self.add_polyhedron(nn_sites, site, color)
if self.show_help:
self.helptxt_actor = vtk.vtkActor2D()
self.helptxt_actor.VisibilityOn()
self.helptxt_actor.SetMapper(self.helptxt_mapper)
self.ren.AddActor(self.helptxt_actor)
self.display_help()
camera = self.ren.GetActiveCamera()
if reset_camera:
if has_lattice:
# Adjust the camera for best viewing
lengths = s.lattice.abc
pos = (matrix[1] + matrix[2]) * 0.5 + matrix[0] * max(lengths) / lengths[0] * 3.5
camera.SetPosition(pos)
camera.SetViewUp(matrix[2])
camera.SetFocalPoint((matrix[0] + matrix[1] + matrix[2]) * 0.5)
else:
origin = s.center_of_mass
max_site = max(s, key=lambda site: site.distance_from_point(origin))
camera.SetPosition(origin + 5 * (max_site.coords - origin))
camera.SetFocalPoint(s.center_of_mass)
self.structure = structure
self.title = s.composition.formula
def zoom(self, factor):
"""
Zoom the camera view by a factor.
"""
camera = self.ren.GetActiveCamera()
camera.Zoom(factor)
self.ren_win.Render()
def show(self):
"""
Display the visualizer.
"""
self.iren.Initialize()
self.ren_win.SetSize(800, 800)
self.ren_win.SetWindowName(self.title)
self.ren_win.Render()
self.iren.Start()
def add_site(self, site):
"""
Add a site to the render window. The site is displayed as a sphere, the
color of which is determined based on the element. Partially occupied
sites are displayed as a single element color, though the site info
still shows the partial occupancy.
Args:
site: Site to add.
"""
start_angle = 0
radius = 0
total_occu = 0
for specie, occu in site.species.items():
radius += occu * (
specie.ionic_radius
if isinstance(specie, Species) and specie.ionic_radius
else specie.average_ionic_radius
)
total_occu += occu
vis_radius = 0.2 + 0.002 * radius
for specie, occu in site.species.items():
if not specie:
color = (1, 1, 1)
elif specie.symbol in self.el_color_mapping:
color = [i / 255 for i in self.el_color_mapping[specie.symbol]]
mapper = self.add_partial_sphere(site.coords, vis_radius, color, start_angle, start_angle + 360 * occu)
self.mapper_map[mapper] = [site]
start_angle += 360 * occu
if total_occu < 1:
mapper = self.add_partial_sphere(
site.coords,
vis_radius,
(1, 1, 1),
start_angle,
start_angle + 360 * (1 - total_occu),
)
self.mapper_map[mapper] = [site]
def add_partial_sphere(self, coords, radius, color, start=0, end=360, opacity=1.0):
"""
Adds a partial sphere (used to display partial occupancies).
Args:
coords (nd.array): Coordinates
radius (float): Radius of sphere
color (): Color of sphere.
start (float): Starting angle.
end (float): Ending angle.
opacity (float): Opacity.
"""
sphere = vtk.vtkSphereSource()
sphere.SetCenter(coords)
sphere.SetRadius(radius)
sphere.SetThetaResolution(18)
sphere.SetPhiResolution(18)
sphere.SetStartTheta(start)
sphere.SetEndTheta(end)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(sphere.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetOpacity(opacity)
self.ren.AddActor(actor)
return mapper
def add_text(self, coords, text, color=(0, 0, 0)):
"""
Add text at a coordinate.
Args:
coords: Coordinates to add text at.
text: Text to place.
color: Color for text as RGB. Defaults to black.
"""
source = vtk.vtkVectorText()
source.SetText(text)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor(color)
follower.SetPosition(coords)
follower.SetScale(0.5)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
"""
Adds a line.
Args:
start: Starting coordinates for line.
end: Ending coordinates for line.
color: Color for text as RGB. Defaults to grey.
width: Width of line. Defaults to 1.
"""
source = vtk.vtkLineSource()
source.SetPoint1(start)
source.SetPoint2(end)
vertexIDs = vtk.vtkStringArray()
vertexIDs.SetNumberOfComponents(1)
vertexIDs.SetName("VertexIDs")
# Set the vertex labels
vertexIDs.InsertNextValue("a")
vertexIDs.InsertNextValue("b")
source.GetOutput().GetPointData().AddArray(vertexIDs)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetLineWidth(width)
self.ren.AddActor(actor)
def add_polyhedron(
self,
neighbors,
center,
color,
opacity=1.0,
draw_edges=False,
edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2,
):
"""
Adds a polyhedron.
Args:
neighbors: Neighbors of the polyhedron (the vertices).
center: The atom in the center of the polyhedron.
color: Color for text as RGB.
opacity: Opacity of the polyhedron
draw_edges: If set to True, a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
conv = vtk.vtkConvexPointSet()
for i, n in enumerate(neighbors):
x, y, z = n.coords
points.InsertPoint(i, x, y, z)
conv.GetPointIds().InsertId(i, i)
grid = vtk.vtkUnstructuredGrid()
grid.Allocate(1, 1)
grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())
grid.SetPoints(points)
dsm = vtk.vtkDataSetMapper()
polysites = [center]
polysites.extend(neighbors)
self.mapper_map[dsm] = polysites
if vtk.VTK_MAJOR_VERSION <= 5:
dsm.SetInputConnection(grid.GetProducerPort())
else:
dsm.SetInputData(grid)
ac = vtk.vtkActor()
# ac.SetMapper(mapHull)
ac.SetMapper(dsm)
ac.GetProperty().SetOpacity(opacity)
if color == "element":
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_triangle(
self,
neighbors,
color,
center=None,
opacity=0.4,
draw_edges=False,
edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2,
):
"""
Adds a triangular surface between three atoms.
Args:
neighbors: The three sites between which the triangle will be drawn.
color: Color for triangle as RGB.
center: The "central atom" of the triangle
opacity: opacity of the triangle
draw_edges: If set to True, a line will be drawn at each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(neighbors[ii].x, neighbors[ii].y, neighbors[ii].z)
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
# polydata object
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
if color == "element":
if center is None:
raise ValueError(
"Color should be chosen according to the central atom, and central atom is not provided"
)
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_faces(self, faces, color, opacity=0.35):
"""
Adds the faces of a polygon.
Args:
faces (): Coordinates of the faces.
color (): Color.
opacity (float): Opacity
"""
for face in faces:
if len(face) == 3:
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
elif len(face) > 3:
center = np.zeros(3, np.float_)
for site in face:
center += site
center /= np.float_(len(face))
for ii, f in enumerate(face):
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
points.InsertNextPoint(f[0], f[1], f[2])
ii2 = np.mod(ii + 1, len(face))
points.InsertNextPoint(face[ii2][0], face[ii2][1], face[ii2][2])
points.InsertNextPoint(center[0], center[1], center[2])
for ii in range(3):
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
else:
raise ValueError("Number of points for a face should be >= 3")
def add_edges(self, edges, type="line", linewidth=2, color=[0.0, 0.0, 0.0]):
"""
Args:
edges (): List of edges
type (str): Type of edge. Currently unused; edges are always drawn as straight lines.
linewidth (): Width of line
color (nd.array/tuple): RGB color.
"""
points = vtk.vtkPoints()
lines = vtk.vtkCellArray()
for iedge, edge in enumerate(edges):
points.InsertPoint(2 * iedge, edge[0])
points.InsertPoint(2 * iedge + 1, edge[1])
lines.InsertNextCell(2)
lines.InsertCellPoint(2 * iedge)
lines.InsertCellPoint(2 * iedge + 1)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(polydata.GetProducerPort())
else:
mapper.SetInputData(polydata)
# mapper.SetInput(polydata)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetColor(color)
ac.GetProperty().SetLineWidth(linewidth)
self.ren.AddActor(ac)
def add_bonds(self, neighbors, center, color=None, opacity=None, radius=0.1):
"""
Adds bonds for a site.
Args:
neighbors: Neighbors of the site.
center: The site in the center for all bonds.
color: Color of the tubes representing the bonds
opacity: Opacity of the tubes representing the bonds
radius: Radius of the tubes representing the bonds
"""
points = vtk.vtkPoints()
points.InsertPoint(0, center.x, center.y, center.z)
n = len(neighbors)
lines = vtk.vtkCellArray()
for i in range(n):
points.InsertPoint(i + 1, neighbors[i].coords)
lines.InsertNextCell(2)
lines.InsertCellPoint(0)
lines.InsertCellPoint(i + 1)
pd = vtk.vtkPolyData()
pd.SetPoints(points)
pd.SetLines(lines)
tube = vtk.vtkTubeFilter()
if vtk.VTK_MAJOR_VERSION <= 5:
tube.SetInputConnection(pd.GetProducerPort())
else:
tube.SetInputData(pd)
tube.SetRadius(radius)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tube.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if opacity is not None:
actor.GetProperty().SetOpacity(opacity)
if color is not None:
actor.GetProperty().SetColor(color)
self.ren.AddActor(actor)
def add_picker_fixed(self):
"""
Create a cell picker.
"""
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
def annotate_pick(obj, event):
if picker.GetCellId() < 0 and not self.show_help:
self.helptxt_actor.VisibilityOff()
else:
mapper = picker.GetMapper()
if mapper in self.mapper_map:
output = []
for site in self.mapper_map[mapper]:
row = [
f"{site.species_string} - ",
", ".join([f"{c:.3f}" for c in site.frac_coords]),
"[" + ", ".join([f"{c:.3f}" for c in site.coords]) + "]",
]
output.append("".join(row))
self.helptxt_mapper.SetInput("\n".join(output))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
self.show_help = False
self.picker = picker
picker.AddObserver("EndPickEvent", annotate_pick)
self.iren.SetPicker(picker)
def add_picker(self):
"""
Create a cell picker.
"""
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
source = vtk.vtkVectorText()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor((0, 0, 0))
follower.SetScale(0.2)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
follower.VisibilityOff()
def annotate_pick(obj, event):
if picker.GetCellId() < 0:
follower.VisibilityOff()
else:
pick_pos = picker.GetPickPosition()
mapper = picker.GetMapper()
if mapper in self.mapper_map:
site = self.mapper_map[mapper]
output = [
site.species_string,
"Frac. coords: " + " ".join([f"{c:.4f}" for c in site.frac_coords]),
]
source.SetText("\n".join(output))
follower.SetPosition(pick_pos)
follower.VisibilityOn()
picker.AddObserver("EndPickEvent", annotate_pick)
self.picker = picker
self.iren.SetPicker(picker)
class StructureInteractorStyle(vtkInteractorStyleTrackballCamera):
"""
A custom interactor style for visualizing structures.
"""
def __init__(self, parent):
"""
Args:
parent ():
"""
self.parent = parent
self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
self.AddObserver("MouseMoveEvent", self.mouseMoveEvent)
self.AddObserver("LeftButtonReleaseEvent", self.leftButtonReleaseEvent)
self.AddObserver("KeyPressEvent", self.keyPressEvent)
def leftButtonPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
self.mouse_motion = 0
self.OnLeftButtonDown()
def mouseMoveEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
self.mouse_motion = 1
self.OnMouseMove()
def leftButtonReleaseEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
ren = obj.GetCurrentRenderer()
iren = ren.GetRenderWindow().GetInteractor()
if self.mouse_motion == 0:
pos = iren.GetEventPosition()
iren.GetPicker().Pick(pos[0], pos[1], 0, ren)
self.OnLeftButtonUp()
def keyPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym in "ABCabc":
if sym == "A":
parent.supercell[0][0] += 1
elif sym == "B":
parent.supercell[1][1] += 1
elif sym == "C":
parent.supercell[2][2] += 1
elif sym == "a":
parent.supercell[0][0] = max(parent.supercell[0][0] - 1, 1)
elif sym == "b":
parent.supercell[1][1] = max(parent.supercell[1][1] - 1, 1)
elif sym == "c":
parent.supercell[2][2] = max(parent.supercell[2][2] - 1, 1)
parent.redraw()
elif sym == "numbersign":
parent.show_polyhedron = not parent.show_polyhedron
parent.redraw()
elif sym == "minus":
parent.show_bonds = not parent.show_bonds
parent.redraw()
elif sym == "bracketleft":
parent.poly_radii_tol_factor -= 0.05 if parent.poly_radii_tol_factor > 0 else 0
parent.redraw()
elif sym == "bracketright":
parent.poly_radii_tol_factor += 0.05
parent.redraw()
elif sym == "h":
parent.show_help = not parent.show_help
parent.redraw()
elif sym == "r":
parent.redraw(True)
elif sym == "s":
parent.write_image("image.png")
elif sym == "Up":
parent.rotate_view(1, 90)
elif sym == "Down":
parent.rotate_view(1, -90)
elif sym == "Left":
parent.rotate_view(0, -90)
elif sym == "Right":
parent.rotate_view(0, 90)
elif sym == "o":
parent.orthongonalize_structure()
parent.redraw()
self.OnKeyPress()
def make_movie(structures, output_filename="movie.mp4", zoom=1.0, fps=20, bitrate="10000k", quality=1, **kwargs):
"""
Generate a movie from a sequence of structures using vtk and ffmpeg.
Args:
structures ([Structure]): sequence of structures
output_filename (str): filename for structure output. defaults to
movie.mp4
zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.
fps (int): Frames per second for the movie. Defaults to 20.
bitrate (str): Video bitrate. Defaults to "10000k" (fairly high
quality).
quality (int): A quality scale. Defaults to 1.
kwargs: Any kwargs supported by StructureVis to modify the images
generated.
"""
vis = StructureVis(**kwargs)
vis.show_help = False
vis.redraw()
vis.zoom(zoom)
sigfig = int(math.floor(math.log10(len(structures))) + 1)
filename = "image{0:0" + str(sigfig) + "d}.png"
for i, s in enumerate(structures):
vis.set_structure(s)
vis.write_image(filename.format(i), 3)
filename = "image%0" + str(sigfig) + "d.png"
args = [
"ffmpeg",
"-y",
"-i",
filename,
"-q:v",
str(quality),
"-r",
str(fps),
"-b:v",
str(bitrate),
output_filename,
]
with subprocess.Popen(args) as p:
p.communicate()
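# A minimal usage sketch for make_movie (file names are hypothetical; assumes a list of
# pymatgen Structure objects and that ffmpeg is available on the PATH):
#
#   from pymatgen.core import Structure
#   frames = [Structure.from_file(f"POSCAR_{i}") for i in range(5)]
#   make_movie(frames, output_filename="trajectory.mp4", zoom=1.5, fps=10)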
class MultiStructuresVis(StructureVis):
"""
Visualization for multiple structures.
"""
DEFAULT_ANIMATED_MOVIE_OPTIONS = {
"time_between_frames": 0.1,
"looping_type": "restart",
"number_of_loops": 1,
"time_between_loops": 1.0,
}
def __init__(
self,
element_color_mapping=None,
show_unit_cell=True,
show_bonds=False,
show_polyhedron=False,
poly_radii_tol_factor=0.5,
excluded_bonding_elements=None,
animated_movie_options=DEFAULT_ANIMATED_MOVIE_OPTIONS,
):
"""
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol"s color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
show_bonds: Set to True to show bonds. Defaults to False.
show_polyhedron: Set to True to show polyhedrons. Defaults to
False.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
animated_movie_options (dict): Options controlling the animated movie
    playback; see DEFAULT_ANIMATED_MOVIE_OPTIONS for the supported keys.
"""
super().__init__(
element_color_mapping=element_color_mapping,
show_unit_cell=show_unit_cell,
show_bonds=show_bonds,
show_polyhedron=show_polyhedron,
poly_radii_tol_factor=poly_radii_tol_factor,
excluded_bonding_elements=excluded_bonding_elements,
)
self.warningtxt_actor = vtk.vtkActor2D()
self.infotxt_actor = vtk.vtkActor2D()
self.structures = None
style = MultiStructuresInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.istruct = 0
self.current_structure = None
self.set_animated_movie_options(animated_movie_options=animated_movie_options)
def set_structures(self, structures, tags=None):
"""
Add list of structures to the visualizer.
Args:
structures (List of Structures):
tags (): List of tags.
"""
self.structures = structures
self.istruct = 0
self.current_structure = self.structures[self.istruct]
self.tags = tags if tags is not None else []
self.all_radii = []
self.all_vis_radii = []
for struct in self.structures:
struct_radii = []
struct_vis_radii = []
for site in struct:
radius = 0
for specie, occu in site.species.items():
radius += occu * (
specie.ionic_radius
if isinstance(specie, Species) and specie.ionic_radius
else specie.average_ionic_radius
)
vis_radius = 0.2 + 0.002 * radius
struct_radii.append(radius)
struct_vis_radii.append(vis_radius)
self.all_radii.append(struct_radii)
self.all_vis_radii.append(struct_vis_radii)
self.set_structure(self.current_structure, reset_camera=True, to_unit_cell=False)
def set_structure(self, structure, reset_camera=True, to_unit_cell=False):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
to_unit_cell: Whether or not to fall back sites into the unit cell.
"""
super().set_structure(structure=structure, reset_camera=reset_camera, to_unit_cell=to_unit_cell)
self.apply_tags()
def apply_tags(self):
"""
Apply tags.
"""
tags = {}
for tag in self.tags:
istruct = tag.get("istruct", "all")
if istruct != "all":
if istruct != self.istruct:
continue
site_index = tag["site_index"]
color = tag.get("color", [0.5, 0.5, 0.5])
opacity = tag.get("opacity", 0.5)
if site_index == "unit_cell_all":
struct_radii = self.all_vis_radii[self.istruct]
for isite, site in enumerate(self.current_structure):
vis_radius = 1.5 * tag.get("radius", struct_radii[isite])
tags[(isite, (0, 0, 0))] = {
"radius": vis_radius,
"color": color,
"opacity": opacity,
}
continue
cell_index = tag["cell_index"]
if "radius" in tag:
vis_radius = tag["radius"]
elif "radius_factor" in tag:
vis_radius = tag["radius_factor"] * self.all_vis_radii[self.istruct][site_index]
else:
vis_radius = 1.5 * self.all_vis_radii[self.istruct][site_index]
tags[(site_index, cell_index)] = {
"radius": vis_radius,
"color": color,
"opacity": opacity,
}
for site_and_cell_index, tag_style in tags.items():
isite, cell_index = site_and_cell_index
site = self.current_structure[isite]
if cell_index == (0, 0, 0):
coords = site.coords
else:
fcoords = site.frac_coords + np.array(cell_index)
site_image = PeriodicSite(
site.species,
fcoords,
self.current_structure.lattice,
to_unit_cell=False,
coords_are_cartesian=False,
properties=site.properties,
)
self.add_site(site_image)
coords = site_image.coords
vis_radius = tag_style["radius"]
color = tag_style["color"]
opacity = tag_style["opacity"]
self.add_partial_sphere(
coords=coords,
radius=vis_radius,
color=color,
start=0,
end=360,
opacity=opacity,
)
def set_animated_movie_options(self, animated_movie_options=None):
"""
Args:
animated_movie_options ():
"""
if animated_movie_options is None:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
else:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
for key in animated_movie_options:
if key not in self.DEFAULT_ANIMATED_MOVIE_OPTIONS:
    raise ValueError(f"Invalid animated movie option: {key}")
self.animated_movie_options.update(animated_movie_options)
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = [
"h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a, b or c unit vector",
"# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds",
"r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by 90 clockwise/anticlockwise",
"s: Save view to image.png",
"o: Orthogonalize structure",
"n: Move to next structure",
"p: Move to previous structure",
"m: Animated movie of the structures",
]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def display_warning(self, warning):
"""
Args:
warning (str): Warning
"""
self.warningtxt_mapper = vtk.vtkTextMapper()
tprops = self.warningtxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(1, 0, 0)
tprops.BoldOn()
tprops.SetJustificationToRight()
self.warningtxt = f"WARNING : {warning}"
self.warningtxt_actor = vtk.vtkActor2D()
self.warningtxt_actor.VisibilityOn()
self.warningtxt_actor.SetMapper(self.warningtxt_mapper)
self.ren.AddActor(self.warningtxt_actor)
self.warningtxt_mapper.SetInput(self.warningtxt)
winsize = self.ren_win.GetSize()
self.warningtxt_actor.SetPosition(winsize[0] - 10, 10)
self.warningtxt_actor.VisibilityOn()
def erase_warning(self):
"""
Remove warnings.
"""
self.warningtxt_actor.VisibilityOff()
def display_info(self, info):
"""
Args:
info (str): Information.
"""
self.infotxt_mapper = vtk.vtkTextMapper()
tprops = self.infotxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 1)
tprops.BoldOn()
tprops.SetVerticalJustificationToTop()
self.infotxt = f"INFO : {info}"
self.infotxt_actor = vtk.vtkActor2D()
self.infotxt_actor.VisibilityOn()
self.infotxt_actor.SetMapper(self.infotxt_mapper)
self.ren.AddActor(self.infotxt_actor)
self.infotxt_mapper.SetInput(self.infotxt)
winsize = self.ren_win.GetSize()
self.infotxt_actor.SetPosition(10, winsize[1] - 10)
self.infotxt_actor.VisibilityOn()
def erase_info(self):
"""
Erase all info.
"""
self.infotxt_actor.VisibilityOff()
class MultiStructuresInteractorStyle(StructureInteractorStyle):
"""
Interactor for MultiStructureVis.
"""
def __init__(self, parent):
"""
Args:
parent ():
"""
StructureInteractorStyle.__init__(self, parent=parent)
def keyPressEvent(self, obj, event):
"""
Args:
obj ():
event ():
"""
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym == "n":
if parent.istruct == len(parent.structures) - 1:
parent.display_warning("LAST STRUCTURE")
parent.ren_win.Render()
else:
parent.istruct += 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "p":
if parent.istruct == 0:
parent.display_warning("FIRST STRUCTURE")
parent.ren_win.Render()
else:
parent.istruct -= 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "m":
parent.istruct = 0
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
nloops = parent.animated_movie_options["number_of_loops"]
tstep = parent.animated_movie_options["time_between_frames"]
tloops = parent.animated_movie_options["time_between_loops"]
if parent.animated_movie_options["looping_type"] == "restart":
loop_istructs = range(len(parent.structures))
elif parent.animated_movie_options["looping_type"] == "palindrome":
loop_istructs = list(range(len(parent.structures))) + list(range(len(parent.structures) - 2, -1, -1))
else:
raise ValueError('"looping_type" should be "restart" or "palindrome"')
for iloop in range(nloops):
for istruct in loop_istructs:
time.sleep(tstep)
parent.istruct = istruct
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.display_info(
"Animated movie : structure {:d}/{:d} "
"(loop {:d}/{:d})".format(istruct + 1, len(parent.structures), iloop + 1, nloops)
)
parent.ren_win.Render()
time.sleep(tloops)
parent.erase_info()
parent.display_info("Ended animated movie ...")
parent.ren_win.Render()
StructureInteractorStyle.keyPressEvent(self, obj, event)
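# A minimal usage sketch for MultiStructuresVis (assumes `structures` is a list of
# pymatgen Structure objects; "n"/"p" step between structures and "m" plays the movie):
#
#   vis = MultiStructuresVis()
#   vis.set_structures(structures)
#   vis.show()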
|
|
"""
A module for DELPHI's Epidata API.
https://github.com/cmu-delphi/delphi-epidata
Notes:
- Requires the `requests` module.
- Compatible with Python 2 and 3.
"""
# External modules
import requests
# Because the API is stateless, the Epidata class only contains static methods
class Epidata:
"""An interface to DELPHI's Epidata API."""
# API base url
BASE_URL = 'https://delphi.midas.cs.cmu.edu/epidata/api.php'
# Helper function to cast values and/or ranges to strings
@staticmethod
def _listitem(value):
"""Cast values and/or range to a string."""
if isinstance(value, dict) and 'from' in value and 'to' in value:
return str(value['from']) + '-' + str(value['to'])
else:
return str(value)
# Helper function to build a list of values and/or ranges
@staticmethod
def _list(values):
"""Turn a list/tuple of values/ranges into a comma-separated string."""
if not isinstance(values, (list, tuple)):
values = [values]
return ','.join([Epidata._listitem(value) for value in values])
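# For example (a sketch of the expected conversions):
#   Epidata._list(201440)                          -> '201440'
#   Epidata._list([201440, 201501])                -> '201440,201501'
#   Epidata._list([Epidata.range(201440, 201520)]) -> '201440-201520'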
# Helper function to request and parse epidata
@staticmethod
def _request(params):
"""Request and parse epidata."""
try:
# API call
return requests.get(Epidata.BASE_URL, params).json()
except Exception as e:
# Something broke
return {'result': 0, 'message': 'error: ' + str(e)}
# Raise an Exception on error, otherwise return epidata
@staticmethod
def check(resp):
"""Raise an Exception on error, otherwise return epidata."""
if resp['result'] != 1:
msg, code = resp['message'], resp['result']
raise Exception('Error fetching epidata: %s. (result=%d)' % (msg, code))
return resp['epidata']
# Build a `range` object (ex: dates/epiweeks)
@staticmethod
def range(from_, to_):
"""Build a `range` object (ex: dates/epiweeks)."""
if to_ <= from_:
from_, to_ = to_, from_
return {'from': from_, 'to': to_}
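# For example (a sketch): Epidata.range(201501, 201510) returns
# {'from': 201501, 'to': 201510}; swapped arguments yield the same dict.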
# Fetch FluView data
@staticmethod
def fluview(regions, epiweeks, issues=None, lag=None, auth=None):
"""Fetch FluView data."""
# Check parameters
if regions is None or epiweeks is None:
raise Exception('`regions` and `epiweeks` are both required')
if issues is not None and lag is not None:
raise Exception('`issues` and `lag` are mutually exclusive')
# Set up request
params = {
'source': 'fluview',
'regions': Epidata._list(regions),
'epiweeks': Epidata._list(epiweeks),
}
if issues is not None:
params['issues'] = Epidata._list(issues)
if lag is not None:
params['lag'] = lag
if auth is not None:
params['auth'] = auth
# Make the API call
return Epidata._request(params)
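# A minimal usage sketch (assumes network access to BASE_URL):
#   res = Epidata.fluview(['nat'], [Epidata.range(201440, 201520)])
#   rows = Epidata.check(res)  # raises on a non-success result, otherwise returns the epidata list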
# Fetch FluView clinical data
@staticmethod
def fluview_clinical(regions, epiweeks, issues=None, lag=None):
"""Fetch FluView clinical data."""
# Check parameters
if regions is None or epiweeks is None:
raise Exception('`regions` and `epiweeks` are both required')
if issues is not None and lag is not None:
raise Exception('`issues` and `lag` are mutually exclusive')
# Set up request
params = {
'source': 'fluview_clinical',
'regions': Epidata._list(regions),
'epiweeks': Epidata._list(epiweeks),
}
if issues is not None:
params['issues'] = Epidata._list(issues)
if lag is not None:
params['lag'] = lag
# Make the API call
return Epidata._request(params)
# Fetch FluSurv data
@staticmethod
def flusurv(locations, epiweeks, issues=None, lag=None):
"""Fetch FluSurv data."""
# Check parameters
if locations is None or epiweeks is None:
raise Exception('`locations` and `epiweeks` are both required')
if issues is not None and lag is not None:
raise Exception('`issues` and `lag` are mutually exclusive')
# Set up request
params = {
'source': 'flusurv',
'locations': Epidata._list(locations),
'epiweeks': Epidata._list(epiweeks),
}
if issues is not None:
params['issues'] = Epidata._list(issues)
if lag is not None:
params['lag'] = lag
# Make the API call
return Epidata._request(params)
# Fetch PAHO Dengue data
@staticmethod
def paho_dengue(regions, epiweeks, issues=None, lag=None):
"""Fetch PAHO Dengue data."""
# Check parameters
if regions is None or epiweeks is None:
raise Exception('`regions` and `epiweeks` are both required')
if issues is not None and lag is not None:
raise Exception('`issues` and `lag` are mutually exclusive')
# Set up request
params = {
'source': 'paho_dengue',
'regions': Epidata._list(regions),
'epiweeks': Epidata._list(epiweeks),
}
if issues is not None:
params['issues'] = Epidata._list(issues)
if lag is not None:
params['lag'] = lag
# Make the API call
return Epidata._request(params)
# Fetch Google Flu Trends data
@staticmethod
def gft(locations, epiweeks):
"""Fetch Google Flu Trends data."""
# Check parameters
if locations is None or epiweeks is None:
raise Exception('`locations` and `epiweeks` are both required')
# Set up request
params = {
'source': 'gft',
'locations': Epidata._list(locations),
'epiweeks': Epidata._list(epiweeks),
}
# Make the API call
return Epidata._request(params)
# Fetch Google Health Trends data
@staticmethod
def ght(auth, locations, epiweeks, query):
"""Fetch Google Health Trends data."""
# Check parameters
if auth is None or locations is None or epiweeks is None or query is None:
raise Exception('`auth`, `locations`, `epiweeks`, and `query` are all required')
# Set up request
params = {
'source': 'ght',
'auth': auth,
'locations': Epidata._list(locations),
'epiweeks': Epidata._list(epiweeks),
'query': query,
}
# Make the API call
return Epidata._request(params)
# Fetch HealthTweets data
@staticmethod
def twitter(auth, locations, dates=None, epiweeks=None):
"""Fetch HealthTweets data."""
# Check parameters
if auth is None or locations is None:
raise Exception('`auth` and `locations` are both required')
if not ((dates is None) ^ (epiweeks is None)):
raise Exception('exactly one of `dates` and `epiweeks` is required')
# Set up request
params = {
'source': 'twitter',
'auth': auth,
'locations': Epidata._list(locations),
}
if dates is not None:
params['dates'] = Epidata._list(dates)
if epiweeks is not None:
params['epiweeks'] = Epidata._list(epiweeks)
# Make the API call
return Epidata._request(params)
# Fetch Wikipedia access data
@staticmethod
def wiki(articles, dates=None, epiweeks=None, hours=None, language='en'):
"""Fetch Wikipedia access data."""
# Check parameters
if articles is None:
raise Exception('`articles` is required')
if not ((dates is None) ^ (epiweeks is None)):
raise Exception('exactly one of `dates` and `epiweeks` is required')
# Set up request
params = {
'source': 'wiki',
'articles': Epidata._list(articles),
'language': language,
}
if dates is not None:
params['dates'] = Epidata._list(dates)
if epiweeks is not None:
params['epiweeks'] = Epidata._list(epiweeks)
if hours is not None:
params['hours'] = Epidata._list(hours)
# Make the API call
return Epidata._request(params)
# Fetch CDC page hits
@staticmethod
def cdc(auth, epiweeks, locations):
"""Fetch CDC page hits."""
# Check parameters
if auth is None or epiweeks is None or locations is None:
raise Exception('`auth`, `epiweeks`, and `locations` are all required')
# Set up request
params = {
'source': 'cdc',
'auth': auth,
'epiweeks': Epidata._list(epiweeks),
'locations': Epidata._list(locations),
}
# Make the API call
return Epidata._request(params)
# Fetch Quidel data
@staticmethod
def quidel(auth, epiweeks, locations):
"""Fetch Quidel data."""
# Check parameters
if auth is None or epiweeks is None or locations is None:
raise Exception('`auth`, `epiweeks`, and `locations` are all required')
# Set up request
params = {
'source': 'quidel',
'auth': auth,
'epiweeks': Epidata._list(epiweeks),
'locations': Epidata._list(locations),
}
# Make the API call
return Epidata._request(params)
# Fetch NoroSTAT data (point data, no min/max)
@staticmethod
def norostat(auth, location, epiweeks):
"""Fetch NoroSTAT data (point data, no min/max)."""
# Check parameters
if auth is None or location is None or epiweeks is None:
raise Exception('`auth`, `location`, and `epiweeks` are all required')
# Set up request
params = {
'source': 'norostat',
'auth': auth,
'location': location,
'epiweeks': Epidata._list(epiweeks),
}
# Make the API call
return Epidata._request(params)
# Fetch NoroSTAT metadata
@staticmethod
def meta_norostat(auth):
"""Fetch NoroSTAT metadata."""
# Check parameters
if auth is None:
raise Exception('`auth` is required')
# Set up request
params = {
'source': 'meta_norostat',
'auth': auth,
}
# Make the API call
return Epidata._request(params)
# Fetch NIDSS flu data
@staticmethod
def nidss_flu(regions, epiweeks, issues=None, lag=None):
"""Fetch NIDSS flu data."""
# Check parameters
if regions is None or epiweeks is None:
raise Exception('`regions` and `epiweeks` are both required')
if issues is not None and lag is not None:
raise Exception('`issues` and `lag` are mutually exclusive')
# Set up request
params = {
'source': 'nidss_flu',
'regions': Epidata._list(regions),
'epiweeks': Epidata._list(epiweeks),
}
if issues is not None:
params['issues'] = Epidata._list(issues)
if lag is not None:
params['lag'] = lag
# Make the API call
return Epidata._request(params)
# Fetch NIDSS dengue data
@staticmethod
def nidss_dengue(locations, epiweeks):
"""Fetch NIDSS dengue data."""
# Check parameters
if locations is None or epiweeks is None:
raise Exception('`locations` and `epiweeks` are both required')
# Set up request
params = {
'source': 'nidss_dengue',
'locations': Epidata._list(locations),
'epiweeks': Epidata._list(epiweeks),
}
# Make the API call
return Epidata._request(params)
# Fetch Delphi's forecast
@staticmethod
def delphi(system, epiweek):
"""Fetch Delphi's forecast."""
# Check parameters
if system is None or epiweek is None:
raise Exception('`system` and `epiweek` are both required')
# Set up request
params = {
'source': 'delphi',
'system': system,
'epiweek': epiweek,
}
# Make the API call
return Epidata._request(params)
# Fetch Delphi's digital surveillance sensors
@staticmethod
def sensors(auth, names, locations, epiweeks):
"""Fetch Delphi's digital surveillance sensors."""
# Check parameters
if auth is None or names is None or locations is None or epiweeks is None:
raise Exception('`auth`, `names`, `locations`, and `epiweeks` are all required')
# Set up request
params = {
'source': 'sensors',
'auth': auth,
'names': Epidata._list(names),
'locations': Epidata._list(locations),
'epiweeks': Epidata._list(epiweeks),
}
# Make the API call
return Epidata._request(params)
# Fetch Delphi's dengue digital surveillance sensors
@staticmethod
def dengue_sensors(auth, names, locations, epiweeks):
"""Fetch Delphi's digital surveillance sensors."""
# Check parameters
if auth is None or names is None or locations is None or epiweeks is None:
raise Exception('`auth`, `names`, `locations`, and `epiweeks` are all required')
# Set up request
params = {
'source': 'dengue_sensors',
'auth': auth,
'names': Epidata._list(names),
'locations': Epidata._list(locations),
'epiweeks': Epidata._list(epiweeks),
}
# Make the API call
return Epidata._request(params)
# Fetch Delphi's wILI nowcast
@staticmethod
def nowcast(locations, epiweeks):
"""Fetch Delphi's wILI nowcast."""
# Check parameters
if locations is None or epiweeks is None:
raise Exception('`locations` and `epiweeks` are both required')
# Set up request
params = {
'source': 'nowcast',
'locations': Epidata._list(locations),
'epiweeks': Epidata._list(epiweeks),
}
# Make the API call
return Epidata._request(params)
# Fetch Delphi's dengue nowcast
@staticmethod
def dengue_nowcast(locations, epiweeks):
"""Fetch Delphi's dengue nowcast."""
# Check parameters
if locations is None or epiweeks is None:
raise Exception('`locations` and `epiweeks` are both required')
# Set up request
params = {
'source': 'dengue_nowcast',
'locations': Epidata._list(locations),
'epiweeks': Epidata._list(epiweeks),
}
# Make the API call
return Epidata._request(params)
# Fetch API metadata
@staticmethod
def meta():
"""Fetch API metadata."""
return Epidata._request({'source': 'meta'})
|
|
#
# /pycchecker/pycchecker.py
#
# Copyright (C) 2014-2015, Jia Jia
# License: BSD
#
import sys
import getopt
sys.path.extend(['.', '..'])
from pycparser import c_parser, c_ast, parse_file
banner = """
_ _
_ __ _ _ ___ ___| |__ ___ ___| | _____ _ __
| '_ \| | | |/ __/ __| '_ \ / _ \/ __| |/ / _ \ '__|
| |_) | |_| | (_| (__| | | | __/ (__| < __/ |
| .__/ \__, |\___\___|_| |_|\___|\___|_|\_\___|_|
|_| |___/
"""
NodeSizeTable = {
'int8' : 1,
'int16' : 2,
'int32' : 4,
'int64' : 8,
'int8_t' : 1,
'int16_t' : 2,
'int32_t' : 4,
'int64_t' : 8,
'uint8' : 1,
'uint16' : 2,
'uint32' : 4,
'uint64' : 8,
'uint8_t' : 1,
'uint16_t' : 2,
'uint32_t' : 4,
'uint64_t' : 8,
}
""" Rule of Node """
node_total_size = 200
node_prealloc_count = 10
node_prealloc_size = NodeSizeTable['uint32_t'] * node_prealloc_count
node_prealloc_last = 'reset_status_register'
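# With the values above, node_prealloc_size evaluates to
# NodeSizeTable['uint32_t'] * node_prealloc_count = 4 * 10 = 40 bytes.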
class AstChecker:
""" AST checker """
def __init__(self, filename, verbose=False):
self.cpp_path = 'gcc'
self.cpp_args = ['-E', r'-I./utils/fake_libc_include']
self.sep = '----------------------------------------------------------'
self.decl = 'Decl'
self.typedef = 'Typedef'
self.struct = 'Struct'
self.filename = filename
self.ast = None
if verbose is True: print >> sys.stdout, self.sep
self.ast = self.__gen_ast(self.filename, verbose)
if self.ast is None:
raise OSError, 'Error: failed to generate AST from %s!' % \
(self.filename)
def __del__(self):
pass
def __gen_ast(self, filename, verbose=False):
if verbose is True: print >> sys.stdout, 'Generate AST from %s' % \
(filename)
ast = parse_file(filename,
use_cpp=True,
cpp_path=self.cpp_path,
cpp_args=self.cpp_args)
""" if verbose is True: ast.show() """
return ast
def __find_struct(self, ast, typename, verbose=False):
""" Refer to pycparser/c_ast.py & _c_ast.cfg """
node = None
if verbose is True: print >> sys.stdout, 'Search struct %s in AST' % \
(typename)
for item in ast.ext:
if item.__class__.__name__ == self.decl:
tp = item.type
if tp.__class__.__name__ == self.struct and \
tp.name == typename:
node = tp
break
if node is None:
for item in ast.ext:
if item.__class__.__name__ == self.typedef:
tp = item.type
if tp.declname == typename and \
tp.type.__class__.__name__ == self.struct:
node = tp.type
break
return node
def __query_struct(self, ast, node, itemlast='', verbose=False):
""" Refer to pycparser/c_ast.py & _c_ast.cfg """
global NodeSizeTable
size = 0
count = 0
if node is None:
return 0, 0
for i, decl in enumerate(node.decls):
typedecl = decl.type
identifiertype = typedecl.type
name = identifiertype.names[0]
found = False
for key, val in NodeSizeTable.items():
if key == name:
size += NodeSizeTable[name]
found = True
break
if found is False:
s, c = self.__query_struct(ast,
self.__find_struct(ast,
name,
verbose),
itemlast,
verbose)
size += s
if itemlast != '' and typedecl.declname == itemlast:
count = i + 1
break
if itemlast != '' and count == 0:
size = 0
return size, count
def __check_struct(self, ast, node, verbose=False):
global node_total_size
global node_prealloc_count, node_prealloc_size, node_prealloc_last
ret_total = False
ret_size = False
ret_count = False
msg = '\n'
msg += 'Check struct: %s\n' % (node.name)
msg += '- Check total size: '
size, count = self.__query_struct(ast, node, '', verbose)
if size <= node_total_size:
msg += 'PASS (size %d, max size %d)\n' % (size, node_total_size)
ret_total = True
else:
msg += 'FAILED (size %d, max size %d)\n' % (size, node_total_size)
ret_total = False
msg += '- Check prealloc size: '
size, count = self.__query_struct(ast,
node,
node_prealloc_last,
verbose)
if size == node_prealloc_size:
msg += 'PASS (size %d, prealloc size %d)\n' % \
(size, node_prealloc_size)
ret_size = True
else:
msg += 'FAILED (size %d, prealloc size %d)\n' % \
(size, node_prealloc_size)
ret_size = False
msg += '- Check prealloc count: '
if count == node_prealloc_count:
msg += 'PASS (count %d, prealloc count %d)' % \
(count, node_prealloc_count)
ret_count = True
else:
msg += 'FAILED (count %d, prealloc count %d)' % \
(count, node_prealloc_count)
ret_count = False
if verbose is True: print >> sys.stdout, msg
if ret_total is False or \
ret_size is False or \
ret_count is False:
return False
return True
def __compare_struct(self,
filename,
typename,
leftnode,
rightast,
rightnode,
verbose=False):
global node_prealloc_last
leftsize_total, leftcount_total = self.__query_struct(self.ast,
leftnode,
'',
verbose=False)
leftsize_prealloc, leftcount_prealloc = \
self.__query_struct(self.ast,
leftnode,
node_prealloc_last,
verbose=False)
leftsize_customized = leftsize_total - leftsize_prealloc
rightsize_total, rightcount_total = self.__query_struct(rightast,
rightnode,
'',
verbose=False)
rightsize_prealloc, rightcount_prealloc = \
self.__query_struct(rightast,
rightnode,
node_prealloc_last,
verbose=False)
rightsize_customized = rightsize_total - rightsize_prealloc
if verbose is True:
print >> sys.stdout, self.sep
print >> sys.stdout, 'Compare struct', typename
print >> sys.stdout
print >> sys.stdout, 'Left:'
print >> sys.stdout, '- File:', self.filename
print >> sys.stdout, '- Total size:', leftsize_total
print >> sys.stdout, '- Prealloc size:', leftsize_prealloc
print >> sys.stdout, '- Customized size:', leftsize_customized
print >> sys.stdout
print >> sys.stdout, 'Right:'
print >> sys.stdout, '- File:', filename
print >> sys.stdout, '- Total size:', rightsize_total
print >> sys.stdout, '- Prealloc size:', rightsize_prealloc
print >> sys.stdout, '- Customized size:', rightsize_customized
if leftsize_customized != rightsize_customized:
return False
return True
def check(self, typename, verbose=False):
node = self.__find_struct(self.ast, typename, verbose)
if node is None:
raise OSError, 'Error: %s not found' % (typename)
ret = self.__check_struct(self.ast, node, verbose)
print >> sys.stdout, self.sep
if ret is True:
print >> sys.stdout, 'Check result: PASS\n'
else:
print >> sys.stdout, 'Check result: FAILED\n'
return ret
def compare(self, filename, typename, verbose=False):
leftnode = self.__find_struct(self.ast, typename, verbose=False)
if leftnode is None:
raise OSError, 'Error: %s not found' % (typename)
rightast = self.__gen_ast(filename, verbose=False)
if rightast is None:
raise OSError, 'Error: failed to generate AST from %s!' % (filename)
rightnode = self.__find_struct(rightast, typename, verbose=False)
if rightnode is None:
raise OSError, 'Error: %s not found' % (typename)
ret = self.__compare_struct(filename,
typename,
leftnode,
rightast,
rightnode,
verbose)
msg = self.sep + '\n'
if ret is True:
msg += 'Compare result: SAME\n'
else:
msg += 'Compare result: DIFF\n'
print >> sys.stdout, msg
return ret
def print_banner():
""" Print banner """
global banner
print >> sys.stdout, banner
def print_usage():
""" Print usage """
print >> sys.stdout, ''
print >> sys.stdout, 'Version: 14.12'
print >> sys.stdout, 'License: BSD'
print >> sys.stdout, 'Author: Jia Jia'
print >> sys.stdout, ''
print >> sys.stdout, 'USAGE: python pycchecker.py [OPTION...]'
print >> sys.stdout, ''
print >> sys.stdout, 'OPTIONS:'
print >> sys.stdout, ' -f, --file NAME C source file NAME'
print >> sys.stdout, ' -t, --type NAME Type NAME in C source file'
print >> sys.stdout, ' -v, --verbose Display detailed information'
print >> sys.stdout, ' -h, --help Display help message'
print >> sys.stdout, ''
def main():
""" Main entry """
filelist = []
typename = ''
verbose = False
print_banner()
try:
opts, args = getopt.getopt(sys.argv[1:],
'f:t:o:vh',
['file=', 'type=', 'verbose', 'help'])
except getopt.GetoptError, err:
print >> sys.stderr, err
print_usage()
sys.exit(1)
for o, a in opts:
if o in ('-f', '--file'):
filelist.append(a)
elif o in ('-t', '--type'):
typename = a
elif o in ('-v', '--verbose'):
verbose = True
elif o in ('-h', '--help'):
print_usage()
sys.exit(0)
else:
continue
if filelist == []:
print >> sys.stderr, 'Error: incorrect file name!'
print_usage()
sys.exit(1)
if typename == '':
print >> sys.stderr, 'Error: incorrect type name!'
print_usage()
sys.exit(1)
try:
for f in filelist:
checker = AstChecker(f, verbose)
ret = checker.check(typename, verbose)
if ret is False: pass
if len(filelist) > 1:
checker = AstChecker(filelist[0], verbose=False)
for f in filelist[1:]:
ret = checker.compare(f, typename, verbose)
if ret is False: pass
except IOError, err:
print >> sys.stderr, str(err)
sys.exit(1)
except OSError, err:
print >> sys.stderr, str(err)
sys.exit(1)
if __name__ == "__main__":
main()
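# Example invocations (a sketch; file and type names are hypothetical):
#   python pycchecker.py -f node.h -t node_t -v
#   python pycchecker.py -f old_node.h -f new_node.h -t node_t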
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Executes task in a Kubernetes POD"""
import re
import warnings
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
from kubernetes.client import CoreV1Api, models as k8s
try:
import airflow.utils.yaml as yaml
except ImportError:
import yaml
from airflow.exceptions import AirflowException
from airflow.kubernetes import kube_client, pod_generator
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.kubernetes.secret import Secret
from airflow.models import BaseOperator
from airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters import (
convert_affinity,
convert_configmap,
convert_env_vars,
convert_image_pull_secrets,
convert_pod_runtime_info_env,
convert_port,
convert_resources,
convert_toleration,
convert_volume,
convert_volume_mount,
)
from airflow.providers.cncf.kubernetes.backcompat.pod_runtime_info_env import PodRuntimeInfoEnv
from airflow.providers.cncf.kubernetes.utils import pod_launcher, xcom_sidecar
from airflow.utils.helpers import validate_key
from airflow.utils.state import State
from airflow.version import version as airflow_version
if TYPE_CHECKING:
import jinja2
class KubernetesPodOperator(BaseOperator):
"""
Execute a task in a Kubernetes Pod
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:KubernetesPodOperator`
.. note::
If you use `Google Kubernetes Engine <https://cloud.google.com/kubernetes-engine/>`__
and Airflow is not running in the same cluster, consider using
:class:`~airflow.providers.google.cloud.operators.kubernetes_engine.GKEStartPodOperator`, which
simplifies the authorization process.
:param namespace: the namespace to run within kubernetes.
:type namespace: str
:param image: Docker image you wish to launch. Defaults to hub.docker.com,
but fully qualified URLS will point to custom repositories. (templated)
:type image: str
:param name: name of the pod in which the task will run, will be used (plus a random
suffix) to generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]).
:type name: str
:param cmds: entrypoint of the container. (templated)
The docker image's entrypoint is used if this is not provided.
:type cmds: list[str]
:param arguments: arguments of the entrypoint. (templated)
The docker image's CMD is used if this is not provided.
:type arguments: list[str]
:param ports: ports for launched pod.
:type ports: list[k8s.V1ContainerPort]
:param volume_mounts: volumeMounts for launched pod.
:type volume_mounts: list[k8s.V1VolumeMount]
:param volumes: volumes for launched pod. Includes ConfigMaps and PersistentVolumes.
:type volumes: list[k8s.V1Volume]
:param env_vars: Environment variables initialized in the container. (templated)
:type env_vars: list[k8s.V1EnvVar]
:param secrets: Kubernetes secrets to inject in the container.
They can be exposed as environment vars or files in a volume.
:type secrets: list[airflow.kubernetes.secret.Secret]
:param in_cluster: run kubernetes client with in_cluster configuration.
:type in_cluster: bool
:param cluster_context: context that points to kubernetes cluster.
Ignored when in_cluster is True. If None, current-context is used.
:type cluster_context: str
:param reattach_on_restart: if the scheduler dies while the pod is running, reattach and monitor
:type reattach_on_restart: bool
:param labels: labels to apply to the Pod. (templated)
:type labels: dict
:param startup_timeout_seconds: timeout in seconds to startup the pod.
:type startup_timeout_seconds: int
:param get_logs: get the stdout of the container as logs of the tasks.
:type get_logs: bool
:param image_pull_policy: Specify a policy to cache or always pull an image.
:type image_pull_policy: str
:param annotations: non-identifying metadata you can attach to the Pod.
Can be a large range of data, and can include characters
that are not permitted by labels.
:type annotations: dict
:param resources: A dict containing resources requests and limits.
Possible keys are request_memory, request_cpu, limit_memory, limit_cpu,
and limit_gpu, which will be used to generate airflow.kubernetes.pod.Resources.
See also kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
:type resources: k8s.V1ResourceRequirements
:param affinity: A dict containing a group of affinity scheduling rules.
:type affinity: k8s.V1Affinity
:param config_file: The path to the Kubernetes config file. (templated)
If not specified, default value is ``~/.kube/config``
:type config_file: str
:param node_selectors: A dict containing a group of scheduling rules.
:type node_selectors: dict
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a
comma separated list: secret_a,secret_b
:type image_pull_secrets: List[k8s.V1LocalObjectReference]
:param service_account_name: Name of the service account
:type service_account_name: str
:param is_delete_operator_pod: What to do when the pod reaches its final
state, or the execution is interrupted.
If False (default): do nothing, If True: delete the pod
:type is_delete_operator_pod: bool
:param hostnetwork: If True enable host networking on the pod.
:type hostnetwork: bool
:param tolerations: A list of kubernetes tolerations.
:type tolerations: List[k8s.V1Toleration]
:param security_context: security options the pod should run with (PodSecurityContext).
:type security_context: dict
:param dnspolicy: dnspolicy for the pod.
:type dnspolicy: str
:param schedulername: Specify a schedulername for the pod
:type schedulername: str
:param full_pod_spec: The complete podSpec
:type full_pod_spec: kubernetes.client.models.V1Pod
:param init_containers: init container for the launched Pod
:type init_containers: list[kubernetes.client.models.V1Container]
:param log_events_on_failure: Log the pod's events if a failure occurs
:type log_events_on_failure: bool
:param do_xcom_push: If True, the content of the file
/airflow/xcom/return.json in the container will also be pushed to an
XCom when the container completes.
:type do_xcom_push: bool
:param pod_template_file: path to pod template file (templated)
:type pod_template_file: str
:param priority_class_name: priority class name for the launched Pod
:type priority_class_name: str
:param termination_grace_period: Termination grace period if task killed in UI,
defaults to kubernetes default
:type termination_grace_period: int
"""
template_fields: Iterable[str] = (
'image',
'cmds',
'arguments',
'env_vars',
'labels',
'config_file',
'pod_template_file',
)
template_ext = ('.yaml', '.yml', '.json')
# fmt: off
def __init__(
# fmt: on
self,
*,
namespace: Optional[str] = None,
image: Optional[str] = None,
name: Optional[str] = None,
cmds: Optional[List[str]] = None,
arguments: Optional[List[str]] = None,
ports: Optional[List[k8s.V1ContainerPort]] = None,
volume_mounts: Optional[List[k8s.V1VolumeMount]] = None,
volumes: Optional[List[k8s.V1Volume]] = None,
env_vars: Optional[List[k8s.V1EnvVar]] = None,
env_from: Optional[List[k8s.V1EnvFromSource]] = None,
secrets: Optional[List[Secret]] = None,
in_cluster: Optional[bool] = None,
cluster_context: Optional[str] = None,
labels: Optional[Dict] = None,
reattach_on_restart: bool = True,
startup_timeout_seconds: int = 120,
get_logs: bool = True,
image_pull_policy: Optional[str] = None,
annotations: Optional[Dict] = None,
resources: Optional[k8s.V1ResourceRequirements] = None,
affinity: Optional[k8s.V1Affinity] = None,
config_file: Optional[str] = None,
node_selectors: Optional[dict] = None,
node_selector: Optional[dict] = None,
image_pull_secrets: Optional[List[k8s.V1LocalObjectReference]] = None,
service_account_name: Optional[str] = None,
is_delete_operator_pod: bool = False,
hostnetwork: bool = False,
tolerations: Optional[List[k8s.V1Toleration]] = None,
security_context: Optional[Dict] = None,
dnspolicy: Optional[str] = None,
schedulername: Optional[str] = None,
full_pod_spec: Optional[k8s.V1Pod] = None,
init_containers: Optional[List[k8s.V1Container]] = None,
log_events_on_failure: bool = False,
do_xcom_push: bool = False,
pod_template_file: Optional[str] = None,
priority_class_name: Optional[str] = None,
pod_runtime_info_envs: Optional[List[PodRuntimeInfoEnv]] = None,
termination_grace_period: Optional[int] = None,
configmaps: Optional[List[str]] = None,
**kwargs,
) -> None:
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'do_xcom_push' instead")
super().__init__(resources=None, **kwargs)
self.do_xcom_push = do_xcom_push
self.image = image
self.namespace = namespace
self.cmds = cmds or []
self.arguments = arguments or []
self.labels = labels or {}
self.startup_timeout_seconds = startup_timeout_seconds
self.env_vars = convert_env_vars(env_vars) if env_vars else []
if pod_runtime_info_envs:
self.env_vars.extend([convert_pod_runtime_info_env(p) for p in pod_runtime_info_envs])
self.env_from = env_from or []
if configmaps:
self.env_from.extend([convert_configmap(c) for c in configmaps])
self.ports = [convert_port(p) for p in ports] if ports else []
self.volume_mounts = [convert_volume_mount(v) for v in volume_mounts] if volume_mounts else []
self.volumes = [convert_volume(volume) for volume in volumes] if volumes else []
self.secrets = secrets or []
self.in_cluster = in_cluster
self.cluster_context = cluster_context
self.reattach_on_restart = reattach_on_restart
self.get_logs = get_logs
self.image_pull_policy = image_pull_policy
if node_selectors:
# Node selectors is incorrect based on k8s API
warnings.warn(
"node_selectors is deprecated. Please use node_selector instead.", DeprecationWarning
)
self.node_selector = node_selectors
elif node_selector:
self.node_selector = node_selector
else:
self.node_selector = {}
self.annotations = annotations or {}
self.affinity = convert_affinity(affinity) if affinity else {}
self.k8s_resources = convert_resources(resources) if resources else {}
self.config_file = config_file
self.image_pull_secrets = convert_image_pull_secrets(image_pull_secrets) if image_pull_secrets else []
self.service_account_name = service_account_name
self.is_delete_operator_pod = is_delete_operator_pod
self.hostnetwork = hostnetwork
self.tolerations = [convert_toleration(toleration) for toleration in tolerations] \
if tolerations else []
self.security_context = security_context or {}
self.dnspolicy = dnspolicy
self.schedulername = schedulername
self.full_pod_spec = full_pod_spec
self.init_containers = init_containers or []
self.log_events_on_failure = log_events_on_failure
self.priority_class_name = priority_class_name
self.pod_template_file = pod_template_file
self.name = self._set_name(name)
self.termination_grace_period = termination_grace_period
self.client: CoreV1Api = None
self.pod: k8s.V1Pod = None
def _render_nested_template_fields(
self,
content: Any,
context: Dict,
jinja_env: "jinja2.Environment",
seen_oids: set,
) -> None:
if id(content) not in seen_oids and isinstance(content, k8s.V1EnvVar):
seen_oids.add(id(content))
self._do_render_template_fields(content, ('value', 'name'), context, jinja_env, seen_oids)
return
super()._render_nested_template_fields(
content,
context,
jinja_env,
seen_oids
)
@staticmethod
def create_labels_for_pod(context) -> dict:
"""
Generate labels for the pod to track the pod in case of Operator crash
:param context: task context provided by airflow DAG
:return: dict
"""
labels = {
'dag_id': context['dag'].dag_id,
'task_id': context['task'].task_id,
'execution_date': context['ts'],
'try_number': context['ti'].try_number,
}
# In the case of sub dags, also record the parent dag id
if context['dag'].is_subdag:
labels['parent_dag_id'] = context['dag'].parent_dag.dag_id
# Ensure that label is valid for Kube,
# and if not truncate/remove invalid chars and replace with short hash.
for label_id, label in labels.items():
safe_label = pod_generator.make_safe_label_value(str(label))
labels[label_id] = safe_label
return labels
def create_pod_launcher(self) -> Type[pod_launcher.PodLauncher]:
return pod_launcher.PodLauncher(kube_client=self.client, extract_xcom=self.do_xcom_push)
def execute(self, context) -> Optional[str]:
try:
if self.in_cluster is not None:
client = kube_client.get_kube_client(
in_cluster=self.in_cluster,
cluster_context=self.cluster_context,
config_file=self.config_file,
)
else:
client = kube_client.get_kube_client(
cluster_context=self.cluster_context, config_file=self.config_file
)
self.client = client
self.pod = self.create_pod_request_obj()
self.namespace = self.pod.metadata.namespace
# Add combination of labels to uniquely identify a running pod
labels = self.create_labels_for_pod(context)
label_selector = self._get_pod_identifying_label_string(labels)
pod_list = self.client.list_namespaced_pod(self.namespace, label_selector=label_selector)
if len(pod_list.items) > 1 and self.reattach_on_restart:
raise AirflowException(
f'More than one pod running with labels: {label_selector}'
)
launcher = self.create_pod_launcher()
if len(pod_list.items) == 1:
try_numbers_match = self._try_numbers_match(context, pod_list.items[0])
final_state, remote_pod, result = self.handle_pod_overlap(
labels, try_numbers_match, launcher, pod_list.items[0]
)
else:
self.log.info("creating pod with labels %s and launcher %s", labels, launcher)
final_state, remote_pod, result = self.create_new_pod_for_operator(labels, launcher)
if final_state != State.SUCCESS:
raise AirflowException(f'Pod {self.pod.metadata.name} returned a failure: {remote_pod}')
context['task_instance'].xcom_push(key='pod_name', value=self.pod.metadata.name)
context['task_instance'].xcom_push(key='pod_namespace', value=self.namespace)
return result
except AirflowException as ex:
raise AirflowException(f'Pod Launching failed: {ex}')
def handle_pod_overlap(
self, labels: dict, try_numbers_match: bool, launcher: Any, pod: k8s.V1Pod
) -> Tuple[State, k8s.V1Pod, Optional[str]]:
"""
In cases where the Scheduler restarts while a KubernetesPodOperator task is running,
this function will either continue to monitor the existing pod or launch a new pod
based on the `reattach_on_restart` parameter.
:param labels: labels used to determine if a pod is repeated
:type labels: dict
:param try_numbers_match: do the try numbers match? Only needed for logging purposes
:type try_numbers_match: bool
:param launcher: PodLauncher
:param pod: Pod found with matching labels
"""
if try_numbers_match:
log_line = f"found a running pod with labels {labels} and the same try_number."
else:
log_line = f"found a running pod with labels {labels} but a different try_number."
# In case of failed pods, should reattach the first time, but only once
# as the task will have already failed.
if self.reattach_on_restart and not pod.metadata.labels.get("already_checked"):
log_line += " Will attach to this pod and monitor instead of starting new one"
self.log.info(log_line)
self.pod = pod
final_state, remote_pod, result = self.monitor_launched_pod(launcher, pod)
else:
log_line += f"creating pod with labels {labels} and launcher {launcher}"
self.log.info(log_line)
final_state, remote_pod, result = self.create_new_pod_for_operator(labels, launcher)
return final_state, remote_pod, result
@staticmethod
def _get_pod_identifying_label_string(labels) -> str:
filtered_labels = {label_id: label for label_id, label in labels.items() if label_id != 'try_number'}
return ','.join(label_id + '=' + label for label_id, label in sorted(filtered_labels.items()))
@staticmethod
def _try_numbers_match(context, pod) -> bool:
return pod.metadata.labels['try_number'] == context['ti'].try_number
def _set_name(self, name):
if name is None:
if self.pod_template_file or self.full_pod_spec:
return None
raise AirflowException("`name` is required unless `pod_template_file` or `full_pod_spec` is set")
validate_key(name, max_length=220)
return re.sub(r'[^a-z0-9.-]+', '-', name.lower())
def create_pod_request_obj(self) -> k8s.V1Pod:
"""
Creates a V1Pod based on user parameters. Note that a `pod` or `pod_template_file`
will supersede all other values.
"""
self.log.debug("Creating pod for KubernetesPodOperator task %s", self.task_id)
if self.pod_template_file:
self.log.debug("Pod template file found, will parse for base pod")
pod_template = pod_generator.PodGenerator.deserialize_model_file(self.pod_template_file)
if self.full_pod_spec:
pod_template = PodGenerator.reconcile_pods(pod_template, self.full_pod_spec)
elif self.full_pod_spec:
pod_template = self.full_pod_spec
else:
pod_template = k8s.V1Pod(metadata=k8s.V1ObjectMeta(name="name"))
pod = k8s.V1Pod(
api_version="v1",
kind="Pod",
metadata=k8s.V1ObjectMeta(
namespace=self.namespace,
labels=self.labels,
name=PodGenerator.make_unique_pod_id(self.name),
annotations=self.annotations,
),
spec=k8s.V1PodSpec(
node_selector=self.node_selector,
affinity=self.affinity,
tolerations=self.tolerations,
init_containers=self.init_containers,
containers=[
k8s.V1Container(
image=self.image,
name="base",
command=self.cmds,
ports=self.ports,
image_pull_policy=self.image_pull_policy,
resources=self.k8s_resources,
volume_mounts=self.volume_mounts,
args=self.arguments,
env=self.env_vars,
env_from=self.env_from,
)
],
image_pull_secrets=self.image_pull_secrets,
service_account_name=self.service_account_name,
host_network=self.hostnetwork,
security_context=self.security_context,
dns_policy=self.dnspolicy,
scheduler_name=self.schedulername,
restart_policy='Never',
priority_class_name=self.priority_class_name,
volumes=self.volumes,
),
)
pod = PodGenerator.reconcile_pods(pod_template, pod)
for secret in self.secrets:
self.log.debug("Adding secret to task %s", self.task_id)
pod = secret.attach_to_pod(pod)
if self.do_xcom_push:
self.log.debug("Adding xcom sidecar to task %s", self.task_id)
pod = xcom_sidecar.add_xcom_sidecar(pod)
return pod
def create_new_pod_for_operator(self, labels, launcher) -> Tuple[State, k8s.V1Pod, Optional[str]]:
"""
Creates a new pod and monitors for duration of task
:param labels: labels used to track pod
:param launcher: pod launcher that will manage launching and monitoring pods
:return:
"""
self.log.debug(
"Adding KubernetesPodOperator labels to pod before launch for task %s", self.task_id
)
# Merge Pod Identifying labels with labels passed to operator
self.pod.metadata.labels.update(labels)
# Add Airflow Version to the label
# And a label to identify that pod is launched by KubernetesPodOperator
self.pod.metadata.labels.update(
{
'airflow_version': airflow_version.replace('+', '-'),
'kubernetes_pod_operator': 'True',
}
)
self.log.debug("Starting pod:\n%s", yaml.safe_dump(self.pod.to_dict()))
try:
launcher.start_pod(self.pod, startup_timeout=self.startup_timeout_seconds)
final_state, remote_pod, result = launcher.monitor_pod(pod=self.pod, get_logs=self.get_logs)
except AirflowException:
if self.log_events_on_failure:
for event in launcher.read_pod_events(self.pod).items:
self.log.error("Pod Event: %s - %s", event.reason, event.message)
raise
finally:
if self.is_delete_operator_pod:
self.log.debug("Deleting pod for task %s", self.task_id)
launcher.delete_pod(self.pod)
return final_state, remote_pod, result
def patch_already_checked(self, pod: k8s.V1Pod):
"""Add an "already tried annotation to ensure we only retry once"""
pod.metadata.labels["already_checked"] = "True"
body = PodGenerator.serialize_pod(pod)
self.client.patch_namespaced_pod(pod.metadata.name, pod.metadata.namespace, body)
def monitor_launched_pod(self, launcher, pod) -> Tuple[State, k8s.V1Pod, Optional[str]]:
"""
Monitors a pod to completion that was created by a previous KubernetesPodOperator
:param launcher: pod launcher that will manage launching and monitoring pods
:param pod: podspec used to find pod using k8s API
:return:
"""
try:
(final_state, remote_pod, result) = launcher.monitor_pod(pod, get_logs=self.get_logs)
finally:
if self.is_delete_operator_pod:
launcher.delete_pod(pod)
if final_state != State.SUCCESS:
if self.log_events_on_failure:
for event in launcher.read_pod_events(pod).items:
self.log.error("Pod Event: %s - %s", event.reason, event.message)
self.patch_already_checked(pod)
raise AirflowException(f'Pod returned a failure: {final_state}')
return final_state, remote_pod, result
def on_kill(self) -> None:
if self.pod:
pod: k8s.V1Pod = self.pod
namespace = pod.metadata.namespace
name = pod.metadata.name
kwargs = {}
if self.termination_grace_period is not None:
kwargs = {"grace_period_seconds": self.termination_grace_period}
self.client.delete_namespaced_pod(name=name, namespace=namespace, **kwargs)
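# A minimal usage sketch inside a DAG (task, pod, and image names are hypothetical):
#
#   compute_pi = KubernetesPodOperator(
#       task_id="compute-pi",
#       name="compute-pi",
#       namespace="default",
#       image="perl",
#       cmds=["perl", "-Mbignum=bpi", "-wle", "print bpi(100)"],
#       get_logs=True,
#       is_delete_operator_pod=True,
#   )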
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import shutil
import tempfile
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import api as compute_api
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake
from nova import volume
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def get_fake_cache():
def _ip(ip, fixed=True, floats=None):
ip_dict = {'address': ip, 'type': 'fixed'}
if not fixed:
ip_dict['type'] = 'floating'
if fixed and floats:
ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
return ip_dict
info = [{'address': 'aa:bb:cc:dd:ee:ff',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip('192.168.0.3',
floats=['1.2.3.4',
'5.6.7.8']),
_ip('192.168.0.4')]}]}}]
if FLAGS.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
return info
def get_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the cache into instance(s) without having to create DB
entries
"""
instances = orig_func(*args, **kwargs)
if isinstance(instances, list):
for instance in instances:
instance['info_cache'] = {'network_info': get_fake_cache()}
else:
instances['info_cache'] = {'network_info': get_fake_cache()}
return instances
class CinderCloudTestCase(test.TestCase):
def setUp(self):
super(CinderCloudTestCase, self).setUp()
vol_tmpdir = tempfile.mkdtemp()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API',
volumes_dir=vol_tmpdir)
def fake_show(meh, context, id):
return {'id': id,
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available'}}
def fake_detail(_self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
fake.stub_out_image_service(self.stubs)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
fake_network.set_stub_network_methods(self.stubs)
# set up our cloud
self.cloud = cloud.CloudController()
self.flags(compute_scheduler_driver='nova.scheduler.'
'chance.ChanceScheduler')
# set up services
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
self.volume = self.start_service('volume')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.volume_api = volume.API()
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
self.stubs.Set(rpc, 'cast', rpc.call)
# make sure we can map ami-00000001/2 to a uuid in FakeImageService
db.api.s3_image_create(self.context,
'cedef40a-ed67-4d10-800e-17455edce175')
db.api.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
try:
shutil.rmtree(FLAGS.volumes_dir)
        except OSError:
pass
self.volume_api.reset_fake_api(self.context)
super(CinderCloudTestCase, self).tearDown()
fake.FakeImageService_reset()
def _stub_instance_get_with_fixed_ips(self, func_name):
orig_func = getattr(self.cloud.compute_api, func_name)
def fake_get(*args, **kwargs):
return get_instances_with_cached_ips(orig_func, *args, **kwargs)
self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
keypair_api = compute_api.KeypairAPI()
return keypair_api.create_key_pair(self.context, self.context.user_id,
name)
def test_describe_volumes(self):
"""Makes sure describe_volumes works and filters results."""
vol1 = self.cloud.create_volume(self.context,
size=1,
name='test-1',
description='test volume 1')
vol2 = self.cloud.create_volume(self.context,
size=1,
name='test-2',
description='test volume 2')
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
result = self.cloud.describe_volumes(self.context,
[vol1['volumeId']])
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(vol1['volumeId'], result['volumeSet'][0]['volumeId'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
self.cloud.delete_volume(self.context, vol2['volumeId'])
def test_create_volume_in_availability_zone(self):
"""Makes sure create_volume works when we specify an availability
zone
"""
availability_zone = 'zone1:host1'
result = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
volume_id = result['volumeId']
availabilityZone = result['availabilityZone']
self.assertEqual(availabilityZone, availability_zone)
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
self.assertEqual(result['volumeSet'][0]['availabilityZone'],
availabilityZone)
self.cloud.delete_volume(self.context, volume_id)
def test_create_volume_from_snapshot(self):
"""Makes sure create_volume works when we specify a snapshot."""
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap of vol %s'
% vol1['volumeId'])
vol2 = self.cloud.create_volume(self.context,
snapshot_id=snap['snapshotId'])
volume1_id = vol1['volumeId']
volume2_id = vol2['volumeId']
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
self.assertEqual(result['volumeSet'][1]['volumeId'], volume2_id)
self.cloud.delete_volume(self.context, volume2_id)
self.cloud.delete_snapshot(self.context, snap['snapshotId'])
self.cloud.delete_volume(self.context, volume1_id)
def test_describe_snapshots(self):
"""Makes sure describe_snapshots works and filters results."""
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap1 of vol %s' %
vol1['volumeId'])
snap2 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap2 of vol %s' %
vol1['volumeId'])
result = self.cloud.describe_snapshots(self.context)
self.assertEqual(len(result['snapshotSet']), 2)
result = self.cloud.describe_snapshots(
self.context,
snapshot_id=[snap2['snapshotId']])
self.assertEqual(len(result['snapshotSet']), 1)
self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
def test_create_snapshot(self):
"""Makes sure create_snapshot works."""
availability_zone = 'zone1:host1'
result = self.cloud.describe_snapshots(self.context)
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap1 of vol %s' %
vol1['volumeId'])
snapshot_id = snap1['snapshotId']
result = self.cloud.describe_snapshots(self.context)
self.assertEqual(len(result['snapshotSet']), 1)
self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
def test_delete_snapshot(self):
"""Makes sure delete_snapshot works."""
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap1 of vol %s' %
vol1['volumeId'])
snapshot_id = snap1['snapshotId']
result = self.cloud.delete_snapshot(self.context,
snapshot_id=snapshot_id)
self.assertTrue(result)
self.cloud.delete_volume(self.context, vol1['volumeId'])
def _block_device_mapping_create(self, instance_uuid, mappings):
volumes = []
for bdm in mappings:
db.block_device_mapping_create(self.context, bdm)
if 'volume_id' in bdm:
values = {'id': bdm['volume_id']}
for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
('snapshot_size', 'volume_size'),
('delete_on_termination',
'delete_on_termination')]:
if bdm_key in bdm:
values[vol_key] = bdm[bdm_key]
kwargs = {'name': 'bdmtest-volume',
'description': 'bdm test volume description',
'status': 'available',
'host': self.volume.host,
'size': 1,
'attach_status': 'detached',
'volume_id': values['id']}
vol = self.volume_api.create_with_kwargs(self.context,
**kwargs)
if 'snapshot_id' in values:
self.volume_api.create_snapshot(self.context,
vol,
'snapshot-bdm',
'fake snap for bdm tests',
values['snapshot_id'])
self.volume_api.attach(self.context, vol,
instance_uuid, bdm['device_name'])
volumes.append(vol)
return volumes
def _setUpBlockDeviceMapping(self):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1 = db.instance_create(self.context,
{'image_ref': image_uuid,
'instance_type_id': 1,
'root_device_name': '/dev/sdb1'})
inst2 = db.instance_create(self.context,
{'image_ref': image_uuid,
'instance_type_id': 1,
'root_device_name': '/dev/sdc1'})
instance_uuid = inst1['uuid']
mappings0 = [
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb1',
'snapshot_id': '1',
'volume_id': '2'},
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb2',
'volume_id': '3',
'volume_size': 1},
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb3',
'delete_on_termination': True,
'snapshot_id': '4',
'volume_id': '5'},
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb4',
'delete_on_termination': False,
'snapshot_id': '6',
'volume_id': '7'},
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb5',
'snapshot_id': '8',
'volume_id': '9',
'volume_size': 0},
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb6',
'snapshot_id': '10',
'volume_id': '11',
'volume_size': 1},
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb7',
'no_device': True},
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb8',
'virtual_name': 'swap'},
{'instance_uuid': instance_uuid,
'device_name': '/dev/sdb9',
'virtual_name': 'ephemeral3'}]
volumes = self._block_device_mapping_create(instance_uuid, mappings0)
return (inst1, inst2, volumes)
def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
for vol in volumes:
self.volume_api.delete(self.context, vol)
for uuid in (inst1['uuid'], inst2['uuid']):
for bdm in db.block_device_mapping_get_all_by_instance(
self.context, uuid):
db.block_device_mapping_destroy(self.context, bdm['id'])
db.instance_destroy(self.context, inst2['uuid'])
db.instance_destroy(self.context, inst1['uuid'])
_expected_instance_bdm1 = {
'instanceId': 'i-00000001',
'rootDeviceName': '/dev/sdb1',
'rootDeviceType': 'ebs'}
_expected_block_device_mapping0 = [
{'deviceName': '/dev/sdb1',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '2',
}},
{'deviceName': '/dev/sdb2',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '3',
}},
{'deviceName': '/dev/sdb3',
'ebs': {'status': 'in-use',
'deleteOnTermination': True,
'volumeId': '5',
}},
{'deviceName': '/dev/sdb4',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '7',
}},
{'deviceName': '/dev/sdb5',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '9',
}},
{'deviceName': '/dev/sdb6',
'ebs': {'status': 'in-use',
'deleteOnTermination': False,
'volumeId': '11', }}]
# NOTE(yamahata): swap/ephemeral device case isn't supported yet.
_expected_instance_bdm2 = {
'instanceId': 'i-00000002',
'rootDeviceName': '/dev/sdc1',
'rootDeviceType': 'instance-store'}
def test_format_instance_bdm(self):
(inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
result = {}
self.cloud._format_instance_bdm(self.context, inst1['uuid'],
'/dev/sdb1', result)
self.assertSubDictMatch(
{'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
result)
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = {}
self.cloud._format_instance_bdm(self.context, inst2['uuid'],
'/dev/sdc1', result)
self.assertSubDictMatch(
{'rootDeviceType': self._expected_instance_bdm2['rootDeviceType']},
result)
self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
def _assertInstance(self, instance_id):
ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
result = self.cloud.describe_instances(self.context,
instance_id=[ec2_instance_id])
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
result = result['instancesSet'][0]
self.assertEqual(result['instanceId'], ec2_instance_id)
return result
def _assertEqualBlockDeviceMapping(self, expected, result):
self.assertEqual(len(expected), len(result))
for x in expected:
found = False
for y in result:
if x['deviceName'] == y['deviceName']:
self.assertSubDictMatch(x, y)
found = True
break
self.assertTrue(found)
def test_describe_instances_bdm(self):
"""Make sure describe_instances works with root_device_name and
block device mappings
"""
(inst1, inst2, volumes) = self._setUpBlockDeviceMapping()
result = self._assertInstance(inst1['id'])
self.assertSubDictMatch(self._expected_instance_bdm1, result)
self._assertEqualBlockDeviceMapping(
self._expected_block_device_mapping0, result['blockDeviceMapping'])
result = self._assertInstance(inst2['id'])
self.assertSubDictMatch(self._expected_instance_bdm2, result)
self._tearDownBlockDeviceMapping(inst1, inst2, volumes)
def assertDictListUnorderedMatch(self, L1, L2, key):
self.assertEqual(len(L1), len(L2))
for d1 in L1:
self.assertTrue(key in d1)
for d2 in L2:
self.assertTrue(key in d2)
if d1[key] == d2[key]:
self.assertDictMatch(d1, d2)
def _setUpImageSet(self, create_volumes_and_snapshots=False):
mappings1 = [
{'device': '/dev/sda1', 'virtual': 'root'},
{'device': 'sdb0', 'virtual': 'ephemeral0'},
{'device': 'sdb1', 'virtual': 'ephemeral1'},
{'device': 'sdb2', 'virtual': 'ephemeral2'},
{'device': 'sdb3', 'virtual': 'ephemeral3'},
{'device': 'sdb4', 'virtual': 'ephemeral4'},
{'device': 'sdc0', 'virtual': 'swap'},
{'device': 'sdc1', 'virtual': 'swap'},
{'device': 'sdc2', 'virtual': 'swap'},
{'device': 'sdc3', 'virtual': 'swap'},
{'device': 'sdc4', 'virtual': 'swap'}]
block_device_mapping1 = [
{'device_name': '/dev/sdb1', 'snapshot_id': 01234567},
{'device_name': '/dev/sdb2', 'volume_id': 01234567},
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
{'device_name': '/dev/sdc2', 'volume_id': 12345678},
{'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
{'device_name': '/dev/sdc4', 'no_device': True}]
image1 = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available',
'mappings': mappings1,
'block_device_mapping': block_device_mapping1,
}
}
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
'snapshot_id': 01234567}]
image2 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fake_name',
'properties': {
'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'type': 'machine',
'root_device_name': '/dev/sdb1',
'mappings': mappings2,
'block_device_mapping': block_device_mapping2}}
def fake_show(meh, context, image_id):
_images = [copy.deepcopy(image1), copy.deepcopy(image2)]
for i in _images:
if str(i['id']) == str(image_id):
return i
raise exception.ImageNotFound(image_id=image_id)
def fake_detail(meh, context):
return [copy.deepcopy(image1), copy.deepcopy(image2)]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
volumes = []
snapshots = []
if create_volumes_and_snapshots:
for bdm in block_device_mapping1:
if 'volume_id' in bdm:
vol = self._volume_create(bdm['volume_id'])
volumes.append(vol['id'])
if 'snapshot_id' in bdm:
kwargs = {'volume_id': 76543210,
'volume_size': 1,
'name': 'test-snap',
'description': 'test snap desc',
'snap_id': bdm['snapshot_id'],
'status': 'available'}
snap = self.volume_api.create_snapshot_with_kwargs(
self.context, **kwargs)
snapshots.append(snap['id'])
return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
self.assertEqual(1, len(result['imagesSet']))
result = result['imagesSet'][0]
self.assertTrue('rootDeviceType' in result)
self.assertEqual(result['rootDeviceType'], root_device_type)
self.assertTrue('rootDeviceName' in result)
self.assertEqual(result['rootDeviceName'], root_device_name)
self.assertTrue('blockDeviceMapping' in result)
return result
_expected_root_device_name1 = '/dev/sda1'
# NOTE(yamahata): noDevice doesn't make sense when returning mapping
# It makes sense only when user overriding existing
# mapping.
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
'snap-00053977'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
'vol-00053977'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
# {'deviceName': '/dev/sdb4', 'noDevice': True},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
'snap-00bc614e'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
'vol-00bc614e'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
# {'deviceName': '/dev/sdc4', 'noDevice': True}
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
'ebs': {'snapshotId': 'snap-00053977'}}]
def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def _restart_compute_service(self, periodic_interval=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval:
self.compute = self.start_service(
'compute', periodic_interval=periodic_interval)
else:
self.compute = self.start_service('compute')
def _volume_create(self, volume_id=None):
kwargs = {'name': 'test-volume',
'description': 'test volume description',
'status': 'available',
'host': self.volume.host,
'size': 1,
'attach_status': 'detached'}
if volume_id:
kwargs['volume_id'] = volume_id
return self.volume_api.create_with_kwargs(self.context, **kwargs)
#return db.volume_create(self.context, kwargs)
def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
def _assert_volume_detached(self, vol):
self.assertEqual(vol['instance_uuid'], None)
self.assertEqual(vol['mountpoint'], None)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
"""Make sure run instance with block device mapping works"""
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol2 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
'delete_on_termination': False},
{'device_name': '/dev/sdc',
'volume_id': vol2_uuid,
'delete_on_termination': True},
]}
ec2_instance_id = self._run_instance(**kwargs)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(str(vol['id']) == str(vol1_uuid) or
str(vol['id']) == str(vol2_uuid))
if(str(vol['id']) == str(vol1_uuid)):
self.volume_api.attach(self.context, vol,
instance_uuid, '/dev/sdb')
elif(str(vol['id']) == str(vol2_uuid)):
self.volume_api.attach(self.context, vol,
instance_uuid, '/dev/sdc')
vol = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
vol = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
vol = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
self.cloud.start_instances(self.context, [ec2_instance_id])
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 2)
for vol in vols:
self.assertTrue(str(vol['id']) == str(vol1_uuid) or
str(vol['id']) == str(vol2_uuid))
self.assertTrue(vol['mountpoint'] == '/dev/sdb' or
vol['mountpoint'] == '/dev/sdc')
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
        # Terminate the instance, then verify that its volumes were not deleted.
self.cloud.terminate_instances(self.context, [ec2_instance_id])
admin_ctxt = context.get_admin_context(read_deleted="no")
vol = self.volume_api.get(admin_ctxt, vol2_uuid)
self.assertFalse(vol['deleted'])
self.cloud.delete_volume(self.context, vol1['volumeId'])
self._restart_compute_service()
def test_stop_with_attached_volume(self):
"""Make sure attach info is reflected to block device mapping"""
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol2 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/sdb',
'volume_id': vol1_uuid,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance(**kwargs)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 1)
for vol in vols:
self.assertEqual(vol['id'], vol1_uuid)
self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
vol = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_detached(vol)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.cloud.compute_api.attach_volume(self.context,
instance,
volume_id=vol2_uuid,
device='/dev/sdc')
vol1 = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_attached(vol1, instance_uuid, '/dev/sdb')
vol2 = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
self.cloud.compute_api.detach_volume(self.context,
volume_id=vol1_uuid)
vol1 = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_detached(vol1)
result = self.cloud.stop_instances(self.context, [ec2_instance_id])
self.assertTrue(result)
vol2 = self.volume_api.get(self.context, vol2_uuid)
self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
self.cloud.start_instances(self.context, [ec2_instance_id])
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 1)
self._assert_volume_detached(vol1)
vol1 = self.volume_api.get(self.context, vol1_uuid)
self._assert_volume_detached(vol1)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
return result['snapshotId']
def test_run_with_snapshot(self):
"""Makes sure run/stop/start instance with snapshot works."""
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
availability_zone=availability_zone)
snap1 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-1',
description='test snap of vol %s' %
vol1['volumeId'])
snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])
snap2 = self.cloud.create_snapshot(self.context,
vol1['volumeId'],
name='snap-2',
description='test snap of vol %s' %
vol1['volumeId'])
snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1,
'block_device_mapping': [{'device_name': '/dev/vdb',
'snapshot_id': snap1_uuid,
'delete_on_termination': False, },
{'device_name': '/dev/vdc',
'snapshot_id': snap2_uuid,
'delete_on_termination': True}]}
ec2_instance_id = self._run_instance(**kwargs)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
ec2_instance_id)
vols = self.volume_api.get_all(self.context)
vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
self.assertEqual(len(vols), 2)
vol1_id = None
vol2_id = None
for vol in vols:
snapshot_uuid = vol['snapshot_id']
if snapshot_uuid == snap1_uuid:
vol1_id = vol['id']
mountpoint = '/dev/vdb'
elif snapshot_uuid == snap2_uuid:
vol2_id = vol['id']
mountpoint = '/dev/vdc'
else:
self.fail()
self._assert_volume_attached(vol, instance_uuid, mountpoint)
#Just make sure we found them
self.assertTrue(vol1_id)
self.assertTrue(vol2_id)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
admin_ctxt = context.get_admin_context(read_deleted="no")
vol = self.volume_api.get(admin_ctxt, vol1_id)
self._assert_volume_detached(vol)
self.assertFalse(vol['deleted'])
#db.volume_destroy(self.context, vol1_id)
##admin_ctxt = context.get_admin_context(read_deleted="only")
##vol = db.volume_get(admin_ctxt, vol2_id)
##self.assertTrue(vol['deleted'])
#for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id):
# self.cloud.delete_snapshot(self.context, snapshot_id)
def test_create_image(self):
"""Make sure that CreateImage works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': FLAGS.default_instance_type,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
self._restart_compute_service()
@staticmethod
def _fake_bdm_get(ctxt, id):
return [{'volume_id': 87654321,
'snapshot_id': None,
'no_device': None,
'virtual_name': None,
'delete_on_termination': True,
'device_name': '/dev/sdh'},
{'volume_id': None,
'snapshot_id': 98765432,
'no_device': None,
'virtual_name': None,
'delete_on_termination': True,
'device_name': '/dev/sdi'},
{'volume_id': None,
'snapshot_id': None,
'no_device': True,
'virtual_name': None,
'delete_on_termination': None,
'device_name': None},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral0',
'delete_on_termination': None,
'device_name': '/dev/sdb'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral1',
'delete_on_termination': None,
'device_name': '/dev/sdd'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'virtual_name': 'ephemeral2',
'delete_on_termination': None,
'device_name': '/dev/sd3'},
]
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains classes and functions that are used by multiple modules
in this subpackage.
"""
import itertools
import logging
import warnings
from rmgpy.data.base import LogicNode
from rmgpy.reaction import Reaction
from rmgpy.molecule import Group, Molecule
from rmgpy.species import Species
from rmgpy.exceptions import DatabaseError, KineticsError
################################################################################
def saveEntry(f, entry):
"""
Save an `entry` in the kinetics database by writing a string to
the given file object `f`.
"""
from rmgpy.cantherm.output import prettify
def sortEfficiencies(efficiencies0):
efficiencies = {}
for mol, eff in efficiencies0.iteritems():
if isinstance(mol, str):
# already in SMILES string format
smiles = mol
else:
smiles = mol.toSMILES()
efficiencies[smiles] = eff
keys = efficiencies.keys()
keys.sort()
return [(key, efficiencies[key]) for key in keys]
f.write('entry(\n')
f.write(' index = {0:d},\n'.format(entry.index))
if entry.label != '':
f.write(' label = "{0}",\n'.format(entry.label))
    # Entries for kinetics rules, libraries, training reactions
    # and depositories will have a Reaction object for their item
if isinstance(entry.item, Reaction):
        # Write out additional data if the entry comes from a depository or library;
        # kinetics rules would have Group objects for their reactants instead of Species
if isinstance(entry.item.reactants[0], Species):
# Add degeneracy if the reaction is coming from a depository or kinetics library
f.write(' degeneracy = {0:.1f},\n'.format(entry.item.degeneracy))
if entry.item.duplicate:
f.write(' duplicate = {0!r},\n'.format(entry.item.duplicate))
if not entry.item.reversible:
f.write(' reversible = {0!r},\n'.format(entry.item.reversible))
    # Entries for groups will have a Group or LogicNode for their item
elif isinstance(entry.item, Group):
f.write(' group = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList())
f.write('""",\n')
elif isinstance(entry.item, LogicNode):
f.write(' group = "{0}",\n'.format(entry.item))
else:
raise DatabaseError("Encountered unexpected item of type {0} while saving database.".format(entry.item.__class__))
# Write kinetics
if isinstance(entry.data, str):
f.write(' kinetics = "{0}",\n'.format(entry.data))
elif entry.data is not None:
efficiencies = None
if hasattr(entry.data, 'efficiencies'):
efficiencies = entry.data.efficiencies
entry.data.efficiencies = dict(sortEfficiencies(entry.data.efficiencies))
kinetics = prettify(repr(entry.data))
kinetics = ' kinetics = {0},\n'.format(kinetics.replace('\n', '\n '))
f.write(kinetics)
if hasattr(entry.data, 'efficiencies'):
entry.data.efficiencies = efficiencies
else:
f.write(' kinetics = None,\n')
# Write reference
if entry.reference is not None:
reference = entry.reference.toPrettyRepr()
lines = reference.splitlines()
f.write(' reference = {0}\n'.format(lines[0]))
for line in lines[1:-1]:
f.write(' {0}\n'.format(line))
        f.write('    ),\n')
if entry.referenceType != "":
f.write(' referenceType = "{0}",\n'.format(entry.referenceType))
if entry.rank is not None:
f.write(' rank = {0},\n'.format(entry.rank))
if entry.shortDesc.strip() !='':
f.write(' shortDesc = u"""')
try:
f.write(entry.shortDesc.encode('utf-8'))
except:
f.write(entry.shortDesc.strip().encode('ascii', 'ignore')+ "\n")
f.write('""",\n')
if entry.longDesc.strip() !='':
f.write(' longDesc = \n')
f.write('u"""\n')
try:
f.write(entry.longDesc.strip().encode('utf-8') + "\n")
except:
f.write(entry.longDesc.strip().encode('ascii', 'ignore')+ "\n")
f.write('""",\n')
f.write(')\n\n')
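# Hedged illustration of the text emitted by saveEntry above (field values are
# hypothetical; only fields written by the f.write calls are shown):
#
#   entry(
#       index = 1,
#       label = "H + C2H4 <=> C2H5",
#       degeneracy = 4.0,
#       kinetics = Arrhenius(...),
#       rank = 3,
#       shortDesc = u"""Example short description""",
#   )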
def filter_reactions(reactants, products, reactionList):
"""
Remove any reactions from the given `reactionList` whose reactants do
not involve all the given `reactants` or whose products do not involve
all the given `products`. This method checks both forward and reverse
directions, and only filters out reactions that don't match either.
    Reactants and products can be either Molecule or Species objects.
"""
warnings.warn("The filter_reactions method is no longer used and may be removed in a future version.", DeprecationWarning)
# Convert from molecules to species and generate resonance isomers.
reactants = ensure_species(reactants, resonance=True)
products = ensure_species(products, resonance=True)
reactions = reactionList[:]
for reaction in reactionList:
# Forward direction
reactants0 = [r for r in reaction.reactants]
for reactant in reactants:
for reactant0 in reactants0:
if reactant.isIsomorphic(reactant0):
reactants0.remove(reactant0)
break
products0 = [p for p in reaction.products]
for product in products:
for product0 in products0:
if product.isIsomorphic(product0):
products0.remove(product0)
break
forward = not (len(reactants0) != 0 or len(products0) != 0)
# Reverse direction
reactants0 = [r for r in reaction.products]
for reactant in reactants:
for reactant0 in reactants0:
if reactant.isIsomorphic(reactant0):
reactants0.remove(reactant0)
break
products0 = [p for p in reaction.reactants]
for product in products:
for product0 in products0:
if product.isIsomorphic(product0):
products0.remove(product0)
break
reverse = not (len(reactants0) != 0 or len(products0) != 0)
if not forward and not reverse:
reactions.remove(reaction)
return reactions
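# Hedged illustration with hypothetical species A, B, C, D: given reactants
# [A, B] and products [C], a reaction C -> A + B is kept because it matches in
# the reverse direction, while A + D -> C is dropped because D is never
# matched in either direction.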
def ensure_species(input_list, resonance=False, keepIsomorphic=False):
"""
Given an input list of molecules or species, return a list with only
species objects.
"""
output_list = []
for item in input_list:
if isinstance(item, Molecule):
new_item = Species(molecule=[item])
elif isinstance(item, Species):
new_item = item
else:
raise TypeError('Only Molecule or Species objects can be handled.')
if resonance:
new_item.generate_resonance_structures(keepIsomorphic=keepIsomorphic)
output_list.append(new_item)
return output_list
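# Hedged example (assumes the Molecule constructor accepts a SMILES keyword,
# which is not shown in this module):
#
#   spcs = ensure_species([Molecule(SMILES='C=C[CH2]')], resonance=True)
#
# spcs[0] is a Species wrapping the input molecule together with its resonance
# structures; Species inputs are passed through unchanged apart from the
# optional resonance generation.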
def generate_molecule_combos(input_species):
"""
Generate combinations of molecules from the given species objects.
"""
if len(input_species) == 1:
combos = [(mol,) for mol in input_species[0].molecule]
elif len(input_species) == 2:
combos = itertools.product(input_species[0].molecule, input_species[1].molecule)
else:
raise ValueError('Reaction generation can be done for 1 or 2 species, not {0}.'.format(len(input_species)))
return combos
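# Hedged illustration with hypothetical resonance structures: for two species
# whose molecule lists are [m1a, m1b] and [m2a], the cartesian product gives
# [(m1a, m2a), (m1b, m2a)]; for a single species the combos are one-tuples
# [(m1a,), (m1b,)].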
def ensure_independent_atom_ids(input_species, resonance=True):
"""
Given a list or tuple of :class:`Species` objects, ensure that atom ids are
independent across all of the species. Optionally, the `resonance` argument
can be set to False to not generate resonance structures.
Modifies the input species in place, nothing is returned.
"""
# Method to check that all species' atom ids are different
def independent_ids():
num_atoms = 0
IDs = []
for species in input_species:
num_atoms += len(species.molecule[0].atoms)
IDs.extend([atom.id for atom in species.molecule[0].atoms])
num_ID = len(set(IDs))
return num_ID == num_atoms
# If they are not all different, reassign ids and remake resonance structures
if not independent_ids():
logging.debug('identical atom ids found between species. regenerating')
for species in input_species:
mol = species.molecule[0]
mol.assignAtomIDs()
species.molecule = [mol]
# Remake resonance structures with new labels
if resonance:
species.generate_resonance_structures(keepIsomorphic=True)
elif resonance:
# IDs are already independent, generate resonance structures if needed
for species in input_species:
species.generate_resonance_structures(keepIsomorphic=True)
def find_degenerate_reactions(rxnList, same_reactants=None, kinetics_database=None, kinetics_family=None):
"""
    Given a list of Reaction objects containing Molecule objects, this method
    removes degenerate reactions and increments the degeneracy of the
    remaining reaction objects. For multiple transition states, this method
    marks them as separate duplicate reactions. A new list of reactions with
    updated degeneracies is returned.
This algorithm used to exist in family.__generateReactions, but was moved
here because it didn't have any family dependence.
"""
# We want to sort all the reactions into sublists composed of isomorphic reactions
# with degenerate transition states
rxnSorted = []
for rxn0 in rxnList:
# find resonance structures for rxn0
ensure_species_in_reaction(rxn0)
if len(rxnSorted) == 0:
# This is the first reaction, so create a new sublist
rxnSorted.append([rxn0])
else:
# Loop through each sublist, which represents a unique reaction
for rxnList1 in rxnSorted:
# Try to determine if the current rxn0 is identical or isomorphic to any reactions in the sublist
isomorphic = False
identical = False
sameTemplate = False
for rxn in rxnList1:
isomorphic = rxn0.isIsomorphic(rxn, checkIdentical=False, checkTemplateRxnProducts=True)
if not isomorphic:
identical = False
else:
identical = rxn0.isIsomorphic(rxn, checkIdentical=True, checkTemplateRxnProducts=True)
sameTemplate = frozenset(rxn.template) == frozenset(rxn0.template)
if not isomorphic:
# a different product was found, go to next list
break
elif not sameTemplate:
# a different transition state was found, mark as duplicate and
# go to the next sublist
rxn.duplicate = True
rxn0.duplicate = True
break
elif identical:
# An exact copy of rxn0 is already in our list, so we can move on to the next rxn
break
else: # sameTemplate and isomorphic but not identical
# This is the right sublist for rxn0, but continue to see if there is an identical rxn
continue
else:
# We did not break, so this is the right sublist, but there is no identical reaction
# This means that we should add rxn0 to the sublist as a degenerate rxn
rxnList1.append(rxn0)
if isomorphic and sameTemplate:
# We already found the right sublist, so we can move on to the next rxn
break
else:
# We did not break, which means that there was no isomorphic sublist, so create a new one
rxnSorted.append([rxn0])
rxnList = []
for rxnList1 in rxnSorted:
# Collapse our sorted reaction list by taking one reaction from each sublist
rxn = rxnList1[0]
# The degeneracy of each reaction is the number of reactions that were in the sublist
rxn.degeneracy = sum([reaction0.degeneracy for reaction0 in rxnList1])
rxnList.append(rxn)
for rxn in rxnList:
if rxn.isForward:
reduce_same_reactant_degeneracy(rxn, same_reactants)
else:
# fix the degeneracy of (not ownReverse) reactions found in the backwards direction
try:
family = kinetics_family or kinetics_database.families[rxn.family]
except AttributeError:
from rmgpy.data.rmg import getDB
family = getDB('kinetics').families[rxn.family]
if not family.ownReverse:
rxn.degeneracy = family.calculateDegeneracy(rxn)
return rxnList
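# Hedged illustration: if a family generates two isomorphic reactions from the
# same template (e.g. one per equivalent hydrogen atom), they land in the same
# sublist above and collapse into a single reaction whose degeneracy is the
# sum of the individual degeneracies; an isomorphic reaction found through a
# *different* template is kept as well, with both copies flagged as duplicate.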
def ensure_species_in_reaction(reaction):
"""
Modifies a reaction holding Molecule objects to a reaction holding
Species objects. Generates resonance structures for reaction products.
"""
    # if they are already Species objects, return None (nothing to convert)
if isinstance(reaction.reactants[0], Species):
return None
# obtain species with all resonance isomers
if reaction.isForward:
reaction.reactants = ensure_species(reaction.reactants, resonance=False)
reaction.products = ensure_species(reaction.products, resonance=True, keepIsomorphic=True)
else:
reaction.reactants = ensure_species(reaction.reactants, resonance=True, keepIsomorphic=True)
reaction.products = ensure_species(reaction.products, resonance=False)
# convert reaction.pairs object to species
new_pairs = []
for reactant, product in reaction.pairs:
new_pair = []
for reactant0 in reaction.reactants:
if reactant0.isIsomorphic(reactant):
new_pair.append(reactant0)
break
for product0 in reaction.products:
if product0.isIsomorphic(product):
new_pair.append(product0)
break
new_pairs.append(new_pair)
reaction.pairs = new_pairs
try:
ensure_species_in_reaction(reaction.reverse)
except AttributeError:
pass
def reduce_same_reactant_degeneracy(reaction, same_reactants=None):
"""
    This method reduces the degeneracy of reactions with identical reactants,
    since the translational component of the transition state is already taken
    into account (so swapping identical reactants does not give a new reaction).
    This correction comes from the work of Bishop and Laidler in 1965.
"""
if len(reaction.reactants) == 2 and (
(reaction.isForward and same_reactants) or
reaction.reactants[0].isIsomorphic(reaction.reactants[1])
):
reaction.degeneracy *= 0.5
logging.debug('Degeneracy of reaction {} was decreased by 50% to {} since the reactants are identical'.format(reaction, reaction.degeneracy))
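# Worked example (hedged, hypothetical reaction): a self-reaction A + A ->
# products found in the forward direction with degeneracy 2 has its degeneracy
# halved to 1.0, because swapping the two identical reactant copies does not
# describe a new collision.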
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import wx
import json
import subprocess
import threading
import sys
def square_to_coord(square):
bottom_corner = (480, 0)
unit = 60
squarex = square[0]
squarey = square[1]-1
newx = (squarex*unit) - 60
newy = bottom_corner[0] - (unit + squarey*unit)
return(newx, newy)
def coord_to_square(coord):
x = coord[0]
y = coord[1]
squarex = 1 + int(x/60)
squarey = 8 - int(y/60)
return (squarex, squarey)
def square_to_string(square):
x = str(square[0])
y = str(square[1])
return x+y
def string_to_square(string):
return (int(string[0]), int(string[1]))
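# Round-trip sanity check (hedged illustration; values chosen for clarity):
# pixel coordinate (0, 0) is the top-left corner of the 480x480 board and maps
# to square (1, 8), and converting back recovers the same pixel origin.
#
#   assert coord_to_square((0, 0)) == (1, 8)
#   assert square_to_coord((1, 8)) == (0, 0)
#   assert square_to_string((1, 8)) == '18'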
class ChessBoard(wx.Frame):
def __init__(self, parent, title):
self.engine = subprocess.Popen('./carl.py',
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
bufsize=0, universal_newlines=True)
self.engine.stdin.write('{"command": "newGame", "humanCol": "white"}\n')
self.engine.stdin.flush()
self.activepiece = None
self.winner = None
self.ai_plays = 'black'
self.whose_turn = 'white'
self.filenames = {'wp': 'png/Chess_wpawn.png',
'bp': 'png/Chess_bpawn.png',
'wK': 'png/Chess_wking.png',
'bK': 'png/Chess_bking.png',
'wQ': 'png/Chess_wqueen.png',
'bQ': 'png/Chess_bqueen.png',
'wB': 'png/Chess_wbishop.png',
'bB': 'png/Chess_bbishop.png',
'wN': 'png/Chess_wknight.png',
'bN': 'png/Chess_bknight.png',
'wR': 'png/Chess_wrook.png',
'bR': 'png/Chess_brook.png'}
thread = threading.Thread(target=self.run)
thread.setDaemon(True)
thread.start()
size=(480,520)
wx.Frame.__init__(self, parent, title=title, size=size)
self.SetMinSize(size)
self.SetMaxSize(size)
# self.CreateStatusBar()
self.chess_panel = wx.Panel(self)
# menu:
filemenu = wx.Menu()
menuNewGameMP = filemenu.Append(wx.ID_ANY, "&New Game (Human vs Human)", "Clear the board and start again")
menuNewGameWhite = filemenu.Append(wx.ID_ANY, "&New Game (White vs Computer)", "Clear the board and start again")
menuNewGameBlack = filemenu.Append(wx.ID_ANY, "&New Game (Black vs Computer)", "Clear the board and start again")
menuAbout = filemenu.Append(wx.ID_ABOUT, "&About", "Information about this program")
menuUndo = filemenu.Append(wx.ID_ANY, "&Undo", "Go back one move")
menuAIMove = filemenu.Append(wx.ID_ANY, "&AI Move", "AI makes the best move at this position")
menuExit = filemenu.Append(wx.ID_EXIT, "E&xit", "Terminate the program")
menuBar = wx.MenuBar()
menuBar.Append(filemenu, "&File")
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
self.Bind(wx.EVT_MENU, self.OnNewGameMP, menuNewGameMP)
self.Bind(wx.EVT_MENU, self.OnNewGameWhite, menuNewGameWhite)
self.Bind(wx.EVT_MENU, self.OnNewGameBlack, menuNewGameBlack)
self.Bind(wx.EVT_MENU, self.OnRewind, menuUndo)
self.Bind(wx.EVT_MENU, self.OnAIMove, menuAIMove)
self.chess_panel.Bind(wx.EVT_LEFT_DOWN, self.SquareClick)
self.chess_panel.Bind(wx.EVT_RIGHT_DOWN, self.SquareRightClick)
self.Centre()
self.Show(True)
def run(self):
while True:
x = self.engine.stdout.readline()
print x
wx.CallAfter(self.process_engine, x)
def process_engine(self, jsonstring):
print "received json message from engine: " + jsonstring
try:
jsondict = json.loads(jsonstring)
except Exception as x:
print "error: %s" % x
sys.exit()
message = jsondict[u'message']
if message == 'boardState':
self.boardstate = jsondict['board']
if jsondict['winner'] == 'None':
self.whose_turn = jsondict['turn']
self.DrawBoard(self.boardstate)
else:
self.DrawBoard(self.boardstate)
self.winner = jsondict['winner']
dlg = wx.MessageDialog(self, "Game over: %s wins!" % self.winner.capitalize())
dlg.ShowModal()
dlg.Destroy()
self.DrawBoard(self.boardstate)
# dlg = wx.MessageDialog(self, "%s wins" % self.winner.capitalize())
# dlg.ShowModal()
# dlg.Destroy()
elif message == 'pieceMoves':
piece_pos = jsondict['piecePos']
movelist = jsondict['moves']
squares_to_highlight = [string_to_square(piece_pos)]
for square in movelist:
squares_to_highlight.append(string_to_square(square))
self.HighlightSquares(squares_to_highlight)
# self.HighlightMoves ?
# elif message == 'gameOver':
# self.winner = jsondict['winner']
# dlg = wx.MessageDialog(self, "Game over: %s wins!" % self.winner.capitalize())
# dlg.ShowModal()
# dlg.Destroy()
# self.Close(True)
#print "%s wins" % self.winner.capitalize()
elif message == 'console':
print jsondict['statement']
def HighlightSquares(self, squares):
self.DrawBoard(self.boardstate)
for square in squares:
dc = wx.PaintDC(self.chess_panel)
dc.SetPen(wx.Pen('#000000'))
r, c = square[0]-1, 8-square[1]
dc.SetBrush(wx.Brush('gold'))
# if (square[0] + square[1]) % 2 == 0:
# dc.SetBrush(wx.Brush('gold'))
# else:
# dc.SetBrush(wx.Brush('gold'))
dc.DrawRectangle(r*60, c*60, 60, 60)
piece = self.boardstate[str(square[0])+str(square[1])]
self.DrawPiece(square, piece)
def SquareClick(self, e):
if self.winner is None:
print "Click detected at %s" % e.GetPosition()
clickpos = e.GetPosition()
clicksquare = coord_to_square(clickpos)
active_turn = self.whose_turn[0]
piece = self.boardstate[square_to_string(clicksquare)]
if piece != 'None' and piece[0] == active_turn:
self.activepiece = clicksquare
jsonstring = '{"command": "getMoves", "piecePos": "%s"}\n' % square_to_string(clicksquare)
self.engine.stdin.write(jsonstring)
elif self.activepiece is not None:
jsonstring = '{"command": "sendMove", "piecePos": "%s", "newPos": "%s"}\n' % \
(square_to_string(self.activepiece), square_to_string(clicksquare))
print jsonstring
self.activepiece = None
self.DrawBoard(self.boardstate)
self.engine.stdin.write(jsonstring)
# ai makes a move:
#if self.whose_turn == self.ai_plays:
#self.engine.stdin.write('{"command": "makeBestMove"}\n')
else:
print "Click detected at %s, but the game is over" % e.GetPosition()
def SquareRightClick(self, e):
print "Click detected at %s" % e.GetPosition()
clickpos = e.GetPosition()
clicksquare = coord_to_square(clickpos)
        if self.GetStatusBar():
            self.SetStatusText(str(clicksquare))
piece = self.boardstate[square_to_string(clicksquare)]
if piece != 'None':
self.activepiece = clicksquare
jsonstring = '{"command": "getStats", "piecePos": "%s"}\n' % square_to_string(clicksquare)
# self.engine.stdin.write(jsonstring)
elif self.activepiece is not None:
jsonstring = '{"command": "sendMove", "piecePos": "%s", "newPos": "%s"}\n' % \
(square_to_string(self.activepiece), square_to_string(clicksquare))
print jsonstring
self.activepiece = None
self.DrawBoard(self.boardstate)
self.engine.stdin.write(jsonstring)
def DrawPiece(self, pos, piece):
if piece != 'None':
coord = square_to_coord(pos)
png = wx.Image(self.filenames[piece], wx.BITMAP_TYPE_ANY).ConvertToBitmap()
dc = wx.PaintDC(self.chess_panel)
dc.DrawBitmap(png, coord[0], coord[1], True)
def DrawBoard(self, boardstate):
dc = wx.PaintDC(self.chess_panel)
dc.SetPen(wx.Pen('#000000'))
for r in range(0, 8):
for c in range(0, 8):
if ((r+1)+(c+1)) % 2 == 0:
dc.SetBrush(wx.Brush('tan'))
else:
dc.SetBrush(wx.Brush('sienna'))
dc.DrawRectangle(r*60, c*60, 60, 60)
for x, y in boardstate:
piece = boardstate[x+y]
if piece != 'None':
pos = (int(x), int(y))
self.DrawPiece(pos, piece)
# non game methods:
def OnAbout(self, e):
dlg = wx.MessageDialog(self, "chess ai, routinely loses")
dlg.ShowModal()
dlg.Destroy()
self.DrawBoard(self.boardstate)
def OnExit(self, e):
self.Close(True)
def OnNewGameMP(self, e):
self.engine.stdin.write('{"command": "newGame", "humanCol": "both"}\n')
def OnNewGameWhite(self, e):
self.engine.stdin.write('{"command": "newGame", "humanCol": "white"}\n')
def OnNewGameBlack(self, e):
self.engine.stdin.write('{"command": "newGame", "humanCol": "black"}\n')
def OnRewind(self, e):
self.engine.stdin.write('{"command": "rewind"}\n')
def OnAIMove(self, e):
self.engine.stdin.write('{"command": "makeBestMove"}\n')
if __name__ == '__main__':
app = None
app = wx.App(False)
frame = ChessBoard(None, 'chess_gui')
print dir(frame)
app.MainLoop()
|
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd LSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using LSTM not BLSTM
e151
* Max pooling
"""
def set_subsample_target(net, epoch):
net.source.subsample_target = 4
net.source.input_padding = 5
net.generate_validation_data_and_set_shapes()
def exp_a(name):
# skip_prob = 0.7
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=2000,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
# include_diff=True,
input_padding=1,
subsample_target=2
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=.1, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(25)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': FeaturePoolLayer,
                'ds': 2,  # pool size: merge pairs of consecutive steps along the pooled axis
                'axis': 1  # pool over the time axis
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
101: {
'remove_from': -3,
'callback': set_subsample_target,
'new_layers':
[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': FeaturePoolLayer,
                        'ds': 2,  # pool size: merge pairs of consecutive steps along the pooled axis
                        'axis': 1  # pool over the time axis
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
},
201: {
'remove_from': -3,
'new_layers':
[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
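# Reading of the layer_changes schedule above (hedged interpretation of the
# neuralnilm Net API): at epoch 101 the last three layers are removed
# ('remove_from': -3), the set_subsample_target callback is invoked, and the
# listed new_layers are appended; at epoch 201 the tail is replaced again,
# this time with an LSTM layer followed by a dense output layer.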
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
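# Hedged alternative (an assumption, not part of the original workflow): the
# eval-based dispatch above can be replaced with a plain globals() lookup,
# which avoids evaluating a formatted string.
#
#   def init_experiment_by_lookup(experiment):
#       func = globals()['exp_{:s}'.format(experiment)]
#       return func(NAME + experiment)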
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
|
|
"""
# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman
Taken from the Bittornado package, which is under the MIT license.
The original License.txt lists the copyright as follows:
Copyright (C) 2001-2002 Bram Cohen
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
The Software is provided "AS IS", without warranty of any kind,
express or implied, including but not limited to the warranties of
merchantability, fitness for a particular purpose and
noninfringement. In no event shall the authors or copyright holders
be liable for any claim, damages or other liability, whether in an
action of contract, tort or otherwise, arising from, out of or in
connection with the Software or the use or other dealings in the
Software.
"""
from types import IntType, LongType, StringType, ListType, TupleType, DictType
try:
from types import BooleanType
except ImportError:
BooleanType = None
try:
from types import UnicodeType
except ImportError:
UnicodeType = None
from cStringIO import StringIO
def decode_int(x, f):
f += 1
newf = x.index('e', f)
try:
n = int(x[f:newf])
except:
n = long(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_string(x, f):
colon = x.index(':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
return (x[colon:colon+n], colon+n)
def decode_unicode(x, f):
s, f = decode_string(x, f+1)
return (s.decode('UTF-8'),f)
def decode_list(x, f):
r, f = [], f+1
while x[f] != 'e':
v, f = decode_func[x[f]](x, f)
r.append(v)
return (r, f + 1)
def decode_dict(x, f):
r, f = {}, f+1
lastkey = None
while x[f] != 'e':
k, f = decode_string(x, f)
if lastkey >= k:
raise ValueError
lastkey = k
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
#decode_func['u'] = decode_unicode
def bdecode(x, sloppy = 0):
try:
r, l = decode_func[x[0]](x, 0)
# except (IndexError, KeyError):
except (IndexError, KeyError, ValueError):
raise ValueError, "bad bencoded data"
if not sloppy and l != len(x):
raise ValueError, "bad bencoded data"
return r
def test_bdecode():
try:
bdecode('0:0:')
assert 0
except ValueError:
pass
try:
bdecode('ie')
assert 0
except ValueError:
pass
try:
bdecode('i341foo382e')
assert 0
except ValueError:
pass
assert bdecode('i4e') == 4L
assert bdecode('i0e') == 0L
assert bdecode('i123456789e') == 123456789L
assert bdecode('i-10e') == -10L
try:
bdecode('i-0e')
assert 0
except ValueError:
pass
try:
bdecode('i123')
assert 0
except ValueError:
pass
try:
bdecode('')
assert 0
except ValueError:
pass
try:
bdecode('i6easd')
assert 0
except ValueError:
pass
try:
bdecode('35208734823ljdahflajhdf')
assert 0
except ValueError:
pass
try:
bdecode('2:abfdjslhfld')
assert 0
except ValueError:
pass
assert bdecode('0:') == ''
assert bdecode('3:abc') == 'abc'
assert bdecode('10:1234567890') == '1234567890'
try:
bdecode('02:xy')
assert 0
except ValueError:
pass
try:
bdecode('l')
assert 0
except ValueError:
pass
assert bdecode('le') == []
try:
bdecode('leanfdldjfh')
assert 0
except ValueError:
pass
assert bdecode('l0:0:0:e') == ['', '', '']
try:
bdecode('relwjhrlewjh')
assert 0
except ValueError:
pass
assert bdecode('li1ei2ei3ee') == [1, 2, 3]
assert bdecode('l3:asd2:xye') == ['asd', 'xy']
assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]]
try:
bdecode('d')
assert 0
except ValueError:
pass
try:
bdecode('defoobar')
assert 0
except ValueError:
pass
assert bdecode('de') == {}
assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'}
assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}}
try:
bdecode('d3:fooe')
assert 0
except ValueError:
pass
try:
bdecode('di1e0:e')
assert 0
except ValueError:
pass
try:
bdecode('d1:b0:1:a0:e')
assert 0
except ValueError:
pass
try:
bdecode('d1:a0:1:a0:e')
assert 0
except ValueError:
pass
try:
bdecode('i03e')
assert 0
except ValueError:
pass
try:
bdecode('l01:ae')
assert 0
except ValueError:
pass
try:
bdecode('9999:x')
assert 0
except ValueError:
pass
try:
bdecode('l0:')
assert 0
except ValueError:
pass
try:
bdecode('d0:0:')
assert 0
except ValueError:
pass
try:
bdecode('d0:')
assert 0
except ValueError:
pass
bencached_marker = []
class Bencached:
def __init__(self, s):
self.marker = bencached_marker
self.bencoded = s
BencachedType = type(Bencached('')) # insufficient, but good as a filter
def encode_bencached(x,r):
assert x.marker == bencached_marker
r.append(x.bencoded)
def encode_int(x,r):
r.extend(('i',str(x),'e'))
def encode_bool(x,r):
encode_int(int(x),r)
def encode_string(x,r):
r.extend((str(len(x)),':',x))
def encode_unicode(x,r):
#r.append('u')
encode_string(x.encode('UTF-8'),r)
def encode_list(x,r):
r.append('l')
for e in x:
encode_func[type(e)](e, r)
r.append('e')
def encode_dict(x,r):
r.append('d')
ilist = x.items()
ilist.sort()
for k,v in ilist:
r.extend((str(len(k)),':',k))
encode_func[type(v)](v, r)
r.append('e')
encode_func = {}
encode_func[BencachedType] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
if BooleanType:
encode_func[BooleanType] = encode_bool
if UnicodeType:
encode_func[UnicodeType] = encode_unicode
def bencode(x):
r = []
try:
encode_func[type(x)](x, r)
except:
print "*** error *** could not encode type %s (value: %s)" % (type(x), x)
assert 0
return ''.join(r)
try:
import psyco
psyco.bind(bdecode)
psyco.bind(bencode)
except ImportError:
pass
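# Round-trip sketch (not part of the original Bittornado module): bencode()
# serializes the supported Python types and bdecode() restores them; dict
# keys are emitted in sorted order, so the encoding is canonical.
def _bencode_roundtrip_example():
    data = {'n': 3, 'spam': ['a', 'b']}
    encoded = bencode(data)
    assert encoded == 'd1:ni3e4:spaml1:a1:bee'
    assert bdecode(encoded) == data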
|
|
from pathlib import Path
import platform
import warnings
import numpy as np
import pandas as pd
from pkg_resources import parse_version
import pytest
import pvlib
pvlib_base_version = \
parse_version(parse_version(pvlib.__version__).base_version)
# decorator takes one argument: the base version for which it should fail
# for example @fail_on_pvlib_version('0.7') will cause a test to fail
# on pvlib versions 0.7a, 0.7b, 0.7rc1, etc.
# test function may not take args, kwargs, or fixtures.
def fail_on_pvlib_version(version):
# second level of decorator takes the function under consideration
def wrapper(func):
# third level defers computation until the test is called
# this allows the specific test to fail at test runtime,
# rather than at decoration time (when the module is imported)
def inner():
# fail if the version is too high
if pvlib_base_version >= parse_version(version):
pytest.fail('the tested function is scheduled to be '
'removed in %s' % version)
# otherwise return the function to be executed
else:
return func()
return inner
return wrapper
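# Usage sketch (hypothetical test, not part of the real suite): the decorated
# test keeps passing until the installed pvlib reaches the stated version, at
# which point it fails as a reminder to remove the deprecated code path.
@fail_on_pvlib_version('999.0')
def test_removal_reminder_example():
    assert True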
# commonly used directories in the tests
TEST_DIR = Path(__file__).parent
DATA_DIR = TEST_DIR.parent / 'data'
# pytest-rerunfailures variables
RERUNS = 5
RERUNS_DELAY = 2
platform_is_windows = platform.system() == 'Windows'
skip_windows = pytest.mark.skipif(platform_is_windows,
reason='does not run on windows')
try:
import scipy
has_scipy = True
except ImportError:
has_scipy = False
requires_scipy = pytest.mark.skipif(not has_scipy, reason='requires scipy')
try:
import tables
has_tables = True
except ImportError:
has_tables = False
requires_tables = pytest.mark.skipif(not has_tables, reason='requires tables')
try:
import ephem
has_ephem = True
except ImportError:
has_ephem = False
requires_ephem = pytest.mark.skipif(not has_ephem, reason='requires ephem')
def numpy_1_10():
return parse_version(np.__version__) >= parse_version('1.10.0')
needs_numpy_1_10 = pytest.mark.skipif(
not numpy_1_10(), reason='requires numpy 1.10 or greater')
def pandas_0_22():
return parse_version(pd.__version__) >= parse_version('0.22.0')
needs_pandas_0_22 = pytest.mark.skipif(
not pandas_0_22(), reason='requires pandas 0.22 or greater')
def has_spa_c():
try:
from pvlib.spa_c_files.spa_py import spa_calc
except ImportError:
return False
else:
return True
requires_spa_c = pytest.mark.skipif(not has_spa_c(), reason="requires spa_c")
def has_numba():
try:
import numba
except ImportError:
return False
else:
vers = numba.__version__.split('.')
        # crude check for numba >= 0.17: concatenates the major and minor
        # version digits ('0' + '17' -> 17)
        if int(vers[0] + vers[1]) < 17:
return False
else:
return True
requires_numba = pytest.mark.skipif(not has_numba(), reason="requires numba")
try:
import siphon
has_siphon = True
except ImportError:
has_siphon = False
requires_siphon = pytest.mark.skipif(not has_siphon,
reason='requires siphon')
try:
import netCDF4 # noqa: F401
has_netCDF4 = True
except ImportError:
has_netCDF4 = False
requires_netCDF4 = pytest.mark.skipif(not has_netCDF4,
reason='requires netCDF4')
try:
import pvfactors # noqa: F401
has_pvfactors = True
except ImportError:
has_pvfactors = False
requires_pvfactors = pytest.mark.skipif(not has_pvfactors,
reason='requires pvfactors')
try:
import PySAM # noqa: F401
has_pysam = True
except ImportError:
has_pysam = False
requires_pysam = pytest.mark.skipif(not has_pysam, reason="requires PySAM")
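# Usage sketch (hypothetical test): the optional-dependency markers above are
# applied as decorators, so a test is skipped rather than erroring out when
# the corresponding package is not installed.
@requires_scipy
def test_scipy_available_example():
    assert has_scipy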
@pytest.fixture(scope="session")
def sam_data():
data = {}
with warnings.catch_warnings():
# ignore messages about duplicate entries in the databases.
warnings.simplefilter("ignore", UserWarning)
data['sandiamod'] = pvlib.pvsystem.retrieve_sam('sandiamod')
data['adrinverter'] = pvlib.pvsystem.retrieve_sam('adrinverter')
return data
@pytest.fixture(scope="function")
def pvsyst_module_params():
"""
Define some PVSyst module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'gamma_ref': 1.05,
'mu_gamma': 0.001,
'I_L_ref': 6.0,
'I_o_ref': 5e-9,
'EgRef': 1.121,
'R_sh_ref': 300,
'R_sh_0': 1000,
'R_s': 0.5,
'R_sh_exp': 5.5,
'cells_in_series': 60,
'alpha_sc': 0.001,
}
return parameters
@pytest.fixture(scope='function')
def cec_inverter_parameters():
"""
Define some CEC inverter parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'ABB: MICRO-0.25-I-OUTD-US-208 208V [CEC 2014]',
'Vac': 208.0,
'Paco': 250.0,
'Pdco': 259.5220505,
'Vdco': 40.24260317,
'Pso': 1.771614224,
'C0': -2.48e-5,
'C1': -9.01e-5,
'C2': 6.69e-4,
'C3': -0.0189,
'Pnt': 0.02,
'Vdcmax': 65.0,
'Idcmax': 10.0,
'Mppt_low': 20.0,
'Mppt_high': 50.0,
}
return parameters
@pytest.fixture(scope='function')
def cec_module_params():
"""
Define some CEC module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'Example Module',
'BIPV': 'Y',
'Date': '4/28/2008',
'T_NOCT': 65,
'A_c': 0.67,
'N_s': 18,
'I_sc_ref': 7.5,
'V_oc_ref': 10.4,
'I_mp_ref': 6.6,
'V_mp_ref': 8.4,
'alpha_sc': 0.003,
'beta_oc': -0.04,
'a_ref': 0.473,
'I_L_ref': 7.545,
'I_o_ref': 1.94e-09,
'R_s': 0.094,
'R_sh_ref': 15.72,
'Adjust': 10.6,
'gamma_r': -0.5,
'Version': 'MM105',
'PTC': 48.9,
'Technology': 'Multi-c-Si',
}
return parameters
@pytest.fixture(scope='function')
def cec_module_cs5p_220m():
"""
Define Canadian Solar CS5P-220M module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'Canadian Solar CS5P-220M',
'BIPV': 'N',
'Date': '10/5/2009',
'T_NOCT': 42.4,
'A_c': 1.7,
'N_s': 96,
'I_sc_ref': 5.1,
'V_oc_ref': 59.4,
'I_mp_ref': 4.69,
'V_mp_ref': 46.9,
'alpha_sc': 0.004539,
'beta_oc': -0.22216,
'a_ref': 2.6373,
'I_L_ref': 5.114,
'I_o_ref': 8.196e-10,
'R_s': 1.065,
'R_sh_ref': 381.68,
'Adjust': 8.7,
'gamma_r': -0.476,
'Version': 'MM106',
'PTC': 200.1,
'Technology': 'Mono-c-Si',
}
return parameters
@pytest.fixture(scope='function')
def cec_module_spr_e20_327():
"""
Define SunPower SPR-E20-327 module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'SunPower SPR-E20-327',
'BIPV': 'N',
'Date': '1/14/2013',
'T_NOCT': 46,
'A_c': 1.631,
'N_s': 96,
'I_sc_ref': 6.46,
'V_oc_ref': 65.1,
'I_mp_ref': 5.98,
'V_mp_ref': 54.7,
'alpha_sc': 0.004522,
'beta_oc': -0.23176,
'a_ref': 2.6868,
'I_L_ref': 6.468,
'I_o_ref': 1.88e-10,
'R_s': 0.37,
'R_sh_ref': 298.13,
'Adjust': -0.1862,
'gamma_r': -0.386,
'Version': 'NRELv1',
'PTC': 301.4,
'Technology': 'Mono-c-Si',
}
return parameters
@pytest.fixture(scope='function')
def cec_module_fs_495():
"""
Define First Solar FS-495 module parameters for testing.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {
'Name': 'First Solar FS-495',
'BIPV': 'N',
'Date': '9/18/2014',
'T_NOCT': 44.6,
'A_c': 0.72,
'N_s': 216,
'I_sc_ref': 1.55,
'V_oc_ref': 86.5,
'I_mp_ref': 1.4,
'V_mp_ref': 67.9,
'alpha_sc': 0.000924,
'beta_oc': -0.22741,
'a_ref': 2.9482,
'I_L_ref': 1.563,
'I_o_ref': 2.64e-13,
'R_s': 6.804,
'R_sh_ref': 806.27,
'Adjust': -10.65,
'gamma_r': -0.264,
'Version': 'NRELv1',
'PTC': 89.7,
'Technology': 'CdTe',
}
return parameters
@pytest.fixture(scope='function')
def sapm_temperature_cs5p_220m():
# SAPM temperature model parameters for Canadian_Solar_CS5P_220M
# (glass/polymer) in open rack
return {'a': -3.40641, 'b': -0.0842075, 'deltaT': 3}
@pytest.fixture(scope='function')
def sapm_module_params():
"""
Define SAPM model parameters for Canadian Solar CS5P 220M module.
The scope of the fixture is set to ``'function'`` to allow tests to modify
parameters if required without affecting other tests.
"""
parameters = {'Material': 'c-Si',
'Cells_in_Series': 96,
'Parallel_Strings': 1,
'A0': 0.928385,
'A1': 0.068093,
'A2': -0.0157738,
'A3': 0.0016606,
'A4': -6.93E-05,
'B0': 1,
'B1': -0.002438,
'B2': 0.0003103,
'B3': -0.00001246,
'B4': 2.11E-07,
'B5': -1.36E-09,
'C0': 1.01284,
'C1': -0.0128398,
'C2': 0.279317,
'C3': -7.24463,
'C4': 0.996446,
'C5': 0.003554,
'C6': 1.15535,
'C7': -0.155353,
'Isco': 5.09115,
'Impo': 4.54629,
'Voco': 59.2608,
'Vmpo': 48.3156,
'Aisc': 0.000397,
'Aimp': 0.000181,
'Bvoco': -0.21696,
'Mbvoc': 0.0,
'Bvmpo': -0.235488,
'Mbvmp': 0.0,
'N': 1.4032,
'IXO': 4.97599,
'IXXO': 3.18803,
'FD': 1}
return parameters
|
|
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mock unit tests for the NetApp cmode nfs storage driver
"""
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_service import loopingcall
from oslo_utils import units
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fake_api as netapp_api)
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap import nfs_cmode
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume.drivers import nfs
from cinder.volume import utils as volume_utils
@ddt.ddt
class NetAppCmodeNfsDriverTestCase(test.TestCase):
def setUp(self):
super(NetAppCmodeNfsDriverTestCase, self).setUp()
kwargs = {'configuration': self.get_config_cmode()}
with mock.patch.object(utils, 'get_root_helper',
return_value=mock.Mock()):
with mock.patch.object(remotefs_brick, 'RemoteFsClient',
return_value=mock.Mock()):
self.driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs)
self.driver._mounted_shares = [fake.NFS_SHARE]
self.driver.ssc_vols = True
self.driver.vserver = fake.VSERVER_NAME
self.driver.ssc_enabled = True
def get_config_cmode(self):
config = na_fakes.create_configuration_cmode()
config.netapp_storage_protocol = 'nfs'
config.netapp_login = 'admin'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
config.netapp_vserver = fake.VSERVER_NAME
return config
@mock.patch.object(client_cmode, 'Client', mock.Mock())
@mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
self.driver.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertTrue(mock_super_do_setup.called)
@ddt.data({'thin': True, 'nfs_sparsed_volumes': True},
{'thin': True, 'nfs_sparsed_volumes': False},
{'thin': False, 'nfs_sparsed_volumes': True},
{'thin': False, 'nfs_sparsed_volumes': False})
@ddt.unpack
def test_get_pool_stats(self, thin, nfs_sparsed_volumes):
class test_volume(object):
pass
test_volume = test_volume()
test_volume.id = {'vserver': 'openstack', 'name': 'vola'}
test_volume.aggr = {
'disk_type': 'SSD',
'ha_policy': 'cfo',
'junction': '/vola',
'name': 'aggr1',
'raid_type': 'raiddp',
}
test_volume.export = {'path': fake.NFS_SHARE}
test_volume.sis = {'dedup': False, 'compression': False}
test_volume.state = {
'status': 'online',
'vserver_root': False,
'junction_active': True,
}
test_volume.qos = {'qos_policy_group': None}
ssc_map = {
'mirrored': {},
'dedup': {},
'compression': {},
'thin': {test_volume if thin else None},
'all': [test_volume],
}
self.driver.ssc_vols = ssc_map
self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes
netapp_thin = 'true' if thin else 'false'
netapp_thick = 'false' if thin else 'true'
thick = not thin and not nfs_sparsed_volumes
total_capacity_gb = na_utils.round_down(
fake.TOTAL_BYTES / units.Gi, '0.01')
free_capacity_gb = na_utils.round_down(
fake.AVAILABLE_BYTES / units.Gi, '0.01')
provisioned_capacity_gb = total_capacity_gb - free_capacity_gb
capacity = {
'reserved_percentage': fake.RESERVED_PERCENTAGE,
'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
}
self.mock_object(self.driver,
'_get_share_capacity_info',
mock.Mock(return_value=capacity))
result = self.driver._get_pool_stats()
expected = [{'pool_name': '192.168.99.24:/fake/export/path',
'netapp_unmirrored': 'true',
'QoS_support': True,
'thick_provisioning_support': thick,
'netapp_thick_provisioned': netapp_thick,
'netapp_nocompression': 'true',
'thin_provisioning_support': not thick,
'free_capacity_gb': 12.0,
'netapp_thin_provisioned': netapp_thin,
'total_capacity_gb': 4468.0,
'netapp_compression': 'false',
'netapp_mirrored': 'false',
'netapp_dedup': 'false',
'reserved_percentage': 7,
'netapp_raid_type': 'raiddp',
'netapp_disk_type': 'SSD',
'netapp_nodedup': 'true',
'max_over_subscription_ratio': 19.0,
'provisioned_capacity_gb': 4456.0}]
self.assertEqual(expected, result)
def test_check_for_setup_error(self):
super_check_for_setup_error = self.mock_object(
nfs_base.NetAppNfsDriver, 'check_for_setup_error')
mock_check_ssc_api_permissions = self.mock_object(
ssc_cmode, 'check_ssc_api_permissions')
mock_start_periodic_tasks = self.mock_object(
self.driver, '_start_periodic_tasks')
self.driver.zapi_client = mock.Mock()
self.driver.check_for_setup_error()
self.assertEqual(1, super_check_for_setup_error.call_count)
mock_check_ssc_api_permissions.assert_called_once_with(
self.driver.zapi_client)
self.assertEqual(1, mock_start_periodic_tasks.call_count)
def test_delete_volume(self):
fake_provider_location = 'fake_provider_location'
fake_volume = {'name': 'fake_name',
'provider_location': 'fake_provider_location'}
fake_qos_policy_group_info = {'legacy': None, 'spec': None}
self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume')
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
mock.Mock(return_value=fake_qos_policy_group_info))
self.mock_object(self.driver, '_post_prov_deprov_in_ssc')
self.driver.zapi_client = mock.Mock()
self.driver.delete_volume(fake_volume)
nfs_base.NetAppNfsDriver.delete_volume.assert_called_once_with(
fake_volume)
self.driver.zapi_client.mark_qos_policy_group_for_deletion\
.assert_called_once_with(fake_qos_policy_group_info)
self.driver._post_prov_deprov_in_ssc.assert_called_once_with(
fake_provider_location)
def test_delete_volume_get_qos_info_exception(self):
fake_provider_location = 'fake_provider_location'
fake_volume = {'name': 'fake_name',
'provider_location': 'fake_provider_location'}
self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume')
self.mock_object(na_utils, 'get_valid_qos_policy_group_info',
mock.Mock(side_effect=exception.Invalid))
self.mock_object(self.driver, '_post_prov_deprov_in_ssc')
self.driver.delete_volume(fake_volume)
nfs_base.NetAppNfsDriver.delete_volume.assert_called_once_with(
fake_volume)
self.driver._post_prov_deprov_in_ssc.assert_called_once_with(
fake_provider_location)
def test_do_qos_for_volume_no_exception(self):
mock_get_info = self.mock_object(na_utils,
'get_valid_qos_policy_group_info')
mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO
self.driver.zapi_client = mock.Mock()
mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group
mock_set_policy = self.mock_object(self.driver,
'_set_qos_policy_group_on_volume')
mock_error_log = self.mock_object(nfs_cmode.LOG, 'error')
mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
mock_cleanup = self.mock_object(self.driver,
'_cleanup_volume_on_failure')
self.driver._do_qos_for_volume(fake.NFS_VOLUME, fake.EXTRA_SPECS)
mock_get_info.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)])
mock_provision_qos.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_INFO)])
mock_set_policy.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO)])
self.assertEqual(0, mock_error_log.call_count)
self.assertEqual(0, mock_debug_log.call_count)
self.assertEqual(0, mock_cleanup.call_count)
def test_do_qos_for_volume_exception_w_cleanup(self):
mock_get_info = self.mock_object(na_utils,
'get_valid_qos_policy_group_info')
mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO
self.driver.zapi_client = mock.Mock()
mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group
mock_set_policy = self.mock_object(self.driver,
'_set_qos_policy_group_on_volume')
mock_set_policy.side_effect = netapp_api.NaApiError
mock_error_log = self.mock_object(nfs_cmode.LOG, 'error')
mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
mock_cleanup = self.mock_object(self.driver,
'_cleanup_volume_on_failure')
self.assertRaises(netapp_api.NaApiError,
self.driver._do_qos_for_volume,
fake.NFS_VOLUME,
fake.EXTRA_SPECS)
mock_get_info.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)])
mock_provision_qos.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_INFO)])
mock_set_policy.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO)])
self.assertEqual(1, mock_error_log.call_count)
self.assertEqual(1, mock_debug_log.call_count)
mock_cleanup.assert_has_calls([
mock.call(fake.NFS_VOLUME)])
def test_do_qos_for_volume_exception_no_cleanup(self):
mock_get_info = self.mock_object(na_utils,
'get_valid_qos_policy_group_info')
mock_get_info.side_effect = exception.Invalid
self.driver.zapi_client = mock.Mock()
mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group
mock_set_policy = self.mock_object(self.driver,
'_set_qos_policy_group_on_volume')
mock_error_log = self.mock_object(nfs_cmode.LOG, 'error')
mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
mock_cleanup = self.mock_object(self.driver,
'_cleanup_volume_on_failure')
self.assertRaises(exception.Invalid, self.driver._do_qos_for_volume,
fake.NFS_VOLUME, fake.EXTRA_SPECS, cleanup=False)
mock_get_info.assert_has_calls([
mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)])
self.assertEqual(0, mock_provision_qos.call_count)
self.assertEqual(0, mock_set_policy.call_count)
self.assertEqual(1, mock_error_log.call_count)
self.assertEqual(0, mock_debug_log.call_count)
self.assertEqual(0, mock_cleanup.call_count)
def test_set_qos_policy_group_on_volume(self):
mock_get_name_from_info = self.mock_object(
na_utils, 'get_qos_policy_group_name_from_info')
mock_get_name_from_info.return_value = fake.QOS_POLICY_GROUP_NAME
mock_extract_host = self.mock_object(volume_utils, 'extract_host')
mock_extract_host.return_value = fake.NFS_SHARE
self.driver.zapi_client = mock.Mock()
mock_get_flex_vol_name =\
self.driver.zapi_client.get_vol_by_junc_vserver
mock_get_flex_vol_name.return_value = fake.FLEXVOL
mock_file_assign_qos = self.driver.zapi_client.file_assign_qos
self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME,
fake.QOS_POLICY_GROUP_INFO)
mock_get_name_from_info.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_INFO)])
mock_extract_host.assert_has_calls([
mock.call(fake.NFS_HOST_STRING, level='pool')])
mock_get_flex_vol_name.assert_has_calls([
mock.call(fake.VSERVER_NAME, fake.EXPORT_PATH)])
mock_file_assign_qos.assert_has_calls([
mock.call(fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME,
fake.NFS_VOLUME['name'])])
def test_set_qos_policy_group_on_volume_no_info(self):
mock_get_name_from_info = self.mock_object(
na_utils, 'get_qos_policy_group_name_from_info')
mock_extract_host = self.mock_object(volume_utils, 'extract_host')
self.driver.zapi_client = mock.Mock()
mock_get_flex_vol_name =\
self.driver.zapi_client.get_vol_by_junc_vserver
mock_file_assign_qos = self.driver.zapi_client.file_assign_qos
self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME,
None)
self.assertEqual(0, mock_get_name_from_info.call_count)
self.assertEqual(0, mock_extract_host.call_count)
self.assertEqual(0, mock_get_flex_vol_name.call_count)
self.assertEqual(0, mock_file_assign_qos.call_count)
def test_set_qos_policy_group_on_volume_no_name(self):
mock_get_name_from_info = self.mock_object(
na_utils, 'get_qos_policy_group_name_from_info')
mock_get_name_from_info.return_value = None
mock_extract_host = self.mock_object(volume_utils, 'extract_host')
self.driver.zapi_client = mock.Mock()
mock_get_flex_vol_name =\
self.driver.zapi_client.get_vol_by_junc_vserver
mock_file_assign_qos = self.driver.zapi_client.file_assign_qos
self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME,
fake.QOS_POLICY_GROUP_INFO)
mock_get_name_from_info.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_INFO)])
self.assertEqual(0, mock_extract_host.call_count)
self.assertEqual(0, mock_get_flex_vol_name.call_count)
self.assertEqual(0, mock_file_assign_qos.call_count)
def test_unmanage(self):
mock_get_info = self.mock_object(na_utils,
'get_valid_qos_policy_group_info')
mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO
self.driver.zapi_client = mock.Mock()
mock_mark_for_deletion =\
self.driver.zapi_client.mark_qos_policy_group_for_deletion
super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage')
self.driver.unmanage(fake.NFS_VOLUME)
mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)])
mock_mark_for_deletion.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_INFO)])
super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)])
def test_unmanage_invalid_qos(self):
mock_get_info = self.mock_object(na_utils,
'get_valid_qos_policy_group_info')
mock_get_info.side_effect = exception.Invalid
super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage')
self.driver.unmanage(fake.NFS_VOLUME)
mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)])
super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)])
def test_create_volume(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
self.mock_object(self.driver, '_do_create_volume')
self.mock_object(self.driver, '_do_qos_for_volume')
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
self.mock_object(self.driver, '_get_vol_for_share')
expected = {'provider_location': fake.NFS_SHARE}
result = self.driver.create_volume(fake.NFS_VOLUME)
self.assertEqual(expected, result)
self.assertEqual(1, update_ssc.call_count)
def test_create_volume_exception(self):
self.mock_object(self.driver, '_ensure_shares_mounted')
self.mock_object(na_utils, 'get_volume_extra_specs')
mock_create = self.mock_object(self.driver, '_do_create_volume')
mock_create.side_effect = Exception
update_ssc = self.mock_object(self.driver, '_update_stale_vols')
self.mock_object(self.driver, '_get_vol_for_share')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume,
fake.NFS_VOLUME)
self.assertEqual(1, update_ssc.call_count)
def test_start_periodic_tasks(self):
self.driver.zapi_client = mock.Mock()
mock_remove_unused_qos_policy_groups = self.mock_object(
self.driver.zapi_client,
'remove_unused_qos_policy_groups')
harvest_qos_periodic_task = mock.Mock()
mock_loopingcall = self.mock_object(
loopingcall,
'FixedIntervalLoopingCall',
mock.Mock(side_effect=[harvest_qos_periodic_task]))
self.driver._start_periodic_tasks()
mock_loopingcall.assert_has_calls([
mock.call(mock_remove_unused_qos_policy_groups)])
self.assertTrue(harvest_qos_periodic_task.start.called)
@ddt.data(
{'space': True, 'ssc': True, 'match': True, 'expected': True},
{'space': True, 'ssc': True, 'match': False, 'expected': False},
{'space': True, 'ssc': False, 'match': True, 'expected': True},
{'space': True, 'ssc': False, 'match': False, 'expected': True},
{'space': False, 'ssc': True, 'match': True, 'expected': False},
{'space': False, 'ssc': True, 'match': False, 'expected': False},
{'space': False, 'ssc': False, 'match': True, 'expected': False},
{'space': False, 'ssc': False, 'match': False, 'expected': False},
)
@ddt.unpack
@mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver,
'_is_share_vol_type_match')
@mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver,
'_share_has_space_for_clone')
@mock.patch.object(nfs_cmode.NetAppCmodeNfsDriver,
'_is_volume_thin_provisioned')
def test_is_share_clone_compatible(self,
mock_is_volume_thin_provisioned,
mock_share_has_space_for_clone,
mock_is_share_vol_type_match,
space, ssc, match, expected):
mock_share_has_space_for_clone.return_value = space
mock_is_share_vol_type_match.return_value = match
with mock.patch.object(self.driver, 'ssc_enabled', ssc):
result = self.driver._is_share_clone_compatible(fake.VOLUME,
fake.NFS_SHARE)
self.assertEqual(expected, result)
@ddt.data(
{'sparsed': True, 'ssc': True, 'vol_thin': True, 'expected': True},
{'sparsed': True, 'ssc': True, 'vol_thin': False, 'expected': True},
{'sparsed': True, 'ssc': False, 'vol_thin': True, 'expected': True},
{'sparsed': True, 'ssc': False, 'vol_thin': False, 'expected': True},
{'sparsed': False, 'ssc': True, 'vol_thin': True, 'expected': True},
{'sparsed': False, 'ssc': True, 'vol_thin': False, 'expected': False},
{'sparsed': False, 'ssc': False, 'vol_thin': True, 'expected': False},
{'sparsed': False, 'ssc': False, 'vol_thin': False, 'expected': False},
)
@ddt.unpack
def test_is_volume_thin_provisioned(
self, sparsed, ssc, vol_thin, expected):
fake_volume = object()
ssc_vols = {'thin': {fake_volume if vol_thin else None}}
with mock.patch.object(self.driver, 'ssc_enabled', ssc):
with mock.patch.object(self.driver, 'ssc_vols', ssc_vols):
with mock.patch.object(self.driver.configuration,
'nfs_sparsed_volumes',
sparsed):
result = self.driver._is_volume_thin_provisioned(
fake_volume)
self.assertEqual(expected, result)
|
|
import argparse
import sys
import time
import math
import pint
import operator
from silhouette import *
def log(msg):
ts = time.strftime("%c")
msg = "%s] %s" % (ts, msg)
sys.stdout.write(msg)
sys.stdout.flush()
class Worker(object):
def __init__(self):
self._cutter = None
@property
def cutter(self):
if self._cutter == None:
self._cutter = Silhouette()
self._cutter.connect()
self._cutter.speed = 1
self._cutter.home()
return self._cutter
def cut_circle(self, **kw):
points = circle(**kw)
points = list(points)
self.cutter.position = points[0]
self.cutter.draw(points)
def iter_cut(self, **kw):
cnt = 0
kwstr = 'Cutting circle:\n radius: %(radius)s, steps: %(steps)s, center: (%(center_x)s, %(center_y)s)' % kw
msg = "%s\n" % kw
log(msg)
reps = kw.get("repeat", 1)
minp = kw.get("min_pressure", 1)
maxp = kw.get("max_pressure", 1)
total_cycles = ((maxp + 1) - minp) * reps
phase_step = (2 * math.pi) / total_cycles
for pressure in range(minp, maxp + 1):
for rep in range(reps):
#kw["phase"] = phase_step * cnt
kw["phase"] = 0
msg = "Run #%d/%d, repeat #%d/%d, pressure: %d, phase: %0.2f degs\n" % (cnt + 1, total_cycles, rep + 1, reps, pressure, math.degrees(kw["phase"]))
log(msg)
self.cutter.pressure = pressure
self.cut_circle(**kw)
cnt += 1
class CircleMode(object):
boiler_plate = {
"steps": 500,
"repeat": 2,
"min_pressure": 1,
"max_pressure": 10,
}
def __init__(self, args):
self.args = args
self.worker = Worker()
def run_circle(self, kw):
self.worker.iter_cut(**kw)
def run(self):
rmap = [(x, unit(y)) for (x, y) in self.radius_map.items()]
func = operator.itemgetter(1)
rmap.sort(key=func)
for (name, radius) in rmap:
kw = self.boiler_plate.copy()
kw.update(self.args)
kw["radius"] = radius
self.run_circle(kw)
self.worker.cutter.home()
class Membrane(CircleMode):
radius_map = {
"outer": "19mm",
}
class MembraneOring(CircleMode):
radius_map = {
"outer": "19mm",
"inner": "15mm",
}
class ValveSeal(CircleMode):
radius_map = {
"outer": "10mm",
"inner": "8mm",
}
class InletSeal54(CircleMode):
radius_map = {
"outer": "7.5mm",
"inner": "2.5mm",
}
class InletSeal72(CircleMode):
radius_map = {
"outer": "7.5mm",
"inner": "3mm",
}
class OutletSeal72(CircleMode):
radius_map = {
"outer": "10mm",
"inner": "4mm",
}
class InSealValve(CircleMode):
radius_map = {
"outer": "7.75mm",
"inner": "2mm",
}
class InnerInSeal(CircleMode):
radius_map = {
"outer": "10mm",
"inner": "4mm",
}
class StretchMembrane(CircleMode):
#original_radius = 58
original_radius = 34
# first membrane, seems to stretch out still
stretch = 15
#stretch = 25
margin = 20
hole_radius = original_radius - stretch
outer_radius = hole_radius + margin
post_radius = 1.5
post_count = 16
radius_map = {
"outer": str(outer_radius) + "mm",
"post": str(post_radius) + "mm",
"wreath": str(hole_radius) + "mm",
}
def posts(self):
astep = math.pi * 2 / self.post_count
for idx in range(self.post_count):
x = math.cos(astep * idx) * self.hole_radius
y = math.sin(astep * idx) * self.hole_radius
yield (x, y)
def run(self):
cx = unit(self.args["center_x"])
cy = unit(self.args["center_y"])
kw = self.boiler_plate.copy()
kw["radius"] = unit(self.radius_map["post"])
kw.update(self.args)
#
for (x, y) in self.posts():
kw["center_x"] = unit(x, unit="mm") + cx
kw["center_y"] = unit(y, unit="mm") + cy
self.run_circle(kw)
# outer radius
kw["radius"] = unit(self.radius_map["outer"])
kw["center_x"] = cx
kw["center_y"] = cy
self.run_circle(kw)
self.worker.cutter.home()
class Gasket(CircleMode):
radius_map = {
"outer": "19.5mm",
"inner": "10.0mm",
"lughole": "2.0mm",
}
spoke_radius = "15.5mm"
lug_holes = 3
def run(self):
cx = unit(self.args["center_x"])
cy = unit(self.args["center_y"])
kw = self.boiler_plate.copy()
kw["radius"] = unit(self.radius_map["lughole"])
kw.update(self.args)
# lug holes
astep = (2.0 * math.pi) / self.lug_holes
radius = unit(self.spoke_radius)
for step in range(self.lug_holes):
x = math.cos(astep * step) * radius + cx
y = math.sin(astep * step) * radius + cy
kw["center_x"] = x
kw["center_y"] = y
self.run_circle(kw)
# inner radius
kw["radius"] = self.radius_map["inner"]
kw["center_x"] = cx
kw["center_y"] = cy
self.run_circle(kw)
# outer radius
kw["radius"] = self.radius_map["outer"]
kw["center_x"] = cx
kw["center_y"] = cy
self.run_circle(kw)
self.worker.cutter.home()
Modes = [mode for mode in globals().values() if (type(mode) == type) and issubclass(mode, CircleMode)]
def run_pattern(args, home=True):
w = Worker()
w.iter_cut(**args)
if home:
w.cutter.home()
def run_mode(mode, args, center=None):
mmap = {m.__name__.lower(): m for m in Modes}
mode = mode.lower()
_mode = mmap.get(mode, None)
if not _mode:
raise KeyError, "Mode must be one of %s, not %s" % (str.join(', ', mmap), mode)
mode = _mode(args)
mode.run()
def cli():
Defaults = {
"center_x": 0,
"center_y": 0,
"steps": 100,
}
parser = argparse.ArgumentParser(description='Cut/draw circles')
parser.add_argument('-r', '--radius', type=str, help='Radius of circle (1.2in, 3mm, etc)')
parser.add_argument('-x', '--center-x', type=str, help='X center of circle (1.2in, 3mm, etc)')
parser.add_argument('-y', '--center-y', type=str, help='Y center of circle (1.2in, 3mm, etc)')
parser.add_argument('-s', '--steps', type=int, help='The number of points used to draw the circle (resolution)')
parser.add_argument('-p', '--min-pressure', type=int, help='Min pressure of knife (1-33)')
parser.add_argument('-P', '--max-pressure', type=int, help='Max pressure of knife (1-33)')
parser.add_argument('-R', '--repeat', type=int, help='How many times to repeat the cut')
parser.add_argument('-m', '--mode', type=str, help='Use a predefined mode (see this code)')
parser.set_defaults(**Defaults)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = cli()
args = args.__dict__.copy()
# wash args
goodkeys = ("radius", "steps", "repeat", "min_pressure", "max_pressure", "center_x", "center_y")
_args = {k: v for (k, v) in args.items() if (k in goodkeys) and (v != None)}
if args["mode"]:
run_mode(args["mode"], _args)
else:
run_pattern(_args)
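# Example invocations (hypothetical script name; requires a connected
# Silhouette cutter):
#   python cut_circles.py --radius 19mm --min-pressure 1 --max-pressure 5 -R 2
#   python cut_circles.py --mode gasket -x 30mm -y 30mm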
|
|
# -*- coding: utf-8 -*-
"""
celery.worker.hub
~~~~~~~~~~~~~~~~~
Event-loop implementation.
"""
from __future__ import absolute_import
from kombu.utils import cached_property
from kombu.utils import eventio
from celery.utils.timer2 import Schedule
READ, WRITE, ERR = eventio.READ, eventio.WRITE, eventio.ERR
class BoundedSemaphore(object):
"""Asynchronous Bounded Semaphore.
Bounded means that the value will stay within the specified
range even if it is released more times than it was acquired.
This type is *not thread safe*.
Example:
>>> x = BoundedSemaphore(2)
>>> def callback(i):
... print('HELLO %r' % i)
>>> x.acquire(callback, 1)
HELLO 1
>>> x.acquire(callback, 2)
HELLO 2
>>> x.acquire(callback, 3)
>>> x._waiters # private, do not access directly
[(callback, 3)]
>>> x.release()
HELLO 3
"""
def __init__(self, value):
self.initial_value = self.value = value
self._waiting = []
def acquire(self, callback, *partial_args):
"""Acquire semaphore, applying ``callback`` when
the semaphore is ready.
:param callback: The callback to apply.
:param \*partial_args: partial arguments to callback.
"""
if self.value <= 0:
self._waiting.append((callback, partial_args))
return False
else:
self.value = max(self.value - 1, 0)
callback(*partial_args)
return True
def release(self):
"""Release semaphore.
This will apply any waiting callbacks from previous
calls to :meth:`acquire` done when the semaphore was busy.
"""
self.value = min(self.value + 1, self.initial_value)
if self._waiting:
waiter, args = self._waiting.pop()
waiter(*args)
def grow(self, n=1):
"""Change the size of the semaphore to hold more values."""
self.initial_value += n
self.value += n
[self.release() for _ in xrange(n)]
def shrink(self, n=1):
"""Change the size of the semaphore to hold less values."""
self.initial_value = max(self.initial_value - n, 0)
self.value = max(self.value - n, 0)
def clear(self):
"""Reset the sempahore, including wiping out any waiting callbacks."""
self._waiting[:] = []
self.value = self.initial_value
class Hub(object):
"""Event loop object.
:keyword timer: Specify custom :class:`~celery.utils.timer2.Schedule`.
"""
#: Flag set if reading from an fd will not block.
READ = READ
#: Flag set if writing to an fd will not block.
WRITE = WRITE
#: Flag set on error, and the fd should be read from asap.
ERR = ERR
#: List of callbacks to be called when the loop is initialized,
#: applied with the hub instance as sole argument.
on_init = None
#: List of callbacks to be called when the loop is exiting,
#: applied with the hub instance as sole argument.
on_close = None
#: List of callbacks to be called when a task is received.
#: Takes no arguments.
on_task = None
def __init__(self, timer=None):
self.timer = Schedule() if timer is None else timer
self.readers = {}
self.writers = {}
self.on_init = []
self.on_close = []
self.on_task = []
def start(self):
"""Called by StartStopComponent at worker startup."""
self.poller = eventio.poll()
def stop(self):
"""Called by StartStopComponent at worker shutdown."""
self.poller.close()
def init(self):
for callback in self.on_init:
callback(self)
def fire_timers(self, min_delay=1, max_delay=10, max_timers=10):
delay = None
if self.timer._queue:
for i in xrange(max_timers):
delay, entry = self.scheduler.next()
if entry is None:
break
self.timer.apply_entry(entry)
return min(max(delay, min_delay), max_delay)
def add(self, fd, callback, flags):
self.poller.register(fd, flags)
if not isinstance(fd, int):
fd = fd.fileno()
if flags & READ:
self.readers[fd] = callback
if flags & WRITE:
self.writers[fd] = callback
def add_reader(self, fd, callback):
return self.add(fd, callback, READ | ERR)
def add_writer(self, fd, callback):
return self.add(fd, callback, WRITE)
def update_readers(self, map):
[self.add_reader(*x) for x in map.iteritems()]
def update_writers(self, map):
[self.add_writer(*x) for x in map.iteritems()]
def _unregister(self, fd):
try:
self.poller.unregister(fd)
except (KeyError, OSError):
pass
def remove(self, fd):
fileno = fd.fileno() if not isinstance(fd, int) else fd
self.readers.pop(fileno, None)
self.writers.pop(fileno, None)
self._unregister(fd)
def __enter__(self):
self.init()
return self
def close(self, *args):
[self._unregister(fd) for fd in self.readers]
self.readers.clear()
[self._unregister(fd) for fd in self.writers]
self.writers.clear()
for callback in self.on_close:
callback(self)
__exit__ = close
@cached_property
def scheduler(self):
return iter(self.timer)
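# Minimal usage sketch (illustration only, not part of the original module):
# ``readers``/``writers`` are plain fd -> callback maps; the worker's consumer
# loop (not shown here) polls the registered fds and fires these callbacks.
def _hub_usage_sketch():
    import os
    hub = Hub()
    hub.start()                            # creates the eventio poller
    r, w = os.pipe()
    hub.add_reader(r, lambda *args: None)  # register fd for READ | ERR
    assert r in hub.readers
    hub.remove(r)                          # unregister and forget the fd
    assert r not in hub.readers
    os.close(r)
    os.close(w)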
class DummyLock(object):
"""Pretending to be a lock."""
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
|
|
"""
GCL -- Generic Configuration Language
See README.md for an explanation of GCL and concepts.
"""
import functools
from os import path
import pyparsing as p
from . import functions
__version__ = '0.4.14'
class GCLError(RuntimeError):
pass
class ParseError(GCLError):
pass
class EvaluationError(GCLError):
def __init__(self, message, inner=None):
super(EvaluationError, self).__init__(message)
self.inner = inner
def __str__(self):
return self.args[0] + ('\n' + str(self.inner) if self.inner else '')
def do(*fns):
def fg(args):
for fn in fns:
args = fn(args)
return args
return fg
def doapply(what):
def fn(args):
return what(*args)
return fn
def head(x):
return x[0]
def second(x):
return x[1]
def inner(x):
return x[1:-1]
def mkBool(s):
return True if s == 'true' else False
def drop(x):
return []
def find_relative(current_dir, rel_path):
if rel_path.startswith('/'):
return rel_path
else:
return path.normpath(path.join(current_dir, rel_path))
class Cache(object):
def __init__(self):
self._cache = {}
def get(self, key, thunk):
if key not in self._cache:
self._cache[key] = thunk()
return self._cache[key]
class Activation(object):
def __init__(self, stack, key):
self.stack = stack
self.key = key
def __enter__(self):
self.stack[self.key] = self
def __exit__(self, value, type, exc):
del self.stack[self.key]
# Because we can't trust id(), it'll get reused, we number objects ourselves
# for caching purposes.
obj_nr = 0
def obj_ident():
global obj_nr
obj_nr += 1
return obj_nr
eval_cache = Cache()
activation_stack = {}
def eval(thunk, env):
"""Evaluate a thunk in an environment.
Will defer the actual evaluation to the thunk itself, but adds two things:
caching and recursion detection.
Since we have to use a global evaluation stack, GCL evaluation is not thread
safe.
"""
#with self._evals.evaluating((id(env), key), key):
key = (thunk.ident, env.ident)
if key in activation_stack:
raise EvaluationError('Reference cycle')
with Activation(activation_stack, key):
return eval_cache.get(key, lambda: thunk.eval(env))
class OnDiskFiles(object):
"""Abstraction of a file system, with search path."""
def __init__(self, search_path=[]):
self.search_path = search_path
def resolve(self, current_file, rel_path):
"""Search the filesystem."""
search_path = [path.dirname(current_file)] + self.search_path
target_path = None
for search in search_path:
if path.isfile(path.join(search, rel_path)):
target_path = path.normpath(path.join(search, rel_path))
break
if not target_path:
raise EvaluationError('No such file: %r, searched %s' %
(rel_path, ':'.join(search_path)))
return target_path, path.abspath(target_path)
def load(self, path):
with open(path, 'r') as f:
return f.read()
class InMemoryFiles(object):
"""Simulate a filesystem from an in-memory dictionary.
The dictionary maps path to file contents.
"""
def __init__(self, file_dict):
self.file_dict = file_dict
def resolve(self, current_file, rel_path):
"""Search the filesystem."""
p = path.join(path.dirname(current_file), rel_path)
if p not in self.file_dict:
raise RuntimeError('No such fake file: %r' % p)
return p, p
def load(self, path):
return self.file_dict[path]
class NormalLoader(object):
def __init__(self, fs):
self.fs = fs
self.cache = Cache()
def __call__(self, current_file, rel_path, env=None):
nice_path, full_path = self.fs.resolve(current_file, rel_path)
# Cache on full path, but tell script about nice path
do_load = lambda: loads(self.fs.load(full_path), filename=nice_path, loader=self, env=env)
return self.cache.get(full_path, do_load)
def loader_with_search_path(search_path):
return NormalLoader(OnDiskFiles(search_path))
# Default loader doesn't have any search path
default_loader = NormalLoader(OnDiskFiles())
# Python 2 and 3 compatible string check
try:
isinstance("", basestring)
def is_str(s):
return isinstance(s, basestring)
except NameError:
def is_str(s):
return isinstance(s, str)
#----------------------------------------------------------------------
# Model
#
class ParseContext(object):
def __init__(self):
self.filename = '<from string>'
self.loader = None
the_context = ParseContext()
class EmptyEnvironment(object):
def __init__(self):
self.ident = obj_ident()
def __getitem__(self, key):
raise EvaluationError('Unbound variable: %r' % key)
def __contains__(self, key):
return False
def __repr__(self):
return '<empty>'
def keys(self):
return set()
@property
def root(self):
return self
def extend(self, d):
return Environment(d or {}, self)
class SourceLocation(object):
def __init__(self, string, offset):
self.filename = the_context.filename
self.string = string
self.offset = offset
@property
def line(self):
return p.line(self.offset, self.string)
@property
def lineno(self):
return p.lineno(self.offset, self.string)
@property
def col(self):
return p.col(self.offset, self.string)
@property
def line_spec(self):
return '%s:%s' % (self.filename, self.lineno)
def error_in_context(self, msg):
msg = '%s:%d: %s in \'%s\'' % (self.filename, self.lineno, msg, self.line)
return msg
class Environment(object):
"""Binding environment, inherits from another Environment."""
def __init__(self, values, parent=None, names=None):
self.ident = obj_ident()
self.parent = parent or EmptyEnvironment()
self.values = values
self.names = names or values.keys()
def __getitem__(self, key):
if key in self.names:
return self.values[key]
return self.parent[key]
def __contains__(self, key):
if key in self.names:
return True
return key in self.parent
@property
def root(self):
if isinstance(self.parent, EmptyEnvironment):
return self
else:
return self.parent.root
def extend(self, d):
return Environment(d or {}, self)
def keys(self):
return set(self.names).union(self.parent.keys())
def __repr__(self):
return 'Environment(%s :: %r)' % (', '.join(self.names), self.parent)
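# Tiny illustration (not part of the original module): lookups fall through
# an Environment to its parent, mirroring lexical scoping.
def _environment_example():
  outer = Environment({'a': 1})
  inner = outer.extend({'b': 2})
  assert inner['a'] == 1 and inner['b'] == 2
  assert 'c' not in inner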
class Thunk(object):
def eval(self, env):
raise EvaluationError('Whoops')
class Null(Thunk):
"""Null, evaluates to None."""
def __init__(self):
self.ident = obj_ident()
def eval(self, env):
return None
def __repr__(self):
return "null";
class Void(Thunk):
"""A missing value."""
def __init__(self, name, location):
self.name = name
self.location = location
self.ident = obj_ident()
def eval(self, env):
raise EvaluationError(self.location.error_in_context('Unbound value: %r' % self.name))
def __repr__(self):
return '<unbound>'
class Inherit(Thunk):
"""Inherit Thunks can be either bound or unbound."""
def __init__(self, name=None, env=None):
self.ident = obj_ident()
self.name = name
self.env = env
def eval(self, env):
if not self.env:
raise EvaluationError("Shouldn't evaluate Inherit nodes")
return self.env[self.name]
def __repr__(self):
return 'inherit %s' % self.name
def mkInherits(tokens):
return [(t, Inherit()) for t in list(tokens)]
class Constant(Thunk):
"""A GCL constant expression."""
def __init__(self, value):
self.ident = obj_ident()
self.value = value
def eval(self, env):
return self.value
def __repr__(self):
if type(self.value) == bool:
return 'true' if self.value else 'false'
return repr(self.value)
class Var(Thunk):
"""Reference to another value."""
def __init__(self, name, location):
self.ident = obj_ident()
self.name = name
self.location = location
def eval(self, env):
try:
return env[self.name]
except EvaluationError as e:
raise EvaluationError(self.location.error_in_context('while evaluating %r' % self.name), e)
def __repr__(self):
return self.name
def mkVar(s, loc, toks):
return Var(toks[0], SourceLocation(s, loc))
class List(Thunk):
"""A GCL list."""
def __init__(self, values):
self.ident = obj_ident()
self.values = values
def eval(self, env):
return [eval(v, env) for v in self.values]
def __repr__(self):
return repr(self.values)
class ArgList(Thunk):
"""A paren-separated argument list.
This is actually a shallow wrapper for Python's list type. We can't use that
because pyparsing will automatically concatenate lists, which we don't want
in this case.
"""
def __init__(self, values):
self.ident = obj_ident()
self.values = values
def eval(self, env):
return [eval(v, env) for v in self.values]
def __repr__(self):
return '(%s)' % ', '.join(repr(x) for x in self.values)
class UnboundTuple(Thunk):
"""Unbound tuple.
When evaluating, the tuple doesn't actually evaluate its children. Instead,
we return a (lazy) Tuple object that only evaluates the elements when they're
requested.
"""
def __init__(self, kv_pairs):
self.ident = obj_ident()
self.items = dict(kv_pairs)
self._cache = Cache()
def eval(self, env):
return self._cache.get(env.ident, lambda: Tuple(self.items, env))
def __repr__(self):
return ('{' +
'; '.join('%s = %r' % (key, value) for key, value in self.items.items()) +
'}')
class TupleLike(object):
"""Interface for tuple-like objects."""
def __getitem__(self, key):
pass
def keys(self):
pass
def items(self):
pass
  def __contains__(self, key):
    return
  def __iter__(self):
    return
class Tuple(TupleLike):
"""Bound tuple, with lazy evaluation.
Contains real values or Thunks. Thunks will be evaluated upon request, but
not before.
The parent_env is the environment in which we do lookups for values that are
not in this Tuple (the lexically enclosing scope).
"""
def __init__(self, items, parent_env):
self.ident = obj_ident()
self.__items = items
self._parent_env = parent_env
self._env_cache = Cache() # Env cache so eval caching works more effectively
def dict(self):
return self.__items
def get(self, key, default=None):
if key in self:
return self[key]
return default
def __getitem__(self, key):
if type(key) == int:
raise ValueError('Trying to access tuple as a list')
x = self.get_thunk(key)
# Check if this is a Thunk that needs to be lazily evaluated before we
# return it.
if isinstance(x, Thunk):
return eval(x, self.env(self))
return x
def __contains__(self, key):
return key in self.__items
def env(self, current_scope):
"""Return an environment that will look up in current_scope for keys in
this tuple, and the parent env otherwise.
"""
return self._env_cache.get(
current_scope.ident,
lambda: Environment(current_scope, self._parent_env, names=self.keys()))
def keys(self):
return self.__items.keys()
@property
def tuples(self):
return [self]
def items(self):
return list(self.iteritems())
def iteritems(self):
for k in self.keys():
yield k, self[k]
def get_thunk(self, k):
if k not in self.__items:
raise EvaluationError('Unknown key: %r' % k)
x = self.__items[k]
# Don't evaluate in this env but parent env
if isinstance(x, Inherit):
# Change an unbound Inherit into a bound Inherit
return Inherit(k, self._parent_env)
return x
def _render(self, key):
if key in self:
return '%s = %r' % (key, self.get_thunk(key))
else:
return '%s' % key
def compose(self, tup):
if not isinstance(tup, Tuple):
tup = Tuple(tup, EmptyEnvironment())
return CompositeTuple(self.tuples + [tup])
def __repr__(self):
return '{%s}' % '; '.join(self._render(k) for k in self.keys())
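# Illustration (not part of the original module): members of a bound Tuple
# stay Thunks until they are looked up; references between sibling keys
# resolve inside the same tuple before falling back to the lexical parent.
def _tuple_lazy_example():
  t = Tuple({'x': Constant(5), 'y': Var('x', SourceLocation('x', 0))},
            EmptyEnvironment())
  assert t['x'] == 5
  assert t['y'] == 5   # 'y' references 'x' from the same tuple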
class CompositeBaseTuple(object):
"""A tuple-like object that will be used to resolve 'base' to.
This will start looking in the tuples of the composite, from right to left,
  and check the complete composite for declared values.
"""
def __init__(self, composite, index):
self.composite = composite
self.index = index
def __getitem__(self, key):
for tup, env in self.composite.lookups[self.index:]:
if key in tup:
thunk = tup.get_thunk(key)
if not isinstance(thunk, Thunk):
return thunk
if not isinstance(thunk, Void):
return eval(thunk, env)
raise EvaluationError('Unknown key in base: %r' % key)
def env_of(tup, self):
if isinstance(tup, Tuple):
return tup.env(self)
return tup
class CompositeTuple(Tuple):
"""2 or more composited tuples.
Keys are looked up from right-to-left, and every key will be evaluated in its
tuple's own environment, except the 'current_scope' will be set to the
CompositeTuple (so that declared names will be looked up in the composite
tuple).
To properly resolve the special variable 'base', we construct smaller
composite tuples which only contain the tuples to the left of each tuple,
which will get returned as the result of the expression 'base'.
"""
def __init__(self, tuples):
self.ident = obj_ident()
self._tuples = tuples
self._keys = functools.reduce(lambda s, t: s.union(t.keys()), self._tuples, set())
self._makeLookupList()
def _makeLookupList(self):
# Count index from the back because we're going to reverse
envs = [Environment({'base': CompositeBaseTuple(self, len(self.tuples) - i)}, env_of(t, self)) for i, t in enumerate(self.tuples)]
self.lookups = list(zip(self._tuples, envs))
self.lookups.reverse()
@property
def tuples(self):
return self._tuples
def __contains__(self, key):
return key in self._keys
def keys(self):
return list(self._keys)
def items(self):
return [(k, self[k]) for k in self.keys()]
def get(self, key, default=None):
if key in self:
return self[key]
return default
def compose(self, tup):
if not isinstance(tup, Tuple):
tup = Tuple(tup, EmptyEnvironment())
return CompositeTuple(self.tuples + [tup])
def __getitem__(self, key):
for tup, env in self.lookups:
if key in tup:
thunk = tup.get_thunk(key)
if not isinstance(thunk, Thunk):
return thunk # Not a thunk but a literal then
if not isinstance(thunk, Void):
return eval(thunk, env)
raise EvaluationError('Unknown key: %r' % key)
def __repr__(self):
return ' '.join(repr(t) for t in self.tuples)
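# Illustration (not part of the original module): composing tuples makes key
# lookups go right-to-left, so the right-hand tuple overrides the left-hand
# one while missing keys fall through.
def _composite_tuple_example():
  left = Tuple({'x': Constant(1), 'y': Constant(2)}, EmptyEnvironment())
  right = Tuple({'y': Constant(3)}, EmptyEnvironment())
  combined = left.compose(right)
  assert combined['x'] == 1
  assert combined['y'] == 3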
class Application(Thunk):
"""Function application."""
def __init__(self, left, right):
self.ident = obj_ident()
self.left = left
self.right = right
def eval(self, env):
fn = eval(self.left, env)
arg = eval(self.right, env)
# Normalize arg into a list of arguments, which it already is if the
# right-hand side is an ArgList, but not otherwise.
if not isinstance(self.right, ArgList):
arg = [arg]
# We now have evaluated and unevaluated versions of functor and arguments
# The evaluated ones will be used for processing, the unevaluated ones will
# be used for error reporting.
# Tuple application
if isinstance(fn, Tuple):
return self.applyTuple(fn, arg, env)
# List application
if isinstance(fn, list):
return self.applyList(fn, arg)
# Any other callable type, just use as a Python function
if not callable(fn):
raise EvaluationError('Result of %r (%r) not callable' % (self.left, fn))
if isinstance(fn, functions.EnvironmentFunction):
return fn(*arg, env=env)
return fn(*arg)
def __repr__(self):
return '%r(%r)' % (self.left, self.right)
def applyTuple(self, tuple, right, env):
"""Apply a tuple to something else."""
if len(right) != 1:
raise EvaluationError('Tuple (%r) can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
if isinstance(right, Tuple):
return CompositeTuple(tuple.tuples + right.tuples)
if is_str(right):
return tuple[right]
raise EvaluationError("Can't apply tuple (%r) to argument (%r): string or tuple expected, got %r" % (self.left, self.right, right))
def applyList(self, lst, right):
"""Apply a list to something else."""
if len(right) != 1:
raise EvaluationError('List (%r) can only be applied to one argument, got %r' % (self.left, self.right))
right = right[0]
if isinstance(right, int):
return lst[right]
raise EvaluationError("Can't apply list (%r) to argument (%r): integer expected, got %r" % (self.left, self.right, right))
def mkApplications(atoms):
"""Make a sequence of applications from a list of tokens.
atoms is a list of atoms, which will be handled left-associatively. E.g:
['foo', [], []] == foo()() ==> Application(Application('foo', []), [])
"""
atoms = list(atoms)
while len(atoms) > 1:
atoms[0:2] = [Application(atoms[0], atoms[1])]
# Nothing left to apply
return atoms[0]
class UnOp(Thunk):
def __init__(self, op, right):
self.ident = obj_ident()
self.op = op
self.right = right
def eval(self, env):
right = eval(self.right, env)
fn = functions.unary_operators.get(self.op, None)
if fn is None:
raise EvaluationError('Unknown unary operator: %s' % self.op)
return fn(right)
def __repr__(self):
return '%s%r' % (self.op, self.right)
def mkUnOp(tokens):
return UnOp(tokens[0], tokens[1])
class BinOp(Thunk):
def __init__(self, left, op, right):
self.ident = obj_ident()
self.left = left
self.op = op
self.right = right
def eval(self, env):
left = eval(self.left, env)
right = eval(self.right, env)
fn = functions.all_binary_operators.get(self.op, None)
if fn is None:
raise EvaluationError('Unknown operator: %s' % self.op)
return fn(left, right)
def __repr__(self):
return ('%r %s %r' % (self.left, self.op, self.right))
def mkBinOps(tokens):
tokens = list(tokens)
while len(tokens) > 1:
assert(len(tokens) >= 3)
tokens[0:3] = [BinOp(tokens[0], tokens[1], tokens[2])]
return tokens[0]
class Deref(Thunk):
"""Dereferencing of a dictionary-like object."""
def __init__(self, haystack, needle, location):
self.ident = obj_ident()
self.haystack = haystack
self.needle = needle
self.location = location
def eval(self, env):
try:
haystack = eval(self.haystack, env)
return haystack[self.needle]
except EvaluationError as e:
raise EvaluationError(self.location.error_in_context('while evaluating \'%r\'' % self), e)
except TypeError as e:
raise EvaluationError(self.location.error_in_context('while getting %r from %r' % (self.needle, self.haystack)), e)
def __repr__(self):
return '%s.%s' % (self.haystack, self.needle)
def mkDerefs(s, loc, tokens):
location = SourceLocation(s, loc)
tokens = list(tokens)
while len(tokens) > 1:
tokens[0:2] = [Deref(tokens[0], tokens[1], location)]
return tokens[0]
class Condition(Thunk):
def __init__(self, cond, then, else_):
self.ident = obj_ident()
self.cond = cond
self.then = then
self.else_ = else_
def eval(self, env):
if eval(self.cond, env):
return eval(self.then, env)
else:
return eval(self.else_, env)
def __repr__(self):
return 'if %r then %r else %r' % (self.cond, self.then, self.else_)
class Include(Thunk):
def __init__(self, file_ref):
self.ident = obj_ident()
self.file_ref = file_ref
self.current_file = the_context.filename
self.loader = the_context.loader
def eval(self, env):
file_ref = eval(self.file_ref, env)
if not is_str(file_ref):
raise EvaluationError('Included argument (%r) must be a string, got %r' %
(self.file_ref, file_ref))
return self.loader(self.current_file, file_ref, env=env.root)
def __repr__(self):
return 'include(%r)' % self.file_ref
class Lambda(Thunk):
def __init__(self, *args):
self.ident = obj_ident()
self.params = args[:-1]
self.expr = args[-1]
def eval(self, env):
def fn(*args):
if len(args) != len(self.params):
raise EvaluationError('Lambda (%r) passed wrong number of params (%r)' % (
self, self.params))
new_vars = {p: v for p, v in zip(self.params, args)}
new_env = env.extend(new_vars)
return eval(self.expr, new_env)
return fn
def __repr__(self):
param_str = ', '.join(map(repr, self.params))
if param_str:
param_str = ' ' + param_str
return 'lambda%s: %r' % (param_str, self.expr)
#----------------------------------------------------------------------
# Grammar
#
def sym(sym):
return p.Literal(sym).suppress()
def kw(kw):
return p.Keyword(kw).suppress()
def listMembers(sep, expr, what):
return p.Optional(p.delimitedList(expr, sep) -
p.Optional(sep).suppress()).setParseAction(
lambda ts: what(list(ts)))
def bracketedList(l, r, sep, expr, what):
"""Parse bracketed list.
Empty list is possible, as is a trailing separator.
"""
return (sym(l) - listMembers(sep, expr, what) - sym(r)).setParseAction(head)
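# For example, bracketedList('[', ']', ',', expression, List) accepts '[]',
# '[1, 2]' and '[1, 2,]' (both the empty list and a trailing separator parse).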
keywords = ['and', 'or', 'not', 'if', 'then', 'else', 'include', 'inherit', 'null', 'true', 'false', 'lambda']
expression = p.Forward()
comment = '#' + p.restOfLine
identifier = p.Regex(r'[a-zA-Z_][a-zA-Z0-9_-]*')
# Constants
integer = p.Word(p.nums).setParseAction(do(head, int, Constant))
floating = p.Regex(r'\d*\.\d+').setParseAction(do(head, float, Constant))
dq_string = p.QuotedString('"', escChar='\\', multiline=True).setParseAction(do(head, Constant))
sq_string = p.QuotedString("'", escChar='\\', multiline=True).setParseAction(do(head, Constant))
boolean = (p.Keyword('true') | p.Keyword('false')).setParseAction(do(head, mkBool, Constant))
null = p.Keyword('null').setParseAction(Null)
# List
list_ = bracketedList('[', ']', ',', expression, List)
# Tuple
inherit = (kw('inherit') - p.ZeroOrMore(identifier)).setParseAction(mkInherits)
tuple_member = (inherit
| (identifier + ~p.FollowedBy('=')).setParseAction(lambda s, loc, x: (x[0], Void(x[0], SourceLocation(s, loc))))
| (identifier - '=' - expression).setParseAction(lambda x: (x[0], x[2]))
)
tuple_members = listMembers(';', tuple_member, UnboundTuple)
tuple = bracketedList('{', '}', ';', tuple_member, UnboundTuple)
# Variable (can't be any of the keywords, which may have lower matching priority)
variable = ~p.oneOf(' '.join(keywords)) + identifier.copy().setParseAction(mkVar)
# Argument list will live by itself as an atom. Actually, it's a tuple, but we
# don't call it that because we use that term for something else already :)
arg_list = bracketedList('(', ')', ',', expression, ArgList)
parenthesized_expr = (sym('(') - expression - ')').setParseAction(head)
unary_op = (p.oneOf(' '.join(functions.unary_operators.keys())) - expression).setParseAction(mkUnOp)
if_then_else = (kw('if') - expression -
kw('then') - expression -
kw('else') - expression).setParseAction(doapply(Condition))
parameter = identifier.copy()
lambda_ = (kw('lambda') - listMembers(',', parameter, list) - sym(':') - expression).setParseAction(doapply(Lambda))
# We don't allow space-application here
# Now our grammar is becoming very dirty and hackish
deref = p.Forward()
include = (kw('include') - deref).setParseAction(doapply(Include))
atom = (tuple
| variable
| dq_string
| sq_string
| boolean
| list_
| null
| unary_op
| parenthesized_expr
| if_then_else
| lambda_
| include
| floating
| integer
)
# We have two different forms of function application, so they can have 2
# different precedences. This one: fn(args), which binds stronger than
# dereferencing (fn(args).attr == (fn(args)).attr)
applic1 = (atom - p.ZeroOrMore(arg_list)).setParseAction(mkApplications)
# Dereferencing of an expression (obj.bar)
deref << (applic1 - p.ZeroOrMore(p.Literal('.').suppress() - identifier)).setParseAction(mkDerefs)
# Juxtaposition function application (fn arg), must be 1-arg every time
applic2 = (deref - p.ZeroOrMore(deref)).setParseAction(mkApplications)
# All binary operators at various precedence levels go here:
# This piece of code does the moral equivalent of:
#
# T = F*F | F/F | F
# E = T+T | T-T | T
#
# etc.
term = applic2
for op_level in functions.binary_operators:
operator_syms = ' '.join(op_level.keys())
term = (term - p.ZeroOrMore(p.oneOf(operator_syms) - term)).setParseAction(mkBinOps)
expression << term
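# Worked example (illustrative): assuming '*' is registered in a tighter-binding
# operator level than '+', the input '1 + 2 * 3' folds into
# BinOp(Constant(1), '+', BinOp(Constant(2), '*', Constant(3))).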
# Two entry points: start at an arbitrary expression, or expect the top-level
# scope to be a tuple.
start = expression.ignore(comment)
start_tuple = tuple_members.ignore(comment)
#----------------------------------------------------------------------
# Top-level functions
#
default_env = Environment(functions.builtin_functions)
def reads(s, filename=None, loader=None, implicit_tuple=True):
"""Load but don't evaluate a GCL expression from a string."""
try:
the_context.filename = filename or '<input>'
the_context.loader = loader or default_loader
return (start_tuple if implicit_tuple else start).parseString(s, parseAll=True)[0]
except (p.ParseException, p.ParseSyntaxException) as e:
msg = '%s:%d: %s\n%s\n%s^-- here' % (the_context.filename, e.lineno, e.msg, e.line, ' ' * (e.col - 1))
raise ParseError(msg)
def read(filename, loader=None, implicit_tuple=True):
"""Load but don't evaluate a GCL expression from a file."""
with open(filename, 'r') as f:
return reads(f.read(),
filename=filename,
loader=loader,
implicit_tuple=implicit_tuple)
def loads(s, filename=None, loader=None, implicit_tuple=True, env=None):
"""Load and evaluate a GCL expression from a string."""
ast = reads(s, filename=filename, loader=loader, implicit_tuple=implicit_tuple)
return eval(ast, env or default_env)
def load(filename, loader=None, implicit_tuple=True, env=None):
"""Load and evaluate a GCL expression from a file."""
with open(filename, 'r') as f:
return loads(f.read(),
filename=filename,
loader=loader,
implicit_tuple=implicit_tuple,
env=env)
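# Hedged usage sketch (not part of the original module): exercising the entry
# points above on a tiny inline expression. The input string and the expected
# value are illustrative only.
if __name__ == '__main__':
    demo = loads('x = 2; y = x * 3')  # parse and evaluate an implicit tuple
    print(demo['y'])  # looking up 'y' evaluates its thunk; expected to print 6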
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core conversion logic, serves as main point of access."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import gast
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.converters import arg_defaults
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.converters import builtin_functions
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import conditional_expressions
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.converters import decorators
from tensorflow.python.autograph.converters import directives
from tensorflow.python.autograph.converters import error_handlers
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.converters import lists
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.converters import side_effect_guards
from tensorflow.python.autograph.converters import slices
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import errors
from tensorflow.python.autograph.core import function_wrapping
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
# TODO(mdan): Might we not need any renaming at all?
def is_whitelisted_for_graph(o):
"""Check whether an entity is whitelisted for use in graph mode.
Examples of whitelisted entities include all members of the tensorflow
package.
Args:
o: A Python entity.
Returns:
Boolean
"""
m = tf_inspect.getmodule(o)
for prefix, in config.DEFAULT_UNCOMPILED_MODULES:
if m.__name__.startswith(prefix):
return True
if hasattr(o, 'autograph_info__'):
return True
return False
def entity_to_graph(o, program_ctx, arg_values, arg_types):
"""Compile a Python entity into equivalent TensorFlow.
The function will also recursively compile all the entities that `o`
references, updating `dependency_cache`.
This function is reentrant, and relies on dependency_cache to avoid
generating duplicate code.
Args:
o: A Python entity.
program_ctx: A ProgramContext object.
arg_values: A dict containing value hints for symbols like function
parameters.
arg_types: A dict containing type hints for symbols like function
parameters.
Returns:
A tuple (ast, new_name, namespace):
* ast: An AST representing an entity with interface equivalent to `o`,
but which, when executed, creates a TensorFlow graph.
* new_name: The symbol name under which the new entity can be found.
* namespace: A dict mapping all symbols visible to the converted entity,
keyed by their symbol name.
Raises:
ValueError: if the entity type is not supported.
"""
if program_ctx.options.verbose:
logging.info('Converting {}'.format(o))
if tf_inspect.isclass(o):
node, name, ns = class_to_graph(o, program_ctx)
elif tf_inspect.isfunction(o):
# TODO(mdan): This is not a reliable mechanism.
# The most reliable way is to check the source code: the AST will contain
# a Lambda node instead of a FunctionDef.
if o.__name__ == '<lambda>':
raise NotImplementedError(
'lambda functions are not yet supported; declare the function'
' using def instead: %s' % o)
else:
node, name, ns = function_to_graph(o, program_ctx, arg_values, arg_types)
elif tf_inspect.ismethod(o):
node, name, ns = function_to_graph(o, program_ctx, arg_values, arg_types)
# TODO(mdan,yashkatariya): Remove when object conversion is implemented.
elif hasattr(o, '__class__'):
raise NotImplementedError(
'Object conversion is not yet supported. If you are '
'trying to convert code that uses an existing object, '
'try including the creation of that object in the '
'conversion. For example, instead of converting the method '
'of a class, try converting the entire class instead. '
'See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/'
'contrib/autograph/README.md#using-the-functional-api '
'for more information.')
else:
raise ValueError(
'Entity "%s" has unsupported type "%s". Only functions and classes are '
'supported for now.' % (o, type(o)))
# TODO(mdan): This is temporary. It should be created using a converter.
# TODO(mdan): The attribute should be added with a helper, not directly.
# The helper can ensure there are no collisions.
template = '''
entity.autograph_info__ = {}
'''
node.extend(templates.replace(template, entity=name))
program_ctx.add_to_cache(o, node)
if program_ctx.options.verbose:
logging.info('Compiled output of {}:\n\n{}\n'.format(
o, compiler.ast_to_source(node)))
if program_ctx.options.recursive:
while True:
candidate = None
for obj in program_ctx.name_map.keys():
if obj not in program_ctx.dependency_cache:
candidate = obj
break
if candidate is None:
break
if (hasattr(candidate, 'im_class') and
getattr(candidate, 'im_class') not in program_ctx.partial_types):
# Class members are converted with their objects, unless they're
# only converted partially.
continue
entity_to_graph(candidate, program_ctx, {}, {})
return node, name, ns
def class_to_graph(c, program_ctx):
"""Specialization of `entity_to_graph` for classes."""
converted_members = {}
method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)
members = tf_inspect.getmembers(c, predicate=method_filter)
if not members:
raise ValueError('Cannot convert %s: it has no member methods.' % c)
class_namespace = {}
for _, m in members:
# Only convert the members that are directly defined by the class.
if inspect_utils.getdefiningclass(m, c) is not c:
continue
node, _, namespace = function_to_graph(
m,
program_ctx=program_ctx,
arg_values={},
arg_types={'self': (c.__name__, c)},
owner_type=c,
rewrite_errors=False)
if class_namespace is None:
class_namespace = namespace
else:
class_namespace.update(namespace)
converted_members[m] = node[0]
namer = program_ctx.new_namer(class_namespace)
class_name = namer.compiled_class_name(c.__name__, c)
# TODO(mdan): This needs to be explained more thoroughly.
# Process any base classes: if the superclass is of a whitelisted type, an
# absolute import line is generated. Otherwise, it is marked for conversion
# (as a side effect of the call to namer.compiled_class_name() followed by
# program_ctx.update_name_map(namer)).
output_nodes = []
renames = {}
base_names = []
for base in c.__bases__:
if isinstance(object, base):
base_names.append('object')
continue
if is_whitelisted_for_graph(base):
alias = namer.new_symbol(base.__name__, ())
output_nodes.append(
gast.ImportFrom(
module=base.__module__,
names=[gast.alias(name=base.__name__, asname=alias)],
level=0))
else:
# This will trigger a conversion into a class with this name.
alias = namer.compiled_class_name(base.__name__, base)
base_names.append(alias)
renames[qual_names.QN(base.__name__)] = qual_names.QN(alias)
program_ctx.update_name_map(namer)
# Generate the definition of the converted class.
bases = [gast.Name(n, gast.Load(), None) for n in base_names]
class_def = gast.ClassDef(
class_name,
bases=bases,
keywords=[],
body=list(converted_members.values()),
decorator_list=[])
# Make a final pass to replace references to the class or its base classes.
# Most commonly, this occurs when making super().__init__() calls.
# TODO(mdan): Making direct references to superclass' superclass will fail.
class_def = qual_names.resolve(class_def)
renames[qual_names.QN(c.__name__)] = qual_names.QN(class_name)
class_def = ast_util.rename_symbols(class_def, renames)
output_nodes.append(class_def)
return output_nodes, class_name, class_namespace
def _add_reserved_symbol(namespace, name, entity):
if name not in namespace:
namespace[name] = entity
elif namespace[name] != entity:
raise ValueError('The name "%s" is reserved and may not be used.' % name)
ag_internal = None
def _add_self_references(namespace, autograph_module):
"""Adds namespace references to the module that exposes the api itself."""
global ag_internal
if ag_internal is None:
# Craft a module that exposes parts of the external API as well as certain
# internal modules.
ag_internal = imp.new_module('autograph')
ag_internal.__dict__.update(autograph_module.__dict__)
ag_internal.utils = utils
ag_internal.function_scope = function_wrapping.function_scope
ag_internal.rewrite_graph_construction_error = (
errors.rewrite_graph_construction_error)
# TODO(mdan): Add safeguards against name clashes.
# We don't want to create a submodule because we want the operators to be
# accessible as ag__.<operator>
ag_internal.__dict__.update(operators.__dict__)
_add_reserved_symbol(namespace, 'ag__', ag_internal)
def function_to_graph(f,
program_ctx,
arg_values,
arg_types,
owner_type=None,
rewrite_errors=True):
"""Specialization of `entity_to_graph` for callable functions."""
node, source = parser.parse_entity(f)
node = node.body[0]
# TODO(znado): Place inside standard_analysis.
origin_info.resolve(node, source, f)
namespace = inspect_utils.getnamespace(f)
_add_self_references(namespace, program_ctx.autograph_module)
namer = program_ctx.new_namer(namespace)
entity_info = transformer.EntityInfo(
source_code=source,
source_file='<fragment>',
namespace=namespace,
arg_values=arg_values,
arg_types=arg_types,
owner_type=owner_type)
context = converter.EntityContext(namer, entity_info, program_ctx)
node = node_to_graph(node, context, rewrite_errors=rewrite_errors)
# TODO(mdan): This somewhat duplicates the call rename logic in call_trees.py
new_name, did_rename = namer.compiled_function_name(f.__name__, f, owner_type)
if not did_rename:
new_name = f.__name__
if node.name != f.__name__:
raise NotImplementedError('Strange corner case. Send us offending code!')
node.name = new_name
program_ctx.update_name_map(namer)
# TODO(mdan): Use this at compilation.
return [node], new_name, namespace
def node_to_graph(node, context, rewrite_errors=True):
"""Convert Python code to equivalent TF graph mode code.
Args:
node: AST, the code to convert.
context: converter.EntityContext
rewrite_errors: Boolean, whether or not to rewrite the error traceback.
Returns:
A tuple (node, deps):
* node: A Python ast node, representing the converted code.
* deps: A set of strings, the fully qualified names of entity
dependencies that this node has.
"""
# TODO(mdan): Insert list_comprehensions somewhere.
node = converter.standard_analysis(node, context, is_initial=True)
# Past this point, line numbers are no longer accurate so we ignore the
# source.
# TODO(mdan): Is it feasible to reconstruct intermediate source code?
context.info.source_code = None
if context.program.options.uses(converter.Feature.DECORATORS):
node = converter.apply_(node, context, decorators)
node = converter.apply_(node, context, arg_defaults)
node = converter.apply_(node, context, directives)
node = converter.apply_(node, context, break_statements)
node = converter.apply_(node, context, asserts)
# Note: sequencing continue canonicalization before the for-loop one avoids
# dealing with the extra loop increment operation that the for-loop
# canonicalization creates.
node = converter.apply_(node, context, continue_statements)
node = converter.apply_(node, context, return_statements)
if context.program.options.uses(converter.Feature.LISTS):
node = converter.apply_(node, context, lists)
node = converter.apply_(node, context, slices)
node = converter.apply_(node, context, builtin_functions)
node = converter.apply_(node, context, call_trees)
node = converter.apply_(node, context, control_flow)
node = converter.apply_(node, context, conditional_expressions)
node = converter.apply_(node, context, logical_expressions)
if context.program.options.uses(converter.Feature.AUTO_CONTROL_DEPS):
node = converter.apply_(node, context, side_effect_guards)
node = converter.apply_(node, context, function_scopes)
if rewrite_errors:
node = converter.apply_(node, context, error_handlers)
return node
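# Hedged usage sketch (illustrative, not part of the original module): callers
# such as autograph's public API drive the conversion roughly as follows; the
# ProgramContext construction details are assumptions and vary by TF version.
#
#   program_ctx = converter.ProgramContext(...)   # options, partial_types, ...
#   nodes, new_name, namespace = entity_to_graph(
#       some_python_fn, program_ctx, arg_values={}, arg_types={})
#   print(compiler.ast_to_source(nodes))          # inspect the generated code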
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import cpp_util
import schema_util
import util_cc_helper
from cpp_namespace_environment import CppNamespaceEnvironment
class CCGenerator(object):
def __init__(self, type_generator):
self._type_generator = type_generator
def Generate(self, namespace):
return _Generator(namespace, self._type_generator).Generate()
class _Generator(object):
"""A .cc generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator):
assert type(namespace.environment) is CppNamespaceEnvironment
self._namespace = namespace
self._type_helper = cpp_type_generator
self._util_cc_helper = (
util_cc_helper.UtilCCHelper(self._type_helper))
self._generate_error_messages = namespace.compiler_options.get(
'generate_error_messages', False)
def Generate(self):
"""Generates a Code object with the .cc for a single namespace.
"""
cpp_namespace = cpp_util.GetCppNamespace(
self._namespace.environment.namespace_pattern,
self._namespace.unix_name)
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
.Append()
.Append(self._util_cc_helper.GetIncludePath())
.Append('#include "base/logging.h"')
.Append('#include "base/stl_util.h"')
.Append('#include "base/strings/string_number_conversions.h"')
.Append('#include "base/strings/utf_string_conversions.h"')
.Append('#include "%s/%s.h"' %
(self._namespace.source_file_dir, self._namespace.short_filename))
.Append('#include <set>')
.Cblock(self._type_helper.GenerateIncludes(include_soft=True))
.Append()
.Append('using base::UTF8ToUTF16;')
.Append()
.Concat(cpp_util.OpenNamespace(cpp_namespace))
)
if self._namespace.properties:
(c.Append('//')
.Append('// Properties')
.Append('//')
.Append()
)
for prop in self._namespace.properties.values():
property_code = self._type_helper.GeneratePropertyValues(
prop,
'const %(type)s %(name)s = %(value)s;',
nodoc=True)
if property_code:
c.Cblock(property_code)
if self._namespace.types:
(c.Append('//')
.Append('// Types')
.Append('//')
.Append()
.Cblock(self._GenerateTypes(None, self._namespace.types.values()))
)
if self._namespace.functions:
(c.Append('//')
.Append('// Functions')
.Append('//')
.Append()
)
for function in self._namespace.functions.values():
c.Cblock(self._GenerateFunction(function))
if self._namespace.events:
(c.Append('//')
.Append('// Events')
.Append('//')
.Append()
)
for event in self._namespace.events.values():
c.Cblock(self._GenerateEvent(event))
c.Cblock(cpp_util.CloseNamespace(cpp_namespace))
c.Append()
return c
def _GenerateType(self, cpp_namespace, type_):
"""Generates the function definitions for a type.
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
if type_.functions:
# Wrap functions within types in the type's namespace.
(c.Append('namespace %s {' % classname)
.Append())
for function in type_.functions.values():
c.Cblock(self._GenerateFunction(function))
c.Append('} // namespace %s' % classname)
elif type_.property_type == PropertyType.ARRAY:
c.Cblock(self._GenerateType(cpp_namespace, type_.item_type))
elif type_.property_type in (PropertyType.CHOICES,
PropertyType.OBJECT):
if cpp_namespace is None:
classname_in_namespace = classname
else:
classname_in_namespace = '%s::%s' % (cpp_namespace, classname)
if type_.property_type == PropertyType.OBJECT:
c.Cblock(self._GeneratePropertyFunctions(classname_in_namespace,
type_.properties.values()))
else:
c.Cblock(self._GenerateTypes(classname_in_namespace, type_.choices))
(c.Append('%s::%s()' % (classname_in_namespace, classname))
.Cblock(self._GenerateInitializersAndBody(type_))
.Append('%s::~%s() {}' % (classname_in_namespace, classname))
.Append()
)
if type_.origin.from_json:
c.Cblock(self._GenerateTypePopulate(classname_in_namespace, type_))
if cpp_namespace is None: # only generate for top-level types
c.Cblock(self._GenerateTypeFromValue(classname_in_namespace, type_))
if type_.origin.from_client:
c.Cblock(self._GenerateTypeToValue(classname_in_namespace, type_))
elif type_.property_type == PropertyType.ENUM:
(c.Cblock(self._GenerateEnumToString(cpp_namespace, type_))
.Cblock(self._GenerateEnumFromString(cpp_namespace, type_))
)
return c
def _GenerateInitializersAndBody(self, type_):
items = []
for prop in type_.properties.values():
t = prop.type_
real_t = self._type_helper.FollowRef(t)
if real_t.property_type == PropertyType.ENUM:
namespace_prefix = ('%s::' % real_t.namespace.unix_name
if real_t.namespace != self._namespace
else '')
items.append('%s(%s%s)' % (prop.unix_name,
namespace_prefix,
self._type_helper.GetEnumNoneValue(t)))
elif prop.optional:
continue
elif t.property_type == PropertyType.INTEGER:
items.append('%s(0)' % prop.unix_name)
elif t.property_type == PropertyType.DOUBLE:
items.append('%s(0.0)' % prop.unix_name)
elif t.property_type == PropertyType.BOOLEAN:
items.append('%s(false)' % prop.unix_name)
elif (t.property_type == PropertyType.ANY or
t.property_type == PropertyType.ARRAY or
t.property_type == PropertyType.BINARY or
t.property_type == PropertyType.CHOICES or
t.property_type == PropertyType.OBJECT or
t.property_type == PropertyType.FUNCTION or
t.property_type == PropertyType.REF or
t.property_type == PropertyType.STRING):
# TODO(miket): It would be nice to initialize CHOICES, but we
# don't presently have the semantics to indicate which one of a set
# should be the default.
continue
else:
raise TypeError(t)
if items:
s = ': %s' % (', '.join(items))
else:
s = ''
s = s + ' {}'
return Code().Append(s)
def _GenerateTypePopulate(self, cpp_namespace, type_):
"""Generates the function for populating a type given a pointer to it.
E.g for type "Foo", generates Foo::Populate()
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
(c.Append('// static')
.Append('bool %(namespace)s::Populate(')
.Sblock(' %s) {' % self._GenerateParams(
('const base::Value& value', '%(name)s* out'))))
if self._generate_error_messages:
c.Append('DCHECK(error);')
if type_.property_type == PropertyType.CHOICES:
for choice in type_.choices:
(c.Sblock('if (%s) {' % self._GenerateValueIsTypeExpression('value',
choice))
.Concat(self._GeneratePopulateVariableFromValue(
choice,
'(&value)',
'out->as_%s' % choice.unix_name,
'false',
is_ptr=True))
.Append('return true;')
.Eblock('}')
)
(c.Concat(self._GenerateError(
'"expected %s, got " + %s' %
(" or ".join(choice.name for choice in type_.choices),
self._util_cc_helper.GetValueTypeString('value'))))
.Append('return false;'))
elif type_.property_type == PropertyType.OBJECT:
(c.Sblock('if (!value.IsType(base::Value::TYPE_DICTIONARY)) {')
.Concat(self._GenerateError(
'"expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('value')))
.Append('return false;')
.Eblock('}'))
if type_.properties or type_.additional_properties is not None:
c.Append('const base::DictionaryValue* dict = '
'static_cast<const base::DictionaryValue*>(&value);')
if self._generate_error_messages:
c.Append('std::set<std::string> keys;')
for prop in type_.properties.itervalues():
c.Concat(self._InitializePropertyToDefault(prop, 'out'))
for prop in type_.properties.itervalues():
if self._generate_error_messages:
c.Append('keys.insert("%s");' % (prop.name))
c.Concat(self._GenerateTypePopulateProperty(prop, 'dict', 'out'))
# Check for extra values.
if self._generate_error_messages:
(c.Sblock('for (base::DictionaryValue::Iterator it(*dict); '
'!it.IsAtEnd(); it.Advance()) {')
.Sblock('if (!keys.count(it.key())) {')
.Concat(self._GenerateError('"found unexpected key \'" + '
'it.key() + "\'"'))
.Eblock('}')
.Eblock('}')
)
if type_.additional_properties is not None:
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('out->additional_properties.MergeDictionary(dict);')
else:
cpp_type = self._type_helper.GetCppType(type_.additional_properties,
is_in_container=True)
(c.Append('for (base::DictionaryValue::Iterator it(*dict);')
.Sblock(' !it.IsAtEnd(); it.Advance()) {')
.Append('%s tmp;' % cpp_type)
.Concat(self._GeneratePopulateVariableFromValue(
type_.additional_properties,
'(&it.value())',
'tmp',
'false'))
.Append('out->additional_properties[it.key()] = tmp;')
.Eblock('}')
)
c.Append('return true;')
(c.Eblock('}')
.Substitute({'namespace': cpp_namespace, 'name': classname}))
return c
def _GenerateValueIsTypeExpression(self, var, type_):
real_type = self._type_helper.FollowRef(type_)
if real_type.property_type is PropertyType.CHOICES:
return '(%s)' % ' || '.join(self._GenerateValueIsTypeExpression(var,
choice)
for choice in real_type.choices)
return '%s.IsType(%s)' % (var, cpp_util.GetValueType(real_type))
def _GenerateTypePopulateProperty(self, prop, src, dst):
"""Generate the code to populate a single property in a type.
src: base::DictionaryValue*
dst: Type*
"""
c = Code()
value_var = prop.unix_name + '_value'
c.Append('const base::Value* %(value_var)s = NULL;')
if prop.optional:
(c.Sblock(
'if (%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false')))
underlying_type = self._type_helper.FollowRef(prop.type_)
if underlying_type.property_type == PropertyType.ENUM:
namespace_prefix = ('%s::' % underlying_type.namespace.unix_name
if underlying_type.namespace != self._namespace
else '')
(c.Append('} else {')
.Append('%%(dst)s->%%(name)s = %s%s;' %
(namespace_prefix,
self._type_helper.GetEnumNoneValue(prop.type_))))
c.Eblock('}')
else:
(c.Sblock(
'if (!%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {')
.Concat(self._GenerateError('"\'%%(key)s\' is required"'))
.Append('return false;')
.Eblock('}')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false'))
)
c.Append()
c.Substitute({
'value_var': value_var,
'key': prop.name,
'src': src,
'dst': dst,
'name': prop.unix_name
})
return c
def _GenerateTypeFromValue(self, cpp_namespace, type_):
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
(c.Append('// static')
.Append('scoped_ptr<%s> %s::FromValue(%s) {' % (classname,
cpp_namespace, self._GenerateParams(('const base::Value& value',))))
)
if self._generate_error_messages:
c.Append('DCHECK(error);')
(c.Append(' scoped_ptr<%s> out(new %s());' % (classname, classname))
.Append(' if (!Populate(%s))' % self._GenerateArgs(
('value', 'out.get()')))
.Append(' return scoped_ptr<%s>();' % classname)
.Append(' return out.Pass();')
.Append('}')
)
return c
def _GenerateTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes the type into a base::Value.
E.g. for type "Foo" generates Foo::ToValue()
"""
if type_.property_type == PropertyType.OBJECT:
return self._GenerateObjectTypeToValue(cpp_namespace, type_)
elif type_.property_type == PropertyType.CHOICES:
return self._GenerateChoiceTypeToValue(cpp_namespace, type_)
else:
raise ValueError("Unsupported property type %s" % type_.type_)
def _GenerateObjectTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes an object-representing type
into a base::DictionaryValue.
"""
c = Code()
(c.Sblock('scoped_ptr<base::DictionaryValue> %s::ToValue() const {' %
cpp_namespace)
.Append('scoped_ptr<base::DictionaryValue> value('
'new base::DictionaryValue());')
.Append()
)
for prop in type_.properties.values():
prop_var = 'this->%s' % prop.unix_name
if prop.optional:
# Optional enum values are generated with a NONE enum value.
underlying_type = self._type_helper.FollowRef(prop.type_)
if underlying_type.property_type == PropertyType.ENUM:
c.Sblock('if (%s != %s) {' %
(prop_var,
self._type_helper.GetEnumNoneValue(prop.type_)))
else:
c.Sblock('if (%s.get()) {' % prop_var)
# ANY is a base::Value which is abstract and cannot be a direct member, so
# it will always be a pointer.
is_ptr = prop.optional or prop.type_.property_type == PropertyType.ANY
c.Cblock(self._CreateValueFromType(
'value->SetWithoutPathExpansion("%s", %%s);' % prop.name,
prop.name,
prop.type_,
prop_var,
is_ptr=is_ptr))
if prop.optional:
c.Eblock('}')
if type_.additional_properties is not None:
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('value->MergeDictionary(&additional_properties);')
else:
# Non-copyable types will be wrapped in a linked_ptr for inclusion in
# maps, so we need to unwrap them.
needs_unwrap = (
not self._type_helper.IsCopyable(type_.additional_properties))
(c.Sblock('for (const auto& it : additional_properties) {')
.Cblock(self._CreateValueFromType(
'value->SetWithoutPathExpansion(it.first, %s);',
type_.additional_properties.name,
type_.additional_properties,
'%sit.second' % ('*' if needs_unwrap else '')))
.Eblock('}')
)
return (c.Append()
.Append('return value.Pass();')
.Eblock('}'))
def _GenerateChoiceTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes a choice-representing type
into a base::Value.
"""
c = Code()
c.Sblock('scoped_ptr<base::Value> %s::ToValue() const {' % cpp_namespace)
c.Append('scoped_ptr<base::Value> result;')
for choice in type_.choices:
choice_var = 'as_%s' % choice.unix_name
(c.Sblock('if (%s) {' % choice_var)
.Append('DCHECK(!result) << "Cannot set multiple choices for %s";' %
type_.unix_name)
.Cblock(self._CreateValueFromType('result.reset(%s);',
choice.name,
choice,
'*%s' % choice_var))
.Eblock('}')
)
(c.Append('DCHECK(result) << "Must set at least one choice for %s";' %
type_.unix_name)
.Append('return result.Pass();')
.Eblock('}')
)
return c
def _GenerateFunction(self, function):
"""Generates the definitions for function structs.
"""
c = Code()
# TODO(kalman): use function.unix_name not Classname.
function_namespace = cpp_util.Classname(function.name)
# Windows has a #define for SendMessage, so to avoid any issues, we need
# to not use the name.
if function_namespace == 'SendMessage':
function_namespace = 'PassMessage'
(c.Append('namespace %s {' % function_namespace)
.Append()
)
# Params::Populate function
if function.params:
c.Concat(self._GeneratePropertyFunctions('Params', function.params))
(c.Append('Params::Params() {}')
.Append('Params::~Params() {}')
.Append()
.Cblock(self._GenerateFunctionParamsCreate(function))
)
# Results::Create function
if function.callback:
c.Concat(self._GenerateCreateCallbackArguments('Results',
function.callback))
c.Append('} // namespace %s' % function_namespace)
return c
def _GenerateEvent(self, event):
# TODO(kalman): use event.unix_name not Classname.
c = Code()
event_namespace = cpp_util.Classname(event.name)
(c.Append('namespace %s {' % event_namespace)
.Append()
.Cblock(self._GenerateEventNameConstant(event))
.Cblock(self._GenerateCreateCallbackArguments(None, event))
.Append('} // namespace %s' % event_namespace)
)
return c
def _CreateValueFromType(self, code, prop_name, type_, var, is_ptr=False):
"""Creates a base::Value given a type. Generated code passes ownership
to caller.
var: variable or variable*
E.g for std::string, generate new base::StringValue(var)
"""
c = Code()
underlying_type = self._type_helper.FollowRef(type_)
if underlying_type.property_type == PropertyType.ARRAY:
# Enums are treated specially because C++ templating thinks that they're
# ints, but really they're strings. So we create a vector of strings and
# populate it with the names of the enum in the array. The |ToString|
# function of the enum can be in another namespace when the enum is
# referenced. Templates can not be used here because C++ templating does
# not support passing a namespace as an argument.
item_type = self._type_helper.FollowRef(underlying_type.item_type)
if item_type.property_type == PropertyType.ENUM:
varname = ('*' if is_ptr else '') + '(%s)' % var
maybe_namespace = ''
if type_.item_type.property_type == PropertyType.REF:
maybe_namespace = '%s::' % item_type.namespace.unix_name
enum_list_var = '%s_list' % prop_name
# Scope the std::vector variable declaration inside braces.
(c.Sblock('{')
.Append('std::vector<std::string> %s;' % enum_list_var)
.Append('for (const auto& it : %s) {' % varname)
.Append('%s.push_back(%sToString(it));' % (enum_list_var,
maybe_namespace))
.Eblock('}'))
# Because the std::vector above is always created for both required and
# optional enum arrays, |is_ptr| is set to false and uses the
# std::vector to create the values.
(c.Append(code %
self._GenerateCreateValueFromType(type_, enum_list_var, False))
.Append('}'))
return c
c.Append(code % self._GenerateCreateValueFromType(type_, var, is_ptr))
return c
def _GenerateCreateValueFromType(self, type_, var, is_ptr):
"""Generates the statement to create a base::Value given a type.
type_: The type of the values being converted.
var: The name of the variable.
is_ptr: Whether |type_| is optional.
"""
underlying_type = self._type_helper.FollowRef(type_)
if (underlying_type.property_type == PropertyType.CHOICES or
underlying_type.property_type == PropertyType.OBJECT):
if is_ptr:
return '(%s)->ToValue().release()' % var
else:
return '(%s).ToValue().release()' % var
elif (underlying_type.property_type == PropertyType.ANY or
underlying_type.property_type == PropertyType.FUNCTION):
if is_ptr:
vardot = '(%s)->' % var
else:
vardot = '(%s).' % var
return '%sDeepCopy()' % vardot
elif underlying_type.property_type == PropertyType.ENUM:
maybe_namespace = ''
if type_.property_type == PropertyType.REF:
maybe_namespace = '%s::' % underlying_type.namespace.unix_name
return 'new base::StringValue(%sToString(%s))' % (maybe_namespace, var)
elif underlying_type.property_type == PropertyType.BINARY:
if is_ptr:
vardot = var + '->'
ref = var + '.get()'
else:
vardot = var + '.'
ref = '&' + var
return ('base::BinaryValue::CreateWithCopiedBuffer(vector_as_array(%s),'
' %ssize())' % (ref, vardot))
elif underlying_type.property_type == PropertyType.ARRAY:
return '%s.release()' % self._util_cc_helper.CreateValueFromArray(
var,
is_ptr)
elif underlying_type.property_type.is_fundamental:
if is_ptr:
var = '*%s' % var
if underlying_type.property_type == PropertyType.STRING:
return 'new base::StringValue(%s)' % var
else:
return 'new base::FundamentalValue(%s)' % var
else:
raise NotImplementedError('Conversion of %s to base::Value not '
'implemented' % repr(type_.type_))
def _GenerateParamsCheck(self, function, var):
"""Generates a check for the correct number of arguments when creating
Params.
"""
c = Code()
num_required = 0
for param in function.params:
if not param.optional:
num_required += 1
if num_required == len(function.params):
c.Sblock('if (%(var)s.GetSize() != %(total)d) {')
elif not num_required:
c.Sblock('if (%(var)s.GetSize() > %(total)d) {')
else:
c.Sblock('if (%(var)s.GetSize() < %(required)d'
' || %(var)s.GetSize() > %(total)d) {')
(c.Concat(self._GenerateError(
'"expected %%(total)d arguments, got " '
'+ base::IntToString(%%(var)s.GetSize())'))
.Append('return scoped_ptr<Params>();')
.Eblock('}')
.Substitute({
'var': var,
'required': num_required,
'total': len(function.params),
}))
return c
def _GenerateFunctionParamsCreate(self, function):
"""Generate function to create an instance of Params. The generated
function takes a base::ListValue of arguments.
E.g for function "Bar", generate Bar::Params::Create()
"""
c = Code()
(c.Append('// static')
.Sblock('scoped_ptr<Params> Params::Create(%s) {' % self._GenerateParams(
['const base::ListValue& args']))
)
if self._generate_error_messages:
c.Append('DCHECK(error);')
(c.Concat(self._GenerateParamsCheck(function, 'args'))
.Append('scoped_ptr<Params> params(new Params());')
)
for param in function.params:
c.Concat(self._InitializePropertyToDefault(param, 'params'))
for i, param in enumerate(function.params):
# Any failure will cause this function to return. If any argument is
# incorrect or missing, those following it are not processed. Note that
# for optional arguments, we allow missing arguments and proceed because
# there may be other arguments following it.
failure_value = 'scoped_ptr<Params>()'
c.Append()
value_var = param.unix_name + '_value'
(c.Append('const base::Value* %(value_var)s = NULL;')
.Append('if (args.Get(%(i)s, &%(value_var)s) &&')
.Sblock(' !%(value_var)s->IsType(base::Value::TYPE_NULL)) {')
.Concat(self._GeneratePopulatePropertyFromValue(
param, value_var, 'params', failure_value))
.Eblock('}')
)
if not param.optional:
(c.Sblock('else {')
.Concat(self._GenerateError('"\'%%(key)s\' is required"'))
.Append('return %s;' % failure_value)
.Eblock('}'))
c.Substitute({'value_var': value_var, 'i': i, 'key': param.name})
(c.Append()
.Append('return params.Pass();')
.Eblock('}')
.Append()
)
return c
def _GeneratePopulatePropertyFromValue(self,
prop,
src_var,
dst_class_var,
failure_value):
"""Generates code to populate property |prop| of |dst_class_var| (a
pointer) from a Value*. See |_GeneratePopulateVariableFromValue| for
semantics.
"""
return self._GeneratePopulateVariableFromValue(prop.type_,
src_var,
'%s->%s' % (dst_class_var,
prop.unix_name),
failure_value,
is_ptr=prop.optional)
def _GeneratePopulateVariableFromValue(self,
type_,
src_var,
dst_var,
failure_value,
is_ptr=False):
"""Generates code to populate a variable |dst_var| of type |type_| from a
Value* at |src_var|. The Value* is assumed to be non-NULL. In the generated
code, if |dst_var| fails to be populated then Populate will return
|failure_value|.
"""
c = Code()
underlying_type = self._type_helper.FollowRef(type_)
if underlying_type.property_type.is_fundamental:
if is_ptr:
(c.Append('%(cpp_type)s temp;')
.Sblock('if (!%s) {' % cpp_util.GetAsFundamentalValue(
self._type_helper.FollowRef(type_), src_var, '&temp'))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected ' + '%s, got " + %s' % (
type_.name,
self._util_cc_helper.GetValueTypeString(
'%%(src_var)s', True)))))
c.Append('%(dst_var)s.reset();')
if not self._generate_error_messages:
c.Append('return %(failure_value)s;')
(c.Eblock('}')
.Append('else')
.Append(' %(dst_var)s.reset(new %(cpp_type)s(temp));')
)
else:
(c.Sblock('if (!%s) {' % cpp_util.GetAsFundamentalValue(
self._type_helper.FollowRef(type_),
src_var,
'&%s' % dst_var))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected ' + '%s, got " + %s' % (
type_.name,
self._util_cc_helper.GetValueTypeString(
'%%(src_var)s', True))))
.Append('return %(failure_value)s;')
.Eblock('}')
)
elif underlying_type.property_type == PropertyType.OBJECT:
if is_ptr:
(c.Append('const base::DictionaryValue* dictionary = NULL;')
.Sblock('if (!%(src_var)s->GetAsDictionary(&dictionary)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True))))
# If an optional property fails to populate, the population can still
# succeed with a warning. If no error messages are generated, this
# warning is not set and we fail out instead.
if not self._generate_error_messages:
c.Append('return %(failure_value)s;')
(c.Eblock('}')
.Sblock('else {')
.Append('scoped_ptr<%(cpp_type)s> temp(new %(cpp_type)s());')
.Append('if (!%%(cpp_type)s::Populate(%s)) {' % self._GenerateArgs(
('*dictionary', 'temp.get()')))
.Append(' return %(failure_value)s;')
)
(c.Append('}')
.Append('else')
.Append(' %(dst_var)s = temp.Pass();')
.Eblock('}')
)
else:
(c.Append('const base::DictionaryValue* dictionary = NULL;')
.Sblock('if (!%(src_var)s->GetAsDictionary(&dictionary)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
.Append('return %(failure_value)s;')
.Eblock('}')
.Append('if (!%%(cpp_type)s::Populate(%s)) {' % self._GenerateArgs(
('*dictionary', '&%(dst_var)s')))
.Append(' return %(failure_value)s;')
.Append('}')
)
elif underlying_type.property_type == PropertyType.FUNCTION:
if is_ptr:
c.Append('%(dst_var)s.reset(new base::DictionaryValue());')
elif underlying_type.property_type == PropertyType.ANY:
c.Append('%(dst_var)s.reset(%(src_var)s->DeepCopy());')
elif underlying_type.property_type == PropertyType.ARRAY:
# util_cc_helper deals with optional and required arrays
(c.Append('const base::ListValue* list = NULL;')
.Sblock('if (!%(src_var)s->GetAsList(&list)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected list, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
)
if is_ptr and self._generate_error_messages:
c.Append('%(dst_var)s.reset();')
else:
c.Append('return %(failure_value)s;')
c.Eblock('}')
c.Sblock('else {')
item_type = self._type_helper.FollowRef(underlying_type.item_type)
if item_type.property_type == PropertyType.ENUM:
c.Concat(self._GenerateListValueToEnumArrayConversion(
item_type,
'list',
dst_var,
failure_value,
is_ptr=is_ptr))
else:
c.Sblock('if (!%s) {' % self._util_cc_helper.PopulateArrayFromList(
'list',
dst_var,
is_ptr))
c.Concat(self._GenerateError(
'"unable to populate array \'%%(parent_key)s\'"'))
if is_ptr and self._generate_error_messages:
c.Append('%(dst_var)s.reset();')
else:
c.Append('return %(failure_value)s;')
c.Eblock('}')
c.Eblock('}')
elif underlying_type.property_type == PropertyType.CHOICES:
if is_ptr:
(c.Append('scoped_ptr<%(cpp_type)s> temp(new %(cpp_type)s());')
.Append('if (!%%(cpp_type)s::Populate(%s))' % self._GenerateArgs(
('*%(src_var)s', 'temp.get()')))
.Append(' return %(failure_value)s;')
.Append('%(dst_var)s = temp.Pass();')
)
else:
(c.Append('if (!%%(cpp_type)s::Populate(%s))' % self._GenerateArgs(
('*%(src_var)s', '&%(dst_var)s')))
.Append(' return %(failure_value)s;'))
elif underlying_type.property_type == PropertyType.ENUM:
c.Concat(self._GenerateStringToEnumConversion(underlying_type,
src_var,
dst_var,
failure_value))
elif underlying_type.property_type == PropertyType.BINARY:
(c.Append('const base::BinaryValue* binary_value = NULL;')
.Sblock('if (!%(src_var)s->IsType(base::Value::TYPE_BINARY)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected binary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
)
if not self._generate_error_messages:
c.Append('return %(failure_value)s;')
(c.Eblock('}')
.Sblock('else {')
.Append(' binary_value =')
.Append(' static_cast<const base::BinaryValue*>(%(src_var)s);')
)
if is_ptr:
(c.Append('%(dst_var)s.reset(new std::vector<char>(')
.Append(' binary_value->GetBuffer(),')
.Append(' binary_value->GetBuffer() + binary_value->GetSize()));')
)
else:
(c.Append('%(dst_var)s.assign(')
.Append(' binary_value->GetBuffer(),')
.Append(' binary_value->GetBuffer() + binary_value->GetSize());')
)
c.Eblock('}')
else:
raise NotImplementedError(type_)
if c.IsEmpty():
return c
return Code().Sblock('{').Concat(c.Substitute({
'cpp_type': self._type_helper.GetCppType(type_),
'src_var': src_var,
'dst_var': dst_var,
'failure_value': failure_value,
'key': type_.name,
'parent_key': type_.parent.name,
})).Eblock('}')
def _GenerateListValueToEnumArrayConversion(self,
item_type,
src_var,
dst_var,
failure_value,
is_ptr=False):
"""Returns Code that converts a ListValue of string constants from
|src_var| into an array of enums of |type_| in |dst_var|. On failure,
returns |failure_value|.
"""
c = Code()
accessor = '.'
if is_ptr:
accessor = '->'
cpp_type = self._type_helper.GetCppType(item_type, is_in_container=True)
c.Append('%s.reset(new std::vector<%s>);' %
(dst_var, cpp_util.PadForGenerics(cpp_type)))
(c.Sblock('for (const auto& it : *(%s)) {' % src_var)
.Append('%s tmp;' % self._type_helper.GetCppType(item_type))
.Concat(self._GenerateStringToEnumConversion(item_type,
'(it)',
'tmp',
failure_value))
.Append('%s%spush_back(tmp);' % (dst_var, accessor))
.Eblock('}')
)
return c
def _GenerateStringToEnumConversion(self,
type_,
src_var,
dst_var,
failure_value):
"""Returns Code that converts a string type in |src_var| to an enum with
type |type_| in |dst_var|. In the generated code, if |src_var| is not
a valid enum name then the function will return |failure_value|.
"""
if type_.property_type != PropertyType.ENUM:
raise TypeError(type_)
c = Code()
enum_as_string = '%s_as_string' % type_.unix_name
cpp_type_namespace = ''
if type_.namespace != self._namespace:
cpp_type_namespace = '%s::' % type_.namespace.unix_name
(c.Append('std::string %s;' % enum_as_string)
.Sblock('if (!%s->GetAsString(&%s)) {' % (src_var, enum_as_string))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected string, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
.Append('return %s;' % failure_value)
.Eblock('}')
.Append('%s = %sParse%s(%s);' % (dst_var,
cpp_type_namespace,
cpp_util.Classname(type_.name),
enum_as_string))
.Sblock('if (%s == %s%s) {' % (dst_var,
cpp_type_namespace,
self._type_helper.GetEnumNoneValue(type_)))
.Concat(self._GenerateError(
'\"\'%%(key)s\': expected \\"' +
'\\" or \\"'.join(
enum_value.name
for enum_value in self._type_helper.FollowRef(type_).enum_values) +
'\\", got \\"" + %s + "\\""' % enum_as_string))
.Append('return %s;' % failure_value)
.Eblock('}')
.Substitute({'src_var': src_var, 'key': type_.name})
)
return c
def _GeneratePropertyFunctions(self, namespace, params):
"""Generates the member functions for a list of parameters.
"""
return self._GenerateTypes(namespace, (param.type_ for param in params))
def _GenerateTypes(self, namespace, types):
"""Generates the member functions for a list of types.
"""
c = Code()
for type_ in types:
c.Cblock(self._GenerateType(namespace, type_))
return c
def _GenerateEnumToString(self, cpp_namespace, type_):
"""Generates ToString() which gets the string representation of an enum.
"""
c = Code()
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
if cpp_namespace is not None:
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
c.Sblock('std::string %sToString(%s enum_param) {' %
(maybe_namespace, classname))
c.Sblock('switch (enum_param) {')
for enum_value in self._type_helper.FollowRef(type_).enum_values:
name = enum_value.name
if 'camel_case_enum_to_string' in self._namespace.compiler_options:
name = enum_value.CamelName()
(c.Append('case %s: ' % self._type_helper.GetEnumValue(type_, enum_value))
.Append(' return "%s";' % name))
(c.Append('case %s:' % self._type_helper.GetEnumNoneValue(type_))
.Append(' return "";')
.Eblock('}')
.Append('NOTREACHED();')
.Append('return "";')
.Eblock('}')
)
return c
def _GenerateEnumFromString(self, cpp_namespace, type_):
"""Generates FromClassNameString() which gets an enum from its string
representation.
"""
c = Code()
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
if cpp_namespace is not None:
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
c.Sblock('%s%s %sParse%s(const std::string& enum_string) {' %
(maybe_namespace, classname, maybe_namespace, classname))
for _, enum_value in enumerate(
self._type_helper.FollowRef(type_).enum_values):
# This is broken up into all ifs with no else ifs because we get
# "fatal error C1061: compiler limit : blocks nested too deeply"
# on Windows.
(c.Append('if (enum_string == "%s")' % enum_value.name)
.Append(' return %s;' %
self._type_helper.GetEnumValue(type_, enum_value)))
(c.Append('return %s;' % self._type_helper.GetEnumNoneValue(type_))
.Eblock('}')
)
return c
def _GenerateCreateCallbackArguments(self,
function_scope,
callback):
"""Generate all functions to create Value parameters for a callback.
E.g for function "Bar", generate Bar::Results::Create
E.g for event "Baz", generate Baz::Create
function_scope: the function scope path, e.g. Foo::Bar for the function
Foo::Bar::Baz(). May be None if there is no function scope.
callback: the Function object we are creating callback arguments for.
"""
c = Code()
params = callback.params
c.Concat(self._GeneratePropertyFunctions(function_scope, params))
(c.Sblock('scoped_ptr<base::ListValue> %(function_scope)s'
'Create(%(declaration_list)s) {')
.Append('scoped_ptr<base::ListValue> create_results('
'new base::ListValue());')
)
declaration_list = []
for param in params:
declaration_list.append(cpp_util.GetParameterDeclaration(
param, self._type_helper.GetCppType(param.type_)))
c.Cblock(self._CreateValueFromType('create_results->Append(%s);',
param.name,
param.type_,
param.unix_name))
c.Append('return create_results.Pass();')
c.Eblock('}')
c.Substitute({
'function_scope': ('%s::' % function_scope) if function_scope else '',
'declaration_list': ', '.join(declaration_list),
'param_names': ', '.join(param.unix_name for param in params)
})
return c
def _GenerateEventNameConstant(self, event):
"""Generates a constant string array for the event name.
"""
c = Code()
c.Append('const char kEventName[] = "%s.%s";' % (
self._namespace.name, event.name))
return c
def _InitializePropertyToDefault(self, prop, dst):
"""Initialize a model.Property to its default value inside an object.
E.g for optional enum "state", generate dst->state = STATE_NONE;
dst: Type*
"""
c = Code()
underlying_type = self._type_helper.FollowRef(prop.type_)
if (underlying_type.property_type == PropertyType.ENUM and
prop.optional):
namespace_prefix = ('%s::' % underlying_type.namespace.unix_name
if underlying_type.namespace != self._namespace
else '')
c.Append('%s->%s = %s%s;' % (
dst,
prop.unix_name,
namespace_prefix,
self._type_helper.GetEnumNoneValue(prop.type_)))
return c
def _GenerateError(self, body):
"""Generates an error message pertaining to population failure.
E.g 'expected bool, got int'
"""
c = Code()
if not self._generate_error_messages:
return c
(c.Append('if (error->length())')
.Append(' error->append(UTF8ToUTF16("; "));')
.Append('error->append(UTF8ToUTF16(%s));' % body))
return c
def _GenerateParams(self, params):
"""Builds the parameter list for a function, given an array of parameters.
"""
if self._generate_error_messages:
params = list(params) + ['base::string16* error']
return ', '.join(str(p) for p in params)
def _GenerateArgs(self, args):
"""Builds the argument list for a function, given an array of arguments.
"""
if self._generate_error_messages:
args = list(args) + ['error']
return ', '.join(str(a) for a in args)
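# Hedged usage sketch (illustrative, not part of the original file): the schema
# compiler driver typically pairs this generator with a C++ type generator and
# renders one namespace at a time; exact construction of those objects is an
# assumption here.
#
#   cc_generator = CCGenerator(cpp_type_generator)
#   cc_source = cc_generator.Generate(namespace).Render()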
|
|
#!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto
import ecto_test
from util import fail
import sys, time
print "Hardware concurrency is", ecto.hardware_concurrency()
eps = 0.05
def makeplasm():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
sleep0 = ecto_test.Sleep("Sleep_0", seconds=0.1)
sleep1 = ecto_test.Sleep("Sleep_1", seconds=0.1)
plasm.connect(ping[:] >> sleep0[:], sleep0[:] >> sleep1[:])
return plasm
def do_test(fn):
def impl(Sched):
times = { ecto.schedulers.Singlethreaded : 1.0,
ecto.schedulers.Multithreaded : 0.6 }
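        # Expected runtimes (assumption based on the plasm above): 5 iterations
        # of two 0.1 s sleeps run back-to-back when singlethreaded (~1.0 s),
        # while the multithreaded scheduler can pipeline the two sleep cells
        # (~0.6 s).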
print "*"*80, "\n", fn.__name__, Sched.__name__
p = makeplasm()
s = Sched(p)
t = times[Sched]
print "Expecting finish in", t, "seconds"
fn(s, times[Sched])
map(impl, ecto.test.schedulers)
def sync(s, ex):
t = time.time()
assert not s.running()
print "starting"
s.execute(niter=5)
dur = time.time() - t
print "done after", dur
assert dur > ex
assert dur < (ex + eps)
assert not s.running()
def synctwice(s, ex):
start_t = time.time()
assert not s.running()
print "starting"
s.execute(niter=5)
dur = time.time() - start_t
print "HALFWAY:", dur
assert dur > ex
assert dur < ex + eps
s.execute(niter=5)
dur = time.time() - start_t
print "SECONDTIME:", dur
assert dur > (ex*2)
assert dur < ((ex*2) + eps)
assert not s.running()
def ex_async_twice(s, ex):
s.execute_async(niter=5)
print "once..."
assert s.running()
t = time.time()
try:
print "twice..."
s.execute_async(niter=5)
fail("that should have thrown")
except ecto.EctoException, e:
print "okay, threw"
print "whee"
s.wait()
elapsed = time.time() - t
print "elapsed:", elapsed, "expected:", ex
assert elapsed > ex
assert elapsed < (ex + eps)
def ex_async_then_sync_throws(s, ex):
s.execute_async(niter=5)
print "once..."
assert s.running()
t = time.time()
try:
print "twice..."
s.execute(niter=5)
fail("that should have thrown")
except ecto.EctoException, e:
print "okay, threw"
print "whee"
s.wait()
elapsed = time.time() - t
print "elapsed:", elapsed, "expected:", ex
assert elapsed > ex
assert elapsed < (ex + eps)
def wait_on_nothing(s, ex):
stime = time.time()
assert not s.running()
s.wait()
assert not s.running()
etime = time.time()
print etime-stime
assert eps > etime-stime
def running_check(s, ex):
assert not s.running()
s.execute_async(niter=5)
assert s.running()
time.sleep(ex+eps)
assert not s.running()
def wait_check(s, ex):
print __name__, s
t = time.time()
s.execute_async(niter=5)
assert time.time() - t < ex
s.wait()
print time.time() - t > ex+eps # we might be multithreaded
assert not s.running()
do_test(wait_on_nothing)
do_test(ex_async_then_sync_throws)
do_test(ex_async_twice)
do_test(sync)
do_test(synctwice)
do_test(running_check)
do_test(wait_check)
# Verify that the multithreaded scheduler finishes its in-progress iteration
# (roughly one second here) measured from the time stop() was called, not from
# the initial start
def stoppable_multi():
hc = ecto.hardware_concurrency()
def makeplasm():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
sleeps = [ecto_test.Sleep("Sleep_0", seconds=1.0/hc)
for x in range(hc)]
plasm.connect(ping[:] >> sleeps[0][:])
for i in range(1,hc-1):
print "i=", i
plasm.connect(sleeps[i][:] >> sleeps[i+1][:])
return plasm
p = makeplasm()
st = ecto.schedulers.Multithreaded(p)
st.execute_async()
time.sleep(1.3) # wait until we are in steady state
start = time.time()
st.stop()
st.wait()
elapsed = time.time() - start
print "elapsed multithreaded:", elapsed
# we'll be partially through an iteration that has just started
print "hc=", hc, "(hc-1.0)/hc=", ((hc-1.0)/hc)
assert elapsed >= (hc-1.0)/hc
assert elapsed <= (1.0 + eps)
st.execute_async()
time.sleep(1.0)
# this time the start is just before stop is called, not
# when execute was called
start = time.time()
st.stop()
st.wait()
elapsed = time.time() - start
mintime = (hc-1.0)/hc
maxtime = 1.0 + (1.0/hc)
print "elapsed multithreade:", elapsed, "expected min:", mintime, \
"expected max:", maxtime
assert elapsed >= mintime
assert elapsed <= maxtime
stoppable_multi()
#
# Verify that the singlethreaded scheduler finishes its in-progress iteration
# (roughly two seconds here) after stop() is called
#
def stoppable():
def makeplasm():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
sleeps = [ecto_test.Sleep("Sleep_0", seconds=0.1)
for x in range(20)]
plasm.connect(ping[:] >> sleeps[0][:])
for i in range(1,19):
print "i=", i
plasm.connect(sleeps[i][:] >> sleeps[i+1][:])
return plasm
p = makeplasm()
st = ecto.schedulers.Singlethreaded(p)
start = time.time()
st.execute_async()
time.sleep(0.01)
st.stop()
st.wait()
elapsed = time.time() - start
print "elapsed singlethreaded:", elapsed
assert elapsed > 2.0
assert elapsed < 2.1
start = time.time()
st.execute_async()
time.sleep(1.0)
st.stop()
st.wait()
elapsed = time.time() - start
print "elapsed singlethreaded:", elapsed
assert elapsed > 2.0
assert elapsed < 2.1
stoppable()
|
|
import skimage.io # bug. need to import this before tensorflow
import skimage.transform # bug. need to import this before tensorflow
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import moving_averages
from config import Config
import datetime
import numpy as np
import os
import time
MOVING_AVERAGE_DECAY = 0.9997
BN_DECAY = MOVING_AVERAGE_DECAY
BN_EPSILON = 0.001
CONV_WEIGHT_DECAY = 0.00004
CONV_WEIGHT_STDDEV = 0.1
FC_WEIGHT_DECAY = 0.00004
FC_WEIGHT_STDDEV = 0.01
RESNET_VARIABLES = 'resnet_variables'
UPDATE_OPS_COLLECTION = 'resnet_update_ops' # must be grouped with training op
IMAGENET_MEAN_BGR = [103.062623801, 115.902882574, 123.151630838, ]
tf.app.flags.DEFINE_integer('input_size', 224, "input image size")
activation = tf.nn.relu
def inference(x, is_training,
num_classes=1000,
num_blocks=[3, 4, 6, 3], # defaults to 50-layer network
use_bias=False, # defaults to using batch norm
bottleneck=True):
c = Config()
c['bottleneck'] = bottleneck
c['is_training'] = tf.convert_to_tensor(is_training,
dtype='bool',
name='is_training')
c['ksize'] = 3
c['stride'] = 1
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['stack_stride'] = 2
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 64
c['ksize'] = 7
c['stride'] = 2
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('scale2'):
x = _max_pool(x, ksize=3, stride=2)
c['num_blocks'] = num_blocks[0]
c['stack_stride'] = 1
c['block_filters_internal'] = 64
x = stack(x, c)
with tf.variable_scope('scale3'):
c['num_blocks'] = num_blocks[1]
c['block_filters_internal'] = 128
assert c['stack_stride'] == 2
x = stack(x, c)
with tf.variable_scope('scale4'):
c['num_blocks'] = num_blocks[2]
c['block_filters_internal'] = 256
x = stack(x, c)
with tf.variable_scope('scale5'):
c['num_blocks'] = num_blocks[3]
c['block_filters_internal'] = 512
x = stack(x, c)
# post-net
x = tf.reduce_mean(x, axis=[1, 2], name="avg_pool")
    if num_classes is not None:
with tf.variable_scope('fc'):
x = fc(x, c)
return x
# This is what they use for CIFAR-10 and 100.
# See Section 4.2 in http://arxiv.org/abs/1512.03385
def inference_small(x,
is_training,
num_blocks=3, # 6n+2 total weight layers will be used.
use_bias=False, # defaults to using batch norm
num_classes=10):
c = Config()
c['is_training'] = tf.convert_to_tensor(is_training,
dtype='bool',
name='is_training')
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['num_classes'] = num_classes
    return inference_small_config(x, c)
def inference_small_config(x, c):
c['bottleneck'] = False
c['ksize'] = 3
c['stride'] = 1
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 16
c['block_filters_internal'] = 16
c['stack_stride'] = 1
x = conv(x, c)
x = bn(x, c)
x = activation(x)
x = stack(x, c)
with tf.variable_scope('scale2'):
c['block_filters_internal'] = 32
c['stack_stride'] = 2
x = stack(x, c)
with tf.variable_scope('scale3'):
c['block_filters_internal'] = 64
c['stack_stride'] = 2
x = stack(x, c)
# post-net
x = tf.reduce_mean(x, axis=[1, 2], name="avg_pool")
    if c['num_classes'] is not None:
with tf.variable_scope('fc'):
x = fc(x, c)
return x
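# Illustrative sketch (not part of the original script): how inference_small
# might be wired up for CIFAR-10 sized input. `images` and `labels` are
# placeholder names, not defined in this file.
#
#   images = tf.placeholder('float', [None, 32, 32, 3])
#   labels = tf.placeholder('int32', [None])
#   logits = inference_small(images, is_training=True, num_blocks=3,
#                            num_classes=10)
#   total_loss = loss(logits, labels)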
def _imagenet_preprocess(rgb):
"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb * 255.0)
bgr = tf.concat(axis=3, values=[blue, green, red])
bgr -= IMAGENET_MEAN_BGR
return bgr
def loss(logits, labels):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_ = tf.add_n([cross_entropy_mean] + regularization_losses)
tf.summary.scalar('loss', loss_)
return loss_
def stack(x, c):
for n in range(c['num_blocks']):
s = c['stack_stride'] if n == 0 else 1
c['block_stride'] = s
with tf.variable_scope('block%d' % (n + 1)):
x = block(x, c)
return x
def block(x, c):
filters_in = x.get_shape()[-1]
    # Note: filters_out is not always the number of filters the block outputs.
    # That is only true when bottleneck=False; when bottleneck=True the block
    # outputs filters_internal * 4 filters. filters_internal is the number of
    # filters the 3x3 convs use internally.
m = 4 if c['bottleneck'] else 1
filters_out = m * c['block_filters_internal']
shortcut = x # branch 1
c['conv_filters_out'] = c['block_filters_internal']
if c['bottleneck']:
with tf.variable_scope('a'):
c['ksize'] = 1
c['stride'] = c['block_stride']
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('b'):
c['ksize'] = 3
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('c'):
c['conv_filters_out'] = filters_out
c['ksize'] = 1
assert c['stride'] == 1
x = conv(x, c)
x = bn(x, c)
else:
with tf.variable_scope('A'):
c['stride'] = c['block_stride']
assert c['ksize'] == 3
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('B'):
c['conv_filters_out'] = filters_out
assert c['ksize'] == 3
assert c['stride'] == 1
x = conv(x, c)
x = bn(x, c)
with tf.variable_scope('shortcut'):
if filters_out != filters_in or c['block_stride'] != 1:
c['ksize'] = 1
c['stride'] = c['block_stride']
c['conv_filters_out'] = filters_out
shortcut = conv(shortcut, c)
shortcut = bn(shortcut, c)
return activation(x + shortcut)
def bn(x, c):
x_shape = x.get_shape()
params_shape = x_shape[-1:]
if c['use_bias']:
bias = _get_variable('bias', params_shape,
initializer=tf.zeros_initializer())
return x + bias
axis = list(range(len(x_shape) - 1))
beta = _get_variable('beta',
params_shape,
initializer=tf.zeros_initializer())
gamma = _get_variable('gamma',
params_shape,
initializer=tf.ones_initializer())
moving_mean = _get_variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer(),
trainable=False)
moving_variance = _get_variable('moving_variance',
params_shape,
initializer=tf.ones_initializer(),
trainable=False)
    # These ops will only be performed when training.
mean, variance = tf.nn.moments(x, axis)
update_moving_mean = moving_averages.assign_moving_average(moving_mean,
mean, BN_DECAY)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, BN_DECAY)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
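    # Use the batch statistics while training and the moving averages at
    # inference time.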
mean, variance = control_flow_ops.cond(
c['is_training'], lambda: (mean, variance),
lambda: (moving_mean, moving_variance))
x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
#x.set_shape(inputs.get_shape()) ??
return x
def fc(x, c):
num_units_in = x.get_shape()[1]
num_units_out = c['fc_units_out']
weights_initializer = tf.truncated_normal_initializer(
stddev=FC_WEIGHT_STDDEV)
weights = _get_variable('weights',
shape=[num_units_in, num_units_out],
initializer=weights_initializer,
                            weight_decay=FC_WEIGHT_DECAY)
biases = _get_variable('biases',
shape=[num_units_out],
initializer=tf.zeros_initializer())
x = tf.nn.xw_plus_b(x, weights, biases)
return x
def _get_variable(name,
shape,
initializer,
weight_decay=0.0,
dtype='float',
                  trainable=True):
    """A little wrapper around tf.get_variable to do weight decay and add
    variables to the resnet collection."""
if weight_decay > 0:
regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
else:
regularizer = None
collections = [tf.GraphKeys.GLOBAL_VARIABLES, RESNET_VARIABLES]
return tf.get_variable(name,
shape=shape,
initializer=initializer,
dtype=dtype,
regularizer=regularizer,
collections=collections,
trainable=trainable)
def conv(x, c):
ksize = c['ksize']
stride = c['stride']
filters_out = c['conv_filters_out']
filters_in = x.get_shape()[-1]
shape = [ksize, ksize, filters_in, filters_out]
initializer = tf.truncated_normal_initializer(stddev=CONV_WEIGHT_STDDEV)
weights = _get_variable('weights',
shape=shape,
dtype='float',
initializer=initializer,
weight_decay=CONV_WEIGHT_DECAY)
return tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
def _max_pool(x, ksize=3, stride=2):
return tf.nn.max_pool(x,
ksize=[1, ksize, ksize, 1],
strides=[1, stride, stride, 1],
padding='SAME')
|
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Predicting Patient Retention Rates
# <markdowncell>
# Here I am looking for a simple method to predict which patients are likely to return. My idea is to look at the average time between visits across all patients and then across this specific patient.
# <codecell>
from __future__ import division
import os, os.path
import sys
import pandas as pd
import numpy as np
sys.path.append('/home/will/HIVReportGen/AnalysisCode/')
sys.path.append('/home/will/PySeqUtils/')
sys.path.append('/home/will/PatientPicker/')
import LoadingTools
#os.chdir('/home/will/HIVVariation/')
# <codecell>
store = pd.HDFStore('/home/will/HIVReportGen/Data/SplitRedcap/2013-01-16/EntireCohort.hdf')
redcap_data = store['redcap']
seq_data = store['seq_data']
t = redcap_data['Event Name'].dropna().apply(lambda x: x.split(' - ')[0])
t.unique()
redcap_data['VisitNum'] = redcap_data['Patient visit number'].combine_first(t)
redcap_data['VisitNum'][redcap_data['VisitNum']=='A03'] = 'R03'
# <codecell>
fields = ['Patient ID', 'VisitNum', 'Date of visit']
data = redcap_data[fields].rename(columns = {'Date of visit':'Date'})
# <headingcell level=2>
# Data Description
# <markdowncell>
# Here I define my return or not-return patients. In this case I'm defining every patient that _actually_ returned as True and every patient for whom it has been more than 365\*2 days since their last visit (using 1/16/2013 as the 'current date') as False. If it has been less than two years, I exclude that visit from the analysis.
# <codecell>
from datetime import datetime
def get_diff_days(inser):
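    # np.diff on datetime64[ns] values yields nanoseconds; divide by
    # 1e9*60*60*24 to convert to days.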
return np.diff(inser)/(1e9*60*60*24)
def get_visit_diffs(inpat):
nvisit = pd.DataFrame({
'Date':[datetime(2013,1,16)],
'VisitNum':['RN']
})
ndata = pd.concat([inpat, nvisit], axis = 0, ignore_index=True)
ndata.sort('Date', inplace=True)
ndata['DiffDate'] = pd.rolling_apply(ndata['Date'], 2, get_diff_days)
return ndata.set_index('VisitNum').drop('Patient ID', axis = 1)
odata = data.groupby('Patient ID').apply(get_visit_diffs).dropna()
print odata.head(n=30)
# <codecell>
from scipy.stats import norm
cohort_level_data = odata.groupby(level=1)['DiffDate'].agg({'std':'std',
'mean':'mean',
'count':'count'})
print cohort_level_data
# <markdowncell>
# Above is a table of the average times between visits. This only includes patients that _actually returned_ for the R0X visit. You can see that for the first few visits the average is well above the 6-month goal but it levels out around R05.
#
# <headingcell level=2>
# Prediction
# <markdowncell>
# Here I'm building a cohort-level 'Survival Function'. In this case I'm using the mean and std of between-visit times for all patients at each visit, assuming a Gaussian distribution. Then I build a patient-level 'Survival Function' based on their between-visit times. For patients with fewer than 3 visits I build the SF from all patients.
# <codecell>
cohort_norm = {}
for key, row in cohort_level_data.iterrows():
cohort_norm[key] = norm(loc = row['mean'], scale = row['std'])
# <codecell>
pat_mu = odata['DiffDate'].drop('RN', axis=0, level=1).mean()
pat_std = odata['DiffDate'].drop('RN', axis=0, level=1).std()
def add_sf_data(inpat):
if len(inpat) > 3:
mu = inpat['DiffDate'].mean()
st = inpat['DiffDate'].std()
obj = norm(loc=mu, scale=st)
else:
obj = norm(loc=pat_mu, scale=pat_std)
inpat['CohortSF'] = np.nan
inpat['PatSF'] = np.nan
inpat['Returned'] = True
inpat['Returned'].iloc[-1] = np.nan if inpat['DiffDate'].iloc[-1] < 365*2 else False
for key, row in inpat.iterrows():
inpat['CohortSF'].ix[key] = cohort_norm[key[1]].sf(row['DiffDate'])
inpat['PatSF'].ix[key] = obj.sf(row['DiffDate'])
return inpat
ndata = odata.groupby(level=0).apply(add_sf_data)
print ndata[['DiffDate', 'CohortSF', 'PatSF', 'Returned']].head(n = 30)
# <markdowncell>
# Guessing at how to combine these two Survival Functions is difficult. As such I'm using the SKLearn package to build a DecisionTree and a Naive-Bayes predictor using ONLY THESE PARAMETERS. This has the advantage of not directly biasing any future selection from these results. I'm also comparing this to a simple DummyClassifier which will guess that all patients return.
# <codecell>
from sklearn.metrics import auc_score, zero_one_loss, roc_curve
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.cross_validation import cross_val_score, Bootstrap
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
X = ndata.dropna()[['CohortSF', 'PatSF']].values
y = ndata.dropna()['Returned'].values
# <codecell>
import matplotlib.pyplot as plt
from collections import defaultdict
classifiers = [(DecisionTreeClassifier(), 'DecisionTree', 'r'),
(GaussianNB(), 'NaiveBayes', 'g'),
(AdaBoostClassifier(), 'Adaboost', 'c'),
(LogisticRegression(), 'Logistic', 'k'),
(DummyClassifier(), 'Dummy', 'b')]
plt.figure(figsize = (10,10))
losses = defaultdict(float)
nreps = 5
for train, test in Bootstrap(len(y), nreps):
for pred, name, color in classifiers:
fitted = pred.fit(X[train, :], y[train])
pred_y = fitted.predict_proba(X[test, :])
fpr, tpr, _ = roc_curve(y[test], pred_y[:,1])
plt.plot(fpr, tpr, color, label=name)
losses[name] += zero_one_loss(y[test], fitted.predict(X[test, :]))/nreps
plt.xlabel('False-Positive-Rate')
plt.ylabel('True-Positive-Rate')
plt.legend(['DecisionTree', 'NaiveBayes', 'Adaboost', 'Logistic', 'Dummy'], 'lower right');
# <markdowncell>
# The ROC curve is a common method to look at the effectiveness of a classifier. It measures the trade-off between the true-positive rate and the false-positive rate. The larger the Area Under the Curve, the better the classifier. A random coin flip would have an area of 0.5 (the blue line).
# <codecell>
scores = np.array([losses[name] for name in ['DecisionTree', 'NaiveBayes', 'Adaboost', 'Logistic', 'Dummy']])
plt.bar([1, 2, 3,4,5], scores, width = 0.5)
plt.ylabel('%Miss-classified')
plt.xticks([1.25,2.25,3.25,4.25,5.25], ['DecisionTree', 'NaiveBayes', 'Adaboost', 'Logistic', 'Dummy']);
# <markdowncell>
# This graph shows the effectiveness of each of the classifiers. The y-axis represents the fraction of misclassified samples (averaged over 5 trials). We can see that the DecisionTree has only a ~3% likelihood of misclassifying a patient as return or not-return. We can use this classifier to prioritize which patients we call for return visits.
# <codecell>
def expand_sf_data(inpat):
dates = pd.date_range('1/1/2013', periods = 30, freq = 'M')
if len(inpat) > 3:
mu = inpat['DiffDate'].mean()
st = inpat['DiffDate'].std()
obj = norm(loc=mu, scale=st)
else:
obj = norm(loc=pat_mu, scale=pat_std)
outdata = pd.DataFrame(columns = ['CohortSF', 'PatSF', 'LastVisit', 'DiffDays'],
index = pd.Index(dates, name = 'Date'))
try:
ldate = inpat.iloc[-2]['Date']
lvisit = inpat.index[-2][1]
except IndexError:
lvisit = 'R01'
ldate = inpat.iloc[0]['Date']
outdata['LastVisit'] = lvisit
for date in dates:
diff_date = (date - ldate).days
outdata.ix[date]['CohortSF'] = cohort_norm[lvisit].sf(diff_date)
outdata.ix[date]['PatSF'] = obj.sf(diff_date)
outdata.ix[date]['DiffDays'] = diff_date
return outdata
edata = odata.groupby(level=0).apply(expand_sf_data)
# <codecell>
X = ndata.dropna()[['CohortSF', 'PatSF']].values
y = ndata.dropna()['Returned'].values
predictor = AdaBoostClassifier().fit(X,y)
edata['LikelyReturn'] = predictor.predict(edata[['CohortSF', 'PatSF']].values)
# <codecell>
date_count = edata.groupby(level = 'Date')['LikelyReturn'].sum()
date_count.plot(figsize=(15,10))
plt.title('Returnable Cohort Size')
plt.xlabel('Starting Date')
plt.ylabel('Patients Likely To Return')
# <codecell>
print date_count.diff().mean(), 'Patients lost per month wasted!'
# <markdowncell>
# The above figure shows the number of patients that are predicted to return if called, given a particular starting date. We can see that the longer we wait, the fewer patients we can keep for longitudinal analysis. From this graph we can estimate that we lose ~1.5 patients per week of delay before starting!
# <headingcell level=2>
# Make Call-Back Sheets
# <markdowncell>
# Here I want to make a set of sheets for the clinic to use to re-call patients. For each month I'll make a list (sorted by likelihood) of patients who are likely to return. I'll also mark which patients have 3+ visits and should therefore be seen by the neurologist.
# <codecell>
pred_pat_data = edata.swaplevel(0, 1)
pred_pat_data['ProbReturn'] = predictor.predict_proba(pred_pat_data[['CohortSF', 'PatSF']].values)[:,1]
out_file = '/home/will/RedcapQC/CallbackPats/HIV_DrexelMed_GeneticAnalysisCohort_recall_list.xlsx'
writer = pd.ExcelWriter(out_file)
sheet_name = '%i-%i'
for tdate, rows in pred_pat_data.groupby(level=0):
if tdate > datetime.now():
srows = rows.sort(['ProbReturn', 'LastVisit'], ascending=False)
srows['Neuro'] = ''
srows['Neuro'][srows['LastVisit']>='R03'] = 'Prefered Neuropsych visit'
rem_days = srows['DiffDays'] % 180
month_mask = (rem_days < 31)
tmp = srows[['Neuro']][month_mask].reset_index()
print tdate, month_mask.sum()
ndate = tdate.to_datetime()
tmp[['Patient ID', 'Neuro']].to_excel(writer,
sheet_name % (ndate.year, ndate.month),
index=False)
writer.save()
# <codecell>
data['Date'].map(lambda x: x.month).value_counts()
# <codecell>
|
|
# Copyright 2012 Grid Dynamics
# Copyright 2013 Inktank Storage, Inc.
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import tpool
from six.moves import urllib
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
from nova.compute import task_states
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
class RbdProxy(object):
"""A wrapper around rbd.RBD class instance to avoid blocking of process.
Offloads all calls to rbd.RBD class methods to native OS threads, so that
we do not block the whole process while executing the librbd code.
"""
def __init__(self):
self._rbd = tpool.Proxy(rbd.RBD())
def __getattr__(self, attr):
return getattr(self._rbd, attr)
class RBDVolumeProxy(object):
"""Context manager for dealing with an existing rbd volume.
This handles connecting to rados and opening an ioctx automatically, and
otherwise acts like a librbd Image object.
The underlying librados client and ioctx can be accessed as the attributes
'client' and 'ioctx'.
"""
def __init__(self, driver, name, pool=None, snapshot=None,
read_only=False):
client, ioctx = driver._connect_to_rados(pool)
try:
snap_name = snapshot.encode('utf8') if snapshot else None
self.volume = tpool.Proxy(rbd.Image(ioctx, name.encode('utf8'),
snapshot=snap_name,
read_only=read_only))
except rbd.ImageNotFound:
with excutils.save_and_reraise_exception():
LOG.debug("rbd image %s does not exist", name)
driver._disconnect_from_rados(client, ioctx)
except rbd.Error:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
self.driver = driver
self.client = client
self.ioctx = ioctx
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
try:
self.volume.close()
finally:
self.driver._disconnect_from_rados(self.client, self.ioctx)
def __getattr__(self, attrib):
return getattr(self.volume, attrib)
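# Illustrative usage sketch (not part of the original module): RBDVolumeProxy
# is meant to be used as a context manager and forwards unknown attributes to
# the wrapped rbd.Image, e.g.
#
#   with RBDVolumeProxy(driver, 'volume-name', read_only=True) as vol:
#       size_bytes = vol.size()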
class RADOSClient(object):
"""Context manager to simplify error handling for connecting to ceph."""
def __init__(self, driver, pool=None):
self.driver = driver
self.cluster, self.ioctx = driver._connect_to_rados(pool)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
@property
def features(self):
features = self.cluster.conf_get('rbd_default_features')
if ((features is None) or (int(features) == 0)):
features = rbd.RBD_FEATURE_LAYERING
return int(features)
class RBDDriver(object):
def __init__(self, pool, ceph_conf, rbd_user):
self.pool = pool.encode('utf8')
# NOTE(angdraug): rados.Rados fails to connect if ceph_conf is None:
# https://github.com/ceph/ceph/pull/1787
self.ceph_conf = ceph_conf.encode('utf8') if ceph_conf else ''
self.rbd_user = rbd_user.encode('utf8') if rbd_user else None
if rbd is None:
raise RuntimeError(_('rbd python libraries not found'))
def _connect_to_rados(self, pool=None):
client = rados.Rados(rados_id=self.rbd_user,
conffile=self.ceph_conf)
try:
client.connect()
pool_to_open = pool or self.pool
ioctx = client.open_ioctx(pool_to_open.encode('utf-8'))
return client, ioctx
except rados.Error:
# shutdown cannot raise an exception
client.shutdown()
raise
def _disconnect_from_rados(self, client, ioctx):
# closing an ioctx cannot raise an exception
ioctx.close()
client.shutdown()
def ceph_args(self):
"""List of command line parameters to be passed to ceph commands to
reflect RBDDriver configuration such as RBD user name and location
of ceph.conf.
"""
args = []
if self.rbd_user:
args.extend(['--id', self.rbd_user])
if self.ceph_conf:
args.extend(['--conf', self.ceph_conf])
return args
def get_mon_addrs(self):
args = ['ceph', 'mon', 'dump', '--format=json'] + self.ceph_args()
out, _ = utils.execute(*args)
lines = out.split('\n')
if lines[0].startswith('dumped monmap epoch'):
lines = lines[1:]
monmap = jsonutils.loads('\n'.join(lines))
addrs = [mon['addr'] for mon in monmap['mons']]
hosts = []
ports = []
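        # Each monitor address looks like 'host:port/nonce' (IPv6 hosts are
        # wrapped in brackets), so strip the nonce and the brackets below.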
for addr in addrs:
host_port = addr[:addr.rindex('/')]
host, port = host_port.rsplit(':', 1)
hosts.append(host.strip('[]'))
ports.append(port)
return hosts, ports
def parse_url(self, url):
prefix = 'rbd://'
if not url.startswith(prefix):
reason = _('Not stored in rbd')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
pieces = [urllib.parse.unquote(piece)
for piece in url[len(prefix):].split('/')]
if '' in pieces:
reason = _('Blank components')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
if len(pieces) != 4:
reason = _('Not an rbd snapshot')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
return pieces
def get_fsid(self):
with RADOSClient(self) as client:
return client.cluster.get_fsid()
def is_cloneable(self, image_location, image_meta):
url = image_location['url']
try:
fsid, pool, image, snapshot = self.parse_url(url)
except exception.ImageUnacceptable as e:
LOG.debug('not cloneable: %s', e)
return False
if self.get_fsid() != fsid:
reason = '%s is in a different ceph cluster' % url
LOG.debug(reason)
return False
if image_meta.get('disk_format') != 'raw':
reason = ("rbd image clone requires image format to be "
"'raw' but image {0} is '{1}'").format(
url, image_meta.get('disk_format'))
LOG.debug(reason)
return False
# check that we can read the image
try:
return self.exists(image, pool=pool, snapshot=snapshot)
except rbd.Error as e:
LOG.debug('Unable to open image %(loc)s: %(err)s' %
dict(loc=url, err=e))
return False
def clone(self, image_location, dest_name, dest_pool=None):
_fsid, pool, image, snapshot = self.parse_url(
image_location['url'])
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to '
'%(dest_pool)s/%(dest_name)s',
dict(pool=pool, img=image, snap=snapshot,
dest_pool=dest_pool, dest_name=dest_name))
with RADOSClient(self, str(pool)) as src_client:
with RADOSClient(self, dest_pool) as dest_client:
try:
RbdProxy().clone(src_client.ioctx,
image.encode('utf-8'),
snapshot.encode('utf-8'),
dest_client.ioctx,
str(dest_name),
features=src_client.features)
except rbd.PermissionError:
raise exception.Forbidden(_('no write permission on '
'storage pool %s') % dest_pool)
def size(self, name):
with RBDVolumeProxy(self, name, read_only=True) as vol:
return vol.size()
def resize(self, name, size):
"""Resize RBD volume.
:name: Name of RBD object
:size: New size in bytes
"""
LOG.debug('resizing rbd image %s to %d', name, size)
with RBDVolumeProxy(self, name) as vol:
vol.resize(size)
def parent_info(self, volume, pool=None):
"""Returns the pool, image and snapshot name for the parent of an
RBD volume.
:volume: Name of RBD object
:pool: Name of pool
"""
try:
with RBDVolumeProxy(self, str(volume), pool=pool,
read_only=True) as vol:
return vol.parent_info()
except rbd.ImageNotFound:
raise exception.ImageUnacceptable(_("no usable parent snapshot "
"for volume %s") % volume)
def flatten(self, volume, pool=None):
""""Flattens" a snapshotted image with the parents' data,
effectively detaching it from the parent.
:volume: Name of RBD object
:pool: Name of pool
"""
LOG.debug('flattening %(pool)s/%(vol)s', dict(pool=pool, vol=volume))
with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
vol.flatten()
def exists(self, name, pool=None, snapshot=None):
try:
with RBDVolumeProxy(self, name,
pool=pool,
snapshot=snapshot,
read_only=True):
return True
except rbd.ImageNotFound:
return False
def remove_image(self, name):
"""Remove RBD volume
:name: Name of RBD volume
"""
with RADOSClient(self, self.pool) as client:
try:
RbdProxy().remove(client.ioctx, name)
except rbd.ImageNotFound:
LOG.warning(_LW('image %(volume)s in pool %(pool)s can not be '
'found, failed to remove'),
{'volume': name, 'pool': self.pool})
except rbd.ImageHasSnapshots:
LOG.error(_LE('image %(volume)s in pool %(pool)s has '
'snapshots, failed to remove'),
{'volume': name, 'pool': self.pool})
def import_image(self, base, name):
"""Import RBD volume from image file.
Uses the command line import instead of librbd since rbd import
command detects zeroes to preserve sparseness in the image.
:base: Path to image file
:name: Name of RBD volume
"""
args = ['--pool', self.pool, base, name]
        # Image format 2 supports cloning. In stable ceph releases the
        # default rbd image format is not 2, so request it explicitly.
args += ['--image-format=2']
args += self.ceph_args()
utils.execute('rbd', 'import', *args)
def _destroy_volume(self, client, volume, pool=None):
"""Destroy an RBD volume, retrying as needed.
"""
def _cleanup_vol(ioctx, volume, retryctx):
try:
RbdProxy().remove(ioctx, volume)
raise loopingcall.LoopingCallDone(retvalue=False)
except rbd.ImageHasSnapshots:
self.remove_snap(volume, libvirt_utils.RESIZE_SNAPSHOT_NAME,
ignore_errors=True)
except (rbd.ImageBusy, rbd.ImageHasSnapshots):
LOG.warning(_LW('rbd remove %(volume)s in pool %(pool)s '
'failed'),
{'volume': volume, 'pool': self.pool})
retryctx['retries'] -= 1
if retryctx['retries'] <= 0:
raise loopingcall.LoopingCallDone()
# NOTE(danms): We let it go for ten seconds
retryctx = {'retries': 10}
timer = loopingcall.FixedIntervalLoopingCall(
_cleanup_vol, client.ioctx, volume, retryctx)
timed_out = timer.start(interval=1).wait()
if timed_out:
# NOTE(danms): Run this again to propagate the error, but
# if it succeeds, don't raise the loopingcall exception
try:
_cleanup_vol(client.ioctx, volume, retryctx)
except loopingcall.LoopingCallDone:
pass
def cleanup_volumes(self, instance):
with RADOSClient(self, self.pool) as client:
def belongs_to_instance(disk):
# NOTE(nic): On revert_resize, the cleanup steps for the root
# volume are handled with an "rbd snap rollback" command,
# and none of this is needed (and is, in fact, harmful) so
# filter out non-ephemerals from the list
if instance.task_state == task_states.RESIZE_REVERTING:
return (disk.startswith(instance.uuid) and
disk.endswith('disk.local'))
else:
return disk.startswith(instance.uuid)
volumes = RbdProxy().list(client.ioctx)
for volume in filter(belongs_to_instance, volumes):
self._destroy_volume(client, volume)
def get_pool_info(self):
with RADOSClient(self) as client:
stats = client.cluster.get_cluster_stats()
return {'total': stats['kb'] * units.Ki,
'free': stats['kb_avail'] * units.Ki,
'used': stats['kb_used'] * units.Ki}
def create_snap(self, volume, name, pool=None, protect=False):
"""Create a snapshot of an RBD volume.
:volume: Name of RBD object
:name: Name of snapshot
:pool: Name of pool
:protect: Set the snapshot to "protected"
"""
LOG.debug('creating snapshot(%(snap)s) on rbd image(%(img)s)',
{'snap': name, 'img': volume})
with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
vol.create_snap(name)
if protect and not vol.is_protected_snap(name):
vol.protect_snap(name)
def remove_snap(self, volume, name, ignore_errors=False, pool=None,
force=False):
"""Removes a snapshot from an RBD volume.
:volume: Name of RBD object
:name: Name of snapshot
:ignore_errors: whether or not to log warnings on failures
:pool: Name of pool
:force: Remove snapshot even if it is protected
"""
with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
if name in [snap.get('name', '') for snap in vol.list_snaps()]:
if vol.is_protected_snap(name):
if force:
vol.unprotect_snap(name)
elif not ignore_errors:
LOG.warning(_LW('snapshot(%(name)s) on rbd '
'image(%(img)s) is protected, '
'skipping'),
{'name': name, 'img': volume})
return
LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)',
{'name': name, 'img': volume})
vol.remove_snap(name)
elif not ignore_errors:
LOG.warning(_LW('no snapshot(%(name)s) found on rbd '
'image(%(img)s)'),
{'name': name, 'img': volume})
def rollback_to_snap(self, volume, name):
"""Revert an RBD volume to its contents at a snapshot.
:volume: Name of RBD object
:name: Name of snapshot
"""
with RBDVolumeProxy(self, volume) as vol:
if name in [snap.get('name', '') for snap in vol.list_snaps()]:
LOG.debug('rolling back rbd image(%(img)s) to '
'snapshot(%(snap)s)', {'snap': name, 'img': volume})
vol.rollback_to_snap(name)
else:
raise exception.SnapshotNotFound(snapshot_id=name)
def destroy_volume(self, volume, pool=None):
"""A one-shot version of cleanup_volumes()
"""
with RADOSClient(self, pool) as client:
self._destroy_volume(client, volume)
|
|
#Copyright 2009 Almero Gouws, <14366037@sun.ac.za>
"""
This module supplies the classes used to implement different types
of cliques.
"""
__docformat__ = 'restructuredtext'
import numpy as np
import potentials
import general
class clique(object):
"""
    A clique with an attached discrete potential.
"""
def __init__(self, id_num=None, domain=None, node_sizes=None, T=None):
"""
Creates and initializes a clique object.
Parameters
----------
id_num : Integer
            An identifier number for this clique. A tool to identify it among
a list of cliques. It is best to ensure that each clique object
has a unique identifier.
domain: List of integers
The list of nodes that this clique encompasses.
        node_sizes: List of integers
            The sizes of the nodes, where a size of 10 means the node can
            assume 1 of 10 different states.
T : Numpy array
            The look-up table of discrete probabilities assigned to this
clique.
"""
"""Check if this is supposed to be a blank clique"""
        if (domain is None) and (node_sizes is None):
self.pot = None
self.unobserved_pot = None
else:
"""Set the identifier and the domain"""
self.id = id_num
self.domain = domain[:]
"""
Set the unobserved potential of the clique, this potential
will not be changed in any of the inference algorithm's, and
can only be changed explicitly. It is used to initialize the
observed potential based on any observed evidence.
"""
self.unobserved_pot = potentials.DiscretePotential(domain, node_sizes, T)
"""
This potential will be changed by entering evidence and running
inference algorithms.
"""
self.pot = self.unobserved_pot.copy()
self.node_sizes = node_sizes
"""Initialize expected sufficient statistics to 1"""
self.ess = np.array([1])
"""
            The dictionary nbrs stores the IDs of the cliques that are
            neighbours to this clique; these are used as keys to find the
            variable nodes that separate this clique from its neighbours.
            Therefore if clique i is a neighbour of this clique, and
            self.nbrs[i] = [3, 5], then the variable nodes with IDs
            3 and 5 separate clique i from this clique.
"""
self.nbrs = dict()
def enter_evidence(self, evidence, maximize=False):
"""
Enter observed evidence into this clique's working potential.
Parameters
----------
evidence: List
A list of observed values for the nodes in this clique.
[] represents a hidden node.
"""
"""Reinitialize the working potential with the unobserved potential"""
self.pot = self.unobserved_pot.copy()
"""
If the potential is being used in a max-sum algorithm, log the
working potential.
"""
if maximize:
self.pot.T = np.log(self.pot.T)
"""Enter the evidence to the working potential"""
self.pot.enter_evidence(evidence)
def init_sep_pots(self, node_sizes, onodes, max_sum=False):
"""
        Initialize the separator potentials, which are the messages stored
at the variable nodes, before being sent to the cliques.
Parameters
----------
node_sizes: Array
A list of the sizes of each node in the model. If sizes[2] = 10,
then node 2 can assume 1 of 10 different states.
onodes: List
A list of all the observed nodes in the model.
max_sum: Bool
            True if the max-sum algorithm is going to be used on this clique,
            False otherwise. It indicates whether the potentials must be
            initialized to ones (for sum-product) or to zeros (for max-sum),
            since max-sum works in log space to find the most likely
            configuration.
"""
for i in self.nbrs.iterkeys():
"""Create intial potential object"""
domain = self.nbrs[i][0]
# Determine which potential type to create
_node_sizes = node_sizes.copy()
if len(onodes) != 0:
_node_sizes[onodes] = 1
sep_pot = potentials.DiscretePotential(domain, _node_sizes[domain])
sep_pot.observed_domain = onodes
"""
If this clique is being used in a max-sum algorithm,
initialize the tables message to zero instead of one.
"""
            if max_sum:
sep_pot.T = sep_pot.T * 0
"""Assign potential object to the neighbour"""
self.nbrs[i][1] = sep_pot
def reset_ess(self):
"""
Reset the expected sufficient statistics for this clique.
"""
self.ess = np.zeros((1, np.prod(self.unobserved_pot.T.shape)))
def update_ess(self, sample, expected_vals, node_sizes):
"""
Update the expected sufficient statistics for this clique.
Parameters
----------
sample: List
A partially observed sample of the all the nodes in the model
            this clique is part of. sample[i] = [] if node i is unobserved.
expected_vals: marginal
A marginal object containing the expected values for any unobserved
nodes in this clique.
node_sizes: Array
A list of the sizes of each node in the model. If sizes[2] = 10,
then node 2 can assume 1 of 10 different states.
"""
[hidden, observed] = general.determine_observed(sample)
if general.issubset(np.array(self.domain), np.array(hidden)):
"""
            If the entire domain of the clique was unobserved in the last
            sample, then the marginal over the clique's domain is just the
            clique's entire potential. Therefore we can add it directly to
            the clique's expected sufficient statistics.
"""
self.ess = self.ess + expected_vals.T.flatten()
else:
"""
            If any part of the clique's domain was observed, the expected
            values for the observed domain have been marginalized out.
            Therefore we need to expand the marginal back to its correct
            dimensions based on the observed evidence, and place the observed
            values where the 'expected' values were.
"""
expected_vals.add_ev_to_dmarginal(sample, node_sizes)
"""
            Add the new values to the clique's expected sufficient statistics.
"""
self.ess = self.ess + expected_vals.T.flatten()
def maximize_params(self):
"""
        Maximize the parameters from the expected sufficient statistics.
"""
ess = np.array(self.ess).reshape(self.unobserved_pot.T.shape)
self.unobserved_pot.T = general.mk_stochastic(ess)
def copy(self):
"""
        Creates an exact copy of this clique.
"""
copy_clq = clique()
copy_clq.id = self.id
copy_clq.domain = self.domain[:]
copy_clq.pot = self.pot.copy()
copy_clq.unobserved_pot = self.unobserved_pot.copy()
copy_clq.ess = self.ess.copy()
copy_clq.nbrs = dict()
for nbr in self.nbrs:
copy_clq.nbrs[nbr] = [[], []]
copy_clq.nbrs[nbr][0] = self.nbrs[nbr][0]
            if self.nbrs[nbr][1] is not None:
copy_clq.nbrs[nbr][1] = self.nbrs[nbr][1].copy()
return copy_clq
|
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from pyro.contrib.mue.statearrangers import Profile, mg2k
def simpleprod(lst):
# Product of list of scalar tensors, as numpy would do it.
if len(lst) == 0:
return torch.tensor(1.0)
else:
return torch.prod(torch.cat([elem[None] for elem in lst]))
@pytest.mark.parametrize("M", [2, 20])
@pytest.mark.parametrize("batch_size", [None, 5])
@pytest.mark.parametrize("substitute", [False, True])
def test_profile_alternate_imp(M, batch_size, substitute):
# --- Setup random model. ---
pf_arranger = Profile(M)
u1 = torch.rand((M + 1, 3))
u1[M, :] = 0 # Assume u_{M+1, j} = 0 for j in {0, 1, 2} in Eqn. S40.
u = torch.cat([(1 - u1)[:, :, None], u1[:, :, None]], dim=2)
r1 = torch.rand((M + 1, 3))
r1[M, :] = 1 # Assume r_{M+1, j} = 1 for j in {0, 1, 2} in Eqn. S40.
r = torch.cat([(1 - r1)[:, :, None], r1[:, :, None]], dim=2)
s = torch.rand((M, 4))
s = s / torch.sum(s, dim=1, keepdim=True)
c = torch.rand((M + 1, 4))
c = c / torch.sum(c, dim=1, keepdim=True)
if batch_size is not None:
s = torch.rand((batch_size, M, 4))
s = s / torch.sum(s, dim=2, keepdim=True)
u1 = torch.rand((batch_size, M + 1, 3))
u1[:, M, :] = 0
u = torch.cat([(1 - u1)[:, :, :, None], u1[:, :, :, None]], dim=3)
# Compute forward pass of state arranger to get HMM parameters.
# Don't use dimension M, assumed fixed by statearranger.
if substitute:
ll = torch.rand((4, 5))
ll = ll / torch.sum(ll, dim=1, keepdim=True)
a0ln, aln, eln = pf_arranger.forward(
torch.log(s),
torch.log(c),
torch.log(r[:-1, :]),
torch.log(u[..., :-1, :, :]),
torch.log(ll),
)
else:
a0ln, aln, eln = pf_arranger.forward(
torch.log(s),
torch.log(c),
torch.log(r[:-1, :]),
torch.log(u[..., :-1, :, :]),
)
# - Remake HMM parameters to check. -
# Here we implement Equation S40 from the MuE paper
# (https://www.biorxiv.org/content/10.1101/2020.07.31.231381v1.full.pdf)
# more directly, iterating over all the indices of the transition matrix
# and initial transition vector.
K = 2 * M + 1
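    # K is the total number of HMM states: M match states (g=0) plus
    # M + 1 insert states (g=1), as mapped to a flat index by mg2k.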
if batch_size is None:
batch_dim_size = 1
r1 = r1.unsqueeze(0)
u1 = u1.unsqueeze(0)
s = s.unsqueeze(0)
c = c.unsqueeze(0)
if substitute:
ll = ll.unsqueeze(0)
else:
batch_dim_size = batch_size
r1 = r1[None, :, :] * torch.ones([batch_size, 1, 1])
c = c[None, :, :] * torch.ones([batch_size, 1, 1])
if substitute:
ll = ll.unsqueeze(0)
expected_a = torch.zeros((batch_dim_size, K, K))
expected_a0 = torch.zeros((batch_dim_size, K))
expected_e = torch.zeros((batch_dim_size, K, 4))
for b in range(batch_dim_size):
m, g = -1, 0
u1[b][-1] = 1e-32
for gp in range(2):
for mp in range(M + gp):
kp = mg2k(mp, gp, M)
if m + 1 - g == mp and gp == 0:
expected_a0[b, kp] = (1 - r1[b, m + 1 - g, g]) * (
1 - u1[b, m + 1 - g, g]
)
elif m + 1 - g < mp and gp == 0:
expected_a0[b, kp] = (
(1 - r1[b, m + 1 - g, g])
* u1[b, m + 1 - g, g]
* simpleprod(
[
(1 - r1[b, mpp, 2]) * u1[b, mpp, 2]
for mpp in range(m + 2 - g, mp)
]
)
* (1 - r1[b, mp, 2])
* (1 - u1[b, mp, 2])
)
elif m + 1 - g == mp and gp == 1:
expected_a0[b, kp] = r1[b, m + 1 - g, g]
elif m + 1 - g < mp and gp == 1:
expected_a0[b, kp] = (
(1 - r1[b, m + 1 - g, g])
* u1[b, m + 1 - g, g]
* simpleprod(
[
(1 - r1[b, mpp, 2]) * u1[b, mpp, 2]
for mpp in range(m + 2 - g, mp)
]
)
* r1[b, mp, 2]
)
for g in range(2):
for m in range(M + g):
k = mg2k(m, g, M)
for gp in range(2):
for mp in range(M + gp):
kp = mg2k(mp, gp, M)
if m + 1 - g == mp and gp == 0:
expected_a[b, k, kp] = (1 - r1[b, m + 1 - g, g]) * (
1 - u1[b, m + 1 - g, g]
)
elif m + 1 - g < mp and gp == 0:
expected_a[b, k, kp] = (
(1 - r1[b, m + 1 - g, g])
* u1[b, m + 1 - g, g]
* simpleprod(
[
(1 - r1[b, mpp, 2]) * u1[b, mpp, 2]
for mpp in range(m + 2 - g, mp)
]
)
* (1 - r1[b, mp, 2])
* (1 - u1[b, mp, 2])
)
elif m + 1 - g == mp and gp == 1:
expected_a[b, k, kp] = r1[b, m + 1 - g, g]
elif m + 1 - g < mp and gp == 1:
expected_a[b, k, kp] = (
(1 - r1[b, m + 1 - g, g])
* u1[b, m + 1 - g, g]
* simpleprod(
[
(1 - r1[b, mpp, 2]) * u1[b, mpp, 2]
for mpp in range(m + 2 - g, mp)
]
)
* r1[b, mp, 2]
)
elif m == M and mp == M and g == 0 and gp == 0:
expected_a[b, k, kp] = 1.0
for g in range(2):
for m in range(M + g):
k = mg2k(m, g, M)
if g == 0:
expected_e[b, k, :] = s[b, m, :]
else:
expected_e[b, k, :] = c[b, m, :]
if substitute:
expected_e = torch.matmul(expected_e, ll)
# --- Check ---
if batch_size is None:
expected_a = expected_a.squeeze()
expected_a0 = expected_a0.squeeze()
expected_e = expected_e.squeeze()
assert torch.allclose(
torch.sum(torch.exp(a0ln)), torch.tensor(1.0), atol=1e-3, rtol=1e-3
)
assert torch.allclose(
torch.sum(torch.exp(aln), axis=1),
torch.ones(2 * M + 1),
atol=1e-3,
rtol=1e-3,
)
assert torch.allclose(expected_a0, torch.exp(a0ln))
assert torch.allclose(expected_a, torch.exp(aln))
assert torch.allclose(expected_e, torch.exp(eln))
@pytest.mark.parametrize("batch_ancestor_seq", [False, True])
@pytest.mark.parametrize("batch_insert_seq", [False, True])
@pytest.mark.parametrize("batch_insert", [False, True])
@pytest.mark.parametrize("batch_delete", [False, True])
@pytest.mark.parametrize("batch_substitute", [False, True])
def test_profile_shapes(
batch_ancestor_seq, batch_insert_seq, batch_insert, batch_delete, batch_substitute
):
M, D, B = 5, 2, 3
K = 2 * M + 1
batch_size = 6
pf_arranger = Profile(M)
sln = torch.randn([batch_size] * batch_ancestor_seq + [M, D])
sln = sln - sln.logsumexp(-1, True)
cln = torch.randn([batch_size] * batch_insert_seq + [M + 1, D])
cln = cln - cln.logsumexp(-1, True)
rln = torch.randn([batch_size] * batch_insert + [M, 3, 2])
rln = rln - rln.logsumexp(-1, True)
uln = torch.randn([batch_size] * batch_delete + [M, 3, 2])
uln = uln - uln.logsumexp(-1, True)
lln = torch.randn([batch_size] * batch_substitute + [D, B])
lln = lln - lln.logsumexp(-1, True)
a0ln, aln, eln = pf_arranger.forward(sln, cln, rln, uln, lln)
if all([not batch_ancestor_seq, not batch_insert_seq, not batch_substitute]):
assert eln.shape == (K, B)
assert torch.allclose(eln.logsumexp(-1), torch.zeros(K))
else:
assert eln.shape == (batch_size, K, B)
assert torch.allclose(eln.logsumexp(-1), torch.zeros(batch_size, K))
if all([not batch_insert, not batch_delete]):
assert a0ln.shape == (K,)
assert torch.allclose(a0ln.logsumexp(-1), torch.zeros(1))
assert aln.shape == (K, K)
assert torch.allclose(aln.logsumexp(-1), torch.zeros(K))
else:
assert a0ln.shape == (batch_size, K)
assert torch.allclose(a0ln.logsumexp(-1), torch.zeros(batch_size))
assert aln.shape == (batch_size, K, K)
assert torch.allclose(aln.logsumexp(-1), torch.zeros((batch_size, K)))
@pytest.mark.parametrize("M", [2, 20]) # , 20
def test_profile_trivial_cases(M):
    # Trivial case: indel probability of zero. Expected value of
# HMM should match ancestral sequence times substitution matrix.
# --- Setup model. ---
D, B = 2, 2
batch_size = 5
pf_arranger = Profile(M)
sln = torch.randn([batch_size, M, D])
sln = sln - sln.logsumexp(-1, True)
cln = torch.randn([batch_size, M + 1, D])
cln = cln - cln.logsumexp(-1, True)
rln = torch.cat(
[torch.zeros([M, 3, 1]), -1 / pf_arranger.epsilon * torch.ones([M, 3, 1])],
axis=-1,
)
uln = torch.cat(
[torch.zeros([M, 3, 1]), -1 / pf_arranger.epsilon * torch.ones([M, 3, 1])],
axis=-1,
)
lln = torch.randn([D, B])
lln = lln - lln.logsumexp(-1, True)
a0ln, aln, eln = pf_arranger.forward(sln, cln, rln, uln, lln)
# --- Compute expected value per step. ---
Eyln = torch.zeros([batch_size, M, B])
ai = a0ln
for j in range(M):
Eyln[:, j, :] = torch.logsumexp(ai.unsqueeze(-1) + eln, axis=-2)
ai = torch.logsumexp(ai.unsqueeze(-1) + aln, axis=-2)
print(aln.exp())
no_indel = torch.logsumexp(sln.unsqueeze(-1) + lln.unsqueeze(-3), axis=-2)
assert torch.allclose(Eyln, no_indel)
|
|
import datetime
import decimal
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from django.db.backends import utils
from django.utils import timezone
from django.utils.dateparse import parse_duration
from django.utils.encoding import force_text
class BaseDatabaseOperations:
"""
Encapsulate backend-specific differences, such as the way a backend
performs ordering or calculates the ID of a recently-inserted row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
set_operators = {
'union': 'UNION',
'intersection': 'INTERSECT',
'difference': 'EXCEPT',
}
# Mapping of Field.get_internal_type() (typically the model field's class
# name) to the data type to use for the Cast() function, if different from
# DatabaseWrapper.data_types.
cast_data_types = {}
# CharField data type if the max_length argument isn't provided.
cast_char_field_without_max_length = None
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Return any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Return the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Return an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, return the SQL that casts the result of a union
to that type. The resulting string should contain a '%s' placeholder
for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implement the date interval functionality for expressions.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Return the SQL to cast a datetime value to date value.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date_sql() method')
def datetime_cast_time_sql(self, field_name, tzname):
"""
Return the SQL to cast a datetime value to time value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or
'second', return the SQL that extracts a value from the given
datetime field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or
'second', return the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def time_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', return the SQL
that truncates the given time field field_name to a time object with
only the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute', or 'second', return the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Return the SQL to make a constraint "initially deferred" during a
CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Return an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only check the given fields for
duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, return the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type
(e.g. 'GenericIPAddressField'), return the SQL to cast it before using
it in a WHERE statement. The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Return a list used in the "ORDER BY" clause to force no ordering at
all. Return an empty list to include nothing in the ordering.
"""
return []
def for_update_sql(self, nowait=False, skip_locked=False, of=()):
"""
Return the FOR UPDATE SQL clause to lock rows for an update operation.
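        For example, for_update_sql(nowait=True, of=('self',)) returns
        'FOR UPDATE OF self NOWAIT'.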
"""
return 'FOR UPDATE%s%s%s' % (
' OF %s' % ', '.join(of) if of else '',
' NOWAIT' if nowait else '',
' SKIP LOCKED' if skip_locked else '',
)
def last_executed_query(self, cursor, sql, params):
"""
Return a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain string values.
def to_string(s):
return force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_string(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_string(k): to_string(v) for k, v in params.items()}
return "QUERY = %r - PARAMS = %r" % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, return the newly created ID.
`pk_name` is the name of the primary-key column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Return the string to use in a query when performing lookups
("contains", "like", etc.). It should contain a '%s' placeholder for
the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Return the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Return the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Return the value to use for the LIMIT when we want "LIMIT infinity".
Return None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Return the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Take an SQL script that may contain multiple lines and return a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
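# A hedged illustration (not part of Django's source), assuming sqlparse is
# installed: prepare_sql_script("CREATE TABLE a (id int); INSERT INTO a VALUES (1);")
# would yield two statements, one per cursor.execute() call:
#     ['CREATE TABLE a (id int);', 'INSERT INTO a VALUES (1);']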
def process_clob(self, value):
"""
Return the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part of an
insert query, return the SQL and params to append to the INSERT query.
The returned fragment should contain a format string to hold the
appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Return the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Return a quoted version of the given table, index, or column name. Do
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""Return an SQL expression that returns a random value."""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Return the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). It should contain a '%s'
placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), raise
NotImplementedError.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Return the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Return the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Return the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Return the SQL that will set the connection's time zone.
Return '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Return a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves) and the SQL statements required to reset the sequences
passed in `sequences`.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing to the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')
def execute_sql_flush(self, using, sql_list):
"""Execute a list of SQL statements to flush the database."""
with transaction.atomic(using=using, savepoint=self.connection.features.can_rollback_ddl):
with self.connection.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def sequence_reset_by_name_sql(self, style, sequences):
"""
Return a list of the SQL statements required to reset sequences
passed in `sequences`.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Return a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""Return the SQL statement required to start a transaction."""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""Return the SQL statement required to end a transaction."""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Return the SQL that will be used in a query to define the tablespace.
Return '' if the backend doesn't support tablespaces.
If `inline` is True, append the SQL to a row; otherwise append it to
the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepare a value for use in a LIKE query."""
return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). Raise a ValueError if the value is
invalid, otherwise return the validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transform a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
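# A hedged illustration (not part of Django's source): with the dispatch above,
# adapt_unknown_value(datetime.date(2020, 1, 31)) goes through
# adapt_datefield_value() and yields '2020-01-31' under the default str()-based
# implementation, while adapt_unknown_value(3) is returned unchanged.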
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return str(value)
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return str(value)
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return str(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transform a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
Return a two-element list with the lower and upper bounds to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
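# A hedged illustration (not part of Django's source): with the default
# adapt_datefield_value(), year_lookup_bounds_for_date_field(2020) returns
# ['2020-01-01', '2020-12-31'], i.e. the BETWEEN bounds for a year lookup.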
def year_lookup_bounds_for_datetime_field(self, value):
"""
Return a two-element list with the lower and upper bounds to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Return a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format; this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection):
if value is not None:
value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
value = parse_duration(value)
return value
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""
Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions).
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
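# A hedged illustration (not part of Django's source):
#     combine_expression('+', ['price', 'tax'])  ->  'price + tax'
# backends override this when a connector needs backend-specific SQL.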
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def binary_placeholder_sql(self, value):
"""
Some backends require special syntax to insert binary content (MySQL
for example uses '_binary %s').
"""
return '%s'
def modify_insert_params(self, placeholder, params):
"""
Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
return a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
def subtract_temporals(self, internal_type, lhs, rhs):
if self.connection.features.supports_temporal_subtraction:
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
raise NotImplementedError("This backend does not support %s subtraction." % internal_type)
|
|
from tests.base_unittest import BaseUnitTest
from pypokerengine.engine.player import Player
from pypokerengine.engine.poker_constants import PokerConstants as Const
from pypokerengine.engine.action_checker import ActionChecker
class ActionCheckerTest(BaseUnitTest):
""" the case when no action is done before """
def test_check(self):
players = self.__setup_clean_players()
self.false(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'call', 0))
self.eq(0, ActionChecker.need_amount_for_action(players[0], 0))
self.eq(0, ActionChecker.need_amount_for_action(players[1], 0))
def test_call(self):
players = self.__setup_clean_players()
self.true(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'call', 10))
def test_too_small_raise(self):
players = self.__setup_clean_players()
self.true(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'raise', 4))
def test_legal_raise(self):
players = self.__setup_clean_players()
self.false(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'raise', 5))
""" the case when agree amount = $10, minimum bet = $15"""
def test__fold(self):
players = self.__setup_blind_players()
self.false(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'fold'))
def test__call(self):
players = self.__setup_blind_players()
self.true(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'call', 9))
self.false(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'call', 10))
self.true(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'call', 11))
self.eq(5, ActionChecker.need_amount_for_action(players[0], 10))
self.eq(0, ActionChecker.need_amount_for_action(players[1], 10))
def test__raise(self):
players = self.__setup_blind_players()
self.true(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'raise', 14))
self.false(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'raise', 15))
self.false(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, 'raise', 16))
self.eq(10, ActionChecker.need_amount_for_action(players[0], 15))
self.eq(5, ActionChecker.need_amount_for_action(players[1], 15))
def test__short_of_money(self):
players = self.__setup_blind_players()
players[0].collect_bet(88) # p1 stack is $7
self.false(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, "call", 10))
self.true(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, "call", 15))
def test_small_blind_allin_raise(self):
players = self.__setup_blind_players()
self.false(ActionChecker._ActionChecker__is_illegal(players, 0, 2.5, "raise", 100))
def test_big_blind_allin_call(self):
players = self.__setup_blind_players()
players[0].add_action_history(Const.Action.RAISE, 100, 95)
self.false(ActionChecker._ActionChecker__is_illegal(players, 1, 2.5, "call", 100))
players[1].collect_bet(1)
self.true(ActionChecker._ActionChecker__is_illegal(players, 1, 2.5, "call", 100))
def test_allin_check_on_call(self):
player = self.__setup_clean_players()[0]
self.false(ActionChecker.is_allin(player, "call", 99))
self.true(ActionChecker.is_allin(player, "call", 100))
self.true(ActionChecker.is_allin(player, "call", 101))
def test_allin_check_on_raise(self):
player = self.__setup_clean_players()[0]
self.false(ActionChecker.is_allin(player, "raise", 99))
self.true(ActionChecker.is_allin(player, "raise", 100))
self.false(ActionChecker.is_allin(player, "raise", 101))
def test_allin_check_on_fold(self):
player = self.__setup_clean_players()[0]
self.false(ActionChecker.is_allin(player, "fold", 0))
def test_correct_action_on_allin_call(self):
players = self.__setup_clean_players()
players[0].add_action_history(Const.Action.RAISE, 50, 50)
players[1].add_action_history(Const.Action.BIG_BLIND, sb_amount=5)
players[1].stack = 30
action, bet_amount = ActionChecker.correct_action(players, 1, 2.5, 'call', 50)
self.eq('call', action)
self.eq(40, bet_amount)
def test_correct_action_on_allin_raise(self):
players = self.__setup_clean_players()
action, bet_amount = ActionChecker.correct_action(players, 0, 2.5, 'raise', 100)
self.eq('raise', action)
self.eq(100, bet_amount)
def test_correct_illegal_call(self):
players = self.__setup_clean_players()
action, bet_amount = ActionChecker.correct_action(players, 0, 2.5, 'call', 10)
self.eq('fold', action)
self.eq(0, bet_amount)
def test_correct_action_on_call_regression(self):
players = self.__setup_clean_players()
for player, stack in zip(players,[130, 70]):
player.stack = stack
players[0].collect_bet(5)
players[0].pay_info.update_by_pay(5)
players[0].add_action_history(Const.Action.SMALL_BLIND, sb_amount=5)
players[1].collect_bet(10)
players[1].pay_info.update_by_pay(10)
players[1].add_action_history(Const.Action.BIG_BLIND, sb_amount=5)
players[0].collect_bet(55)
players[0].pay_info.update_by_pay(55)
players[0].add_action_history(Const.Action.RAISE, 60, 55)
action, bet_amount = ActionChecker.correct_action(players, 1, 2.5, 'call', 60)
self.eq('call', action)
self.eq(60, bet_amount)
def test_correct_illegal_raise(self):
players = self.__setup_clean_players()
action, bet_amount = ActionChecker.correct_action(players, 0, 2.5, 'raise', 101)
self.eq('fold', action)
self.eq(0, bet_amount)
def test_correct_action_when_legal_call(self):
players = self.__setup_clean_players()
action, bet_amount = ActionChecker.correct_action(players, 0, 2.5, 'call', 0)
self.eq('call', action)
self.eq(0, bet_amount)
def test_correct_action_when_legal_raise(self):
players = self.__setup_clean_players()
action, bet_amount = ActionChecker.correct_action(players, 0, 2.5, 'raise', 100)
self.eq('raise', action)
self.eq(100, bet_amount)
def test_legal_actions(self):
players = self.__setup_blind_players()
legal_actions = ActionChecker.legal_actions(players, 0, 2.5)
self.eq({"action":"fold", "amount":0}, legal_actions[0])
self.eq({"action":"call", "amount":10}, legal_actions[1])
self.eq({"action":"raise", "amount": { "min":15, "max":100} }, legal_actions[2])
def test_legal_actions_when_short_of_money(self):
players = self.__setup_blind_players()
players[0].stack = 9
legal_actions = ActionChecker.legal_actions(players, 0, 2.5)
self.eq({"action":"fold", "amount":0}, legal_actions[0])
self.eq({"action":"call", "amount":10}, legal_actions[1])
self.eq({"action":"raise", "amount": { "min":-1, "max":-1} }, legal_actions[2])
def test_need_amount_after_ante(self):
# situation => SB=$5 (players[0]), BB=$10 (players[1]), ANTE=$3
players = [Player("uuid", 100, name="name") for _ in range(3)]
for player in players:
player.collect_bet(3)
player.add_action_history(Const.Action.ANTE, 3)
player.pay_info.update_by_pay(3)
players[0].collect_bet(5)
players[0].add_action_history(Const.Action.SMALL_BLIND, sb_amount=5)
players[0].pay_info.update_by_pay(5)
players[1].collect_bet(10)
players[1].add_action_history(Const.Action.BIG_BLIND, sb_amount=5)
players[1].pay_info.update_by_pay(10)
def set_stack(stacks, ps):
for stack, p in zip(stacks, ps):
p.stack = stack
set_stack([7,7,7], players)
self.eq(("call", 10), ActionChecker.correct_action(players, 0, 5, "call", 10))
self.eq(("call", 10), ActionChecker.correct_action(players, 1, 5, "call", 10))
self.eq(("call", 7), ActionChecker.correct_action(players, 2, 5, "call", 10))
self.true(ActionChecker.is_allin(players[2], "call", 8))
self.false(ActionChecker.is_allin(players[2], "raise", 10))
self.eq(5, ActionChecker.need_amount_for_action(players[0], 10))
self.eq(0, ActionChecker.need_amount_for_action(players[1], 10))
self.eq(10, ActionChecker.need_amount_for_action(players[2], 10))
set_stack([12,12,12], players)
actions = ActionChecker.legal_actions(players, 2, 5)
self.eq(-1, actions[2]["amount"]["max"])
set_stack([10,5,12], players)
self.eq(("raise", 15), ActionChecker.correct_action(players, 0, 5, "raise", 15))
self.eq(("raise", 15), ActionChecker.correct_action(players, 1, 5, "raise", 15))
self.eq(("fold", 0), ActionChecker.correct_action(players, 2, 5, "raise", 15))
def __setup_clean_players(self):
return [Player("uuid", 100) for _ in range(2)]
def __setup_blind_players(self):
return [self.__create_blind_player(flg) for flg in [True, False]]
def __create_blind_player(self, small_blind=True):
name = "sb" if small_blind else "bb"
blind = 5 if small_blind else 10
player = Player("uuid", 100, name=name)
player.add_action_history(Const.Action.RAISE, blind, 5)
player.collect_bet(blind)
player.pay_info.update_by_pay(blind)
return player
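# Note (added commentary, not part of the original tests): __setup_blind_players
# models a heads-up hand with a $5 small blind and a $10 big blind out of $100
# stacks, which is why the tests above treat $10 as the agreed amount, $15 as
# the minimum legal raise and $100 as the all-in maximum.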
|
|
"""
OpenNSA backend for Juniper EX switches supporting ccc encapsulation.
Even though MPLS is used here, MPLS to other devices is not supported; it is
used merely to facilitate private switching between two units, so only
vlan-vlan connections are supported.
Authors:
Original GTS backend: Tamas Varga <vargat@niif.hu>
Modified for EX4550: Michal Hazlinsky <hazlinsky@cesnet.cz>
"""
import random
from twisted.python import log
from twisted.internet import defer
from opennsa import config
from opennsa.backends.common import genericbackend, ssh
# parameterized commands
COMMAND_CONFIGURE = 'edit private'
COMMAND_COMMIT = 'commit'
COMMAND_SET_INTERFACES = 'set interfaces %(port)s encapsulation ethernet-ccc' # port
COMMAND_SET_INTERFACES_CCC = 'set interfaces %(port)s unit 0 family ccc'
COMMAND_SET_INTERFACES_MTU = 'set interfaces %(port)s mtu 9000'
COMMAND_SET_INTERFACE_VLN_T = 'set interfaces %(port)s vlan-tagging'
COMMAND_SET_INTERFACE_ENC_V = 'set interfaces %(port)s encapsulation vlan-ccc'
COMMAND_SET_VLAN_ENCAP = 'set interfaces %(port)s unit %(vlan)s encapsulation vlan-ccc'
COMMAND_SET_VLAN_ID = 'set interfaces %(port)s unit %(vlan)s vlan-id %(vlan)s'
COMMAND_SET_SWAP_PUSH_POP = 'set interfaces %(port)s unit %(vlan)s swap-by-poppush'
COMMAND_DELETE_INTERFACES = 'delete interfaces %(port)s' # port / vlan
COMMAND_DELETE_INTERFACES_VL = 'delete interfaces %(port)s.%(vlan)s'
COMMAND_DELETE_CONNECTIONS = 'delete protocols connections interface-switch %(switch)s' # switch
COMMAND_DELETE_MPLS_LSP = 'delete protocols mpls label-switched-path %(unique-id)s'
COMMAND_DELETE_REMOTE_INT_SW= 'delete protocols connections remote-interface-switch %(connectionid)s'
COMMAND_LOCAL_CONNECTIONS = 'set protocols connections interface-switch %(switch)s interface %(interface)s.%(subinterface)s'
COMMAND_REMOTE_LSP_OUT_TO = 'set protocols mpls label-switched-path %(unique-id)s to %(remote_ip)s'
COMMAND_REMOTE_LSP_OUT_NOCSPF = 'set protocols mpls label-switched-path %(unique-id)s no-cspf'
COMMAND_REMOTE_CONNECTIONS_INT = 'set protocols connections remote-interface-switch %(connectionid)s interface %(port)s'
COMMAND_REMOTE_CONNECTIONS_TRANSMIT_LSP = 'set protocols connections remote-interface-switch %(connectionid)s transmit-lsp %(unique-id)s'
COMMAND_REMOTE_CONNECTIONS_RECEIVE_LSP = 'set protocols connections remote-interface-switch %(connectionid)s receive-lsp %(unique-id)s'
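# A hedged illustration (added commentary): the templates above are plain
# %-format strings, e.g.
#     COMMAND_SET_VLAN_ID % {'port': 'xe-0/0/1', 'vlan': 1500}
# yields
#     'set interfaces xe-0/0/1 unit 1500 vlan-id 1500'
# (port and vlan values here are made up for illustration).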
LOG_SYSTEM = 'EX4550'
class SSHChannel(ssh.SSHChannel):
name = 'session'
def __init__(self, conn):
ssh.SSHChannel.__init__(self, conn=conn)
self.line = ''
self.wait_defer = None
self.wait_line = None
@defer.inlineCallbacks
def sendCommands(self, commands):
LT = '\r' # line termination
try:
yield self.conn.sendRequest(self, 'shell', '', wantReply=1)
d = self.waitForLine('{master:0}[edit]')
self.write(COMMAND_CONFIGURE + LT)
yield d
log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)
for cmd in commands:
log.msg('CMD> %s' % cmd, system=LOG_SYSTEM)
d = self.waitForLine('{master:0}[edit]')
self.write(cmd + LT)
yield d
# commit commands, check for 'commit complete' as success
# not quite sure how to handle failure here
## test stuff
#d = self.waitForLine('[edit]')
#self.write('commit check' + LT)
d = self.waitForLine('commit complete')
self.write(COMMAND_COMMIT + LT)
yield d
except Exception as e:
log.msg('Error sending commands: %s' % str(e))
raise e
log.msg('Commands successfully committed', debug=True, system=LOG_SYSTEM)
self.sendEOF()
self.closeIt()
def waitForLine(self, line):
self.wait_line = line
self.wait_defer = defer.Deferred()
return self.wait_defer
def matchLine(self, line):
if self.wait_line and self.wait_defer:
if self.wait_line == line.strip():
d = self.wait_defer
self.wait_line = None
self.wait_defer = None
d.callback(self)
else:
pass
def dataReceived(self, data):
if len(data) == 0:
pass
else:
self.line += data
if '\n' in data:
lines = [ line.strip() for line in self.line.split('\n') if line.strip() ]
self.line = ''
for l in lines:
self.matchLine(l)
class JunosEx4550CommandSender:
def __init__(self, host, port, ssh_host_fingerprint, user, ssh_public_key_path, ssh_private_key_path,
network_name):
self.ssh_connection_creator = \
ssh.SSHConnectionCreator(host, port, [ ssh_host_fingerprint ], user, ssh_public_key_path, ssh_private_key_path)
self.ssh_connection = None # cached connection
self.connection_lock = defer.DeferredLock()
self.network_name = network_name
def _getSSHChannel(self):
def setSSHConnectionCache(ssh_connection):
log.msg('SSH Connection created and cached', system=LOG_SYSTEM)
self.ssh_connection = ssh_connection
return ssh_connection
def gotSSHConnection(ssh_connection):
channel = SSHChannel(conn = ssh_connection)
ssh_connection.openChannel(channel)
return channel.channel_open
if self.ssh_connection:
log.msg('Reusing SSH connection', debug=True, system=LOG_SYSTEM)
return gotSSHConnection(self.ssh_connection)
else:
# since creating a new connection should be uncommon, we log it
# this makes it possible to see if something goes wrong and creates connections continuously
log.msg('Creating new SSH connection', system=LOG_SYSTEM)
d = self.ssh_connection_creator.getSSHConnection()
d.addCallback(setSSHConnectionCache)
d.addCallback(gotSSHConnection)
return d
@defer.inlineCallbacks
def _sendCommands(self, commands):
channel = yield self._getSSHChannel()
log.msg('Acquiring ssh session lock', debug=True, system=LOG_SYSTEM)
yield self.connection_lock.acquire()
log.msg('Got ssh session lock', debug=True, system=LOG_SYSTEM)
try:
yield channel.sendCommands(commands)
finally:
log.msg('Releasing ssh session lock', debug=True, system=LOG_SYSTEM)
self.connection_lock.release()
log.msg('Released ssh session lock', debug=True, system=LOG_SYSTEM)
def setupLink(self, connection_id, source_port, dest_port, bandwidth):
cg = JunosEx4550CommandGenerator(connection_id,source_port,dest_port,self.network_name,bandwidth)
commands = cg.generateActivateCommand()
return self._sendCommands(commands)
def teardownLink(self, connection_id, source_port, dest_port, bandwidth):
cg = JunosEx4550CommandGenerator(connection_id,source_port,dest_port,self.network_name,bandwidth)
commands = cg.generateDeactivateCommand()
return self._sendCommands(commands)
class JunosEx4550Target(object):
def __init__(self, port, original_port,value=None):
self.port = port
self.value = value
self.original_port = original_port
# NEVER USE : in port name!
def __str__(self):
if self.port.remote_network is None:
return '<JuniperEX4550Target %s#%s=%s>' % (self.original_port,self.port.label.type_,self.value)
else:
return '<JuniperEX4550Target %s#%s=%s -> %s>' % (self.original_port,self.port.label.type_,self.value,self.port.remote_port,)
class JunosEx4550ConnectionManager:
def __init__(self, port_map, host, port, host_fingerprint, user, ssh_public_key, ssh_private_key,
network_name):
self.network_name = network_name
self.port_map = port_map
self.command_sender = JunosEx4550CommandSender(host, port, host_fingerprint, user, ssh_public_key, ssh_private_key,
network_name)
def getResource(self, port, label):
# parenthesize the conditional so the port prefix is kept when a label is present
return self.port_map[port] + ':' + ('' if label is None else str(label.labelValue()))
def getTarget(self, port, label):
return JunosEx4550Target(self.port_map[port], port,label.labelValue())
def createConnectionId(self, source_target, dest_target):
return 'JuniperEx4550-' + str(random.randint(100000,999999))
def canSwapLabel(self, label_type):
return True
def setupLink(self, connection_id, source_target, dest_target, bandwidth):
def linkUp(_):
log.msg('Link %s -> %s up' % (source_target, dest_target), system=LOG_SYSTEM)
d = self.command_sender.setupLink(connection_id,source_target, dest_target,bandwidth)
d.addCallback(linkUp)
return d
def teardownLink(self, connection_id, source_target, dest_target, bandwidth):
def linkDown(_):
log.msg('Link %s -> %s down' % (source_target, dest_target), system=LOG_SYSTEM)
d = self.command_sender.teardownLink(connection_id,source_target, dest_target, bandwidth)
d.addCallback(linkDown)
return d
def JunosEXBackend(network_name, nrm_ports, parent_requester, cfg):
name = 'JunosEX %s' % network_name
nrm_map = dict( [ (p.name, p) for p in nrm_ports ] ) # for the generic backend
port_map = dict( [ (p.name, p) for p in nrm_ports ] ) # for the nrm backend
host = cfg[config.JUNIPER_HOST]
port = cfg.get(config.JUNIPER_PORT, 22)
host_fingerprint = cfg[config.JUNIPER_HOST_FINGERPRINT]
user = cfg[config.JUNIPER_USER]
ssh_public_key = cfg[config.JUNIPER_SSH_PUBLIC_KEY]
ssh_private_key = cfg[config.JUNIPER_SSH_PRIVATE_KEY]
cm = JunosEx4550ConnectionManager(port_map, host, port, host_fingerprint, user, ssh_public_key, ssh_private_key,
network_name)
return genericbackend.GenericBackend(network_name, nrm_map, cm, parent_requester, name)
class JunosEx4550CommandGenerator(object):
def __init__(self,connection_id,src_port,dest_port,network_name,bandwidth=None):
self.connection_id = connection_id
self.src_port = src_port
self.dest_port = dest_port
self.bandwidth = bandwidth
self.network_name = network_name
log.msg('Initialised with params src %s dst %s bandwidth %s connectionid %s' %
(src_port,dest_port,bandwidth,connection_id), debug=True, system=LOG_SYSTEM)
def generateActivateCommand(self):
commands = []
source_port = self.src_port.port
dest_port = self.dest_port.port
log.msg("%s %s " % (source_port,dest_port))
log.msg("Activate commands between %s:%s:%s and %s:%s:%s " %
(source_port.remote_network, source_port.interface, source_port.label.type_,
dest_port.remote_network, dest_port.interface, dest_port.label.type_), debug=True,
system=LOG_SYSTEM)
# Local connection
if source_port.remote_network is None and dest_port.remote_network is None:
commands = self._generateLocalConnectionActivate()
elif source_port.remote_network is not None and dest_port.remote_network is not None:
commands = self._generateLocalConnectionActivate()
log.msg('Transit connection-HERE SHOULD BE COMMANDS FOR TRANSIT', system=LOG_SYSTEM)
else:
#commands = self._generateRemoteConnectionActivate()  # all cases are the same; TODO: remove these ifs completely
commands = self._generateLocalConnectionActivate()
return commands
def generateDeactivateCommand(self):
commands = []
source_port = self.src_port.port
dest_port = self.dest_port.port
log.msg("Deactivate commands between %s:%s#%s=%s and %s:%s#%s=%s " %
(source_port.remote_network, source_port.interface, source_port.label.type_,self.src_port.value,
dest_port.remote_network, dest_port.interface, dest_port.label.type_,self.dest_port.value), debug=True,
system=LOG_SYSTEM)
# Local connection
if source_port.remote_network is None and dest_port.remote_network is None:
commands = self._generateLocalConnectionDeActivate()
elif source_port.remote_network is not None and dest_port.remote_network is not None:
#commands = ["Transit connection"]
commands = self._generateLocalConnectionDeActivate()
else:
#commands = self._generateRemoteConnectionDeactivate()  # ditto: same as activate
commands = self._generateLocalConnectionDeActivate()
return commands
def _createSwitchName(self,connection_id):
switch_name = 'OpenNSA-local-%s' % (connection_id)
return switch_name
def _generateLocalConnectionActivate(self):
commands = []
switch_name = self._createSwitchName( self.connection_id )
# For configuration reasons, generate the per-port configuration first, then the interface-switch commands.
for gts_port in self.src_port,self.dest_port:
#if gts_port.port.label is not None and gts_port.port.label.type_ == "port":
# commands.append( COMMAND_SET_INTERFACES % { 'port':gts_port.port.interface} )
# commands.append( COMMAND_SET_INTERFACES_MTU % { 'port':gts_port.port.interface} )
# commands.append( COMMAND_SET_INTERFACES_CCC % { 'port':gts_port.port.interface} )
# TODO: remove this as ports are not supported
if gts_port.port.label is not None and gts_port.port.label.type_ == "vlan":
commands.append( COMMAND_SET_INTERFACE_VLN_T % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
commands.append( COMMAND_SET_INTERFACES_MTU % { 'port':gts_port.port.interface} )
commands.append( COMMAND_SET_INTERFACE_ENC_V % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
commands.append( COMMAND_SET_VLAN_ENCAP % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
commands.append( COMMAND_SET_VLAN_ID % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
commands.append( COMMAND_SET_SWAP_PUSH_POP % {'port':gts_port.port.interface, 'vlan':gts_port.value} )
for gts_port in self.src_port,self.dest_port:
commands.append( COMMAND_LOCAL_CONNECTIONS % { 'switch':switch_name,
'interface':"%s" % gts_port.port.interface,
'subinterface': "%s" % gts_port.value if
gts_port.port.label.type_ == "vlan" else '0' } )
return commands
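# A hedged illustration (added commentary): for a vlan-vlan connection between
# made-up endpoints xe-0/0/1 vlan 1500 and xe-0/0/2 vlan 1600, the method above
# emits, per port, the vlan-tagging / mtu / vlan-ccc encapsulation / vlan-id /
# swap-by-poppush set commands, followed by two
#     set protocols connections interface-switch OpenNSA-local-<id> interface <port>.<vlan>
# commands tying both sub-interfaces into one local switch.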
def _generateLocalConnectionDeActivate(self):
commands = []
switch_name = self._createSwitchName( self.connection_id )
for gts_port in self.src_port,self.dest_port:
#if gts_port.port.label.type_ == "port":
# commands.append( COMMAND_DELETE_INTERFACES % { 'port':gts_port.port.interface } )
if gts_port.port.label is not None and gts_port.port.label.type_ == "vlan":
commands.append( COMMAND_DELETE_INTERFACES_VL % { 'port':gts_port.port.interface, 'vlan' : "%s"
% gts_port.value})
commands.append( COMMAND_DELETE_CONNECTIONS % { 'switch':switch_name } )
return commands
# def _generateRemoteConnectionActivate(self):
# commands = []
#
# local_port = self.src_port if self.src_port.port.remote_network is None else self.dest_port
# remote_port = self.src_port if self.src_port.port.remote_network is not None else self.dest_port
# log.msg("%s" % local_port.original_port)
# log.msg("%s" % remote_port.original_port)
#
# if local_port.port.label.type_ == "port":
# commands.append( COMMAND_SET_INTERFACES % { 'port':local_port.port.interface} )
# commands.append( COMMAND_SET_INTERFACES_MTU % { 'port':local_port.port.interface} )
# commands.append( COMMAND_SET_INTERFACES_CCC % { 'port':local_port.port.interface} )
# if local_port.port.label.type_ == "vlan":
# commands.append( COMMAND_SET_INTERFACE_VLN_T % {'port':local_port.port.interface, 'vlan':local_port.value} )
# commands.append( COMMAND_SET_INTERFACE_ENC_V % {'port':local_port.port.interface, 'vlan':local_port.value} )
# commands.append( COMMAND_SET_VLAN_ENCAP % {'port':local_port.port.interface, 'vlan':local_port.value} )
# commands.append( COMMAND_SET_VLAN_ID % {'port':local_port.port.interface, 'vlan':local_port.value} )
# commands.append( COMMAND_SET_SWAP_PUSH_POP % {'port':local_port.port.interface, 'vlan':local_port.value} )
#
# if remote_port.port.label.type_ == "mpls":
# remote_sw_ip = self._getRouterLoopback(remote_port.port.remote_network)
#
# commands.append(COMMAND_REMOTE_LSP_OUT_TO % {
# 'unique-id':"T-"+remote_port.port.remote_network+"-F-"+self.network_name+"-mpls"+str(remote_port.value),
# 'remote_ip':remote_sw_ip } )
# commands.append(COMMAND_REMOTE_LSP_OUT_NOCSPF % {
# 'unique-id':"T-"+remote_port.port.remote_network+"-F-"+self.network_name+"-mpls"+str(remote_port.value),
# 'remote_ip':remote_sw_ip } )
#
#
# if local_port.port.label.type_ == "port":
# commands.append(COMMAND_REMOTE_CONNECTIONS_INT % { 'connectionid' : self.connection_id,
# 'port' : local_port.port.interface
# } )
# if local_port.port.label.type_ == "vlan":
# commands.append(COMMAND_REMOTE_CONNECTIONS_INT % { 'connectionid' : self.connection_id,
# 'port' : local_port.port.interface + "." + str(local_port.value)
# } )
#
# commands.append(COMMAND_REMOTE_CONNECTIONS_TRANSMIT_LSP % { 'connectionid' : self.connection_id,
# 'unique-id':"T-"+remote_port.port.remote_network+"-F-"+self.network_name+"-mpls"+str(remote_port.value)
# } )
# commands.append(COMMAND_REMOTE_CONNECTIONS_RECEIVE_LSP % { 'connectionid' : self.connection_id,
# 'unique-id':"T-"+self.network_name+"-F-"+remote_port.port.remote_network+"-mpls"+str(remote_port.value)
# } )
# if remote_port.port.label.type_ == "vlan":
# switch_name = self._createSwitchName( self.connection_id )
#
# commands.append( COMMAND_SET_INTERFACE_VLN_T % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
# commands.append( COMMAND_SET_INTERFACE_ENC_V % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
# commands.append( COMMAND_SET_VLAN_ENCAP % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
# commands.append( COMMAND_SET_VLAN_ID % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
# commands.append( COMMAND_SET_SWAP_PUSH_POP % {'port':remote_port.port.interface, 'vlan':remote_port.value} )
#
# for gts_port in local_port,remote_port:
# commands.append( COMMAND_LOCAL_CONNECTIONS % { 'switch':switch_name,
# 'interface':"%s" % gts_port.port.interface,
# 'subinterface': "%s" % gts_port.value if
# gts_port.port.label.type_ == "vlan" else '0' } )
#
#
# return commands
#
#
# def _generateRemoteConnectionDeactivate(self):
# commands = []
#
# local_port = self.src_port if self.src_port.port.remote_network is None else self.dest_port
# remote_port = self.src_port if self.src_port.port.remote_network is not None else self.dest_port
#
# if local_port.port.label.type_ == "port":
# commands.append( COMMAND_DELETE_INTERFACES % { 'port':local_port.port.interface } )
# if local_port.port.label.type_ == "vlan":
# commands.append( COMMAND_DELETE_INTERFACES_VL % { 'port':local_port.port.interface, 'vlan' : "%s"
# % local_port.value})
#
# if remote_port.port.label.type_ == "mpls":
# remote_sw_ip = self._getRouterLoopback(remote_port.port.remote_network)
# commands.append( COMMAND_DELETE_MPLS_LSP % {
# 'unique-id' : "T-"+remote_port.port.remote_network+"-F-"+self.network_name+"-mpls"+str(remote_port.value)
# } )
# commands.append( COMMAND_DELETE_REMOTE_INT_SW % { 'connectionid' :
# self.connection_id } )
# if remote_port.port.label.type_ == "vlan":
# switch_name = self._createSwitchName( self.connection_id )
# commands.append( COMMAND_DELETE_INTERFACES_VL % { 'port':remote_port.port.interface, 'vlan' : "%s"
# % remote_port.value})
# commands.append( COMMAND_DELETE_CONNECTIONS % { 'switch':switch_name } )
#
# return commands
#def _getRouterLoopback(self,network_name):
#
# if ":topology" in network_name:
# network_name = network_name.replace(":topology","")
# if network_name in self.gts_routers:
# return self.gts_routers[network_name]
# else:
# raise Exception("Can't find loopback IP address for network %s " % network_name)
|
|
#!/usr/bin/python3
'''
Author : Zachary Harvey
'''
import sqlite3 as sql
from datetime import datetime
import logging
from . import Set, Card, Collection, Deck, get_class
from .sqlments import *
from .externalapis.mtgsdkreader import where_card, where_set, find_many_cards
from .externalapis import scryfalldealer
from utils.csvhandlers import tcgplayer_csv_to_cards
class MTGDatabaseHandler:
def __init__(self):
self.__dbfile = ''
self.__dirty = False
self.openDB = None
def open_file(self, dbfile):
self.close_db_no_error()
self.openDB = sql.connect(dbfile)
self.dbfile = dbfile
def create_new_db(self, dbfile):
'''Create a new database file and initialize the set, card, and userbuild tables.'''
self.close_db_no_error()
self.open_file(dbfile)
cur = self.openDB.cursor()
cur.execute(MTGSETS_SQL)
cur.execute(MTGCARDS_SQL)
cur.execute(MTG_USERBUILD_SQL)
self.openDB.commit()
def close_db_no_error(self):
try:
self.openDB.close()
except Exception:
pass
def gettables(self):
tables = self.openDB.execute("SELECT name FROM sqlite_master WHERE type='table';").fetchall()
reVal = []
for t in tables:
reVal.extend(t)
return reVal
def find_by_like(self, table, item_type, orderby=None, **kwargs):
cur = self.openDB.cursor()
statement = 'SELECT * FROM ' + table + ' WHERE ' + self.where_like_statement(**kwargs)
if orderby is not None:
statement += ' ORDER BY ' + orderby + ' DESC'
return self.get_items(cur.execute(statement), item_type)
def find_by_exact(self, table, item_type, orderby=None, **kwargs):
wheres = []
for k, v in kwargs.items():
wheres.append('"' + k + '" =:' + k + '')
statement = 'SELECT * FROM '+table + ' WHERE ' + ' and '.join(wheres) + '; '
if orderby is not None:
statement = 'SELECT * FROM '+table + ' WHERE ' + ' and '.join(wheres) + ' ORDER BY ' + orderby + ' DESC ; '
return self.get_items(self.openDB.execute(statement, kwargs), item_type)
def get_items(self, cursor, item_type):
sets = []
for vals in cursor.fetchall():
sets.append(item_type.from_db_values(vals))
return sets
def where_like_statement(self, **kwargs):
# join clauses with AND so multiple keyword filters produce valid SQL;
# note values are interpolated directly, so callers must pass trusted strings
return ' AND '.join(k + ' LIKE "%' + v + '%"' for k, v in kwargs.items())
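# A hedged usage sketch (added commentary): with the helper above,
#     handler.find_by_like(MTGCARDS_TABLE_NAME, Card, name='Bolt')
# issues roughly
#     SELECT * FROM <cards table> WHERE name LIKE "%Bolt%"
# where 'Bolt' is a made-up search value and the table name comes from sqlments.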
#Set handlers Begin..
def find_sets_by_like(self, **kwargs):
return self.find_by_like(MTGSETS_TABLE_NAME, Set, **kwargs)
def find_sets_exact(self, **kwargs):
return self.find_by_exact(MTGSETS_TABLE_NAME, Set, **kwargs)
def all_set_codes(self):
result = self.openDB.execute('SELECT set_code FROM '+MTGSETS_TABLE_NAME).fetchall()
reLst = []
for r in result:
reLst.extend(r)
return reLst
def all_sets(self):
return [Set.from_db_values(r) for r in self.openDB.execute('SELECT * FROM ' + MTGSETS_TABLE_NAME).fetchall()]
def insert_sets(self, sets):
if not isinstance(sets, (tuple, list)):
sets = [sets]
inserts = [s.get_db_values() for s in sets]
cursor = self.openDB.cursor()
cursor.executemany('INSERT INTO "' + MTGSETS_TABLE_NAME + '" VALUES ('+', '.join(['?']*len(MTGSETS_KEYS_TYPES.keys())) + ');', inserts)
self.openDB.commit()
def get_all_cards_from_set(self, set, orderby='collectors_number'):
return self.find_cards_exact(orderby=orderby, set_code=set.set_code)
def get_all_cards_from_set_external(self, set):
cards = self.get_all_cards_from_set(set)
if len(cards) != set.card_count:
cards = scryfalldealer.find_cards_by_set(set)
fails = self.insert_cards_ignore_exception(cards)
if len(fails):
logging.error(str(len(fails)) + ' cards failed to be inserted and will be ignored')
return cards
def get_all_sets_by_type(self):
reDict = {}
for s in self.all_sets():
try:
reDict[s.set_type].append(s)
except KeyError:
reDict[s.set_type] = [s]
return reDict
#..End Set handlers
#Card handlers Begin...
def find_cards_by_like(self, orderby=None, **kwargs):
return self.find_by_like(MTGCARDS_TABLE_NAME, Card, orderby=orderby, **kwargs)
def find_cards_exact(self, orderby=None, **kwargs):
return self.find_by_exact(MTGCARDS_TABLE_NAME, Card, orderby=orderby, **kwargs)
def insert_cards(self, cards):
if not isinstance(cards, (tuple, list)):
cards = [cards]
inserts = [c.get_db_values() for c in cards]
cursor = self.openDB.cursor()
cursor.executemany('INSERT INTO "' + MTGCARDS_TABLE_NAME + '" VALUES (' + ', '.join(['?']*len(MTGCARDS_KEYS_TYPES.keys())) + ');', inserts)
self.openDB.commit()
def insert_cards_ignore_exception(self, cards):
if not isinstance(cards, (tuple, list)):
cards = [cards]
fails = []
cursor = self.openDB.cursor()
for c in cards:
i = c.get_db_values()
try:
cursor.execute('INSERT INTO "' + MTGCARDS_TABLE_NAME + '" VALUES (' + ', '.join(['?']*len(MTGCARDS_KEYS_TYPES.keys())) + ');', i)
except sql.DatabaseError as ex:
logging.exception('Can not insert card %s %s', c.name, c.set_code)
fails.append(c)
self.openDB.commit()
return fails
def find_cards_by_like_to_external(self, **kwargs):
'''
This is only going to work for single items. The or of `|` won't find anything in the local
sqlite DB.
'''
cards = self.find_cards_by_like(**kwargs)
if 0 == len(cards):
cards = where_card(**kwargs)
self.insert_cards(cards)
return self.find_cards_by_like(**kwargs)
def find_cards_exact_to_external(self, **kwargs):
'''
This is only going to work for single items. The or of `|` won't find anything in the local
sqlite DB.
'''
cards = self.find_cards_exact(**kwargs)
if 0 == len(cards):
cards = where_card(**kwargs)
self.insert_cards(cards)
return self.find_cards_exact(**kwargs)
def find_cards_from_cards(self, cards):
fails = []
success = []
for c in cards:
finds = []
if c.name is not None and c.set_code is not None:
finds = self.find_cards_exact(name=c.name, set_code=c.set_code)
elif c.name is not None:
finds = self.find_cards_exact(name=c.name)
else:
fails.append(c)
if 0 == len(finds):
logging.debug("No local match found for {}".format(c.name))
fails.append(c)
else:
cnt = 0
while len(finds) > cnt:
finds[cnt].count = c.count
cnt += 1
success += finds
return success, fails
def find_cards_from_cards_external(self, cards):
'''
Pass in a list of cards to find. Will query locally to find what cards are already in the local
database. Any cards not found in the DB will be passed to a mtgsdk querier to find there.
Those will be inserted into the local DB and then requeried locally.
Exceptions: ValueError passed up from externalapis.mtgsdkreader.find_many_cards
Return: a tuple being (list, list) each list is a list of cards. Index 0 is the cards found.
Index 1 is the cards passed in but not found.
'''
success, fails = self.find_cards_from_cards(cards)
if 0 < len(fails):
#self.insert_cards(find_many_cards(fails))
self.insert_cards_ignore_exception(scryfalldealer.find_cards_by_name(fails))
success, fails = self.find_cards_from_cards(cards)
return success, fails
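# A hedged usage sketch (added commentary): given a list of Card objects that
# carry at least a name (and optionally a set_code),
#     found, missing = handler.find_cards_from_cards_external(cards)
# any card absent from the local DB is fetched via scryfalldealer, inserted,
# and re-queried locally before being returned in `found`.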
def tcgplayer_csv_import(self, csvFile):
cards = tcgplayer_csv_to_cards(csvFile, self.all_set_codes())
suc, fails = self.find_cards_from_cards_external(cards)
for f in fails:
print('FAILED Name: ', f.name, ' Set Code: ', f.set_code)
return suc + fails
#...End Card handlers
#Userbuild handlers Begin...
def insert_userbuild_collection(self, collection, cards=[]):
format = None
if hasattr(collection, 'format'): format = collection.format
return self.insert_userbuild(collection.name, collection.path, collection.type, type(collection), format, cards)
def insert_userbuild(self, name, path, type, retype, format=None, cards=[]):
cursor = self.openDB.cursor()
cursor.execute('INSERT INTO "' + MTG_USERBUILD_TABLE_NAME + '" VALUES (' + ', '.join(['?']*len(MTG_USERBUILD_KEYS_TYPES.keys())) + ');', (None, path, name, format, type, None))
rowid = cursor.lastrowid
self.openDB.commit()
coll = self.get_userbuild(name, path, retype, rowid)[0]
cursor.execute('UPDATE "' + MTG_USERBUILD_TABLE_NAME + '" SET tablename = "' + coll.tablename + '" WHERE unqkey = ' + str(coll.unqkey) + ';')
table = retype.sql_create_table()(coll.tablename)
logging.debug('Executing ' + table)
cursor.execute(table)
self.openDB.commit()
coll.cards = cards
self.insert_cards_userbuild(coll)
return coll
def get_userbuild(self, name, path, retype, unqkey=None):
reLst = []
cols = []
if unqkey is None:
cols = self.openDB.execute('SELECT * FROM "' + MTG_USERBUILD_TABLE_NAME + '" WHERE "name" = :name AND "path" = :path AND type = :collection', {"name":name, "path": path, "collection": retype.collection_type()}).fetchall()
else:
cols = self.openDB.execute('SELECT * FROM "' + MTG_USERBUILD_TABLE_NAME + '" WHERE "name" = :name AND "path" = :path AND type = :collection AND unqkey = :unqkey', {"name":name, "path": path, "collection": retype.collection_type(), "unqkey": unqkey}).fetchall()
for c in cols:
reLst.append(retype.from_db_values(c))
return reLst
def insert_cards_userbuild(self, collection):
inserts = collection.get_card_inserts()
cursor = self.openDB.cursor()
try:
cursor.executemany(insert_statement(collection.tablename, collection.__class__.database_keytypes()), inserts)
except sql.IntegrityError as ex:
print(ex)
self.openDB.commit()
def add_cards_userbuild(self, collection, cards):
if not isinstance(cards, (list, tuple)):
cards = [cards]
inserts = []
for c in cards:
incard = collection.contains(c)
if incard is None:
if c.count < 1:
c.count = 1
inserts.append(c)
else:
print('IN CARD ', incard.name, ' COUNT ', incard.count)
print('ADD CARD ', c.name, ' COUNT ', c.count)
incard.count += c.count
self.openDB.execute('UPDATE "' + collection.tablename + '" SET count = ' + str(incard.count) + ' WHERE id = "' + incard.id + '";')
if len(inserts):
self.openDB.executemany(insert_statement(collection.tablename, collection.__class__.database_keytypes()), collection.get_card_inserts(inserts))
self.openDB.commit()
def get_all_userbuilds(self):
collections = []
typeindex = list(MTG_USERBUILD_KEYS_TYPES.keys()).index('type')
cursor = self.openDB.cursor()
for r in cursor.execute('SELECT * FROM ' + MTG_USERBUILD_TABLE_NAME + ';').fetchall():
collections.append(get_class(r[typeindex]).from_db_values(r))
collections[-1].cards = self.get_cards_from_collection(collections[-1])
return collections
def get_cards_from_collection(self, collection): #select * from mtgcards join Pauper using (id);
if not collection.tablename:
raise ValueError('Collection must contain a table name')
keytypes = {}
keytypes.update(MTGCARDS_KEYS_TYPES)
keytypes.update(collection.database_keytypes())
cursor = self.openDB.execute('SELECT * FROM ' + MTGCARDS_TABLE_NAME + ' join "' + collection.tablename + '" using (id);')
cards = []
for c in cursor.fetchall():
cards.append(Card.from_db_values(c, keytypes))
return cards
#...End Userbuild handlers
def create_collection(self, name, path, cards=[]):
return self.insert_userbuild(name, path, Collection.collection_type(), Collection, None, cards)
def create_deck(self, name, path, format='kitchen', cards=[]):
return self.insert_userbuild(name, path, Deck.collection_type(), Deck, format, cards)
def create_qube(self, name, path):
cursor = self.openDB.cursor()
cursor.execute('INSERT INTO ' + MTG_USERBUILD_TABLE_NAME + ' VALUES (' + ', '.join(['?']*len(MTG_USERBUILD_KEYS_TYPES.keys())) + ');', (None, path, name, None, 'QUBE', None))  # six values to match the userbuild columns, as in insert_userbuild
cursor.execute(deck_sql(name))
self.openDB.commit()
if __name__ == '__main__':
store = MTGDatabaseHandler()
store.create_new_db('./example.mtg')
|
|
"Multiclass Scatterplot Module."
from __future__ import annotations
from collections import defaultdict
import numpy as np
from progressivis.core.module import Module, ReturnRunStep, JSon
from progressivis.table.nary import NAry
from progressivis.stats import MCHistogram2D, Sample
from progressivis.table.range_query_2d import RangeQuery2d
from progressivis.utils.errors import ProgressiveError
from progressivis.core.utils import is_notebook, get_physical_base
from progressivis.io import DynVar
from ..table.table_base import BaseTable
from progressivis.core import notNone
from typing import (
Optional,
Tuple,
Sequence,
List,
Dict,
cast,
Union,
Any,
Literal,
TYPE_CHECKING,
)
Bounds = Tuple[float, float, float, float]
if TYPE_CHECKING:
from progressivis.core.scheduler import Scheduler
from progressivis.core.slot import Slot
class _DataClass:
def __init__(
self,
name: str,
that: MCScatterPlot,
x_column: str,
y_column: str,
scheduler: Scheduler,
approximate: bool = False,
**kwds: Any,
):
self.name = name
self._group = that.name
self.x_column = x_column
self.y_column = y_column
self._approximate = approximate
self._scheduler = scheduler
self.input_module: Optional[Module] = None
self.input_slot: Optional[str] = None
self.min: Any = None
self.max: Any = None
self.histogram2d: Optional[MCHistogram2D] = None
self.heatmap = None
self.min_value: Optional[DynVar] = that.min_value
self.max_value: Optional[DynVar] = that.max_value
self.sample: Union[None, Literal["default"], Module] = None
self.range_query_2d: Optional[Module] = None
def scheduler(self) -> Scheduler:
return self._scheduler
def create_dependent_modules(
self,
input_module: Module,
input_slot: str,
histogram2d: Optional[MCHistogram2D] = None,
heatmap: Optional[Module] = None,
**kwds: Any,
) -> _DataClass:
if self.input_module is not None:
return self
scheduler = self.scheduler()
with scheduler:
self.input_module = input_module
self.input_slot = input_slot
range_query_2d = RangeQuery2d(
column_x=self.x_column,
column_y=self.y_column,
group=self._group,
approximate=self._approximate,
scheduler=scheduler,
)
range_query_2d.create_dependent_modules(
input_module, input_slot, min_value=False, max_value=False
)
assert self.min_value is not None and self.max_value is not None
range_query_2d.input.lower = self.min_value.output.result
range_query_2d.input.upper = self.max_value.output.result
if histogram2d is None:
histogram2d = MCHistogram2D(
self.x_column,
self.y_column,
group=self._group,
scheduler=scheduler,
)
histogram2d.input.data = range_query_2d.output.result
if self.sample == "default":
self.sample = Sample(
samples=100, group=self._group, scheduler=scheduler
)
if isinstance(self.sample, Sample):
self.sample.input.table = range_query_2d.output.result
self.histogram2d = histogram2d
# self.sample = sample
# self.select = select
self.min = range_query_2d.min.output.result
self.max = range_query_2d.max.output.result
self.range_query_2d = range_query_2d
scatterplot = self
return scatterplot
class MCScatterPlot(NAry):
"Module visualizing a multiclass scatterplot."
def __init__(
self,
classes: Union[Sequence[Dict[str, Any]], Sequence[Tuple[str, ...]]],
x_label: str = "x",
y_label: str = "y",
approximate: bool = False,
**kwds: Any,
) -> None:
"""Build a multiclass scatterplot over the given classes, each providing its own x/y columns."""
super(MCScatterPlot, self).__init__(output_required=False, **kwds)
self.tags.add(self.TAG_VISUALIZATION)
self._classes = classes # TODO: check it ...
self._x_label = x_label
self._y_label = y_label
syn_x: Tuple[str, ...]
syn_y: Tuple[str, ...]
if isinstance(classes[0], tuple):
syn_x, syn_y = zip(*[(x, y) for (_, x, y, *ignored) in classes])
elif isinstance(classes[0], dict):
syn_x, syn_y = zip(*[(d["x_column"], d["y_column"]) for d in classes]) # type: ignore
self._translation = {x_label: syn_x, y_label: syn_y}
self._translated_keys = set(syn_x) | set(syn_y)
self._approximate = approximate
self._json_cache: Optional[JSon] = None
self.input_module: Optional[Module] = None
self.input_slot: Optional[str] = None
self._data_class_dict: Dict[str, _DataClass] = {}
self.min_value: Optional[DynVar] = None
self.max_value: Optional[DynVar] = None
self._ipydata: bool = is_notebook()
self.hist_tensor: Optional[np.ndarray[Any, Any]] = None
self.sample_tensor: Optional[np.ndarray[Any, Any]] = None
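# A hedged illustration (added commentary): `classes` accepts either tuples or
# dicts, e.g.
#     MCScatterPlot(classes=[('a', 'x1', 'y1'), ('b', 'x2', 'y2')])
# or
#     MCScatterPlot(classes=[dict(name='a', x_column='x1', y_column='y1')])
# where the class names and column names here are made-up examples.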
def forget_changes(self, input_slot: Slot) -> bool:
changes = False
if input_slot.deleted.any():
input_slot.deleted.next()
changes = True
if input_slot.created.any():
input_slot.created.next()
changes = True
if input_slot.updated.any():
input_slot.updated.next()
changes = True
return changes
def get_visualization(self) -> str:
return "mcscatterplot"
def predict_step_size(self, duration: float) -> int:
return 1
def group_inputs(
self,
) -> Tuple[bool, Dict[str, Dict[str, Tuple[Slot, float, float]]]]:
"""
Group inputs by classes using meta field on slots
"""
ret: Dict[str, Dict[str, Tuple[Slot, float, float]]] = defaultdict(dict)
changes = False
for name in self.input_slot_names():
if not self.has_input_slot(name):
continue
input_slot = self.get_input_slot(name)
meta = input_slot.meta
if meta is None:
continue
assert isinstance(meta, dict)
input_type: str = cast(str, meta["inp"])
class_: str = cast(str, meta["class_"])
if input_type not in ("hist", "sample"):
raise ValueError(f"{input_type} not in [hist, sample]")
changes |= self.forget_changes(input_slot)
ret[class_].update(
{
input_type: (
input_slot,
cast(float, meta["x"]),
cast(float, meta["y"]),
)
}
)
return changes, ret
def build_heatmap(self, inp: Slot, domain: Any, plan: int) -> Optional[JSon]:
inp_table = inp.data()
if inp_table is None:
return None
assert isinstance(inp_table, BaseTable)
if len(inp_table) == 0:
return None
row = notNone(inp_table.last()).to_dict()
json_: JSon = {}
if not (
np.isnan(row["xmin"])
or np.isnan(row["xmax"])
or np.isnan(row["ymin"])
or np.isnan(row["ymax"])
):
data = row["array"]
json_["bounds"] = (row["xmin"], row["ymin"], row["xmax"], row["ymax"])
if self._ipydata:
assert isinstance(plan, int)
json_["binnedPixels"] = plan
self.hist_tensor[:, :, plan] = row["array"] # type: ignore
else:
data = np.copy(row["array"]) # type: ignore
json_["binnedPixels"] = data
json_["range"] = [np.min(data), np.max(data)] # type: ignore
json_["count"] = np.sum(data)
json_["value"] = domain
return json_
return None
def make_json(self, json: JSon) -> JSon:
buffers = []
domain = []
samples: List[Tuple[List[Any], List[Any]]] = []
count = 0
xmin = ymin = -np.inf
xmax = ymax = np.inf
changes, grouped_inputs = self.group_inputs()
z = len(grouped_inputs)
if self._ipydata and self.hist_tensor is None:
for sl in grouped_inputs.values():
hi = sl["hist"][0]
xbins = hi.output_module.params.xbins
ybins = hi.output_module.params.ybins
self.hist_tensor = np.zeros((xbins, ybins, z), dtype="int32")
break
for i, (cname, inputs) in enumerate(grouped_inputs.items()):
hist_input = inputs["hist"][0]
buff = self.build_heatmap(hist_input, cname, i)
if buff is None:
return json
xmin_, ymin_, xmax_, ymax_ = buff.pop("bounds")
xmin = max(xmin, xmin_)
ymin = max(ymin, ymin_)
xmax = min(xmax, xmax_)
ymax = min(ymax, ymax_)
buffers.append(buff)
count += buff["count"]
domain.append(cname)
if "sample" in inputs:
sample_input = inputs["sample"][0]
select = sample_input.data()
x_column, y_column = inputs["sample"][1], inputs["sample"][2]
else:
select = None
if self._ipydata:
smpl: Tuple[List[Any], List[Any]]
if select is not None:
ph_x = get_physical_base(select[x_column])
ph_y = get_physical_base(select[y_column])
smpl = (
ph_x.loc[select[x_column].index.index],
ph_y.loc[select[y_column].index.index],
)
else:
smpl = ([], [])
else:
smpl = (
select.to_json(orient="split", columns=[x_column, y_column])
if select is not None
else []
)
samples.append(smpl)
if self._ipydata:
samples_counter: List[int] = []
for vx, vy in samples:
len_s = len(vx)
assert len_s == len(vy)
samples_counter.append(len_s)
nsam = max(samples_counter)
self.sample_tensor = np.zeros((nsam, 2, z), dtype="float32")
for i, (vx, vy) in enumerate(samples):
if not len(vx):
continue
self.sample_tensor[:, 0, i] = vx
self.sample_tensor[:, 1, i] = vy
json["samples_counter"] = samples_counter
samples = []
# TODO: check consistency among classes (e.g. same xbin, ybin etc.)
if self._ipydata:
assert self.hist_tensor is not None
xbins, ybins = self.hist_tensor.shape[:-1]
else:
xbins, ybins = buffers[0]["binnedPixels"].shape
encoding = {
"x": {
"bin": {"maxbins": xbins},
"aggregate": "count",
"field": self._x_label,
"type": "quantitative",
"scale": {"domain": [-7, 7], "range": [0, xbins]},
},
"z": {"field": "category", "type": "nominal", "scale": {"domain": domain}},
"y": {
"bin": {"maxbins": ybins},
"aggregate": "count",
"field": self._y_label,
"type": "quantitative",
"scale": {"domain": [-7, 7], "range": [0, ybins]},
},
}
source = {"program": "progressivis", "type": "python", "rows": count}
json["chart"] = dict(buffers=buffers, encoding=encoding, source=source)
json["bounds"] = dict(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax)
s_data: List[float] = []
        # Code not executed and probably wrong
for i, s in enumerate(samples):
if not s or not isinstance(s, dict):
continue
d = s["data"]
for row in d:
row.append(i)
s_data.extend(d)
json["sample"] = dict(data=s_data, index=list(range(len(s_data))))
json["columns"] = [self._x_label, self._y_label]
if self._ipydata:
json["hist_tensor"] = self.hist_tensor
json["sample_tensor"] = self.sample_tensor
return json
def run_step(
self, run_number: int, step_size: int, howlong: float
) -> ReturnRunStep:
for name in self.get_input_slot_multiple(self.nary):
slot = self.get_input_slot(name)
# slot.update(run_number)
if slot.has_buffered():
slot.clear_buffers()
self._json_cache = None
return self._return_run_step(self.state_blocked, steps_run=0)
def run(self, run_number: int) -> None:
super(MCScatterPlot, self).run(run_number)
if self._ipydata:
return
if self._json_cache is not None:
return
self._json_cache = self._to_json_impl()
def to_json(self, short: bool = False, with_speed: bool = True) -> JSon:
if self._json_cache:
return self._json_cache
self._json_cache = self._to_json_impl(short, with_speed)
return self._json_cache
def _to_json_impl(self, short: bool = False, with_speed: bool = True) -> JSon:
self.image = None
json = super(MCScatterPlot, self).to_json(short, with_speed=with_speed)
if short:
return json
return self.make_json(json)
def create_dependent_modules(
self,
input_module: Optional[Module] = None,
input_slot: str = "result",
sample: str = "default",
**kwds: Any,
) -> None:
self.input_module = input_module
self.input_slot = input_slot
with self.grouped():
scheduler = self.scheduler()
self.min_value = DynVar(
{k: None for k in self._translated_keys},
translation=self._translation,
scheduler=scheduler,
)
self.max_value = DynVar(
{k: None for k in self._translated_keys},
translation=self._translation,
scheduler=scheduler,
)
for cl in self._classes:
if isinstance(cl, tuple):
self._add_class(*cl) # type: ignore
elif isinstance(cl, dict):
self._add_class(**cl)
else:
raise ValueError(f"Invalid data {cl} in classes")
self._finalize()
def __getitem__(self, _class: str) -> _DataClass:
return self._data_class_dict[_class]
def _finalize(self) -> None:
for dc in self._data_class_dict.values():
assert dc.histogram2d is not None
for dc2 in self._data_class_dict.values():
assert dc2.x_column is not None and dc2.y_column is not None
x, y = dc2.x_column, dc2.y_column
rq2d = dc2.range_query_2d
assert rq2d is not None and rq2d.output is not None
dc.histogram2d.input["table", ("min", x, y)] = rq2d.output.min
dc.histogram2d.input["table", ("max", x, y)] = rq2d.output.max
def _add_class(
self,
name: str,
x_column: str,
y_column: str,
sample: Union[Literal["default"], Module] = "default",
sample_slot: str = "result",
input_module: Optional[Module] = None,
input_slot: Optional[str] = None,
) -> None:
if self.input_module is None and input_module is None:
raise ProgressiveError("Input module is not defined!")
if self.input_module is not None and input_module is not None:
raise ProgressiveError("Input module is defined twice!")
if self.input_slot is None and input_slot is None:
raise ProgressiveError("Input slot is not defined!")
if (
self.input_slot is not None
and input_slot is not None
and self.input_slot != input_slot
):
raise ProgressiveError("Input slot is defined twice!")
data_class = _DataClass(
name,
self,
x_column,
y_column,
approximate=self._approximate,
scheduler=self._scheduler,
)
data_class.sample = sample
input_module = input_module or self.input_module
input_slot = input_slot or self.input_slot
if input_module is not None and input_slot is not None:
data_class.create_dependent_modules(input_module, input_slot)
col_translation = {self._x_label: x_column, self._y_label: y_column}
hist_meta = dict(inp="hist", class_=name, **col_translation)
if data_class.histogram2d is not None:
self.input["table", hist_meta] = data_class.histogram2d.output.result
if isinstance(data_class.sample, Module):
meta = dict(inp="sample", class_=name, **col_translation)
self.input["table", meta] = data_class.sample.output[sample_slot]
self._data_class_dict[name] = data_class
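

# --- Illustrative sketch, not part of progressivis --------------------------
# group_inputs() above relies on every input slot carrying a `meta` dict with
# an "inp" key ("hist" or "sample") and a "class_" key naming the scatterplot
# class that the slot feeds. The helper below mimics that grouping with plain
# dicts so the pattern is easy to follow; the meta entries are made-up
# stand-ins, not real progressivis slots.
def _demo_group_by_meta():
    from collections import defaultdict

    fake_metas = [
        {"inp": "hist", "class_": "cars", "x": 0.0, "y": 1.0},
        {"inp": "sample", "class_": "cars", "x": 0.0, "y": 1.0},
        {"inp": "hist", "class_": "trucks", "x": 2.0, "y": 3.0},
    ]
    grouped = defaultdict(dict)
    for meta in fake_metas:
        # Key first by class, then by input kind, as group_inputs() does
        grouped[meta["class_"]][meta["inp"]] = (meta, meta["x"], meta["y"])
    # grouped["cars"] ends up with both a "hist" and a "sample" entry, while
    # grouped["trucks"] only has a "hist" entry (that class has no sampler).
    return dict(grouped)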
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, AGB & GC
# Full license can be found in License.md
# -----------------------------------------------------------------------------
""" Tests the ocb_scaling class and functions
"""
from io import StringIO
import logging
import numpy as np
from os import path
from sys import version_info
import unittest
import ocbpy
class TestOCBScalingLogFailure(unittest.TestCase):
def setUp(self):
""" Initialize the test class
"""
# Initialize the logging info
self.lwarn = u""
self.lout = u""
self.log_capture = StringIO()
ocbpy.logger.addHandler(logging.StreamHandler(self.log_capture))
ocbpy.logger.setLevel(logging.INFO)
# Initialize the testing variables
test_file = path.join(path.dirname(ocbpy.__file__), "tests",
"test_data", "test_north_circle")
self.assertTrue(path.isfile(test_file))
self.ocb = ocbpy.ocboundary.OCBoundary(filename=test_file,
instrument='image')
self.ocb.rec_ind = 27
self.vdata = ocbpy.ocb_scaling.VectorData(0, self.ocb.rec_ind, 75.0,
22.0, aacgm_n=50.0,
aacgm_e=86.5, aacgm_z=5.0,
dat_name="Test",
dat_units="$m s^{-1}$")
def tearDown(self):
""" Tear down the test case
"""
del self.lwarn, self.lout, self.log_capture, self.ocb, self.vdata
def test_no_scale_func(self):
""" Test OCBScaling initialization with no scaling function
"""
self.lwarn = u"no scaling function provided"
# Initialize the VectorData class without a scaling function
self.vdata.set_ocb(self.ocb)
self.assertIsNone(self.vdata.scale_func)
self.lout = self.log_capture.getvalue()
# Test logging error message for each bad initialization
self.assertTrue(self.lout.find(self.lwarn) >= 0)
def test_inconsistent_vector_warning(self):
""" Test init failure with inconsistent AACGM components
"""
self.lwarn = u"inconsistent AACGM"
        # Initialize the VectorData class with inconsistent vector magnitudes
self.vdata = ocbpy.ocb_scaling.VectorData(0, self.ocb.rec_ind,
75.0, 22.0,
aacgm_mag=100.0,
dat_name="Test",
dat_units="$m s^{-1}$")
self.lout = self.log_capture.getvalue()
# Test logging error message for each bad initialization
self.assertTrue(self.lout.find(self.lwarn) >= 0)
class TestOCBScalingMethods(unittest.TestCase):
def setUp(self):
""" Initialize the OCBoundary and VectorData objects
"""
test_file = path.join(path.dirname(ocbpy.__file__), "tests",
"test_data", "test_north_circle")
self.assertTrue(path.isfile(test_file))
self.ocb = ocbpy.ocboundary.OCBoundary(filename=test_file,
instrument='image')
self.ocb.rec_ind = 27
self.vdata = ocbpy.ocb_scaling.VectorData(0, self.ocb.rec_ind, 75.0,
22.0, aacgm_n=50.0,
aacgm_e=86.5, aacgm_z=5.0,
dat_name="Test",
dat_units="$m s^{-1}$")
self.wdata = ocbpy.ocb_scaling.VectorData(0, self.ocb.rec_ind, 75.0,
22.0, aacgm_n=50.0,
aacgm_e=86.5, aacgm_z=5.0,
aacgm_mag=100.036243432,
dat_name="Test",
dat_units="$m s^{-1}$")
self.zdata = ocbpy.ocb_scaling.VectorData(0, self.ocb.rec_ind, 87.2,
21.22, aacgm_n=0.0,
aacgm_e=0.0,
dat_name="Test Zero",
dat_units="$m s^{-1}$")
if version_info.major == 2:
self.assertRegex = self.assertRegexpMatches
def tearDown(self):
del self.ocb, self.vdata, self.wdata, self.zdata
def test_init_nez(self):
""" Test the initialisation of the VectorData object without magnitude
"""
self.assertAlmostEqual(self.vdata.aacgm_mag, 100.036243432)
self.assertAlmostEqual(self.zdata.aacgm_mag, 0.0)
def test_init_mag(self):
""" Test the initialisation of the VectorData object with magnitude
"""
self.assertAlmostEqual(self.wdata.aacgm_mag, 100.036243432)
def test_vector_repr_str(self):
""" Test the VectorData print statement using repr and str
"""
self.assertTrue(self.vdata.__repr__() == self.vdata.__str__())
def test_vector_repr_no_scaling(self):
""" Test the VectorData print statement without a scaling function
"""
out = self.vdata.__repr__()
self.assertRegex(out, "Vector data:")
self.assertRegex(out, "No magnitude scaling function")
del out
def test_vector_repr_with_scaling(self):
""" Test the VectorData print statement with a scaling function
"""
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
out = self.vdata.__repr__()
self.assertRegex(out, "Vector data:")
self.assertRegex(out, "Scaling function")
def test_vector_bad_lat(self):
""" Test the VectorData output with data from the wrong hemisphere
"""
self.vdata.aacgm_lat *= -1.0
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertTrue(np.isnan(self.vdata.ocb_lat))
self.assertTrue(np.isnan(self.vdata.ocb_mlt))
self.assertTrue(np.isnan(self.vdata.r_corr))
self.assertTrue(np.isnan(self.vdata.ocb_n))
self.assertTrue(np.isnan(self.vdata.ocb_e))
self.assertTrue(np.isnan(self.vdata.ocb_z))
def test_calc_large_pole_angle(self):
""" Test the OCB polar angle calculation with angles > 90 deg
"""
self.zdata.ocb_aacgm_mlt = 1.260677777
self.zdata.ocb_aacgm_lat = 83.99
self.zdata.ocb_lat = 84.838777192
self.zdata.ocb_mlt = 15.1110383783
self.zdata.calc_vec_pole_angle()
self.assertAlmostEqual(self.zdata.pole_angle, 91.72024697182087)
def test_calc_vec_pole_angle_acute(self):
""" Test the polar angle calculation with an acute angle
"""
self.vdata.set_ocb(self.ocb)
self.assertAlmostEqual(self.vdata.pole_angle, 8.67527923)
def test_calc_vec_pole_angle_zero(self):
""" Test the polar angle calculation with an angle of zero
"""
self.vdata.set_ocb(self.ocb)
self.vdata.aacgm_mlt = self.vdata.ocb_aacgm_mlt
self.vdata.calc_vec_pole_angle()
self.assertEqual(self.vdata.pole_angle, 0.0)
def test_calc_vec_pole_angle_flat(self):
""" Test the polar angle calculation with an angle of 180 deg
"""
self.vdata.set_ocb(self.ocb)
self.vdata.ocb_aacgm_mlt = 6.0
self.vdata.aacgm_mlt = 6.0
self.vdata.aacgm_lat = 45.0 + 0.5 * self.vdata.ocb_aacgm_lat
self.vdata.calc_vec_pole_angle()
self.assertEqual(self.vdata.pole_angle, 180.0)
def test_calc_vec_pole_angle_right_isosceles(self):
""" Test the polar angle calculation with a right isosceles triangle
"""
        # Set the distance between the data point and the OCB equal to the
        # distance between the AACGM pole and the OCB so that the triangle
        # we're examining is isosceles. If the triangle were flat,
        # the angle would be 45 degrees
self.vdata.set_ocb(self.ocb)
self.vdata.ocb_aacgm_mlt = 0.0
self.vdata.aacgm_mlt = 6.0
self.vdata.aacgm_lat = self.vdata.ocb_aacgm_lat
self.vdata.calc_vec_pole_angle()
self.assertAlmostEqual(self.vdata.pole_angle, 45.03325090532819)
def test_calc_vec_pole_angle_oblique(self):
""" Test the polar angle calculation with an isosceles triangle
"""
self.vdata.set_ocb(self.ocb)
self.vdata.aacgm_mlt = self.vdata.ocb_aacgm_mlt - 1.0
self.vdata.aacgm_lat = 45.0 + 0.5 * self.vdata.ocb_aacgm_lat
self.vdata.calc_vec_pole_angle()
self.assertAlmostEqual(self.vdata.pole_angle, 150.9561733411)
def test_define_quadrants(self):
""" Test the assignment of quadrants
"""
# Set the initial values
self.vdata.ocb_aacgm_mlt = self.ocb.phi_cent[self.vdata.ocb_ind] / 15.0
self.vdata.ocb_aacgm_lat = 90.0 - self.ocb.r_cent[self.vdata.ocb_ind]
(self.vdata.ocb_lat, self.vdata.ocb_mlt,
self.vdata.r_corr) = self.ocb.normal_coord(self.vdata.aacgm_lat,
self.vdata.aacgm_mlt)
self.vdata.calc_vec_pole_angle()
# Get the test quadrants
self.vdata.define_quadrants()
self.assertEqual(self.vdata.ocb_quad, 1)
self.assertEqual(self.vdata.vec_quad, 1)
def test_define_quadrants_neg_adj_mlt_west(self):
""" Test the quadrant assignment with a negative AACGM MLT and W vect
"""
self.vdata.aacgm_mlt = -22.0
self.vdata.aacgm_e *= -1.0
self.vdata.set_ocb(self.ocb)
self.assertGreater(self.vdata.ocb_aacgm_mlt-self.vdata.aacgm_mlt, 24)
self.assertEqual(self.vdata.ocb_quad, 1)
self.assertEqual(self.vdata.vec_quad, 2)
def test_define_quadrants_neg_north(self):
""" Test the quadrant assignment with a vector pointing south
"""
self.vdata.aacgm_n *= -1.0
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertEqual(self.vdata.ocb_quad, 1)
self.assertEqual(self.vdata.vec_quad, 4)
def test_define_quadrants_noon_north(self):
""" Test the quadrant assignment with a vector pointing north from noon
"""
self.vdata.aacgm_mlt = 12.0
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertEqual(self.vdata.ocb_quad, 2)
self.assertEqual(self.vdata.vec_quad, 1)
def test_define_quadrants_aligned_poles_southwest(self):
""" Test quad assignment w/vector pointing SW and both poles aligned
"""
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.vdata.aacgm_mlt = self.vdata.ocb_aacgm_mlt + 12.0
self.vdata.aacgm_n = -10.0
self.vdata.aacgm_e = -10.0
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertEqual(self.vdata.ocb_quad, 2)
self.assertEqual(self.vdata.vec_quad, 3)
def test_define_quadrants_ocb_south_night(self):
""" Test the quadrant assignment with the OCB pole in a south/night quad
"""
self.vdata.aacgm_mlt = 0.0
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.vdata.ocb_aacgm_mlt = 23.0
self.vdata.ocb_aacgm_lat = self.vdata.aacgm_lat - 2.0
self.vdata.calc_vec_pole_angle()
self.vdata.define_quadrants()
self.assertEqual(self.vdata.ocb_quad, 3)
self.assertEqual(self.vdata.vec_quad, 1)
def test_define_quadrants_ocb_south_day(self):
""" Test the quadrant assignment with the OCB pole in a south/day quad
"""
self.vdata.aacgm_mlt = 0.0
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.vdata.ocb_aacgm_mlt = 1.0
self.vdata.ocb_aacgm_lat = self.vdata.aacgm_lat - 2.0
self.vdata.calc_vec_pole_angle()
self.vdata.define_quadrants()
self.assertEqual(self.vdata.ocb_quad, 4)
self.assertEqual(self.vdata.vec_quad, 1)
def test_undefinable_quadrants(self):
""" Test OCBScaling initialization for undefinable quadrants
"""
self.vdata.aacgm_lat = 0.0
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertEqual(self.vdata.ocb_quad, 0)
self.assertEqual(self.vdata.vec_quad, 0)
def test_lost_ocb_quadrant(self):
""" Test OCBScaling initialization for unset quadrants
"""
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertEqual(self.vdata.ocb_quad, 1)
self.assertEqual(self.vdata.vec_quad, 1)
self.vdata.ocb_quad = 0
self.vdata.scale_vector()
self.assertEqual(self.vdata.ocb_quad, 1)
def test_lost_vec_quadrant(self):
""" Test OCBScaling initialization for unset quadrants
"""
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertEqual(self.vdata.ocb_quad, 1)
self.assertEqual(self.vdata.vec_quad, 1)
self.vdata.vec_quad = 0
self.vdata.scale_vector()
self.assertEqual(self.vdata.vec_quad, 1)
def test_calc_ocb_vec_sign(self):
""" Test the calculation of the OCB vector signs
"""
# Set the initial values
self.vdata.ocb_aacgm_mlt = self.ocb.phi_cent[self.vdata.ocb_ind] / 15.0
self.vdata.ocb_aacgm_lat = 90.0 - self.ocb.r_cent[self.vdata.ocb_ind]
(self.vdata.ocb_lat, self.vdata.ocb_mlt,
self.vdata.r_corr) = self.ocb.normal_coord(self.vdata.aacgm_lat,
self.vdata.aacgm_mlt)
self.vdata.calc_vec_pole_angle()
self.vdata.define_quadrants()
vmag = np.sqrt(self.vdata.aacgm_n**2 + self.vdata.aacgm_e**2)
self.vdata.aacgm_naz = np.degrees(np.arccos(self.vdata.aacgm_n / vmag))
# Calculate the vector data signs
vsigns = self.vdata.calc_ocb_vec_sign(north=True, east=True)
self.assertTrue(vsigns['north'])
self.assertTrue(vsigns['east'])
del vmag, vsigns
def test_scale_vec(self):
""" Test the calculation of the OCB vector signs
"""
# Set the initial values
self.vdata.ocb_aacgm_mlt = self.ocb.phi_cent[self.vdata.ocb_ind] / 15.0
self.vdata.ocb_aacgm_lat = 90.0 - self.ocb.r_cent[self.vdata.ocb_ind]
(self.vdata.ocb_lat, self.vdata.ocb_mlt,
self.vdata.r_corr) = self.ocb.normal_coord(self.vdata.aacgm_lat,
self.vdata.aacgm_mlt)
self.vdata.calc_vec_pole_angle()
self.vdata.define_quadrants()
vmag = np.sqrt(self.vdata.aacgm_n**2 + self.vdata.aacgm_e**2)
self.vdata.aacgm_naz = np.degrees(np.arccos(self.vdata.aacgm_n / vmag))
# Scale the data vector
self.vdata.scale_vector()
# Test the North and East components
self.assertAlmostEqual(self.vdata.ocb_n, 62.4751208491)
self.assertAlmostEqual(self.vdata.ocb_e, 77.9686428950)
# Test to see that the magnitudes and z-components are the same
self.assertAlmostEqual(self.vdata.aacgm_mag, self.vdata.ocb_mag)
self.assertAlmostEqual(self.vdata.ocb_z, self.vdata.aacgm_z)
del vmag
def test_scale_vec_z_zero(self):
""" Test the calculation of the OCB vector sign with no vertical aacgm_z
"""
        # Re-assign the necessary variable
self.vdata.aacgm_z = 0.0
# Run the scale_vector routine
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
# Assess the ocb_z component
self.assertEqual(self.vdata.ocb_z,
self.vdata.scale_func(0.0, self.vdata.unscaled_r,
self.vdata.scaled_r))
@unittest.skipIf(version_info.major == 2,
'Python 2.7 does not support subTest')
def test_scale_vec_pole_angle_zero(self):
""" Test the calculation of the OCB vector sign with no pole angle
"""
self.vdata.set_ocb(self.ocb)
self.vdata.pole_angle = 0.0
nscale = ocbpy.ocb_scaling.normal_evar(self.vdata.aacgm_n,
self.vdata.unscaled_r,
self.vdata.scaled_r)
escale = ocbpy.ocb_scaling.normal_evar(self.vdata.aacgm_e,
self.vdata.unscaled_r,
self.vdata.scaled_r)
# Cycle through all the possible options for a pole angle of zero/180
for tset in [('scale_func', None, self.vdata.aacgm_n,
self.vdata.aacgm_e),
('scale_func', ocbpy.ocb_scaling.normal_evar, nscale,
escale),
('ocb_aacgm_lat', self.vdata.aacgm_lat, -1.0 * nscale,
-1.0 * escale)]:
with self.subTest(tset=tset):
setattr(self.vdata, tset[0], tset[1])
# Run the scale_vector routine with the new attributes
self.vdata.scale_vector()
# Assess the ocb north and east components
self.assertEqual(self.vdata.ocb_n, tset[2])
self.assertEqual(self.vdata.ocb_e, tset[3])
del nscale, escale, tset
@unittest.skipIf(version_info.major > 2, 'Already tested')
def test_scale_vec_pole_angle_zero_none(self):
""" Test the OCB vector sign routine with no pole angle or scaling
"""
self.vdata.set_ocb(self.ocb)
self.vdata.pole_angle = 0.0
# Run the scale_vector routine with the new attributes
self.vdata.scale_vector()
# Assess the ocb north and east components
self.assertEqual(self.vdata.ocb_n, self.vdata.aacgm_n)
self.assertEqual(self.vdata.ocb_e, self.vdata.aacgm_e)
@unittest.skipIf(version_info.major > 2, 'Already tested')
def test_scale_vec_pole_angle_zero_scale_at_pole(self):
""" Test the OCB vector sign routine with no pole angle and data at pole
"""
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.vdata.pole_angle = 0.0
self.vdata.ocb_aacgm_lat = self.vdata.aacgm_lat
nscale = -1.0 * ocbpy.ocb_scaling.normal_evar(self.vdata.aacgm_n,
self.vdata.unscaled_r,
self.vdata.scaled_r)
escale = -1.0 * ocbpy.ocb_scaling.normal_evar(self.vdata.aacgm_e,
self.vdata.unscaled_r,
self.vdata.scaled_r)
# Run the scale_vector routine with the new attributes
self.vdata.scale_vector()
# Assess the ocb north and east components
self.assertEqual(self.vdata.ocb_n, nscale)
self.assertEqual(self.vdata.ocb_e, escale)
del nscale, escale
def test_set_ocb_zero(self):
""" Test setting of OCB values in VectorData without any magnitude
"""
# Set the OCB values without any E-field scaling, test to see that the
# AACGM and OCB vector magnitudes are the same
self.zdata.set_ocb(self.ocb)
self.assertEqual(self.zdata.ocb_mag, 0.0)
def test_set_ocb_none(self):
""" Test setting of OCB values without scaling
"""
# Set the OCB values without any E-field scaling, test to see that the
# AACGM and OCB vector magnitudes are the same
self.vdata.set_ocb(self.ocb)
self.assertAlmostEqual(self.vdata.aacgm_mag, self.vdata.ocb_mag)
def test_set_ocb_evar(self):
""" Test setting of OCB values with E field scaling
"""
# Set the OCB values with scaling for a variable proportional to
# the electric field
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertAlmostEqual(self.vdata.ocb_mag, 88.094416872365)
def test_set_ocb_curl_evar(self):
""" Test setting of OCB values with Curl E scaling
"""
# Set the OCB values with scaling for a variable proportional to
# the curl of the electric field
self.vdata.set_ocb(self.ocb,
scale_func=ocbpy.ocb_scaling.normal_curl_evar)
self.assertAlmostEqual(self.vdata.ocb_mag, 77.57814585822645)
def test_scaled_r(self):
""" Test that the scaled radius is correct
"""
self.vdata.set_ocb(self.ocb, None)
self.assertEqual(self.vdata.scaled_r, 16.0)
def test_unscaled_r(self):
""" Test that the unscaled radius is correct
"""
self.vdata.set_ocb(self.ocb, None)
self.assertEqual(self.vdata.unscaled_r, 14.09)
class TestVectorDataRaises(unittest.TestCase):
def setUp(self):
""" Initialize the tests for calc_vec_pole_angle
"""
test_file = path.join(path.dirname(ocbpy.__file__), "tests",
"test_data", "test_north_circle")
self.assertTrue(path.isfile(test_file))
self.ocb = ocbpy.ocboundary.OCBoundary(filename=test_file,
instrument='image')
self.ocb.rec_ind = 27
self.vdata = ocbpy.ocb_scaling.VectorData(0, self.ocb.rec_ind, 75.0,
22.0, aacgm_n=50.0,
aacgm_e=86.5, aacgm_z=5.0,
dat_name="Test",
dat_units="$m s^{-1}$")
self.input_attrs = list()
self.bad_input = [np.nan, np.full(shape=2, fill_value=np.nan)]
self.raise_out = list()
self.hold_val = None
if version_info.major == 2:
self.assertRaisesRegex = self.assertRaisesRegexp
def tearDown(self):
del self.ocb, self.vdata, self.input_attrs, self.bad_input
del self.raise_out, self.hold_val
def test_init_ocb_array_failure(self):
""" Test init failure with mismatched OCB and input array input
"""
self.input_attrs = [0, [27, 31], 75.0, 22.0]
self.bad_input = {'aacgm_n': 100.0, 'aacgm_e': 100.0,
'aacgm_z': 10.0, 'ocb_lat': 81.0,
'ocb_mlt': [2.0, 5.8, 22.5]}
with self.assertRaisesRegex(ValueError, "OCB index and input shapes"):
self.vdata = ocbpy.ocb_scaling.VectorData(*self.input_attrs,
**self.bad_input)
def test_init_ocb_vector_failure(self):
""" Test init failure with mismatched OCB and data array input
"""
self.input_attrs = [[3, 6, 0], [27, 31], [75.0, 87.2, 65.0],
[22.0, 21, 22]]
self.bad_input = {'aacgm_n': [100.0, 110.0, 30.0],
'aacgm_e': [100.0, 110.0, 30.0],
'aacgm_z': [10.0, 10.0, 3.0]}
with self.assertRaisesRegex(ValueError,
"Mismatched OCB and Vector input shapes"):
self.vdata = ocbpy.ocb_scaling.VectorData(*self.input_attrs,
**self.bad_input)
@unittest.skipIf(version_info.major == 2,
'Python 2.7 does not support subTest')
def test_init_vector_failure(self):
""" Test init failure with a bad mix of vector and scalar input
"""
self.input_attrs = [[0, self.ocb.rec_ind, [75.0, 70.0], [22.0, 20.0]],
[[0, 1], self.ocb.rec_ind, [75.0, 70.0], 22.0],
[[0, 1], self.ocb.rec_ind, [75.0, 70.0],
[22.0, 20.0, 23.0]]]
self.bad_input = [{'aacgm_n': 10.0},
{'aacgm_n': [100.0, 110.0, 30.0]},
{'aacgm_n': [100.0, 110.0, 30.0]}]
self.raise_out = ['data index shape must match vector shape',
'mismatched VectorData input shapes',
'mismatched VectorData input shapes']
for i, iattrs in enumerate(self.input_attrs):
tset = [iattrs, self.bad_input[i], self.raise_out[i]]
with self.subTest(tset=tset):
with self.assertRaisesRegex(ValueError, tset[2]):
self.vdata = ocbpy.ocb_scaling.VectorData(*tset[0],
**tset[1])
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_init_vector_failure_dat_ind(self):
""" Test init failure with a bad data index shape
"""
self.input_attrs = [0, self.ocb.rec_ind, [75.0, 70.0], [22.0, 20.0]]
self.bad_input = {'aacgm_n': 10.0}
with self.assertRaisesRegex(
ValueError, "data index shape must match vector shape"):
self.vdata = ocbpy.ocb_scaling.VectorData(*self.input_attrs,
**self.bad_input)
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_init_vector_failure_many_array_size(self):
""" Test init failure with a bad vector lengths
"""
self.input_attrs = [[0, 1], self.ocb.rec_ind, [75.0, 70.0], 20.0]
self.bad_input = {'aacgm_n': [100.0, 110.0, 30.0]}
with self.assertRaisesRegex(ValueError,
"mismatched VectorData input shapes"):
self.vdata = ocbpy.ocb_scaling.VectorData(*self.input_attrs,
**self.bad_input)
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_init_vector_failure_bad_lat_array_size(self):
""" Test init failure with a bad vector lengths
"""
self.input_attrs = [[0, 1, 3], self.ocb.rec_ind, [75.0, 70.0],
[22.0, 20.0, 23.0]]
self.bad_input = {'aacgm_n': [100.0, 110.0, 30.0]}
with self.assertRaisesRegex(ValueError,
"mismatched VectorData input shapes"):
self.vdata = ocbpy.ocb_scaling.VectorData(*self.input_attrs,
**self.bad_input)
@unittest.skipIf(version_info.major == 2,
'Python 2.7 does not support subTest')
def test_bad_calc_vec_pole_angle(self):
"""Test calc_vec_pole_angle failure with bad input"""
self.input_attrs = ['aacgm_mlt', 'ocb_aacgm_mlt', 'aacgm_lat',
'ocb_aacgm_lat']
self.raise_out = ["AACGM MLT of Vector", "AACGM MLT of OCB pole",
"AACGM latitude of Vector",
"AACGM latitude of OCB pole"]
tsets = [(iattrs, bi, self.raise_out[i])
for i, iattrs in enumerate(self.input_attrs)
for bi in self.bad_input]
self.vdata.set_ocb(self.ocb, None)
for tset in tsets:
with self.subTest(tset=tset):
self.hold_val = getattr(self.vdata, tset[0])
setattr(self.vdata, tset[0], tset[1])
with self.assertRaisesRegex(ValueError, tset[2]):
self.vdata.calc_vec_pole_angle()
setattr(self.vdata, tset[0], self.hold_val)
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_bad_calc_vec_pole_angle_mlt_float(self):
"""Test calc_vec_pole_angle failure with bad AACGM MLT
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.aacgm_mlt = self.bad_input[0]
with self.assertRaisesRegex(ValueError, "AACGM MLT of Vector"):
self.vdata.calc_vec_pole_angle()
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_bad_calc_vec_pole_angle_ocb_mlt_float_small(self):
"""Test calc_vec_pole_angle failure with small bad OCB MLT
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.ocb_aacgm_mlt = self.bad_input[0]
with self.assertRaisesRegex(ValueError, "AACGM MLT of OCB pole"):
self.vdata.calc_vec_pole_angle()
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_bad_calc_vec_pole_angle_vec_mlat_float(self):
"""Test calc_vec_pole_angle failure with bad vector latitude
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.aacgm_lat = self.bad_input[0]
with self.assertRaisesRegex(ValueError, "AACGM latitude of Vector"):
self.vdata.calc_vec_pole_angle()
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_bad_calc_vec_pole_angle_mlt_array(self):
"""Test calc_vec_pole_angle failure with bad AACGM MLT
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.aacgm_mlt = self.bad_input[1]
with self.assertRaisesRegex(ValueError, "AACGM MLT of Vector"):
self.vdata.calc_vec_pole_angle()
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_bad_calc_vec_pole_angle_ocb_mlt_float_big(self):
"""Test calc_vec_pole_angle failure with bad OCB MLT
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.ocb_aacgm_mlt = self.bad_input[1]
with self.assertRaisesRegex(ValueError, "AACGM MLT of OCB pole"):
self.vdata.calc_vec_pole_angle()
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_bad_calc_vec_pole_angle_ocb_mlat_float_small(self):
"""Test calc_vec_pole_angle failure with barely bad OCB latitude
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.ocb_aacgm_lat = self.bad_input[0]
with self.assertRaisesRegex(ValueError, "AACGM latitude of OCB pole"):
self.vdata.calc_vec_pole_angle()
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_bad_calc_vec_pole_angle_ocb_mlat_float_big(self):
"""Test calc_vec_pole_angle failure with bad OCB latitude
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.ocb_aacgm_lat = self.bad_input[1]
with self.assertRaisesRegex(ValueError, "AACGM latitude of OCB pole"):
self.vdata.calc_vec_pole_angle()
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_bad_calc_vec_aacgm_lat_float(self):
"""Test calc_vec_pole_angle failure with bad vector latitude
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.aacgm_lat = self.bad_input[1]
with self.assertRaisesRegex(ValueError, "AACGM latitude of Vector"):
self.vdata.calc_vec_pole_angle()
def test_no_ocb_lat(self):
""" Test failure when OCB latitude is not available
"""
self.vdata.ocb_lat = np.nan
with self.assertRaisesRegex(ValueError, 'OCB coordinates required'):
self.vdata.scale_vector()
def test_no_ocb_mlt(self):
""" Test failure when OCB MLT is not available
"""
self.vdata.ocb_mlt = np.nan
with self.assertRaisesRegex(ValueError, 'OCB coordinates required'):
self.vdata.scale_vector()
def test_no_ocb_pole_location(self):
""" Test failure when OCB pole location is not available
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.ocb_aacgm_mlt = np.nan
with self.assertRaisesRegex(ValueError, "OCB pole location required"):
self.vdata.scale_vector()
def test_no_ocb_pole_angle(self):
""" Test failure when pole angle is not available
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.pole_angle = np.nan
with self.assertRaisesRegex(
ValueError, "vector angle in poles-vector triangle required"):
self.vdata.scale_vector()
def test_bad_ocb_quad(self):
""" Test failure when OCB quadrant is wrong
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.ocb_quad = -1
with self.assertRaisesRegex(ValueError, "OCB quadrant undefined"):
self.vdata.calc_ocb_polar_angle()
def test_bad_vec_quad(self):
""" Test failure when vector quadrant is wrong
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.vec_quad = -1
with self.assertRaisesRegex(ValueError, "Vector quadrant undefined"):
self.vdata.calc_ocb_polar_angle()
def test_bad_quad_polar_angle(self):
""" Test failure when quadrant polar angle is bad
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.aacgm_naz = np.nan
with self.assertRaisesRegex(ValueError,
"AACGM polar angle undefined"):
self.vdata.calc_ocb_polar_angle()
def test_bad_quad_vector_angle(self):
""" Test failure when quandrant vector angle is bad
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.pole_angle = np.nan
with self.assertRaisesRegex(ValueError, "Vector angle undefined"):
self.vdata.calc_ocb_polar_angle()
def test_bad_calc_vec_sign_direction(self):
""" Test calc_vec_sign failure when no direction is provided
"""
self.vdata.set_ocb(self.ocb, None)
with self.assertRaisesRegex(ValueError,
"must set at least one direction"):
self.vdata.calc_ocb_vec_sign()
def test_bad_calc_sign_ocb_quad(self):
""" Test calc_vec_sign failure with bad OCB quadrant
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.ocb_quad = -1
with self.assertRaisesRegex(ValueError, "OCB quadrant undefined"):
self.vdata.calc_ocb_vec_sign(north=True)
def test_bad_calc_sign_vec_quad(self):
""" Test calc_vec_sign failure with bad vector quadrant
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.vec_quad = -1
with self.assertRaisesRegex(ValueError, "Vector quadrant undefined"):
self.vdata.calc_ocb_vec_sign(north=True)
def test_bad_calc_sign_polar_angle(self):
""" Test calc_vec_sign failure with bad polar angle
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.aacgm_naz = np.nan
with self.assertRaisesRegex(ValueError,
"AACGM polar angle undefined"):
self.vdata.calc_ocb_vec_sign(north=True)
def test_bad_calc_sign_pole_angle(self):
""" Test calc_vec_sign failure with bad pole angle
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.pole_angle = np.nan
with self.assertRaisesRegex(ValueError, "Vector angle undefined"):
self.vdata.calc_ocb_vec_sign(north=True)
    def test_bad_define_quadrants_pole_mlt(self):
"""Test define_quadrants failure with bad pole MLT
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.ocb_aacgm_mlt = np.nan
with self.assertRaisesRegex(ValueError, "OCB pole location required"):
self.vdata.define_quadrants()
    def test_bad_define_quadrants_vec_mlt(self):
"""Test define_quadrants failure with bad vector MLT
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.aacgm_mlt = np.nan
with self.assertRaisesRegex(ValueError,
"Vector AACGM location required"):
self.vdata.define_quadrants()
    def test_bad_define_quadrants_pole_angle(self):
"""Test define_quadrants failure with bad pole angle
"""
self.vdata.set_ocb(self.ocb, None)
self.vdata.pole_angle = np.nan
with self.assertRaisesRegex(
ValueError, "vector angle in poles-vector triangle required"):
self.vdata.define_quadrants()
class TestHaversine(unittest.TestCase):
def setUp(self):
""" Initialize the tests for the haversine and archaversine functions
"""
self.input_angles = np.linspace(-2.0*np.pi, 2.0*np.pi, 9)
self.hav_out = np.array([0.0, 0.5, 1.0, 0.5, 0.0, 0.5, 1.0, 0.5, 0.0])
        # archaversine is confined to 0-pi
self.ahav_out = abs(np.array([aa - np.sign(aa) * 2.0 * np.pi
if abs(aa) > np.pi
else aa for aa in self.input_angles]))
self.out = None
def tearDown(self):
del self.input_angles, self.hav_out, self.out, self.ahav_out
@unittest.skipIf(version_info.major == 2,
'Python 2.7 does not support subTest')
def test_haversine(self):
""" Test implimentation of the haversine
"""
# Cycle through all the possible input options
for i, tset in enumerate([(self.input_angles[0], self.hav_out[0]),
(list(self.input_angles), self.hav_out),
(self.input_angles, self.hav_out)]):
with self.subTest(tset=tset):
self.out = ocbpy.ocb_scaling.hav(tset[0])
# Assess the output
if i == 0:
self.assertAlmostEqual(self.out, tset[1])
else:
self.assertTrue(np.all(abs(self.out - tset[1]) < 1.0e-7))
del tset
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_haversine_float(self):
""" Test implimentation of the haversine with float inputs
"""
for i, alpha in enumerate(self.input_angles):
self.assertAlmostEqual(ocbpy.ocb_scaling.hav(alpha),
self.hav_out[i])
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_haversine_list(self):
""" Test implimentation of the haversine with a list input
"""
self.out = ocbpy.ocb_scaling.hav(list(self.input_angles))
self.assertTrue(np.all(abs(self.out - self.hav_out) < 1.0e-7))
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_haversine_array(self):
""" Test implimentation of the haversine with a array input
"""
self.out = ocbpy.ocb_scaling.hav(self.input_angles)
self.assertTrue(np.all(abs(self.out - self.hav_out) < 1.0e-7))
@unittest.skipIf(version_info.major == 2,
'Python 2.7 does not support subTest')
def test_inverse_haversine(self):
""" Test the implemenation of the inverse haversine
"""
# Cycle through all the possible input options
for i, tset in enumerate([(self.hav_out[0], self.ahav_out[0]),
(list(self.hav_out), self.ahav_out),
(self.hav_out, self.ahav_out)]):
with self.subTest(tset=tset):
self.out = ocbpy.ocb_scaling.archav(tset[0])
# Assess the output
if i == 0:
self.assertEqual(self.out, abs(tset[1]))
else:
self.assertTrue(np.all(self.out - tset[1] < 1.0e-7))
del tset
    @unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_inverse_haversine_float(self):
""" Test implimentation of the inverse haversine with float input
"""
for i, self.out in enumerate(self.ahav_out):
self.assertAlmostEqual(ocbpy.ocb_scaling.archav(self.hav_out[i]),
self.out)
    @unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_inverse_haversine_list(self):
""" Test implimentation of the inverse haversine with list input
"""
self.out = ocbpy.ocb_scaling.archav(list(self.hav_out))
self.assertTrue(np.all(abs(self.out - self.ahav_out)
< 1.0e-7))
    @unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_inverse_haversine_array(self):
""" Test implimentation of the inverse haversine with array input
"""
self.out = ocbpy.ocb_scaling.archav(self.hav_out)
self.assertTrue(np.all(abs(self.out - self.ahav_out) < 1.0e-7))
def test_inverse_haversine_small_float(self):
""" Test implimentation of the inverse haversine with very small numbers
"""
self.assertEqual(ocbpy.ocb_scaling.archav(1.0e-17), 0.0)
self.assertEqual(ocbpy.ocb_scaling.archav(-1.0e-17), 0.0)
def test_inverse_haversine_nan_float(self):
""" Test implimentation of the inverse haversine with NaN
"""
self.assertTrue(np.isnan(ocbpy.ocb_scaling.archav(np.nan)))
def test_inverse_haversine_negative_float(self):
""" Test implimentation of the inverse haversine with negative input
"""
self.assertTrue(np.isnan(ocbpy.ocb_scaling.archav(-1.0)))
def test_inverse_haversine_mixed(self):
""" Test the inverse haversine with array input of good and bad values
"""
# Update the test input and output
self.hav_out[0] = 1.0e-17
self.ahav_out[0] = 0.0
self.hav_out[1] = np.nan
self.ahav_out[1] = np.nan
self.hav_out[2] = -1.0
self.ahav_out[2] = np.nan
self.out = ocbpy.ocb_scaling.archav(self.hav_out)
for i, hout in enumerate(self.out):
if np.isnan(hout):
self.assertTrue(np.isnan(self.ahav_out[i]))
else:
self.assertAlmostEqual(hout, self.ahav_out[i])
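

# --- Illustrative sketch, not part of ocbpy ---------------------------------
# The TestHaversine cases above exercise ocbpy.ocb_scaling.hav and archav. For
# reference, the usual formulas are hav(theta) = sin(theta / 2) ** 2 and
# archav(h) = 2 * arcsin(sqrt(h)); the inverse is confined to [0, pi], which
# is why setUp folds the input angles back into that range. The local
# reimplementations below only illustrate those formulas and are assumptions
# about the library, not the library functions themselves (for instance, the
# real archav also clips tiny values to zero and maps negative input to NaN,
# as the small-float and negative-float tests above show).
def _demo_hav(alpha):
    """Return the haversine of an angle in radians (elementwise for arrays)."""
    return np.sin(np.asarray(alpha) / 2.0) ** 2


def _demo_archav(hval):
    """Return the inverse haversine in [0, pi] (elementwise for arrays)."""
    return 2.0 * np.arcsin(np.sqrt(np.asarray(hval)))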
class TestOCBScalingArrayMethods(unittest.TestCase):
def setUp(self):
""" Initialize the OCBoundary and array VectorData objects
"""
test_file = path.join(path.dirname(ocbpy.__file__), "tests",
"test_data", "test_north_circle")
self.ocb = ocbpy.ocboundary.OCBoundary(filename=test_file,
instrument='image')
# Construct a set of test vectors that have all the different OCB
# and vector combinations and one with no magnitude. The test OCB pole
# is at 87.24 deg, 5.832 h
lats = np.full(shape=(17,), fill_value=75.0)
lats[8:] = 89.0
mlts = np.zeros(shape=(17,))
mlts[4:8] = 15.0
mlts[8:12] = 7.0
mlts[12:] = 5.0
north = [10.0, 10.0, -10.0, -10.0, 10.0, 10.0, -10.0, -10.0,
10.0, 10.0, -10.0, -10.0, 10.0, 10.0, -10.0, -10.0, 0.0]
east = [3.0, -3.0, -3.0, 3.0, 3.0, -3.0, -3.0, 3.0, 3.0, -3.0, -3.0,
3.0, 3.0, -3.0, -3.0, 3.0, 0.0]
vert = np.zeros(shape=(17,))
vert[0] = 5.0
self.ref_quads = {'ocb': [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
4, 4],
'vec': [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3,
4, 1]}
self.vargs = [np.arange(0, 17, 1), 27, lats, mlts]
self.vkwargs = {'aacgm_n': np.array(north), 'aacgm_e': np.array(east),
'aacgm_z': np.array(vert), 'dat_name': 'Test',
'dat_units': 'm/s'}
self.vdata = None
self.out = None
self.aacgm_mag = np.full(shape=(17,), fill_value=10.44030650891055)
self.aacgm_mag[0] = 11.575836902790225
self.aacgm_mag[-1] = 0.0
if version_info.major == 2:
self.assertRegex = self.assertRegexpMatches
self.assertNotRegex = self.assertNotRegexpMatches
def tearDown(self):
del self.ocb, self.vargs, self.vkwargs, self.out, self.vdata
del self.aacgm_mag, self.ref_quads
def set_vector_ocb_ind(self):
""" Update the input vector arguements to have vector OCB index input
"""
self.vargs[1] = np.full(shape=self.vargs[-1].shape, fill_value=27)
self.vargs[1][8:] = 31
def test_array_vector_repr_not_calc(self):
""" Test the VectorData print statement with uncalculated array input
"""
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.out = self.vdata.__repr__()
self.assertRegex(self.out, "Index")
self.assertRegex(self.out, "nan, nan, {:d}".format(self.vargs[1]))
def test_array_vector_repr_calc(self):
""" Test the VectorData print statement with calculated array input
"""
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.out = self.vdata.__repr__()
self.assertRegex(self.out, "Index")
self.assertNotRegex(self.out, "nan, nan")
def test_array_vector_repr_calc_ocb_vec_array(self):
""" Test the VectorData print statement with calculated ocb/vec arrays
"""
self.set_vector_ocb_ind()
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.out = self.vdata.__repr__()
self.assertRegex(self.out, "Index")
self.assertNotRegex(self.out, "nan, nan")
def test_array_vector_repr_calc_ocb_array(self):
""" Test the VectorData print statement with calculated ocb arrays
"""
self.vargs[0] = self.vargs[0][0]
self.set_vector_ocb_ind()
self.vargs[2] = self.vargs[2][0]
self.vargs[3] = self.vargs[3][0]
self.vkwargs['aacgm_n'] = self.vkwargs['aacgm_n'][0]
self.vkwargs['aacgm_e'] = self.vkwargs['aacgm_e'][0]
self.vkwargs['aacgm_z'] = self.vkwargs['aacgm_z'][0]
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.out = self.vdata.__repr__()
self.assertRegex(self.out, "Index")
self.assertNotRegex(self.out, "nan, nan")
def test_init_nez_vec_array(self):
""" Test VectorData initialisation with vector array components
"""
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.assertEqual(len(self.vdata.aacgm_mag), len(self.vargs[0]))
self.assertEqual(len(self.vdata.ocb_mag), len(self.vargs[0]))
for i, self.out in enumerate(self.vdata.aacgm_mag):
self.assertAlmostEqual(self.out, self.aacgm_mag[i])
def test_init_nez_ocb_vec_array(self):
""" Test VectorData initialisation with ocb and vector array components
"""
self.set_vector_ocb_ind()
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.assertEqual(len(self.vdata.aacgm_mag), len(self.vargs[0]))
self.assertEqual(len(self.vdata.ocb_mag), len(self.vargs[0]))
for i, self.out in enumerate(self.vdata.aacgm_mag):
self.assertAlmostEqual(self.out, self.aacgm_mag[i])
def test_init_nez_ocb_array(self):
""" Test VectorData initialisation with ocb array components
"""
self.set_vector_ocb_ind()
self.vargs[2] = self.vargs[2][0]
self.vargs[3] = self.vargs[3][0]
self.vkwargs['aacgm_n'] = self.vkwargs['aacgm_n'][0]
self.vkwargs['aacgm_e'] = self.vkwargs['aacgm_e'][0]
self.vkwargs['aacgm_z'] = self.vkwargs['aacgm_z'][0]
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.assertEqual(len(self.vdata.ocb_mag), len(self.vargs[1]))
self.assertAlmostEqual(self.vdata.aacgm_mag, self.aacgm_mag[0])
def test_init_mag(self):
""" Test the initialisation of the VectorData array input with magnitude
"""
self.vkwargs['aacgm_mag'] = self.aacgm_mag
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.assertEqual(len(self.vdata.aacgm_mag), len(self.vargs[0]))
for i, self.out in enumerate(self.vdata.aacgm_mag):
self.assertAlmostEqual(self.out, self.aacgm_mag[i])
def test_vector_all_bad_lat(self):
""" Test the VectorData output with all data from the wrong hemisphere
"""
self.vargs[2] *= -1.0
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.assertTrue(len(self.vdata.ocb_lat), len(self.vargs[2]))
self.assertTrue(np.all(np.isnan(self.vdata.ocb_lat)))
self.assertTrue(np.all(np.isnan(self.vdata.ocb_mlt)))
self.assertTrue(np.all(np.isnan(self.vdata.ocb_n)))
self.assertTrue(np.all(np.isnan(self.vdata.ocb_e)))
self.assertTrue(np.all(np.isnan(self.vdata.ocb_z)))
# Ensure that input is not overwritten
for vkey in self.vkwargs.keys():
self.out = getattr(self.vdata, vkey)
if vkey.find('aacgm_') == 0:
for i, val in enumerate(self.vkwargs[vkey]):
if np.isnan(val):
self.assertTrue(np.isnan(self.out[i]))
else:
self.assertEqual(self.out[i], val)
else:
self.assertRegex(self.out, self.vkwargs[vkey])
def test_vector_some_bad_lat(self):
""" Test the VectorData output with mixed hemisphere input
"""
self.vargs[2][0] *= -1.0
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.assertTrue(len(self.vdata.ocb_lat), len(self.vargs[2]))
# Ensure the wrong hemisphere is NaN
self.assertTrue(np.isnan(self.vdata.ocb_lat[0]))
self.assertTrue(np.isnan(self.vdata.ocb_mlt[0]))
self.assertTrue(np.isnan(self.vdata.ocb_n[0]))
self.assertTrue(np.isnan(self.vdata.ocb_e[0]))
self.assertTrue(np.isnan(self.vdata.ocb_z[0]))
# Ensure that input is not overwritten
for vkey in self.vkwargs.keys():
self.out = getattr(self.vdata, vkey)
if vkey.find('aacgm_') == 0:
for i, val in enumerate(self.vkwargs[vkey]):
if np.isnan(val):
self.assertTrue(np.isnan(self.out[i]))
else:
self.assertEqual(self.out[i], val)
else:
self.assertRegex(self.out, self.vkwargs[vkey])
# Ensure the right hemisphere is good
self.assertAlmostEqual(self.vdata.aacgm_mag[1], self.aacgm_mag[1])
self.assertAlmostEqual(self.vdata.ocb_mag[1], self.aacgm_mag[1])
def test_calc_vec_pole_angle_flat(self):
""" Test the polar angle calculation with angles of 0 and 180 deg
"""
self.vargs[3] = np.full(shape=self.vargs[2].shape,
fill_value=ocbpy.ocb_time.deg2hr(
self.ocb.phi_cent[self.vargs[1]]))
self.vargs[3][np.array(self.ref_quads['ocb']) > 2] += 12.0
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.assertTrue(np.all([quad in [2, 4]
for quad in self.vdata.ocb_quad]))
self.assertEqual(list(self.vdata.vec_quad), self.ref_quads['vec'])
self.assertTrue(np.all([self.vdata.pole_angle[i] == 0.0
for i, quad in enumerate(self.vdata.ocb_quad)
if quad == 2]))
self.assertTrue(np.all([self.vdata.pole_angle[i] == 180.0
for i, quad in enumerate(self.vdata.ocb_quad)
if quad == 4]))
def test_array_vec_quad(self):
""" Test the assignment of vector quadrants with array input
"""
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.assertEqual(len(self.vargs[0]), len(self.vdata.vec_quad))
self.assertEqual(list(self.vdata.vec_quad), self.ref_quads['vec'])
def test_array_ocb_quad(self):
""" Test the assignment of OCB quadrants with array input
"""
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.assertEqual(len(self.vargs[0]), len(self.vdata.ocb_quad))
self.assertEqual(list(self.vdata.ocb_quad), self.ref_quads['ocb'])
def test_one_undefinable_ocb_quadrant(self):
""" Test VectorData array initialization for a undefinable OCB quadrant
"""
self.vargs[2][1] = 0.0
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.assertEqual(self.vdata.ocb_quad[0], 1)
self.assertEqual(self.vdata.ocb_quad[1], 0)
def test_one_undefinable_vec_quadrant(self):
""" Test VectorData array initialization for a undefinable vec quadrant
"""
self.vkwargs['aacgm_n'][1] = np.nan
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.assertEqual(self.vdata.vec_quad[0], 1)
self.assertEqual(self.vdata.vec_quad[1], 0)
def test_define_quadrants_neg_adj_mlt(self):
""" Test the quadrant assignment with a negative AACGM MLT
"""
self.vargs[3] -= 24.0
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.assertGreater(self.vdata.ocb_aacgm_mlt - self.vargs[3][0], 24)
self.assertEqual(list(self.vdata.ocb_quad), self.ref_quads['ocb'])
self.assertEqual(list(self.vdata.vec_quad), self.ref_quads['vec'])
@unittest.skipIf(version_info.major == 2,
'Python 2.7 does not support subTest')
def test_scale_vec_pole_angle_zero(self):
""" Test the calculation of the OCB vector sign with no pole angle
"""
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
# If the OCB pole is closer to the AACGM pole than the vector, set
# the pole angle to zero deg. Otherwise, set it to 180.0 deg
self.vdata.pole_angle = np.zeros(shape=self.vargs[2].shape)
self.vdata.pole_angle[self.vdata.ocb_quad > 2] = 180.0
nscale = ocbpy.ocb_scaling.normal_evar(self.vdata.aacgm_n,
self.vdata.unscaled_r,
self.vdata.scaled_r)
escale = ocbpy.ocb_scaling.normal_evar(self.vdata.aacgm_e,
self.vdata.unscaled_r,
self.vdata.scaled_r)
# Cycle through all the possible options for a pole angle of zero/180
for tset in [('scale_func', None, self.vkwargs['aacgm_n'],
self.vkwargs['aacgm_e']),
('scale_func', ocbpy.ocb_scaling.normal_evar, nscale,
escale)]:
with self.subTest(tset=tset):
setattr(self.vdata, tset[0], tset[1])
# Run the scale_vector routine with the new attributes
self.vdata.scale_vector()
# Assess the ocb north and east components
self.assertTrue(np.all(self.vdata.ocb_n == tset[2]))
self.assertTrue(np.all(self.vdata.ocb_e == tset[3]))
del nscale, escale, tset
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_scale_vec_pole_angle_zero_noscale(self):
""" Test the OCB vector sign calc with no pole angle or scaling
"""
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb)
self.vdata.pole_angle = np.zeros(shape=self.vargs[2].shape)
self.vdata.pole_angle[self.vdata.ocb_quad > 2] = 180.0
# Run the scale_vector routine with the new attributes
self.vdata.scale_vector()
# Assess the ocb north and east components
self.assertTrue(np.all(self.vdata.ocb_n == self.vkwargs['aacgm_n']))
self.assertTrue(np.all(self.vdata.ocb_e == self.vkwargs['aacgm_e']))
@unittest.skipIf(version_info.major > 2, 'Already tested with subTest')
def test_scale_vec_pole_angle_zero_scale(self):
""" Test the OCB vector sign calc with scaling but no pole angle
"""
self.vdata = ocbpy.ocb_scaling.VectorData(*self.vargs, **self.vkwargs)
self.vdata.set_ocb(self.ocb, scale_func=ocbpy.ocb_scaling.normal_evar)
self.vdata.pole_angle = np.zeros(shape=self.vargs[2].shape)
self.vdata.pole_angle[self.vdata.ocb_quad > 2] = 180.0
# Run the scale_vector routine with the new attributes
self.vdata.scale_vector()
# Assess the ocb north and east components
self.out = ocbpy.ocb_scaling.normal_evar(self.vkwargs['aacgm_n'],
self.vdata.unscaled_r,
self.vdata.scaled_r)
self.assertTrue(np.all(self.vdata.ocb_n == self.out))
self.out = ocbpy.ocb_scaling.normal_evar(self.vkwargs['aacgm_e'],
self.vdata.unscaled_r,
self.vdata.scaled_r)
self.assertTrue(np.all(self.vdata.ocb_e == self.out))
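

# --- Illustrative note, not part of ocbpy -----------------------------------
# The reference magnitudes in TestOCBScalingArrayMethods.setUp follow directly
# from the vector components defined there: the AACGM magnitude is the
# Euclidean norm sqrt(n ** 2 + e ** 2 + z ** 2), so the first vector
# (10, 3, 5) gives sqrt(134) ~= 11.5758369, the purely horizontal vectors
# (+/-10, +/-3, 0) give sqrt(109) ~= 10.4403065, and the final zero vector
# gives 0. A minimal check of that arithmetic:
def _demo_expected_magnitudes():
    north = np.array([10.0, 10.0, 0.0])
    east = np.array([3.0, -3.0, 0.0])
    vert = np.array([5.0, 0.0, 0.0])
    # Returns roughly array([11.57583690, 10.44030651, 0.0])
    return np.sqrt(north ** 2 + east ** 2 + vert ** 2)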
|
|
#
# persist.py
# Part of SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Ryan Hileman and Aparajita Fishman
#
# Project: https://github.com/SublimeLinter/SublimeLinter3
# License: MIT
#
"""This module provides persistent global storage for other modules."""
from collections import defaultdict
from copy import deepcopy
import json
import os
import re
import sublime
import sys
from . import util
PLUGIN_NAME = 'SublimeLinter'
# Get the name of the plugin directory, which is the parent of this file's directory
PLUGIN_DIRECTORY = os.path.basename(os.path.dirname(os.path.dirname(__file__)))
LINT_MODES = (
('background', 'Lint whenever the text is modified'),
('load/save', 'Lint only when a file is loaded or saved'),
('save only', 'Lint only when a file is saved'),
('manual', 'Lint only when requested')
)
SYNTAX_RE = re.compile(r'(?i)/([^/]+)\.(?:tmLanguage|sublime-syntax)$')
DEFAULT_GUTTER_THEME_PATH = 'Packages/SublimeLinter/gutter-themes/Default/Default.gutter-theme'
class Settings:
"""This class provides global access to and management of plugin settings."""
def __init__(self):
"""Initialize a new instance."""
self.settings = {}
self.previous_settings = {}
self.changeset = set()
self.plugin_settings = None
self.on_update_callback = None
def load(self, force=False):
"""Load the plugin settings."""
if force or not self.settings:
self.observe()
self.on_update()
self.observe_prefs()
def has_setting(self, setting):
"""Return whether the given setting exists."""
return setting in self.settings
def get(self, setting, default=None):
"""Return a plugin setting, defaulting to default if not found."""
return self.settings.get(setting, default)
def set(self, setting, value, changed=False):
"""
Set a plugin setting to the given value.
Clients of this module should always call this method to set a value
instead of doing settings['foo'] = 'bar'.
If the caller knows for certain that the value has changed,
they should pass changed=True.
"""
self.copy()
self.settings[setting] = value
if changed:
self.changeset.add(setting)
def pop(self, setting, default=None):
"""
Remove a given setting and return default if it is not in self.settings.
Clients of this module should always call this method to pop a value
instead of doing settings.pop('foo').
"""
self.copy()
return self.settings.pop(setting, default)
def copy(self):
"""Save a copy of the plugin settings."""
self.previous_settings = deepcopy(self.settings)
def observe_prefs(self, observer=None):
"""Observe changes to the ST prefs."""
prefs = sublime.load_settings('Preferences.sublime-settings')
prefs.clear_on_change('sublimelinter-pref-settings')
prefs.add_on_change('sublimelinter-pref-settings', observer or self.on_prefs_update)
def observe(self, observer=None):
"""Observer changes to the plugin settings."""
self.plugin_settings = sublime.load_settings('SublimeLinter.sublime-settings')
self.plugin_settings.clear_on_change('sublimelinter-persist-settings')
self.plugin_settings.add_on_change('sublimelinter-persist-settings',
observer or self.on_update)
def on_update_call(self, callback):
"""Set a callback to call when user settings are updated."""
self.on_update_callback = callback
def on_update(self):
"""
Update state when the user settings change.
The settings before the change are compared with the new settings.
Depending on what changes, views will either be redrawn or relinted.
"""
settings = util.merge_user_settings(self.plugin_settings)
self.settings.clear()
self.settings.update(settings)
if (
'@disable' in self.changeset or
self.previous_settings.get('@disable', False) != self.settings.get('@disable', False)
):
need_relint = True
self.changeset.discard('@disable')
else:
need_relint = False
# Clear the path-related caches if the paths list has changed
if (
'paths' in self.changeset or
(self.previous_settings and
self.previous_settings.get('paths') != self.settings.get('paths'))
):
need_relint = True
util.clear_path_caches()
self.changeset.discard('paths')
# Add python paths if they changed
if (
'python_paths' in self.changeset or
(self.previous_settings and
self.previous_settings.get('python_paths') != self.settings.get('python_paths'))
):
need_relint = True
self.changeset.discard('python_paths')
python_paths = self.settings.get('python_paths', {}).get(sublime.platform(), [])
for path in python_paths:
if path not in sys.path:
sys.path.append(path)
# If the syntax map changed, reassign linters to all views
from .linter import Linter
if (
'syntax_map' in self.changeset or
(self.previous_settings and
self.previous_settings.get('syntax_map') != self.settings.get('syntax_map'))
):
need_relint = True
self.changeset.discard('syntax_map')
Linter.clear_all()
util.apply_to_all_views(lambda view: Linter.assign(view, reset=True))
if (
'no_column_highlights_line' in self.changeset or
self.previous_settings.get('no_column_highlights_line') != self.settings.get('no_column_highlights_line')
):
need_relint = True
self.changeset.discard('no_column_highlights_line')
if (
'gutter_theme' in self.changeset or
self.previous_settings.get('gutter_theme') != self.settings.get('gutter_theme')
):
self.changeset.discard('gutter_theme')
self.update_gutter_marks()
error_color = self.settings.get('error_color', '')
warning_color = self.settings.get('warning_color', '')
if (
('error_color' in self.changeset or 'warning_color' in self.changeset) or
(self.previous_settings and error_color and warning_color and
(self.previous_settings.get('error_color') != error_color or
self.previous_settings.get('warning_color') != warning_color))
):
self.changeset.discard('error_color')
self.changeset.discard('warning_color')
if (
sublime.ok_cancel_dialog(
'You changed the error and/or warning color. '
'Would you like to update the user color schemes '
'with the new colors?')
):
util.change_mark_colors(error_color, warning_color)
# If any other settings changed, relint
if (self.previous_settings or len(self.changeset) > 0):
need_relint = True
self.changeset.clear()
if need_relint:
Linter.reload()
if self.previous_settings and self.on_update_callback:
self.on_update_callback(need_relint)
def save(self, view=None):
"""
Regenerate and save the user settings.
User settings are updated with the default settings and the defaults
from every linter, and if the user settings are currently being edited,
the view is updated.
"""
self.load()
# Fill in default linter settings
settings = self.settings
linters = settings.pop('linters', {})
for name, linter in linter_classes.items():
default = linter.settings().copy()
default.update(linters.pop(name, {}))
for key, value in (('@disable', False), ('args', []), ('excludes', [])):
if key not in default:
default[key] = value
linters[name] = default
settings['linters'] = linters
filename = '{}.sublime-settings'.format(PLUGIN_NAME)
user_prefs_path = os.path.join(sublime.packages_path(), 'User', filename)
settings_views = []
if view is None:
# See if any open views are the user prefs
for window in sublime.windows():
for view in window.views():
if view.file_name() == user_prefs_path:
settings_views.append(view)
else:
settings_views = [view]
if settings_views:
def replace(edit):
if not view.is_dirty():
j = json.dumps({'user': settings}, indent=4, sort_keys=True)
j = j.replace(' \n', '\n')
view.replace(edit, sublime.Region(0, view.size()), j)
for view in settings_views:
edits[view.id()].append(replace)
view.run_command('sublimelinter_edit')
view.run_command('save')
else:
user_settings = sublime.load_settings('SublimeLinter.sublime-settings')
user_settings.set('user', settings)
sublime.save_settings('SublimeLinter.sublime-settings')
def on_prefs_update(self):
"""Perform maintenance when the ST prefs are updated."""
util.generate_color_scheme()
def update_gutter_marks(self):
"""Update the gutter mark info based on the the current "gutter_theme" setting."""
theme_path = self.settings.get('gutter_theme', DEFAULT_GUTTER_THEME_PATH)
theme = os.path.splitext(os.path.basename(theme_path))[0]
if theme_path.lower() == 'none':
gutter_marks['warning'] = gutter_marks['error'] = ''
return
info = None
for path in (theme_path, DEFAULT_GUTTER_THEME_PATH):
try:
info = sublime.load_resource(path)
break
except IOError:
pass
if info is not None:
if theme != 'Default' and os.path.basename(path) == 'Default.gutter-theme':
printf('cannot find the gutter theme \'{}\', using the default'.format(theme))
path = os.path.dirname(path)
for error_type in ('warning', 'error'):
icon_path = '{}/{}.png'.format(path, error_type)
gutter_marks[error_type] = icon_path
try:
info = json.loads(info)
colorize = info.get('colorize', False)
except ValueError:
colorize = False
gutter_marks['colorize'] = colorize
else:
sublime.error_message(
'SublimeLinter: cannot find the gutter theme "{}",'
' and the default is also not available. '
'No gutter marks will display.'.format(theme)
)
gutter_marks['warning'] = gutter_marks['error'] = ''
if 'plugin_is_loaded' not in globals():
settings = Settings()
# A mapping between view ids and errors, which are line:(col, message) dicts
errors = {}
# A mapping between view ids and HighlightSets
highlights = {}
# A mapping between linter class names and linter classes
linter_classes = {}
# A mapping between view ids and a set of linter instances
view_linters = {}
# A mapping between view ids and views
views = {}
# Every time a view is modified, this is updated with a mapping between a view id
# and the time of the modification. This is checked at various stages of the linting
# process. If a view has been modified since the original modification, the
# linting process stops.
last_hit_times = {}
edits = defaultdict(list)
# Info about the gutter mark icons
gutter_marks = {'warning': 'Default', 'error': 'Default', 'colorize': True}
# Whether sys.path has been imported from the system.
sys_path_imported = False
# Set to true when the plugin is loaded at startup
plugin_is_loaded = False
def get_syntax(view):
"""Return the view's syntax or the syntax it is mapped to in the "syntax_map" setting."""
view_syntax = view.settings().get('syntax', '')
mapped_syntax = ''
if view_syntax:
match = SYNTAX_RE.search(view_syntax)
if match:
view_syntax = match.group(1).lower()
mapped_syntax = settings.get('syntax_map', {}).get(view_syntax, '').lower()
else:
view_syntax = ''
return mapped_syntax or view_syntax
def edit(vid, edit):
"""Perform an operation on a view with the given edit object."""
callbacks = edits.pop(vid, [])
for c in callbacks:
c(edit)
def view_did_close(vid):
"""Remove all references to the given view id in persistent storage."""
if vid in errors:
del errors[vid]
if vid in highlights:
del highlights[vid]
if vid in view_linters:
del view_linters[vid]
if vid in views:
del views[vid]
if vid in last_hit_times:
del last_hit_times[vid]
def debug_mode():
"""Return whether the "debug" setting is True."""
return settings.get('debug')
def debug(*args):
"""Print args to the console if the "debug" setting is True."""
if settings.get('debug'):
printf(*args)
def printf(*args):
"""Print args to the console, prefixed by the plugin name."""
print(PLUGIN_NAME + ': ', end='')
for arg in args:
print(arg, end=' ')
print()
def import_sys_path():
"""Import system python 3 sys.path into our sys.path."""
global sys_path_imported
if plugin_is_loaded and not sys_path_imported:
# Make sure the system python 3 paths are available to plugins.
# We do this here to ensure it is only done once.
sys.path.extend(util.get_python_paths())
sys_path_imported = True
def register_linter(linter_class, name, attrs):
"""Add a linter class to our mapping of class names <--> linter classes."""
if name:
name = name.lower()
linter_classes[name] = linter_class
# By setting the lint_settings to None, they will be set the next
# time linter_class.settings() is called.
linter_class.lint_settings = None
# The sublime plugin API is not available until plugin_loaded is executed
if plugin_is_loaded:
settings.load(force=True)
# If a linter is reloaded, we have to reassign that linter to all views
from . import linter
# If the linter had previously been loaded, just reassign that linter
if name in linter_classes:
linter_name = name
else:
linter_name = None
for view in views.values():
linter.Linter.assign(view, linter_name=linter_name)
printf('{} linter reloaded'.format(name))
else:
printf('{} linter loaded'.format(name))
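# A minimal, hypothetical sketch of how a linter class ends up in
# `linter_classes`. In SublimeLinter the metaclass in linter.py normally calls
# register_linter() when a linter module is loaded; the class name and
# attributes below are illustrative only and are not part of this module.
#
#     class Flake8(Linter):
#         syntax = 'python'
#         cmd = 'flake8'
#
#     register_linter(Flake8, 'flake8', dict(vars(Flake8)))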
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/ipv6FirewallRules/{firewallRuleName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"firewallRuleName": _SERIALIZER.url("firewall_rule_name", firewall_rule_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
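# A hedged sketch (not part of the generated client) showing how the request
# builder above can be exercised on its own; the identifiers below are
# placeholder values, not real Azure resources.
def _example_build_get_request():
    """Build (but do not send) a GET request for a single IPv6 firewall rule."""
    request = build_get_request(
        resource_group_name="example-rg",
        server_name="example-server",
        firewall_rule_name="example-rule",
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    # The builder fills in the URL template, query string, and headers; the
    # pipeline in IPv6FirewallRulesOperations is what actually sends it.
    return request.method, request.url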
def build_create_or_update_request(
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-08-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/ipv6FirewallRules/{firewallRuleName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"firewallRuleName": _SERIALIZER.url("firewall_rule_name", firewall_rule_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request(
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01-preview"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/ipv6FirewallRules/{firewallRuleName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"firewallRuleName": _SERIALIZER.url("firewall_rule_name", firewall_rule_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_list_by_server_request(
resource_group_name: str,
server_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-08-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/ipv6FirewallRules')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class IPv6FirewallRulesOperations(object):
"""IPv6FirewallRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
**kwargs: Any
) -> "_models.IPv6FirewallRule":
"""Gets an IPv6 firewall rule.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the firewall rule.
:type firewall_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IPv6FirewallRule, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.IPv6FirewallRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IPv6FirewallRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
firewall_rule_name=firewall_rule_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IPv6FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/ipv6FirewallRules/{firewallRuleName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
parameters: "_models.IPv6FirewallRule",
**kwargs: Any
) -> "_models.IPv6FirewallRule":
"""Creates or updates an IPv6 firewall rule.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the firewall rule.
:type firewall_rule_name: str
:param parameters: The required parameters for creating or updating an IPv6 firewall rule.
:type parameters: ~azure.mgmt.sql.models.IPv6FirewallRule
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IPv6FirewallRule, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.IPv6FirewallRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IPv6FirewallRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'IPv6FirewallRule')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
server_name=server_name,
firewall_rule_name=firewall_rule_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IPv6FirewallRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IPv6FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/ipv6FirewallRules/{firewallRuleName}'} # type: ignore
@distributed_trace
def delete(
self,
resource_group_name: str,
server_name: str,
firewall_rule_name: str,
**kwargs: Any
) -> None:
"""Deletes an IPv6 firewall rule.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param firewall_rule_name: The name of the firewall rule.
:type firewall_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
resource_group_name=resource_group_name,
server_name=server_name,
firewall_rule_name=firewall_rule_name,
subscription_id=self._config.subscription_id,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/ipv6FirewallRules/{firewallRuleName}'} # type: ignore
@distributed_trace
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs: Any
) -> Iterable["_models.IPv6FirewallRuleListResult"]:
"""Gets a list of IPv6 firewall rules.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IPv6FirewallRuleListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.IPv6FirewallRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IPv6FirewallRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_server.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_server_request(
resource_group_name=resource_group_name,
server_name=server_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("IPv6FirewallRuleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/ipv6FirewallRules'} # type: ignore
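# A hedged usage sketch (not generated code): how this operation group is
# typically reached through the azure-mgmt-sql client. The attribute name
# `ipv6_firewall_rules`, the credential type, and the resource names below are
# assumptions for illustration, not values taken from this module.
def _example_list_ipv6_firewall_rules():
    from azure.identity import DefaultAzureCredential  # assumed to be installed
    from azure.mgmt.sql import SqlManagementClient

    client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    # list_by_server returns an ItemPaged iterator; pages are fetched lazily.
    for rule in client.ipv6_firewall_rules.list_by_server(
            resource_group_name="example-rg", server_name="example-server"):
        print(rule.name)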
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Open-source TensorFlow Inception v2 Example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
import inception_preprocessing
import inception_v2_tpu_model as inception
import vgg_preprocessing
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import summary
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.training.python.training import evaluation
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
    help='GCE zone where the Cloud TPU is located. If not specified, we '
    'will attempt to automatically detect the GCE zone from metadata.')
# Model specific parameters
flags.DEFINE_string(
'data_dir', '',
'Directory where input data is stored')
flags.DEFINE_string(
'model_dir', None,
'Directory where model output is stored')
flags.DEFINE_string(
'export_dir',
default=None,
help=('The directory where the exported SavedModel will be stored.'))
flags.DEFINE_integer(
'num_shards', 8,
'Number of shards (workers).')
flags.DEFINE_integer(
'iterations', 100,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer(
'train_batch_size', 1024,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer(
'eval_total_size', 0,
'Total batch size for evaluation, use the entire validation set if 0')
flags.DEFINE_integer(
'eval_batch_size', 1024,
'Global (not per-shard) batch size for evaluation')
flags.DEFINE_integer(
'train_steps', 200000,
    'Number of steps to use for training.')
flags.DEFINE_integer(
'train_steps_per_eval', 2000,
'Number of training steps to run between evaluations.')
flags.DEFINE_string(
'mode', 'train_and_eval',
'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer(
'min_eval_interval', 180,
'Minimum number of seconds between evaluations')
flags.DEFINE_integer(
'eval_timeout', None,
'Evaluation timeout: Maximum number of seconds that '
'may elapse while no new checkpoints are observed')
flags.DEFINE_bool(
'use_tpu', True,
'Use TPUs rather than plain CPUs')
flags.DEFINE_string(
'use_data', 'real',
'One of "fake","real"')
flags.DEFINE_float(
'learning_rate', 0.165,
'Learning rate.')
flags.DEFINE_string(
'optimizer', 'RMS',
'Optimizer (one of sgd, RMS, momentum)')
flags.DEFINE_integer(
'num_classes', 1001,
'Number of classes to distinguish')
flags.DEFINE_integer(
'width', 224,
'Width of input image')
flags.DEFINE_integer(
'height', 224,
'Height of input image')
flags.DEFINE_bool(
'transpose_enabled', False,
'Boolean to enable/disable explicit I/O transpose')
flags.DEFINE_bool(
'log_device_placement', False,
'Boolean to enable/disable log device placement')
flags.DEFINE_integer(
'save_summary_steps', 100,
'Number of steps which must have run before showing summaries.')
flags.DEFINE_integer(
'save_checkpoints_secs', 1000,
'Interval (in seconds) at which the model data '
'should be checkpointed. Set to 0 to disable.')
flags.DEFINE_bool(
'moving_average', True,
'Whether to enable moving average computation on variables')
flags.DEFINE_string(
'preprocessing', 'inception',
'Preprocessing stage to use: one of inception or vgg')
flags.DEFINE_bool(
'use_annotated_bbox', False,
'If true, use annotated bounding box as input to cropping function, '
'else use full image size')
flags.DEFINE_float(
'learning_rate_decay', 0.94,
'Exponential decay rate used in learning rate adjustment')
flags.DEFINE_integer(
'learning_rate_decay_epochs', 3,
'Exponential decay epochs used in learning rate adjustment')
flags.DEFINE_bool(
'display_tensors', False,
'Whether to dump prediction tensors for comparison')
flags.DEFINE_bool(
'clear_update_collections', True,
'Set batchnorm update_collections to None if true, else use default value')
flags.DEFINE_integer(
'cold_epochs', 2,
'Number of epochs using cold learning rate')
flags.DEFINE_integer(
'warmup_epochs', 7,
'Number of epochs using linearly increasing learning rate')
flags.DEFINE_bool(
'use_learning_rate_warmup', False,
'Apply learning rate warmup if true')
# Dataset specific parameters
flags.DEFINE_bool(
'prefetch_enabled', True,
'Boolean to enable/disable prefetching')
flags.DEFINE_integer(
'prefetch_dataset_buffer_size', 8*1024*1024,
'Number of bytes in read buffer. 0 means no buffering.')
flags.DEFINE_integer(
'num_files_infeed', 8,
'Number of training files to read in parallel.')
flags.DEFINE_integer(
'num_parallel_calls', 64,
'Number of elements to process in parallel (by mapper)')
flags.DEFINE_integer(
'initial_shuffle_buffer_size', 1024,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done before any other operations. '
'Set to 0 to disable')
flags.DEFINE_integer(
'followup_shuffle_buffer_size', 1000,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done after prefetching is done. '
'Set to 0 to disable')
FLAGS = flags.FLAGS
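# A hedged example invocation (the script name, bucket paths, project, and TPU
# name are placeholders, not values defined in this file):
#
#   python inception_v2.py \
#     --tpu=my-tpu --tpu_zone=us-central1-b --gcp_project=my-project \
#     --data_dir=gs://my-bucket/imagenet --model_dir=gs://my-bucket/inception \
#     --mode=train_and_eval --train_batch_size=1024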
# Dataset constants
_NUM_TRAIN_IMAGES = 1281167
_NUM_EVAL_IMAGES = 50000
# Random cropping constants
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_MOMENTUM = 0.9 # Momentum in RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
# Constants dictating moving average.
MOVING_AVERAGE_DECAY = 0.995
# Batchnorm moving mean/variance parameters
BATCH_NORM_DECAY = 0.996
BATCH_NORM_EPSILON = 1e-3
def preprocess_raw_bytes(image_bytes, is_training=False, bbox=None):
"""Preprocesses a raw JPEG image.
This implementation is shared in common between train/eval pipelines,
and when serving the model.
Args:
image_bytes: A string Tensor, containing the encoded JPEG.
is_training: Whether or not to preprocess for training.
bbox: In inception preprocessing, this bbox can be used for cropping.
Returns:
A 3-Tensor [height, width, RGB channels] of type float32.
"""
image = tf.image.decode_jpeg(image_bytes, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if FLAGS.preprocessing == 'vgg':
image = vgg_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=is_training,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX)
elif FLAGS.preprocessing == 'inception':
image = inception_preprocessing.preprocess_image(
image=image,
output_height=FLAGS.height,
output_width=FLAGS.width,
is_training=is_training,
bbox=bbox)
else:
assert False, 'Unknown preprocessing type: %s' % FLAGS.preprocessing
return image
class InputPipeline(object):
"""Generates ImageNet input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
Args:
is_training: `bool` for whether the input is for training
"""
def __init__(self, is_training, data_dir):
self.is_training = is_training
self.data_dir = data_dir
def dataset_parser(self, serialized_proto):
"""Parse an Imagenet record from value."""
keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/class/label':
tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
'image/class/text':
tf.FixedLenFeature([], dtype=tf.string, default_value=''),
'image/object/bbox/xmin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.VarLenFeature(dtype=tf.int64),
}
features = tf.parse_single_example(serialized_proto, keys_to_features)
bbox = None
if FLAGS.use_annotated_bbox:
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
image = features['image/encoded']
image = preprocess_raw_bytes(image, is_training=self.is_training, bbox=bbox)
label = tf.cast(
tf.reshape(features['image/class/label'], shape=[]), dtype=tf.int32)
return image, label
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `tf.data.Dataset` object.
"""
batch_size = params['batch_size']
if FLAGS.use_data == 'real':
assert self.data_dir, 'data_dir is required'
file_pattern = os.path.join(
self.data_dir, 'train-*' if self.is_training else 'validation-*')
dataset = tf.data.Dataset.list_files(file_pattern,
shuffle=self.is_training)
if self.is_training:
dataset = dataset.repeat()
def prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(
filename, buffer_size=FLAGS.prefetch_dataset_buffer_size)
return dataset
dataset = dataset.apply(
contrib_data.parallel_interleave(
prefetch_dataset,
cycle_length=FLAGS.num_files_infeed,
sloppy=True))
if FLAGS.followup_shuffle_buffer_size > 0:
dataset = dataset.shuffle(
buffer_size=FLAGS.followup_shuffle_buffer_size)
dataset = dataset.map(
self.dataset_parser,
num_parallel_calls=FLAGS.num_parallel_calls)
else:
random_image = tf.random.uniform([FLAGS.height, FLAGS.width, 3],
minval=-1,
maxval=1)
random_label = tf.random.uniform([], minval=0, maxval=999, dtype=tf.int32)
dataset = tf.data.Dataset.range(1).repeat().map(
lambda data: (random_image, random_label))
dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(2) # Prefetch overlaps in-feed with training
def transpose_images(images):
return tensor_transform_fn(images, params['output_perm'])
dataset = dataset.map(
lambda images, labels: (transpose_images(images), labels),
num_parallel_calls=FLAGS.num_parallel_calls)
return dataset
def image_serving_input_fn():
"""Serving input fn for raw images.
This function is consumed when exporting a SavedModel.
Returns:
    A ServingInputReceiver capable of serving Inception predictions.
"""
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
preprocess_raw_bytes, image_bytes_list, back_prop=False, dtype=tf.float32)
return tf.estimator.export.ServingInputReceiver(
images, {'image_bytes': image_bytes_list})
def tensor_transform_fn(data, perm):
"""Transpose function.
This function is used to transpose an image tensor on the host and then
perform an inverse transpose on the TPU. The transpose on the TPU gets
effectively elided thus voiding any associated computational cost.
NOTE: Eventually the compiler will be able to detect when this kind of
operation may prove beneficial and perform these types of transformations
  implicitly, voiding the need for user intervention.
Args:
data: Tensor to be transposed
perm: New ordering of dimensions
Returns:
Transposed tensor
"""
if FLAGS.transpose_enabled:
return tf.transpose(data, perm)
return data
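# A small illustrative sketch (not called anywhere in this file) of how the
# host-side transpose pairs with the inverse transpose applied in the model_fn
# when --transpose_enabled is set; the shapes are assumptions for the example.
def _example_transpose_round_trip():
  images = tf.zeros([8, 224, 224, 3])  # [batch, height, width, channels]
  infeed = tensor_transform_fn(images, [1, 2, 3, 0])    # host: batch moved last
  restored = tensor_transform_fn(infeed, [3, 0, 1, 2])  # device: layout restored
  return restored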
def inception_model_fn(features, labels, mode, params):
"""Inception v2 model using Estimator API."""
num_classes = FLAGS.num_classes
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
is_eval = (mode == tf.estimator.ModeKeys.EVAL)
if isinstance(features, dict):
features = features['feature']
features = tensor_transform_fn(features, params['input_perm'])
if FLAGS.clear_update_collections:
# updates_collections must be set to None in order to use fused batchnorm
with arg_scope(inception.inception_v2_arg_scope(
batch_norm_decay=BATCH_NORM_DECAY,
batch_norm_epsilon=BATCH_NORM_EPSILON,
updates_collections=None)):
logits, end_points = inception.inception_v2(
features,
num_classes,
is_training=is_training,
replace_separable_convolution=True)
else:
with arg_scope(inception.inception_v2_arg_scope(
batch_norm_decay=BATCH_NORM_DECAY,
batch_norm_epsilon=BATCH_NORM_EPSILON)):
logits, end_points = inception.inception_v2(
features,
num_classes,
is_training=is_training,
replace_separable_convolution=True)
predictions = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.EVAL and FLAGS.display_tensors and (
not FLAGS.use_tpu):
with tf.control_dependencies([
tf.Print(
predictions['classes'], [predictions['classes']],
summarize=FLAGS.eval_batch_size,
message='prediction: ')
]):
labels = tf.Print(
labels, [labels], summarize=FLAGS.eval_batch_size, message='label: ')
one_hot_labels = tf.one_hot(labels, FLAGS.num_classes, dtype=tf.int32)
tf.losses.softmax_cross_entropy(
onehot_labels=one_hot_labels,
logits=logits,
weights=1.0,
label_smoothing=0.1)
loss = tf.losses.get_total_loss(add_regularization_losses=True)
initial_learning_rate = FLAGS.learning_rate * FLAGS.train_batch_size / 256
if FLAGS.use_learning_rate_warmup:
# Adjust initial learning rate to match final warmup rate
warmup_decay = FLAGS.learning_rate_decay**(
(FLAGS.warmup_epochs + FLAGS.cold_epochs) /
FLAGS.learning_rate_decay_epochs)
adj_initial_learning_rate = initial_learning_rate * warmup_decay
final_learning_rate = 0.0001 * initial_learning_rate
host_call = None
train_op = None
if is_training:
batches_per_epoch = _NUM_TRAIN_IMAGES / FLAGS.train_batch_size
global_step = tf.train.get_or_create_global_step()
current_epoch = tf.cast(
(tf.cast(global_step, tf.float32) / batches_per_epoch), tf.int32)
learning_rate = tf.train.exponential_decay(
learning_rate=initial_learning_rate,
global_step=global_step,
decay_steps=int(FLAGS.learning_rate_decay_epochs * batches_per_epoch),
decay_rate=FLAGS.learning_rate_decay,
staircase=True)
if FLAGS.use_learning_rate_warmup:
wlr = 0.1 * adj_initial_learning_rate
wlr_height = tf.cast(
0.9 * adj_initial_learning_rate /
(FLAGS.warmup_epochs + FLAGS.learning_rate_decay_epochs - 1),
tf.float32)
epoch_offset = tf.cast(FLAGS.cold_epochs - 1, tf.int32)
exp_decay_start = (FLAGS.warmup_epochs + FLAGS.cold_epochs +
FLAGS.learning_rate_decay_epochs)
lin_inc_lr = tf.add(
wlr, tf.multiply(
tf.cast(tf.subtract(current_epoch, epoch_offset), tf.float32),
wlr_height))
learning_rate = tf.where(
tf.greater_equal(current_epoch, FLAGS.cold_epochs),
(tf.where(tf.greater_equal(current_epoch, exp_decay_start),
learning_rate, lin_inc_lr)),
wlr)
# Set a minimum boundary for the learning rate.
learning_rate = tf.maximum(
learning_rate, final_learning_rate, name='learning_rate')
if FLAGS.optimizer == 'sgd':
tf.logging.info('Using SGD optimizer')
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate)
elif FLAGS.optimizer == 'momentum':
tf.logging.info('Using Momentum optimizer')
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=0.9)
elif FLAGS.optimizer == 'RMS':
tf.logging.info('Using RMS optimizer')
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
RMSPROP_DECAY,
momentum=RMSPROP_MOMENTUM,
epsilon=RMSPROP_EPSILON)
else:
      tf.logging.fatal('Unknown optimizer: %s', FLAGS.optimizer)
if FLAGS.use_tpu:
optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step=global_step)
if FLAGS.moving_average:
ema = tf.train.ExponentialMovingAverage(
decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
variables_to_average = (
tf.trainable_variables() + tf.moving_average_variables())
with tf.control_dependencies([train_op]), tf.name_scope('moving_average'):
train_op = ema.apply(variables_to_average)
# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
gs_t = tf.reshape(global_step, [1])
loss_t = tf.reshape(loss, [1])
lr_t = tf.reshape(learning_rate, [1])
ce_t = tf.reshape(current_epoch, [1])
def host_call_fn(gs, loss, lr, ce):
"""Training host call. Creates scalar summaries for training metrics.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `host_call`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `host_call`.
Args:
        gs: `Tensor` with shape `[batch]` for the global_step.
loss: `Tensor` with shape `[batch]` for the training loss.
lr: `Tensor` with shape `[batch]` for the learning_rate.
ce: `Tensor` with shape `[batch]` for the current_epoch.
Returns:
List of summary ops to run on the CPU host.
"""
gs = gs[0]
with summary.create_file_writer(FLAGS.model_dir).as_default():
with summary.always_record_summaries():
summary.scalar('loss', tf.reduce_mean(loss), step=gs)
summary.scalar('learning_rate', tf.reduce_mean(lr), step=gs)
summary.scalar('current_epoch', tf.reduce_mean(ce), step=gs)
return summary.all_summary_ops()
host_call = (host_call_fn, [gs_t, loss_t, lr_t, ce_t])
eval_metrics = None
if is_eval:
def metric_fn(labels, logits):
"""Evaluation metric function. Evaluates accuracy.
This function is executed on the CPU and should not directly reference
any Tensors in the rest of the `model_fn`. To pass Tensors from the model
to the `metric_fn`, provide as part of the `eval_metrics`. See
https://www.tensorflow.org/api_docs/python/tf/contrib/tpu/TPUEstimatorSpec
for more information.
Arguments should match the list of `Tensor` objects passed as the second
element in the tuple passed to `eval_metrics`.
Args:
labels: `Tensor` with shape `[batch, ]`.
logits: `Tensor` with shape `[batch, num_classes]`.
Returns:
A dict of the metrics to return from evaluation.
"""
predictions = tf.argmax(logits, axis=1)
top_1_accuracy = tf.metrics.accuracy(labels, predictions)
in_top_5 = tf.cast(tf.nn.in_top_k(logits, labels, 5), tf.float32)
top_5_accuracy = tf.metrics.mean(in_top_5)
return {
'accuracy': top_1_accuracy,
'accuracy@5': top_5_accuracy,
}
eval_metrics = (metric_fn, [labels, logits])
return contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
host_call=host_call,
eval_metrics=eval_metrics)
class LoadEMAHook(tf.train.SessionRunHook):
"""Hook to load exponential moving averages into corresponding variables."""
def __init__(self, model_dir):
super(LoadEMAHook, self).__init__()
self._model_dir = model_dir
def begin(self):
ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variables_to_restore = ema.variables_to_restore()
self._load_ema = contrib_framework.assign_from_checkpoint_fn(
tf.train.latest_checkpoint(self._model_dir), variables_to_restore)
def after_create_session(self, sess, coord):
tf.logging.info('Reloading EMA...')
self._load_ema(sess)
def main(unused_argv):
del unused_argv # Unused
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
params = {
'input_perm': [0, 1, 2, 3],
'output_perm': [0, 1, 2, 3],
}
batch_axis = 0
if FLAGS.transpose_enabled:
params['input_perm'] = [3, 0, 1, 2]
params['output_perm'] = [1, 2, 3, 0]
batch_axis = 3
if FLAGS.eval_total_size > 0:
eval_size = FLAGS.eval_total_size
else:
eval_size = _NUM_EVAL_IMAGES
eval_steps = eval_size // FLAGS.eval_batch_size
iterations = (eval_steps if FLAGS.mode == 'eval' else
FLAGS.iterations)
eval_batch_size = (None if FLAGS.mode == 'train' else
FLAGS.eval_batch_size)
tpu_config = contrib_tpu.TPUConfig(
iterations_per_loop=iterations, num_shards=FLAGS.num_shards)
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=FLAGS.model_dir,
save_checkpoints_secs=FLAGS.save_checkpoints_secs,
save_summary_steps=FLAGS.save_summary_steps,
session_config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement),
tpu_config=tpu_config)
inception_classifier = contrib_tpu.TPUEstimator(
model_fn=inception_model_fn,
use_tpu=FLAGS.use_tpu,
config=run_config,
params=params,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=eval_batch_size,
batch_axis=(batch_axis, 0))
# Input pipelines are slightly different (with regards to shuffling and
# preprocessing) between training and evaluation.
imagenet_train = InputPipeline(
is_training=True,
data_dir=FLAGS.data_dir)
imagenet_eval = InputPipeline(
is_training=False,
data_dir=FLAGS.data_dir)
if FLAGS.moving_average:
eval_hooks = [LoadEMAHook(FLAGS.model_dir)]
else:
eval_hooks = []
if FLAGS.mode == 'eval':
# Run evaluation when there is a new checkpoint
for checkpoint in evaluation.checkpoints_iterator(
FLAGS.model_dir, timeout=FLAGS.eval_timeout):
tf.logging.info('Starting to evaluate.')
try:
start_timestamp = time.time() # Includes compilation time
eval_results = inception_classifier.evaluate(
input_fn=imagenet_eval.input_fn,
steps=eval_steps,
hooks=eval_hooks,
checkpoint_path=checkpoint)
elapsed_time = int(time.time() - start_timestamp)
tf.logging.info(
'Eval results: %s. Elapsed seconds: %d', eval_results, elapsed_time)
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(checkpoint).split('-')[1])
if current_step >= FLAGS.train_steps:
tf.logging.info(
'Evaluation finished after training step %d', current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
tf.logging.info(
'Checkpoint %s no longer exists, skipping checkpoint', checkpoint)
elif FLAGS.mode == 'train_and_eval':
for cycle in range(FLAGS.train_steps // FLAGS.train_steps_per_eval):
tf.logging.info('Starting training cycle %d.' % cycle)
inception_classifier.train(
input_fn=imagenet_train.input_fn, steps=FLAGS.train_steps_per_eval)
      tf.logging.info('Starting evaluation cycle %d.' % cycle)
eval_results = inception_classifier.evaluate(
input_fn=imagenet_eval.input_fn, steps=eval_steps, hooks=eval_hooks)
tf.logging.info('Evaluation results: %s' % eval_results)
else:
tf.logging.info('Starting training ...')
inception_classifier.train(
input_fn=imagenet_train.input_fn, steps=FLAGS.train_steps)
if FLAGS.export_dir is not None:
tf.logging.info('Starting to export model.')
inception_classifier.export_saved_model(
export_dir_base=FLAGS.export_dir,
serving_input_receiver_fn=image_serving_input_fn)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
|
|
from __future__ import unicode_literals
from datetime import date
from django import forms
from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.core.checks import Error
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import SimpleTestCase, TestCase
from django.utils import six
from .models import (
Band, Concert, ValidationTestInlineModel, ValidationTestModel,
)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request)), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name', 'bio', 'sign_date'])
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
        Ensure that lookup_allowed allows a parameter
        whose field lookup doesn't exist.
Refs #21129.
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
def test_field_arguments(self):
# If we specify the fields argument, fieldsets_add and fieldsets_change should
# just stick the fields into a formsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, the fields on the Form class should be
        # restricted to the fields specified. This may cause errors to be raised in
        # the db layer if required model fields aren't in fields/fieldsets, but that's
        # preferable to ghost errors where you have a field in your Form class that
        # isn't being displayed because you forgot to add it to fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_formfield_override_readonly(self):
class AdminBandForm(forms.ModelForm):
name = forms.CharField()
class Meta:
exclude = tuple()
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
readonly_fields = ['name']
ma = BandAdmin(Band, self.site)
# `name` shouldn't appear in base_fields because it's part of
# readonly_fields.
self.assertEqual(
list(ma.get_form(request).base_fields),
['bio', 'sign_date']
)
# But it should appear in get_fields()/fieldsets() so it can be
# displayed as read-only.
self.assertEqual(
list(ma.get_fields(request)),
['bio', 'sign_date', 'name']
)
self.assertEqual(
list(ma.get_fieldsets(request)),
[(None, {'fields': ['bio', 'sign_date', 'name']})]
)
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['bio', 'sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE']
)
def test_custom_form_validation(self):
        # If we specify a form, it should be used, allowing custom validation to work
        # properly. This won't, however, break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(type(ma.get_form(request).base_fields['sign_date'].widget), AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
overrides all other declarations. Refs #8999.
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations. Refs #8999.
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE'])
def test_queryset_override(self):
# If we need to override the queryset of a ModelChoiceField in our custom form
# make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id)
)
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected="selected">---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id
)
def test_regression_for_ticket_15820(self):
"""
Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')]
)
self.assertEqual(type(cmafa.base_fields['transport'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
)
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band']
)
class CheckTestCase(SimpleTestCase):
def assertIsInvalid(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None):
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = [
Error(
msg,
hint=hint,
obj=invalid_obj,
id=id,
)
]
self.assertEqual(errors, expected)
def assertIsInvalidRegexp(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None):
"""
Same as assertIsInvalid but treats the given msg as a regexp.
"""
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertEqual(error.hint, hint)
self.assertEqual(error.obj, invalid_obj)
self.assertEqual(error.id, id)
six.assertRegex(self, error.msg, msg)
def assertIsValid(self, model_admin, model):
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = []
self.assertEqual(errors, expected)
class RawIdCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields' must be a list or tuple.",
'admin.E001')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E002')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields[0]' must be a foreign key or a many-to-many field.",
'admin.E003')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FieldsetsCheckTests(CheckTestCase):
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets' must be a list or tuple.",
'admin.E007')
def test_non_iterable_item(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be a list or tuple.",
'admin.E008')
def test_item_not_a_pair(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be of length 2.",
'admin.E009')
def test_second_element_of_item_not_a_dict(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must be a dictionary.",
'admin.E010')
def test_missing_fields_key(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must contain the key 'fields'.",
'admin.E011')
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_specified_both_fields_and_fieldsets(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
fields = ['name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"Both 'fieldsets' and 'fields' are specified.",
'admin.E005')
def test_duplicate_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"There are duplicate field(s) in 'fieldsets[0][1]'.",
'admin.E012')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
def test_duplicate_fields_in_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fields = ['name', 'name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' contains duplicate field(s).",
'admin.E006')
def test_inline(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' must be a list or tuple.",
'admin.E004',
invalid_obj=ValidationTestInline)
class FormCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'form' must inherit from 'BaseModelForm'.",
'admin.E016')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
def test_valid_case(self):
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical' must be a list or tuple.",
'admin.E017')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_vertical[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal' must be a list or tuple.",
'admin.E018')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' must be a dictionary.",
'admin.E021')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'non_existent_field': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E022')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'name': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'name', which is not an instance "
"of ForeignKey, and does not have a 'choices' definition."),
'admin.E023')
def test_invalid_value(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
'admin.E024')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' must be a dictionary.",
'admin.E026')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {'non_existent_field': ("slug",)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E027')
def test_missing_field_again(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('non_existent_field',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E030')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ('name',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'users', which must not be "
"a DateTimeField, a foreign key, or a many-to-many field."),
'admin.E028')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('name',)}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display' must be a list or tuple.",
'admin.E107')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'list_display[0]' refers to 'non_existent_field', which is not a callable, an attribute "
"of 'ValidationTestModelAdmin', or an attribute or method on 'modeladmin.ValidationTestModel'."),
'admin.E108')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display[0]' must not be a many-to-many field.",
'admin.E109')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links' must be a list, a tuple, or None.",
'admin.E110')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel, (
"The value of 'list_display_links[0]' refers to "
"'non_existent_field', which is not defined in 'list_display'."
), 'admin.E111'
)
def test_missing_in_list_display(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
'admin.E111')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_None_is_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = None
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListFilterTests(CheckTestCase):
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter' must be a list or tuple.",
'admin.E112')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' refers to 'non_existent_field', which does not refer to a Field.",
'admin.E116')
def test_not_filter(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113')
def test_not_filter_again(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_filter_again_again(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_associated_with_field_name(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
'admin.E114')
def test_valid_case(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_per_page' must be an integer.",
'admin.E118')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_max_show_all' must be an integer.",
'admin.E119')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'search_fields' must be a list or tuple.",
'admin.E126')
class DateHierarchyCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'date_hierarchy' refers to 'non_existent_field', which "
"is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E127')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class OrderingCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' must be a list or tuple.",
'admin.E031'
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'.",
'admin.E033'
)
def test_random_marker_not_alone(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' has the random ordering marker '?', but contains "
"other fields as well.",
'admin.E032',
hint='Either remove the "?", or remove the other fields.'
)
def test_valid_random_marker_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_complex_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
def test_invalid_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_select_related' must be a boolean, tuple or list.",
'admin.E117')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_as' must be a boolean.",
'admin.E101')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_on_top' must be a boolean.",
'admin.E102')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'inlines' must be a list or tuple.",
'admin.E103')
def test_not_model_admin(self):
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must inherit from 'BaseModelAdmin'\.",
'admin.E104')
def test_missing_model_field(self):
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
'admin.E105')
def test_invalid_model_type(self):
""" Test if `model` attribute on inline model admin is a models.Model.
"""
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
'admin.E106')
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = 'non_existent_field'
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
'admin.E202',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'extra' must be an integer.",
'admin.E203',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'max_num' must be an integer.",
'admin.E204',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'min_num' must be an integer.",
'admin.E205',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'formset' must inherit from 'BaseModelFormSet'.",
'admin.E206',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayEditableTests(CheckTestCase):
def test_list_display_links_is_none(self):
"""
list_display and list_editable can contain the same values
when list_display_links is None
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = list_display
list_display_links = None
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_first_item(self):
"""
The first item in list_display can be the same as the first in
list_editable.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['name', 'slug']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_in_list_editable(self):
"""
The first item in list_display can be in list_editable as long as
list_display_links is defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be the same as the first item
in list_editable if list_display_links is not defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name']
list_editable = ['name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[0]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
def test_list_display_first_item_in_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be in list_editable if
list_display_links isn't defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[1]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
class ModelAdminPermissionTests(SimpleTestCase):
class MockUser(object):
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
"""
Ensure that has_add_permission returns True for users who can add
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
"""
Ensure that has_change_permission returns True for users who can edit
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
Ensure that has_delete_permission returns True for users who can delete
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
Ensure that has_module_permission returns True for users who have any
permission for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
|
|
import sys
import numpy as np
from OpenGL.GL import *
from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays,glBindVertexArray
class JuMEG_TSV_OGL_VBO(object):
""" Helper class for using GLSL shader programs
"""
def __init__(self):
self.__vbo_data = np.array([],dtype=np.float32)
self.__vbo_id = None
# self.__vbo_id = np.array([],dtype=np.uint8)
        self.__vbo_isinit = False  # double underscore so the 'isinit' property getter/setter find it
#===== PROPERTIES
    #--- vbo_id
def __get_vbo_id(self):
return self.__vbo_id
def __set_vbo_id(self,d):
self.__vbo_id = d
self.isinit = False
vbo_id = property(__get_vbo_id,__set_vbo_id)
    #--- vbo_data
def __get_vbo_data(self):
return self.__vbo_data
def __set_vbo_data(self,d):
self.__vbo_data = d
self.isinit = False
data=property(__get_vbo_data,__set_vbo_data)
#--- vbo_data_signal
def __get_vbo_data_y(self):
return self.__vbo_data[1::2]
def __set_vbo_data_y(self,d):
self.__vbo_data[1::2] = d
data_y=property(__get_vbo_data_y,__set_vbo_data_y)
#--- vbo_data_timepoints
def __get_vbo_data_x(self):
return self.__vbo_data[0:-1:2]
def __set_vbo_data_x(self,d):
self.__vbo_data = np.zeros( 2*d.size,dtype=np.float32)
self.__vbo_data[0:-1:2] = d
self.isinit = False
self.init()
data_x=property(__get_vbo_data_x,__set_vbo_data_x)
def __get_vbo_data_pts_x(self):
return self.data_x.size
data_points_x=property(__get_vbo_data_pts_x)
def __get_vbo_data_pts_y(self):
return self.data_y.size
data_points_y=property(__get_vbo_data_pts_y)
#def __get_vbo_data_pts(self):
# return self.data.size
data_points = property(__get_vbo_data_pts_x)
    #--- vbo_isinit
def __get_vbo_isinit(self):
return self.__vbo_isinit
def __set_vbo_isinit(self,d):
self.__vbo_isinit = d
isinit=property(__get_vbo_isinit,__set_vbo_isinit)
def reset(self,attr_idx=0):
glUseProgram(0)
#--TODO enable/destroy all buffers
glDisableVertexAttribArray(attr_idx)
glBindVertexArray(0)
        self.isinit = False  # use the 'isinit' property; 'vbo_isinit' is not defined on this class
# finally: self.vbo.unbind() glDisableClientState(GL_VERTEX_ARRAY); finally: shaders.glUseProgram( 0 )
def init(self,data=None):
        if data is not None:  # truth-testing a multi-element numpy array raises ValueError
self.data = data
if self.vbo_id:
self.reset()
#--- vertices
self.vbo_id = glGenBuffers(1)
#print"VBO INIT "
#print self.vbo_id
#print type(self.vbo_id)
glBindBuffer(GL_ARRAY_BUFFER, self.vbo_id)
glBufferData(GL_ARRAY_BUFFER, 4*len(self.data),self.data,GL_DYNAMIC_DRAW)
#--- plot border
# glBindBuffer(GL_ARRAY_BUFFER, self.vbo_id[1] )
# glBufferData(GL_ARRAY_BUFFER, 4*len(self.vbo_plot_border),self.vbo_plot_border, GL_STATIC_DRAW)
self.isinit = True
#print"done vbo init"
def update_data(self,data=None):
        if data is not None:  # any() fails for None and is the wrong emptiness test for ndarrays
if (data.size != self.data.size) :
self.isinit=False
self.data=data
if not self.isinit:
self.init()
#--- vertices
glBindBuffer(GL_ARRAY_BUFFER, self.vbo_id)
#--- TODO ck if only y/signal value can be copied step 2
glBufferSubData(GL_ARRAY_BUFFER,0, 4*len(self.data), self.data)
def update_xdata(self,data=None):
        if data is not None:  # data.any() fails for None and wrongly skips all-zero arrays
if (data.size != self.data_points_x) :
self.isinit=False
self.data_x=data
if not self.isinit:
self.init()
self.update_sub_buffer()
#--- vertices
#glBindBuffer(GL_ARRAY_BUFFER, self.vbo_id)
#--- TODO ck if only y/signal value can be copied step 2
#glBufferSubData(GL_ARRAY_BUFFER,4*self.data_points, 4*len(self.vbo_data_xs), self.vbo_data_timepoints)
def update_ydata(self,data=None):
        if data is not None:  # data.any() fails for None and wrongly skips all-zero arrays
if (data.size != self.data_points_y) :
self.isinit=False
self.data_y=data
if not self.isinit:
self.init()
self.update_sub_buffer()
def update_sub_buffer(self):
#--- vertices
glBindBuffer(GL_ARRAY_BUFFER, self.vbo_id)
#--- TODO ck if only y/signal value can be copied step 2
glBufferSubData(GL_ARRAY_BUFFER,0,4*len(self.data), self.data)
"""
index buffer
GLuint elementbuffer;
glGenBuffers(1, &elementbuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned int), &indices[0], GL_STATIC_DRAW);
// Index buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
// Draw the triangles !
glDrawElements(
GL_TRIANGLES, // mode
indices.size(), // count
GL_UNSIGNED_INT, // type -> GL_UNSIGNED_SHORT
(void*)0 // element array buffer offset
);
#ff
#from OpenGL.arrays import ArrayDatatype as ADT
#from OpenGL.GL import shaders
#from OpenGL.Context.arrays import *
#import numpy as np
#from OpenGL.GL import *
from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays,glBindVertexArray
#from ctypes import pointer,sizeof, c_float, c_void_p, c_uint
from OpenGL.GLUT import *
#from OpenGL.GL import *
#from linalg import matrix as m
#from linalg import quaternion as q
#from OpenGL.arrays import ArrayDatatype
from OpenGL.arrays import ArrayDatatype
from OpenGL.GL import (GL_ARRAY_BUFFER, GL_COLOR_BUFFER_BIT,
GL_COMPILE_STATUS, GL_FALSE, GL_FLOAT, GL_FRAGMENT_SHADER,
GL_LINK_STATUS, GL_RENDERER, GL_SHADING_LANGUAGE_VERSION,
GL_STATIC_DRAW, GL_TRIANGLES, GL_TRUE, GL_VENDOR, GL_VERSION,
GL_VERTEX_SHADER, glAttachShader, glBindBuffer,
glBufferData, glClear, glClearColor, glCompileShader,
glCreateProgram, glCreateShader, glDeleteProgram,
glDeleteShader, glDrawArrays, glEnableVertexAttribArray,
glGenBuffers,glGetAttribLocation,
glGetProgramInfoLog, glGetProgramiv, glGetShaderInfoLog,
glGetShaderiv, glGetString, glGetUniformLocation, glLinkProgram,
glShaderSource, glUseProgram, glVertexAttribPointer)
"""
|
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - "text/html+css" Formatter for feeding the GUI editor
@copyright: 2005-2006 Bastian Blank, Florian Festi, Thomas Waldmann, Reimar Bauer
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin.formatter import FormatterBase, text_html
from MoinMoin import wikiutil
from MoinMoin.Page import Page
from MoinMoin.action import AttachFile
class Formatter(text_html.Formatter):
""" Send HTML data for the GUI editor """
# Block elements ####################################################
def heading(self, on, depth, **kw):
# remember depth of first heading, and adapt counting depth accordingly
if not self._base_depth:
self._base_depth = depth
count_depth = max(depth - (self._base_depth - 1), 1)
heading_depth = depth
# closing tag, with empty line after, to make source more readable
if not on:
return self._close('h%d' % heading_depth)
else:
return self._open('h%d' % heading_depth, **kw)
# Links ##############################################################
def pagelink(self, on, pagename='', page=None, **kw):
""" Link to a page.
formatter.text_python will use an optimized call with a page!=None
parameter. DO NOT USE THIS YOURSELF OR IT WILL BREAK.
See wikiutil.link_tag() for possible keyword parameters.
"""
FormatterBase.pagelink(self, on, pagename, page, **kw)
if page is None:
page = Page(self.request, pagename, formatter=self)
return page.link_to(self.request, on=on, **kw)
def interwikilink(self, on, interwiki='', pagename='', **kw):
"""
@keyword title: override using the interwiki wikiname as title
"""
if not on:
return self.url(0) # return '</a>'
html_class = 'badinterwiki' # we use badinterwiki in any case to simplify reverse conversion
        href = wikiutil.quoteWikinameURL(pagename) or "/" # FCKeditor behaves strangely on an empty href
title = kw.get('title', interwiki)
return self.url(1, href, title=title, css=html_class) # interwiki links with pages with umlauts
def attachment_inlined(self, url, text, **kw):
url = wikiutil.escape(url)
text = wikiutil.escape(text)
if url == text:
return '<span style="background-color:#ffff11">{{attachment:%s}}</span>' % url
else:
return '<span style="background-color:#ffff11">{{attachment:%s|%s}}</span>' % (url, text)
def attachment_link(self, on, url=None, **kw):
assert on in (0, 1, False, True) # make sure we get called the new way, not like the 1.5 api was
_ = self.request.getText
querystr = kw.get('querystr', {})
assert isinstance(querystr, dict) # new in 1.6, only support dicts
if 'do' not in querystr:
querystr['do'] = 'view'
if on:
pagename = self.page.page_name
target = AttachFile.getAttachUrl(pagename, url, self.request, do=querystr['do'])
return self.url(on, target, title="attachment:%s" % wikiutil.quoteWikinameURL(url))
else:
return self.url(on)
def attachment_image(self, url, **kw):
_ = self.request.getText
# we force the title here, needed later for html>wiki converter
kw['title'] = "attachment:%s" % wikiutil.quoteWikinameURL(url)
pagename = self.page.page_name
if '/' in url:
pagename, target = AttachFile.absoluteName(url, pagename)
url = url.split('/')[-1]
kw['src'] = AttachFile.getAttachUrl(pagename, url, self.request, addts=1)
return self.image(**kw)
def attachment_drawing(self, url, text, **kw):
# Todo get it to start the drawing editor on a click
try:
drawing_action = AttachFile.get_action(self.request, url, do='modify')
assert drawing_action is not None
attachment_drawing = wikiutil.importPlugin(self.request.cfg, 'action',
drawing_action, 'gedit_drawing')
return attachment_drawing(self, url, text, **kw)
except (wikiutil.PluginMissingError, wikiutil.PluginAttributeError, AssertionError):
return url
def icon(self, type):
return self.request.theme.make_icon(type, title='smiley:%s' % type)
smiley = icon
def nowikiword(self, text):
return '<span style="background-color:#ffff11">!</span>' + self.text(text)
# Dynamic stuff / Plugins ############################################
def macro(self, macro_obj, name, args, markup=None):
if markup is not None:
result = markup
elif args is not None:
result = "<<%s(%s)>>" % (name, args)
else:
result = "<<%s>>" % name
return '<span style="background-color:#ffff11">%s</span>' % wikiutil.escape(result)
def parser(self, parser_name, lines):
""" parser_name MUST be valid!
"""
result = [self.preformatted(1)]
for line in lines:
result.append(self.text(line))
result.append(self.linebreak(preformatted=1))
result.append(self.preformatted(0))
return "".join(result)
# Other ##############################################################
style2attribute = {
'width': 'width',
'height': 'height',
'background': 'bgcolor',
'background-color': 'bgcolor',
#if this is used as table style="text-align: right", it doesn't work
#if it is transformed to align="right":
#'text-align': 'align',
#'vertical-align': 'valign'
}
def _style_to_attributes(self, attrs):
if 'style' not in attrs:
return attrs
unknown = []
for entry in attrs['style'].split(';'):
try:
key, value = entry.split(':')
except ValueError:
unknown.append(entry)
continue
key, value = key.strip(), value.strip()
if key in self.style2attribute:
attrs[self.style2attribute[key]] = value
else:
unknown.append("%s:%s" % (key, value))
if unknown:
attrs['style'] = ';'.join(unknown)
else:
del attrs['style']
return attrs
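    # Illustrative example of the conversion above (values are made up):
    #   {'style': 'width: 50%; vertical-align: top'}
    # becomes
    #   {'width': '50%', 'style': 'vertical-align:top'}
    # i.e. CSS properties listed in style2attribute are promoted to plain
    # attributes and everything else is kept in 'style'.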
def _checkTableAttr(self, attrs, prefix):
#logging.debug(repr(attrs))
attrs = text_html.Formatter._checkTableAttr(self, attrs, prefix)
#logging.debug(repr(attrs))
attrs = self._style_to_attributes(attrs)
#logging.debug(repr(attrs))
return attrs
_allowed_table_attrs = {
'table': ['class', 'id', 'style', 'bgcolor', 'width', 'height', ],
'row': ['class', 'id', 'style', 'bgcolor', 'width', 'height', ],
'': ['colspan', 'rowspan', 'class', 'id', 'style', 'bgcolor', 'width', 'height', ],
}
def table(self, on, attrs=None, **kw):
""" Create table
@param on: start table
@param attrs: table attributes
@rtype: string
@return start or end tag of a table
"""
result = []
if on:
# Open table
if not attrs:
attrs = {}
else:
#result.append(self.rawHTML("<!-- ATTRS1: %s -->" % repr(attrs)))
attrs = self._checkTableAttr(attrs, 'table')
#result.append(self.rawHTML("<!-- ATTRS2: %s -->" % repr(attrs)))
result.append(self._open('table', newline=1, attr=attrs,
allowed_attrs=self._allowed_table_attrs['table'],
**kw))
else:
# Close table then div
result.append(self._close('table'))
return ''.join(result)
def comment(self, text, **kw):
text = text.rstrip() # workaround for growing amount of blanks at EOL
return self.preformatted(1, css_class='comment') + self.text(text) + self.preformatted(0)
def strong(self, on, **kw):
tag = 'b'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def emphasis(self, on, **kw):
tag = 'i'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def underline(self, on, **kw):
tag = 'u'
if on:
return self._open(tag, allowed_attrs=[], **kw)
return self._close(tag)
def line_anchordef(self, lineno):
return '' # not needed for gui editor feeding
def line_anchorlink(self, on, lineno=0):
return '' # not needed for gui editor feeding
def span(self, on, **kw):
previous_state = self.request.user.show_comments
self.request.user.show_comments = True
ret = text_html.Formatter.span(self, on, **kw)
self.request.user.show_comments = previous_state
return ret
|
|
#!/usr/bin/env python3
# -*- mode: python; indent-tabs-mode: nil; c-basic-offset: 4; tab-width: 4; -*-
# vim: set shiftwidth=4 softtabstop=4 expandtab:
"""Support for reading from an NCAR EOL RAF PostgreSQL database of
real-time flight data.
2014 Copyright University Corporation for Atmospheric Research
This file is part of the "django-ncharts" package.
The license and distribution terms for this file may be found in the
file LICENSE in this package.
"""
from datetime import datetime
import logging
import sys
import threading
import pytz
import numpy as np
import psycopg2
from ncharts import exceptions as nc_exc
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class RAFDatabase(object):
"""Support for reading time series from NCAR EOL RAF PostgreSQL database.
"""
__cached_connections = {}
__cache_lock = threading.Lock()
@staticmethod
def get_connection(
database="real-time-GV",
user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
password=None):
"""Return a psycopg2 database connection.
The returned connection can be shared between threads.
If the connection is kept open, then for a given
database, user, host and port, this method
will always return the same connection.
Args:
database, user, host, port password: Parameters needed
to establish a connection to the PostgreSQL database.
Returns:
A psycopg2.connection
Raises:
psycopg2.Error
"""
hashval = hash(database + user + host + str(port))
with RAFDatabase.__cache_lock:
conn = None
if hashval in RAFDatabase.__cached_connections:
conn = RAFDatabase.__cached_connections[hashval]
# connection closed: nonzero if it is closed or broken.
# Mainly just checking here if it is broken, in which
# case, close and attempt a re-connect.
if conn.closed:
try:
conn.rollback()
except psycopg2.Error as exc:
_logger.warning("%s rollback: %s", conn, exc)
try:
conn.close()
except psycopg2.Error as exc:
_logger.warning("%s close: %s", conn, exc)
del RAFDatabase.__cached_connections[hashval]
conn = None
if not conn:
conn = psycopg2.connect(
database=database, user=user,
host=host, port=port, password=password)
conn.set_session(
isolation_level="READ COMMITTED",
readonly=True)
RAFDatabase.__cached_connections[hashval] = conn
return conn
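    # Illustrative sketch of the caching behaviour documented above: two calls
    # with identical parameters return the same shared connection object, as
    # long as the cached connection is still open (parameter values here are
    # just the defaults, not a recommendation).
    #
    #   conn1 = RAFDatabase.get_connection(database="real-time-GV", user="ads")
    #   conn2 = RAFDatabase.get_connection(database="real-time-GV", user="ads")
    #   assert conn1 is conn2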
@staticmethod
def close_connection(conn):
"""Close a psycopg2 database connection.
Args:
conn: connection to close.
Raises:
nothing
According to http://initd.org/psycopg/docs/connection.html:
Changed in version 2.5: if the connection is used in a with
statement, the (rollback) method is automatically called if
an exception is raised in the with block.
All connections here are used in a with statement, so we
don't have to call rollback() before close.
"""
with RAFDatabase.__cache_lock:
for (hashval, cconn) in RAFDatabase.__cached_connections.items():
if conn == cconn:
try:
conn.close()
except psycopg2.Error as exc:
_logger.warning("%s close: %s", conn, exc)
del RAFDatabase.__cached_connections[hashval]
break
def __init__(
self,
database="real-time-GV",
user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
password=None,
table="raf_lrt"):
"""Construct an instance of RAF database connection.
Args:
database, user, host, port, password: Usual parameters
needed to create a PostgreSQL connection.
table: name of table in the database which contains
the time-series data to be read.
Raises:
nc_exc.NoDataException
"""
try:
self.conn = RAFDatabase.get_connection(
database=database, user=user,
host=host, port=port, password=password)
self.database = database
self.user = user
self.host = host
self.port = port
self.password = password
self.table = table
except psycopg2.Error as exc:
raise nc_exc.NoDataException(
"Database not available: {}".format(exc))
def get_variables(self):
"""Fetch pertinent fields from the 'variable_list' table in
the RAF database, such as the list of variable names, their units, and
missing values.
Raises:
nc_exc.NoDataException
"""
try:
with self.conn as conn:
with conn.cursor() as cur:
cur.execute("\
SELECT name, units, long_name, ndims, dims, missing_value from variable_list;")
variables = {}
for var in cur:
dimnames = ["time"]
# make a bold assumption that a second dimension
# is a particle-probe bin number
if var[3] > 1:
dimnames.append("bin")
variables[var[0]] = {
"units": var[1],
"long_name": var[2],
"dimnames": dimnames,
"shape": var[4]
}
return variables
except psycopg2.Error as exc:
# psycopg.connections are thread safe
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
"No variables found: {}".format(exc))
def read_times(
self,
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max)):
"""Read datetimes from the table within a range.
Raises:
nc_exc.NoDataException
"""
start_time = start_time.replace(tzinfo=None)
end_time = end_time.replace(tzinfo=None)
# _logger.debug("read_times, table=%s", self.table)
vname = "datetime"
try:
with self.conn as conn:
with conn.cursor() as cur:
# datetimes in database are returned to python as timezone naive.
cur.execute(
"SELECT {} FROM {} WHERE {} >= %s AND {} < %s;"
.format(vname, self.table, vname, vname),
(start_time, end_time))
return [pytz.utc.localize(x[0]).timestamp() for x in cur]
except psycopg2.Error as exc:
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
"read {}: {}".format(vname, exc))
def get_start_time(self):
"""Read first datatime from the database table.
Raises:
nc_exc.NoDataException
"""
vname = "datetime"
try:
with self.conn as conn:
with conn.cursor() as cur:
# datetimes in database are returned to python as timezone naive.
cur.execute(
"SELECT {} FROM {} FETCH FIRST 1 ROW ONLY;"
.format(vname, self.table))
start_time = cur.fetchone()
if not start_time:
_logger.warning("%s: read %s: no data", conn, vname)
raise nc_exc.NoDataException("read {}".format(vname))
return pytz.utc.localize(start_time[0])
except psycopg2.Error as exc:
_logger.warning("%s: read %s: %s", conn, vname, exc)
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException("read {}: {}".format(vname, exc))
def read_time_series(
self,
variables=(),
start_time=pytz.utc.localize(datetime.min),
end_time=pytz.utc.localize(datetime.max),
size_limit=1000 * 1000 * 1000):
"""Read times and variables from the table within a time period.
For each variable, its missing_value will be read from the
variable_list table. Values read from the time series table
which match the missing_value will be set to float('nan').
Args:
variables: list or tuple of variable names to read.
start_time: starting datetime of data to be read.
end_time: ending datetime of data to be read.
size_limit: attempt to screen outrageous requests.
Returns:
A one element dict, compatible with that returned by
netcdf.read_time_series(), containing for a series_name of '':
{
'time' : list of UTC timestamps,
'data': lists of numpy.ndarray containing
the data for each variable,
'vmap': dict by variable name,
containing the index into the series data for the variable,
'dim2': dict by variable name, of values for second
dimension of the data, such as height.
}
Raises:
nc_exc.NoDataException
"""
total_size = 0
start_time = start_time.replace(tzinfo=None)
end_time = end_time.replace(tzinfo=None)
vtime = self.read_times(start_time=start_time, end_time=end_time)
# _logger.debug("read_times, len=%d", len(vtime))
total_size += sys.getsizeof(vtime)
if total_size > size_limit:
raise nc_exc.TooMuchDataException(
"too many time values requested, size={0} MB".\
format(total_size/(1000 * 1000)))
vdata = []
vmap = {}
vdim2 = {}
try:
with self.conn as conn:
with conn.cursor() as cur:
for vname in variables:
operation = "read variable_list"
# _logger.debug("vname=%s",vname)
cur.execute(
"SELECT dims, missing_value from variable_list where name=%s;",
(vname,))
vinfo = cur.fetchall()
# _logger.debug("vinfo=%s",vinfo)
dims = vinfo[0][0]
dims[0] = len(vtime)
missval = vinfo[0][1]
if len(dims) > 1:
# In initial CSET data, dims for CUHSAS_RWOOU
# in variable_list was [1,99]
# Seems that the 99 should have been 100,
# which is what is returned by this:
operation = "read dimension of {}".format(vname)
cur.execute("\
SELECT array_upper({},1) FROM {} FETCH FIRST 1 ROW ONLY;\
".format(vname, self.table))
dimsx = cur.fetchall()[0]
dims[1] = dimsx[0]
# _logger.debug("vname=%s, dims=%s, dimsx=%s", vname, dims, dimsx)
operation = "read {}".format(vname)
cur.execute("\
SELECT {} FROM {} WHERE datetime >= %s AND datetime < %s;\
".format(vname, self.table), (start_time, end_time))
cdata = np.ma.masked_values(np.ndarray(
shape=dims, buffer=np.array(
[v for v in cur], dtype=float)), value=missval)
if isinstance(cdata, np.ma.core.MaskedArray):
# _logger.debug("is MaskedArray")
cdata = cdata.filled(fill_value=float('nan'))
total_size += sys.getsizeof(cdata)
if total_size > size_limit:
raise nc_exc.TooMuchDataException(
"too many values requested, size={0} MB".\
format(total_size/(1000 * 1000)))
vindex = len(vdata)
vdata.append(cdata)
vmap[vname] = vindex
if len(dims) > 1:
vdim2[vname] = {
"data": [i for i in range(dims[1])],
"name": "bin",
"units": ""
}
return {
'': {
'time': vtime,
'data': vdata,
'vmap': vmap,
'dim2': vdim2,
}
}
except psycopg2.Error as exc:
RAFDatabase.close_connection(conn)
raise nc_exc.NoDataException(
(operation + ": {}").format(exc))
def test_func():
""" """
db = RAFDatabase(
database="real-time-GV", user="ads",
host="eol-rt-data.fl-ext.ucar.edu",
port=5432,
table="raf_lrt")
variables = db.get_variables()
time0 = db.get_start_time()
_logger.debug("time0=%s", time0)
# times = db.read_times()
# _logger.debug("all times=%s",times)
t1 = pytz.utc.localize(datetime(2015, 6, 29, 15, 10, 0))
t2 = pytz.utc.localize(datetime(2015, 6, 29, 15, 11, 0))
times = db.read_times(start_time=t1, end_time=t2)
_logger.debug("times=%s", times)
data = db.read_time_series(("TASX",), start_time=t1, end_time=t2)
_logger.debug("data=%s", data)
data = db.read_time_series(("CUHSAS_RWOOU",), start_time=t1, end_time=t2)
_logger.debug("data=%s", data)
RAFDatabase.close_connection(db)
if __name__ == '__main__':
test_func()
|
|
#!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
'''
base.py
Common base for the command execution framework. Units of work are defined as Operations,
as found in other modules like unix.py. These units of work are then packaged up and executed
within a GpCommand. A GpCommand is just common infrastructure for executing an Operation.
The general idea is that the application developer breaks the problem down into a set of
GpCommands that need to be executed. This module also provides a queue and set of workers
for executing such a set of commands.
'''
from Queue import Queue,Empty
from threading import Thread
import os
import signal
import subprocess
import sys
import time
from gppylib import gplog
from gppylib import gpsubprocess
from pygresql.pg import DB
# paramiko prints deprecation warnings which are ugly to the end-user
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko, getpass
logger=gplog.get_default_logger()
GPHOME=os.environ.get('GPHOME')
SRC_GPPATH=". %s/greenplum_path.sh;" % GPHOME
# Maximum retries if sshd rejects the connection due to too many
# unauthenticated connections.
SSH_MAX_RETRY=10
# Delay before retrying ssh connection, in seconds
SSH_RETRY_DELAY=.5
class WorkerPool(object):
"""TODO:"""
halt_command='halt command'
def __init__(self,numWorkers=16,items=None,daemonize=False):
self.workers=[]
self.should_stop=False
self.work_queue=Queue()
self.completed_queue=Queue()
self.num_assigned=0
self.daemonize=daemonize
if items is not None:
for item in items:
self.work_queue.put(item)
self.num_assigned += 1
for i in range(0,numWorkers):
w = Worker("worker%d" % i, self)
self.workers.append(w)
w.start()
self.numWorkers = numWorkers
self.logger = logger
###
def getNumWorkers(self):
return self.numWorkers
def getNextWorkItem(self):
return self.work_queue.get(block=True)
def addFinishedWorkItem(self,command):
self.completed_queue.put(command)
self.work_queue.task_done()
def markTaskDone(self):
self.work_queue.task_done()
def addCommand(self,cmd):
self.logger.debug("Adding cmd to work_queue: %s" % cmd.cmdStr)
self.work_queue.put(cmd)
self.num_assigned += 1
def wait_and_printdots(self,command_count,quiet=True):
while self.completed_queue.qsize() < command_count:
time.sleep(1)
if not quiet:
sys.stdout.write(".")
sys.stdout.flush()
if not quiet:
print " "
self.join()
def print_progress(self, command_count):
while True:
num_completed = self.completed_queue.qsize()
num_completed_percentage = 0
if command_count:
num_completed_percentage = float(num_completed) / command_count
logger.info('%0.2f%% of jobs completed' % (num_completed_percentage * 100))
if num_completed >= command_count:
return
time.sleep(10)
def join(self):
self.work_queue.join()
return True
def joinWorkers(self):
for w in self.workers:
w.join()
def getCompletedItems(self):
completedList=[]
try:
while True:
item=self.completed_queue.get(False)
if item is not None:
completedList.append(item)
except Empty:
return completedList
return completedList #just to be sure
def check_results(self):
""" goes through all items in the completed_queue and throws an exception at the
first one that didn't execute successfully
throws ExecutionError
"""
try:
while True:
item=self.completed_queue.get(False)
if not item.get_results().wasSuccessful():
raise ExecutionError("Error Executing Command: ",item)
except Empty:
return
def empty_completed_items(self):
while not self.completed_queue.empty():
self.completed_queue.get(False)
def isDone(self):
#TODO: not sure that qsize() is safe
return (self.num_assigned == self.completed_queue.qsize())
def haltWork(self):
self.logger.debug("WorkerPool haltWork()")
self.should_stop=True
for w in self.workers:
w.haltWork()
self.work_queue.put(self.halt_command)
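# Hedged usage sketch (not part of the original module): shows the pattern the
# module docstring describes -- wrap each unit of work in a Command, feed the
# commands to a WorkerPool, then wait and check the results. The command
# strings and worker count here are illustrative only.
def _example_worker_pool_usage():
    pool = WorkerPool(numWorkers=4)
    for i in range(3):
        pool.addCommand(Command("echo_%d" % i, "echo hello %d" % i, ctxt=LOCAL))
    pool.join()                # block until every queued command has finished
    try:
        pool.check_results()   # raises ExecutionError on the first failed command
    finally:
        pool.haltWork()        # tell the worker threads to exit...
        pool.joinWorkers()     # ...and wait for them to do so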
class OperationWorkerPool(WorkerPool):
""" TODO: This is a hack! In reality, the WorkerPool should work with Operations, and
Command should be a subclass of Operation. Till then, we'll spoof the necessary Command
functionality within Operation. """
def __init__(self, numWorkers=16, operations=None):
if operations is not None:
for operation in operations:
self._spoof_operation(operation)
super(OperationWorkerPool, self).__init__(numWorkers, operations)
def check_results(self):
raise NotImplementedError("OperationWorkerPool has no means of verifying success.")
def _spoof_operation(self, operation):
operation.cmdStr = str(operation)
class Worker(Thread):
"""TODO:"""
pool=None
cmd=None
name=None
logger=None
def __init__(self,name,pool):
self.name=name
self.pool=pool
self.logger=logger
Thread.__init__(self)
self.daemon=pool.daemonize
def run(self):
while True:
try:
try:
self.cmd = self.pool.getNextWorkItem()
except TypeError:
# misleading exception raised during interpreter shutdown
return
# we must have got a command to run here
if self.cmd is None:
self.logger.debug("[%s] got a None cmd" % self.name)
self.pool.markTaskDone()
elif self.cmd is self.pool.halt_command:
self.logger.debug("[%s] got a halt cmd" % self.name)
self.pool.markTaskDone()
self.cmd=None
return
elif self.pool.should_stop:
self.logger.debug("[%s] got cmd and pool is stopped: %s" % (self.name, self.cmd))
self.pool.markTaskDone()
self.cmd=None
else:
self.logger.debug("[%s] got cmd: %s" % (self.name,self.cmd.cmdStr))
self.cmd.run()
self.logger.debug("[%s] finished cmd: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd=None
except Exception,e:
self.logger.exception(e)
if self.cmd:
self.logger.debug("[%s] finished cmd with exception: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd=None
def haltWork(self):
self.logger.debug("[%s] haltWork" % self.name)
# this was originally coded as
#
# if self.cmd is not None:
# self.cmd.interrupt()
# self.cmd.cancel()
#
# but as observed in MPP-13808, the worker thread's run() loop may set self.cmd to None
# past the point where the calling thread checks self.cmd for None, leading to a curious
# "'NoneType' object has no attribute 'cancel' exception" which may prevent the worker pool's
# haltWorkers() from actually halting all the workers.
#
c = self.cmd
if c is not None and isinstance(c, Command):
c.interrupt()
c.cancel()
"""
TODO: consider just having a single interface that needs to be implemented for
describing work to allow the Workers to use it. This would allow the user
to better provide logic necessary. i.e. even though the user wants to
execute a unix command... how the results are interpreted is highly
application specific. So we should have a separate level of abstraction
for executing UnixCommands and DatabaseCommands from this one.
other things to think about:
-- how to support cancel
-- how to support progress
-- undo?
-- blocking vs. unblocking
"""
#--------------------------------NEW WORLD-----------------------------------
class CommandResult():
""" Used as a way to package up the results from a GpCommand
"""
#rc,stdout,stderr,completed,halt
def __init__(self,rc,stdout,stderr,completed,halt):
self.rc=rc
self.stdout=stdout
self.stderr=stderr
self.completed=completed
self.halt=halt
pass
def printResult(self):
res = "cmd had rc=%d completed=%s halted=%s\n stdout='%s'\n " \
"stderr='%s'" % (self.rc,str(self.completed), str(self.halt), self.stdout, self.stderr)
return res
def wasSuccessful(self):
if self.halt:
return False
if not self.completed:
return False
if self.rc != 0:
return False
return True
def __str__(self):
return self.printResult()
def split_stdout(self, how=':'):
"""
TODO: AK: This doesn't belong here if it pertains only to pg_controldata.
MPP-16318: Skip over discrepancies in the pg_controldata stdout, as it's
not this code's responsibility to judge the pg_controldata stdout. This is
especially true for 'immediate' shutdown, in which case, we won't even
care for WARNINGs or other pg_controldata discrepancies.
"""
for line in self.stdout.split('\n'):
ret = line.split(how, 1)
if len(ret) == 2:
yield ret
class ExecutionError(Exception):
def __init__(self,summary,cmd):
self.summary=summary
self.cmd=cmd
def __str__(self):
#TODO: improve dumping of self.cmd
return "ExecutionError: '%s' occured. Details: '%s' %s" %\
(self.summary,self.cmd.cmdStr,self.cmd.get_results().printResult())
#specify types of execution contexts.
LOCAL=1
REMOTE=2
RMI=3
NAKED=4
gExecutionContextFactory = None
#
# @param factory needs to have a createExecutionContext(self, execution_context_id, remoteHost, stdin, nakedExecutionInfo) function
#
def setExecutionContextFactory(factory):
global gExecutionContextFactory
gExecutionContextFactory = factory
def createExecutionContext(execution_context_id,remoteHost,stdin, nakedExecutionInfo=None):
if gExecutionContextFactory is not None:
return gExecutionContextFactory.createExecutionContext(execution_context_id, remoteHost, stdin)
elif execution_context_id == LOCAL:
return LocalExecutionContext(stdin)
elif execution_context_id == REMOTE:
if remoteHost is None:
raise Exception("Programmer Error. Specified REMOTE execution context but didn't provide a remoteHost")
return RemoteExecutionContext(remoteHost,stdin)
elif execution_context_id == RMI:
return RMIExecutionContext()
elif execution_context_id == NAKED:
if remoteHost is None:
raise Exception("Programmer Error. Specified NAKED execution context but didn't provide a remoteHost")
if nakedExecutionInfo is None:
raise Exception("Programmer Error. Specified NAKED execution context but didn't provide a NakedExecutionInfo")
return NakedExecutionContext(remoteHost, stdin, nakedExecutionInfo)
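# Hedged sketch (not part of the original module): a minimal factory with the
# createExecutionContext() signature described above. It ignores the requested
# context type and always runs locally; a real factory would dispatch on
# execution_context_id. Shown only to illustrate the interface expected by
# setExecutionContextFactory().
class _ExampleLocalOnlyFactory:
    def createExecutionContext(self, execution_context_id, remoteHost, stdin, nakedExecutionInfo=None):
        return LocalExecutionContext(stdin)
# To install it: setExecutionContextFactory(_ExampleLocalOnlyFactory())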
class ExecutionContext():
""" An ExecutionContext defines where and how to execute the Command and how to
gather up information that are the results of the command.
"""
propagate_env_map = {}
"""
Dict. mapping environment variables to their values.
"""
def __init__(self):
pass
def execute(self,cmd):
pass
def interrupt(self,cmd):
pass
def cancel(self,cmd):
pass
class LocalExecutionContext(ExecutionContext):
proc=None
halt=False
completed=False
def __init__(self,stdin):
ExecutionContext.__init__(self)
self.stdin = stdin
pass
def execute(self, cmd, wait=True):
# prepend env. variables from ExecutionContext.propagate_env_map
# e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
for k, v in self.__class__.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# also propagate env from command instance specific map
for k, v in cmd.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# executable='/bin/bash' is to ensure the shell is bash. bash isn't the
# actual command executed, but the shell that command string runs under.
self.proc = gpsubprocess.Popen(cmd.cmdStr, env=None, shell=True,
executable='/bin/bash',
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE, close_fds=True)
if wait:
(rc,stdout_value,stderr_value)=self.proc.communicate2(input=self.stdin)
self.completed=True
cmd.set_results(CommandResult(
rc,"".join(stdout_value),"".join(stderr_value),self.completed,self.halt))
def cancel(self,cmd):
if self.proc:
try:
os.kill(self.proc.pid, signal.SIGTERM)
except OSError:
pass
def interrupt(self,cmd):
self.halt=True
if self.proc:
self.proc.cancel()
##########################################################################
# Naked Execution is used to run commands where ssh keys are not exchanged
class NakedExecutionInfo:
SFTP_NONE = 0
SFTP_PUT = 1
SFTP_GET = 2
def __init__(self, passwordMap, sftp_operation = SFTP_NONE, sftp_remote = None, sftp_local = None):
self.passwordMap = passwordMap
self.sftp_operation = sftp_operation
self.sftp_remote = sftp_remote
self.sftp_local = sftp_local
class NakedExecutionPasswordMap:
def __init__(self, hostlist):
self.hostlist = hostlist
self.mapping = dict()
self.unique_passwords = set()
self.complete = False
# this method throws exceptions on error to create a valid list
def discover(self):
for host in self.hostlist:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# TRY NO PASSWORD
try:
client.connect(host)
self.mapping[host] = None
client.close()
continue # next host
except Exception, e:
pass
try:
client.close()
except Exception, e:
pass
# TRY EXISTING PASSWORDS
foundit = False
for passwd in self.unique_passwords:
try:
client.connect(host, password=passwd)
foundit = True
self.mapping[host] = passwd
break
except Exception, e:
pass
if foundit:
continue
# ASK USER
foundit = False
for attempt in range(5):
try:
passwd = getpass.getpass(' *** Enter password for %s: ' % (host), sys.stderr)
client.connect(host, password=passwd)
foundit = True
self.mapping[host] = passwd
if passwd not in self.unique_passwords:
self.unique_passwords.add(passwd)
break
except Exception, e:
pass
try:
client.close()
except Exception, e:
pass
if not foundit:
raise Exception("Did not get a valid password for host " + host)
if len(self.mapping.keys()) == len(self.hostlist) and len(self.hostlist) > 0:
self.complete = True
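# Hedged usage sketch (not part of the original module): builds a password map
# for a host without exchanged ssh keys and runs one command through a NAKED
# execution context. The host and command are illustrative only; discover()
# may prompt for passwords on stderr.
def _example_naked_execution(host):
    pwmap = NakedExecutionPasswordMap([host])
    pwmap.discover()
    info = NakedExecutionInfo(pwmap)
    cmd = Command("uptime", "uptime", ctxt=NAKED, remoteHost=host, nakedExecutionInfo=info)
    cmd.run()
    return cmd.get_results()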
class NakedExecutionContext(LocalExecutionContext):
def __init__(self,targetHost,stdin, nakedCommandInfo):
LocalExecutionContext.__init__(self, stdin)
self.targetHost=targetHost
self.passwordMap = nakedCommandInfo.passwordMap
self.sftp_operation = nakedCommandInfo.sftp_operation
self.sftp_remote = nakedCommandInfo.sftp_remote
self.sftp_local = nakedCommandInfo.sftp_local
self.client = None
def execute(self,cmd):
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
self.client.connect(self.targetHost, password=self.passwordMap.mapping[self.targetHost])
except paramiko.AuthenticationException:
self.client.close()
cmd.set_results(CommandResult(1,"","password validation on %s failed" % self.targetHost,False, False))
return
except Exception, e:
cmd.set_results(CommandResult(1,"","conection to host " + self.targetHost + " failed: " + e.__str__(),False, False))
return
if self.sftp_operation == NakedExecutionInfo.SFTP_NONE:
self.execute_ssh(cmd)
elif self.sftp_operation == NakedExecutionInfo.SFTP_PUT:
self.execute_sftp_put(cmd)
elif self.sftp_operation == NakedExecutionInfo.SFTP_GET:
self.execute_sftp_get(cmd)
else:
raise Exception("bad NakedExecutionInfo.sftp_operation")
def execute_ssh(self,cmd):
try:
stdin, stdout, stderr = self.client.exec_command(cmd.cmdStr)
rc = stdout.channel.recv_exit_status()
self.completed=True
cmd.set_results(CommandResult(rc,stdout.readlines(),stderr.readlines(),self.completed, self.halt))
stdin.close()
stdout.close()
stderr.close()
except Exception, e:
cmd.set_results(CommandResult(1,"",e.__str__(),False, False))
finally:
self.client.close()
def execute_sftp_put(self, cmd):
ftp = None
try:
ftp = self.client.open_sftp()
ftp.put(self.sftp_local, self.sftp_remote)
self.completed=True
cmd.set_results(CommandResult(0,"","",self.completed, self.halt))
except Exception, e:
cmd.set_results(CommandResult(1,"",e.__str__(),False, False))
finally:
ftp.close()
self.client.close()
def execute_sftp_get(self, cmd):
ftp = None
try:
ftp = self.client.open_sftp()
ftp.get(self.sftp_remote, self.sftp_local)
self.completed=True
cmd.set_results(CommandResult(0,"","",self.completed, self.halt))
except Exception, e:
cmd.set_results(CommandResult(1,"",e.__str__(),False, False))
finally:
ftp.close()
self.client.close()
def interrupt(self, cmd):
self.halt=True
self.client.close()
cmd.set_results(CommandResult(1,"","command on host " + self.targetHost + " interrupted ", False, False))
def cancel(self, cmd):
self.client.close()
cmd.set_results(CommandResult(1,"","command on host " + self.targetHost + " canceled ", False, False))
class RemoteExecutionContext(LocalExecutionContext):
trail = set()
"""
Leaves a trail of hosts to which we've ssh'ed during the life of a particular interpreter.
"""
def __init__(self,targetHost,stdin):
LocalExecutionContext.__init__(self, stdin)
self.targetHost=targetHost
pass
def execute(self,cmd):
# prepend env. variables from ExecutionContext.propagate_env_map
# e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
for k, v in self.__class__.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
self.__class__.trail.add(self.targetHost)
# also propagate env from command instance specific map
for k, v in cmd.propagate_env_map.iteritems():
cmd.cmdStr = "%s=%s %s" % (k, v, cmd.cmdStr)
# Escape " for remote execution otherwise it interferes with ssh
cmd.cmdStr = cmd.cmdStr.replace('"', '\\"')
cmd.cmdStr="ssh -o 'StrictHostKeyChecking no' %s \"%s %s\"" % (self.targetHost,SRC_GPPATH,cmd.cmdStr)
LocalExecutionContext.execute(self,cmd)
if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
self.__retry(cmd)
pass
def __retry(self, cmd, count=0):
if count == SSH_MAX_RETRY:
return
time.sleep(SSH_RETRY_DELAY)
LocalExecutionContext.execute(self, cmd)
if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
self.__retry(cmd, count + 1)
class RMIExecutionContext(ExecutionContext):
""" Leave this as a big old TODO: for now. see agent.py for some more details"""
def __init__(self):
ExecutionContext.__init__(self)
raise Exception("RMIExecutionContext - Not implemented")
pass
class Command:
""" TODO:
"""
name=None
cmdStr=None
results=None
exec_context=None
propagate_env_map={} # specific environment variables for this command instance
def __init__(self,name,cmdStr,ctxt=LOCAL,remoteHost=None,stdin=None,nakedExecutionInfo=None):
self.name=name
self.cmdStr=cmdStr
self.exec_context=createExecutionContext(ctxt,remoteHost,stdin=stdin,nakedExecutionInfo=nakedExecutionInfo)
self.remoteHost=remoteHost
def __str__(self):
if self.results:
return "%s cmdStr='%s' had result: %s" % (self.name,self.cmdStr,self.results)
else:
return "%s cmdStr='%s'" % (self.name,self.cmdStr)
# Start a process that will execute the command but don't wait for
# it to complete. Return the Popen object instead.
def runNoWait(self):
faultPoint = os.getenv('GP_COMMAND_FAULT_POINT')
if not faultPoint or (self.name and not self.name.startswith(faultPoint)):
self.exec_context.execute(self, wait=False)
return self.exec_context.proc
def run(self,validateAfter=False):
faultPoint = os.getenv('GP_COMMAND_FAULT_POINT')
if not faultPoint or (self.name and not self.name.startswith(faultPoint)):
self.exec_context.execute(self)
else:
# simulate error
self.results = CommandResult(1,'Fault Injection','Fault Injection' ,False,True)
if validateAfter:
self.validate()
pass
def set_results(self,results):
self.results=results
def get_results(self):
return self.results
def get_stdout(self, strip=True):
if self.results is None:
raise Exception("command not run yet")
return self.results.stdout if not strip else self.results.stdout.strip()
def get_stdout_lines(self):
return self.results.stdout.splitlines()
def get_stderr_lines(self):
return self.results.stderr.splitlines()
def get_return_code(self):
if self.results is None:
raise Exception("command not run yet")
return self.results.rc
def get_stderr(self):
if self.results is None:
raise Exception("command not run yet")
return self.results.stderr
def cancel(self):
if self.exec_context and isinstance(self.exec_context, ExecutionContext):
self.exec_context.cancel(self)
def interrupt(self):
if self.exec_context and isinstance(self.exec_context, ExecutionContext):
self.exec_context.interrupt(self)
def was_successful(self):
if self.results is None:
return False
else:
return self.results.wasSuccessful()
def validate(self,expected_rc=0):
"""Plain vanilla validation which expects a 0 return code."""
if self.results.rc != expected_rc:
raise ExecutionError("non-zero rc: %d" % self.results.rc, self)
class SQLCommand(Command):
"""Base class for commands that execute SQL statements. Classes
that inherit from SQLCommand should set cancel_conn to the pygresql
connection they wish to cancel and check self.cancel_flag."""
def __init__(self,name):
Command.__init__(self, name, cmdStr=None)
self.cancel_flag = False
self.cancel_conn = None
def run(self,validateAfter=False):
raise ExecutionError("programmer error. implementors of SQLCommand must implement run()", self)
def interrupt(self):
# No execution context for SQLCommands
pass
def cancel(self):
# assignment is an atomic operation in python
self.cancel_flag = True
# if self.conn is not set we cannot cancel.
if self.cancel_conn:
DB(self.cancel_conn).cancel()
def run_remote_commands(name, commands):
"""
"""
cmds = {}
pool = WorkerPool()
for host, cmdStr in commands.items():
cmd = Command(name=name, cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
pool.addCommand(cmd)
cmds[host] = cmd
pool.join()
pool.check_results()
return cmds
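# Hedged usage sketch (not part of the original module): fans the same shell
# command out to two hosts with run_remote_commands(). The host names are
# illustrative only.
def _example_run_remote_commands():
    results = run_remote_commands("check-uptime", {"host1": "uptime", "host2": "uptime"})
    for host, cmd in results.items():
        print "%s: %s" % (host, cmd.get_results().stdout.strip())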
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
from copy import deepcopy
from time import time
from django.conf import settings
from django.db.models import Q
from django.db.models.base import ModelBase
from django.utils import six
from django.utils import tree
from django.utils.encoding import force_text
from haystack.constants import VALID_FILTERS, FILTER_SEPARATOR, DEFAULT_ALIAS
from haystack.exceptions import MoreLikeThisError, FacetingError
from haystack.models import SearchResult
from haystack.utils.loading import UnifiedIndex
from haystack.utils import get_model_ct
VALID_GAPS = ['year', 'month', 'day', 'hour', 'minute', 'second']
SPELLING_SUGGESTION_HAS_NOT_RUN = object()
def log_query(func):
"""
A decorator for pseudo-logging search queries. Used in the ``SearchBackend``
to wrap the ``search`` method.
"""
def wrapper(obj, query_string, *args, **kwargs):
start = time()
try:
return func(obj, query_string, *args, **kwargs)
finally:
stop = time()
if settings.DEBUG:
from haystack import connections
connections[obj.connection_alias].queries.append({
'query_string': query_string,
'additional_args': args,
'additional_kwargs': kwargs,
'time': "%.3f" % (stop - start),
'start': start,
'stop': stop,
})
return wrapper
class EmptyResults(object):
hits = 0
docs = []
def __len__(self):
return 0
def __getitem__(self, k):
if isinstance(k, slice):
return []
else:
raise IndexError("It's not here.")
class BaseSearchBackend(object):
"""
Abstract search engine base class.
"""
# Backends should include their own reserved words/characters.
RESERVED_WORDS = []
RESERVED_CHARACTERS = []
def __init__(self, connection_alias, **connection_options):
self.connection_alias = connection_alias
self.timeout = connection_options.get('TIMEOUT', 10)
self.include_spelling = connection_options.get('INCLUDE_SPELLING', False)
self.batch_size = connection_options.get('BATCH_SIZE', 1000)
self.silently_fail = connection_options.get('SILENTLY_FAIL', True)
self.distance_available = connection_options.get('DISTANCE_AVAILABLE', False)
def update(self, index, iterable, commit=True):
"""
Updates the backend when given a SearchIndex and a collection of
documents.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def remove(self, obj_or_string):
"""
Removes a document/object from the backend. Can be either a model
instance or the identifier (i.e. ``app_name.model_name.id``) in the
event the object no longer exists.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def clear(self, models=None, commit=True):
"""
Clears the backend of all documents/objects for a collection of models.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
@log_query
def search(self, query_string, **kwargs):
"""
Takes a query to search on and returns a dictionary.
The query should be a string that is appropriate syntax for the backend.
The returned dictionary should contain the keys 'results' and 'hits'.
The 'results' value should be an iterable of populated SearchResult
objects. The 'hits' should be an integer count of the number of matched
results the search backend found.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
fields='', highlight=False, facets=None,
date_facets=None, query_facets=None,
narrow_queries=None, spelling_query=None,
within=None, dwithin=None, distance_point=None,
models=None, limit_to_registered_models=None,
result_class=None, **extra_kwargs):
# A convenience method most backends should include in order to make
# extension easier.
raise NotImplementedError
def prep_value(self, value):
"""
Hook to give the backend a chance to prep an attribute value before
sending it to the search engine. By default, just force it to unicode.
"""
return force_text(value)
def more_like_this(self, model_instance, additional_query_string=None, result_class=None):
"""
Takes a model object and returns results the backend thinks are similar.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError("Subclasses must provide a way to fetch similar record via the 'more_like_this' method if supported by the backend.")
def extract_file_contents(self, file_obj):
"""
Hook to allow backends which support rich-content types such as PDF,
Word, etc. extraction to process the provided file object and return
the contents for indexing.
Returns None if metadata cannot be extracted; otherwise returns a
dictionary containing at least two keys:
:contents:
Extracted full-text content, if applicable
:metadata:
key:value pairs of text strings
"""
raise NotImplementedError("Subclasses must provide a way to extract metadata via the 'extract' method if supported by the backend.")
def build_schema(self, fields):
"""
Takes a dictionary of fields and returns schema information.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError("Subclasses must provide a way to build their schema.")
def build_models_list(self):
"""
Builds a list of models for searching.
The ``search`` method should use this and the ``django_ct`` field to
narrow the results (unless the user indicates not to). This helps ignore
results from models that are not currently handled and ensures
consistent caching.
"""
from haystack import connections
models = []
for model in connections[self.connection_alias].get_unified_index().get_indexed_models():
models.append(get_model_ct(model))
return models
# Alias for easy loading within SearchQuery objects.
SearchBackend = BaseSearchBackend
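# Hedged sketch (not part of the original module): the minimal skeleton a
# concrete backend fills in. The bodies are placeholders, not a real engine
# integration; shipped backends also wrap search() with @log_query and return
# real SearchResult objects.
class _ExampleSearchBackend(BaseSearchBackend):
    def update(self, index, iterable, commit=True):
        pass  # push documents from the index into the engine
    def remove(self, obj_or_string):
        pass  # delete a single document from the engine
    def clear(self, models=None, commit=True):
        pass  # wipe the documents for the given models (or everything)
    def search(self, query_string, **kwargs):
        return {'results': [], 'hits': 0}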
class SearchNode(tree.Node):
"""
Manages an individual condition within a query.
Most often, this will be a lookup to ensure that a certain word or phrase
appears in the documents being indexed. However, it also supports filtering
types (such as 'lt', 'gt', 'in' and others) for more complex lookups.
This object creates a tree, with children being a list of either more
``SQ`` objects or the expressions/values themselves.
"""
AND = 'AND'
OR = 'OR'
default = AND
# Start compat. Django 1.6 changed how ``tree.Node`` works, so we're going
# to patch back in the original implementation until time to rewrite this
# presents itself.
# See https://github.com/django/django/commit/d3f00bd.
def __init__(self, children=None, connector=None, negated=False):
"""
Constructs a new Node. If no connector is given, the default will be
used.
Warning: You probably don't want to pass in the 'negated' parameter. It
is NOT the same as constructing a node and calling negate() on the
result.
"""
self.children = children and children[:] or []
self.connector = connector or self.default
self.subtree_parents = []
self.negated = negated
# We need this because of django.db.models.query_utils.Q. Q.__init__() is
# problematic, but it is a natural Node subclass in all other respects.
def _new_instance(cls, children=None, connector=None, negated=False):
"""
This is called to create a new instance of this class when we need new
Nodes (or subclasses) in the internal code in this class. Normally, it
just shadows __init__(). However, subclasses with an __init__ signature
that is not an extension of Node.__init__ might need to implement this
method to allow a Node to create a new instance of them (if they have
any extra setting up to do).
"""
obj = SearchNode(children, connector, negated)
obj.__class__ = cls
return obj
_new_instance = classmethod(_new_instance)
def __str__(self):
if self.negated:
return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c in self.children]))
return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in self.children]))
def __deepcopy__(self, memodict):
"""
Utility method used by copy.deepcopy().
"""
obj = SearchNode(connector=self.connector, negated=self.negated)
obj.__class__ = self.__class__
obj.children = copy.deepcopy(self.children, memodict)
obj.subtree_parents = copy.deepcopy(self.subtree_parents, memodict)
return obj
def __len__(self):
"""
The size of a node is the number of children it has.
"""
return len(self.children)
def __bool__(self):
"""
For truth value testing.
"""
return bool(self.children)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __contains__(self, other):
"""
Returns True if 'other' is a direct child of this instance.
"""
return other in self.children
def add(self, node, conn_type):
"""
Adds a new node to the tree. If the conn_type is the same as the root's
current connector type, the node is added to the first level.
Otherwise, the whole tree is pushed down one level and a new root
connector is created, connecting the existing tree and the new node.
"""
if node in self.children and conn_type == self.connector:
return
if len(self.children) < 2:
self.connector = conn_type
if self.connector == conn_type:
if isinstance(node, SearchNode) and (node.connector == conn_type or len(node) == 1):
self.children.extend(node.children)
else:
self.children.append(node)
else:
obj = self._new_instance(self.children, self.connector, self.negated)
self.connector = conn_type
self.children = [obj, node]
def negate(self):
"""
Negate the sense of the root connector. This reorganises the children
so that the current node has a single child: a negated node containing
all the previous children. This slightly odd construction makes adding
new children behave more intuitively.
Interpreting the meaning of this negate is up to client code. This
method is useful for implementing "not" arrangements.
"""
self.children = [self._new_instance(self.children, self.connector, not self.negated)]
self.connector = self.default
def start_subtree(self, conn_type):
"""
Sets up internal state so that new nodes are added to a subtree of the
current node. The conn_type specifies how the sub-tree is joined to the
existing children.
"""
if len(self.children) == 1:
self.connector = conn_type
elif self.connector != conn_type:
self.children = [self._new_instance(self.children, self.connector, self.negated)]
self.connector = conn_type
self.negated = False
self.subtree_parents.append(self.__class__(self.children, self.connector, self.negated))
self.connector = self.default
self.negated = False
self.children = []
def end_subtree(self):
"""
Closes off the most recently unmatched start_subtree() call.
This puts the current state into a node of the parent tree and returns
the current instance's state to be the parent.
"""
obj = self.subtree_parents.pop()
node = self.__class__(self.children, self.connector)
self.connector = obj.connector
self.negated = obj.negated
self.children = obj.children
self.children.append(node)
# End compat.
def __repr__(self):
return '<SQ: %s %s>' % (self.connector, self.as_query_string(self._repr_query_fragment_callback))
def _repr_query_fragment_callback(self, field, filter_type, value):
if six.PY3:
value = force_text(value)
else:
value = force_text(value).encode('utf8')
return "%s%s%s=%s" % (field, FILTER_SEPARATOR, filter_type, value)
def as_query_string(self, query_fragment_callback):
"""
Produces a portion of the search query from the current SQ and its
children.
"""
result = []
for child in self.children:
if hasattr(child, 'as_query_string'):
result.append(child.as_query_string(query_fragment_callback))
else:
expression, value = child
field, filter_type = self.split_expression(expression)
result.append(query_fragment_callback(field, filter_type, value))
conn = ' %s ' % self.connector
query_string = conn.join(result)
if query_string:
if self.negated:
query_string = 'NOT (%s)' % query_string
elif len(self.children) != 1:
query_string = '(%s)' % query_string
return query_string
def split_expression(self, expression):
"""Parses an expression and determines the field and filter type."""
parts = expression.split(FILTER_SEPARATOR)
field = parts[0]
if len(parts) == 1 or parts[-1] not in VALID_FILTERS:
filter_type = 'content'
else:
filter_type = parts.pop()
return (field, filter_type)
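# Hedged usage sketch (not part of the original module): shows how SQ nodes
# (defined just below) combine into a backend-agnostic query string. The
# fragment callback here is illustrative; real backends supply theirs through
# build_query_fragment().
def _example_sq_query_string():
    def fragment(field, filter_type, value):
        return '%s_%s:(%s)' % (field, filter_type, value)
    sq = SQ(content='hello') & SQ(title__startswith='world')
    # -> '(content_content:(hello) AND title_startswith:(world))'
    return sq.as_query_string(fragment)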
class SQ(Q, SearchNode):
"""
Manages an individual condition within a query.
Most often, this will be a lookup to ensure that a certain word or phrase
appears in the documents being indexed. However, it also supports filtering
types (such as 'lt', 'gt', 'in' and others) for more complex lookups.
"""
pass
class BaseSearchQuery(object):
"""
A base class for handling the query itself.
This class acts as an intermediary between the ``SearchQuerySet`` and the
``SearchBackend`` itself.
The ``SearchQuery`` object maintains a tree of ``SQ`` objects. Each ``SQ``
object supports what field it looks up against, what kind of lookup (i.e.
the __'s), what value it's looking for, if it's an AND/OR/NOT and tracks
any children it may have. The ``SearchQuery.build_query`` method starts with
the root of the tree, building part of the final query at each node until
the full final query is ready for the ``SearchBackend``.
Backends should extend this class and provide implementations for
``build_query_fragment``, ``clean`` and ``run``. See the ``solr`` backend for an example
implementation.
"""
def __init__(self, using=DEFAULT_ALIAS):
self.query_filter = SearchNode()
self.order_by = []
self.models = set()
self.boost = {}
self.start_offset = 0
self.end_offset = None
self.highlight = False
self.facets = {}
self.date_facets = {}
self.query_facets = []
self.narrow_queries = set()
#: If defined, fields should be a list of field names - no other values
#: will be retrieved, so the caller must be careful to include django_ct
#: and django_id when using code which expects those to be included in
#: the results.
self.fields = []
# Geospatial-related information
self.within = {}
self.dwithin = {}
self.distance_point = {}
# Internal.
self._raw_query = None
self._raw_query_params = {}
self._more_like_this = False
self._mlt_instance = None
self._results = None
self._hit_count = None
self._facet_counts = None
self._stats = None
self._spelling_suggestion = SPELLING_SUGGESTION_HAS_NOT_RUN
self.spelling_query = None
self.result_class = SearchResult
self.stats = {}
from haystack import connections
self._using = using
self.backend = connections[self._using].get_backend()
def __str__(self):
return self.build_query()
def __getstate__(self):
"""For pickling."""
obj_dict = self.__dict__.copy()
del(obj_dict['backend'])
return obj_dict
def __setstate__(self, obj_dict):
"""For unpickling."""
from haystack import connections
self.__dict__.update(obj_dict)
self.backend = connections[self._using].get_backend()
def has_run(self):
"""Indicates if any query has been been run."""
return None not in (self._results, self._hit_count)
def build_params(self, spelling_query=None):
"""Generates a list of params to use when searching."""
kwargs = {
'start_offset': self.start_offset,
}
if self.order_by:
kwargs['sort_by'] = self.order_by
if self.end_offset is not None:
kwargs['end_offset'] = self.end_offset
if self.highlight:
kwargs['highlight'] = self.highlight
if self.facets:
kwargs['facets'] = self.facets
if self.date_facets:
kwargs['date_facets'] = self.date_facets
if self.query_facets:
kwargs['query_facets'] = self.query_facets
if self.narrow_queries:
kwargs['narrow_queries'] = self.narrow_queries
if spelling_query:
kwargs['spelling_query'] = spelling_query
elif self.spelling_query:
kwargs['spelling_query'] = self.spelling_query
if self.boost:
kwargs['boost'] = self.boost
if self.within:
kwargs['within'] = self.within
if self.dwithin:
kwargs['dwithin'] = self.dwithin
if self.distance_point:
kwargs['distance_point'] = self.distance_point
if self.result_class:
kwargs['result_class'] = self.result_class
if self.fields:
kwargs['fields'] = self.fields
if self.models:
kwargs['models'] = self.models
return kwargs
def run(self, spelling_query=None, **kwargs):
"""Builds and executes the query. Returns a list of search results."""
final_query = self.build_query()
search_kwargs = self.build_params(spelling_query=spelling_query)
if kwargs:
search_kwargs.update(kwargs)
results = self.backend.search(final_query, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = self.post_process_facets(results)
self._spelling_suggestion = results.get('spelling_suggestion', None)
def run_mlt(self, **kwargs):
"""
Executes the More Like This. Returns a list of search results similar
to the provided document (and optionally query).
"""
if self._more_like_this is False or self._mlt_instance is None:
raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")
search_kwargs = {
'result_class': self.result_class,
}
if self.models:
search_kwargs['models'] = self.models
if kwargs:
search_kwargs.update(kwargs)
additional_query_string = self.build_query()
results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
def run_raw(self, **kwargs):
"""Executes a raw query. Returns a list of search results."""
search_kwargs = self.build_params()
search_kwargs.update(self._raw_query_params)
if kwargs:
search_kwargs.update(kwargs)
results = self.backend.search(self._raw_query, **search_kwargs)
self._results = results.get('results', [])
self._hit_count = results.get('hits', 0)
self._facet_counts = results.get('facets', {})
self._spelling_suggestion = results.get('spelling_suggestion', None)
def get_count(self):
"""
Returns the number of results the backend found for the query.
If the query has not been run, this will execute the query and store
the results.
"""
if self._hit_count is None:
# Limit the slice to 1 so we get a count without consuming
# everything.
if not self.end_offset:
self.end_offset = 1
if self._more_like_this:
# Special case for MLT.
self.run_mlt()
elif self._raw_query:
# Special case for raw queries.
self.run_raw()
else:
self.run()
return self._hit_count
def get_results(self, **kwargs):
"""
Returns the results received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._results is None:
if self._more_like_this:
# Special case for MLT.
self.run_mlt(**kwargs)
elif self._raw_query:
# Special case for raw queries.
self.run_raw(**kwargs)
else:
self.run(**kwargs)
return self._results
def get_facet_counts(self):
"""
Returns the facet counts received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._facet_counts is None:
self.run()
return self._facet_counts
def get_stats(self):
"""
Returns the stats received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._stats is None:
self.run()
return self._stats
def set_spelling_query(self, spelling_query):
self.spelling_query = spelling_query
def get_spelling_suggestion(self, preferred_query=None):
"""
Returns the spelling suggestion received from the backend.
If the query has not been run, this will execute the query and store
the results.
"""
if self._spelling_suggestion is SPELLING_SUGGESTION_HAS_NOT_RUN:
self.run(spelling_query=preferred_query)
return self._spelling_suggestion
def boost_fragment(self, boost_word, boost_value):
"""Generates query fragment for boosting a single word/value pair."""
return "%s^%s" % (boost_word, boost_value)
def matching_all_fragment(self):
"""Generates the query that matches all documents."""
return '*'
def build_query(self):
"""
Interprets the collected query metadata and builds the final query to
be sent to the backend.
"""
final_query = self.query_filter.as_query_string(self.build_query_fragment)
if not final_query:
# Match all.
final_query = self.matching_all_fragment()
if self.boost:
boost_list = []
for boost_word, boost_value in self.boost.items():
boost_list.append(self.boost_fragment(boost_word, boost_value))
final_query = "%s %s" % (final_query, " ".join(boost_list))
return final_query
def combine(self, rhs, connector=SQ.AND):
if connector == SQ.AND:
self.add_filter(rhs.query_filter)
elif connector == SQ.OR:
self.add_filter(rhs.query_filter, use_or=True)
# Methods for backends to implement.
def build_query_fragment(self, field, filter_type, value):
"""
Generates a query fragment from a field, filter type and a value.
Must be implemented in backends as this will be highly backend specific.
"""
raise NotImplementedError("Subclasses must provide a way to generate query fragments via the 'build_query_fragment' method.")
# Standard methods to alter the query.
def clean(self, query_fragment):
"""
Provides a mechanism for sanitizing user input before presenting the
value to the backend.
A basic (override-able) implementation is provided.
"""
if not isinstance(query_fragment, six.string_types):
return query_fragment
words = query_fragment.split()
cleaned_words = []
for word in words:
if word in self.backend.RESERVED_WORDS:
word = word.replace(word, word.lower())
for char in self.backend.RESERVED_CHARACTERS:
word = word.replace(char, '\\%s' % char)
cleaned_words.append(word)
return ' '.join(cleaned_words)
def build_not_query(self, query_string):
if ' ' in query_string:
query_string = "(%s)" % query_string
return u"NOT %s" % query_string
def build_exact_query(self, query_string):
return u'"%s"' % query_string
def add_filter(self, query_filter, use_or=False):
"""
Adds a SQ to the current query.
"""
if use_or:
connector = SQ.OR
else:
connector = SQ.AND
if self.query_filter and query_filter.connector != connector and len(query_filter) > 1:
self.query_filter.start_subtree(connector)
subtree = True
else:
subtree = False
for child in query_filter.children:
if isinstance(child, tree.Node):
self.query_filter.start_subtree(connector)
self.add_filter(child)
self.query_filter.end_subtree()
else:
expression, value = child
self.query_filter.add((expression, value), connector)
connector = query_filter.connector
if query_filter.negated:
self.query_filter.negate()
if subtree:
self.query_filter.end_subtree()
def add_order_by(self, field):
"""Orders the search result by a field."""
self.order_by.append(field)
def clear_order_by(self):
"""
Clears out all ordering that has been already added, reverting the
query to relevancy.
"""
self.order_by = []
def add_model(self, model):
"""
Restricts the query to require matches in the given model.
This builds upon previous additions, so you can limit to multiple models
by chaining this method several times.
"""
if not isinstance(model, ModelBase):
raise AttributeError('The model being added to the query must derive from Model.')
self.models.add(model)
def set_limits(self, low=None, high=None):
"""Restricts the query by altering either the start, end or both offsets."""
if low is not None:
self.start_offset = int(low)
if high is not None:
self.end_offset = int(high)
def clear_limits(self):
"""Clears any existing limits."""
self.start_offset, self.end_offset = 0, None
def add_boost(self, term, boost_value):
"""Adds a boosted term and the amount to boost it to the query."""
self.boost[term] = boost_value
def raw_search(self, query_string, **kwargs):
"""
Runs a raw query (no parsing) against the backend.
This method causes the SearchQuery to ignore the standard query
generating facilities, running only what was provided instead.
Note that any kwargs passed along will override anything provided
to the rest of the ``SearchQuerySet``.
"""
self._raw_query = query_string
self._raw_query_params = kwargs
def more_like_this(self, model_instance):
"""
Allows backends with support for "More Like This" to return results
similar to the provided instance.
"""
self._more_like_this = True
self._mlt_instance = model_instance
def add_stats_query(self, stats_field, stats_facets):
"""Adds stats and stats_facets queries for the Solr backend."""
self.stats[stats_field] = stats_facets
def add_highlight(self, **kwargs):
"""Adds highlighting to the search results."""
self.highlight = kwargs or True
def add_within(self, field, point_1, point_2):
"""Adds bounding box parameters to search query."""
from haystack.utils.geo import ensure_point
self.within = {
'field': field,
'point_1': ensure_point(point_1),
'point_2': ensure_point(point_2),
}
def add_dwithin(self, field, point, distance):
"""Adds radius-based parameters to search query."""
from haystack.utils.geo import ensure_point, ensure_distance
self.dwithin = {
'field': field,
'point': ensure_point(point),
'distance': ensure_distance(distance),
}
def add_distance(self, field, point):
"""
Denotes that results should include distance measurements from the
point passed in.
"""
from haystack.utils.geo import ensure_point
self.distance_point = {
'field': field,
'point': ensure_point(point),
}
def add_field_facet(self, field, **options):
"""Adds a regular facet on a field."""
from haystack import connections
field_name = connections[self._using].get_unified_index().get_facet_fieldname(field)
self.facets[field_name] = options.copy()
def add_date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
"""Adds a date-based facet on a field."""
from haystack import connections
if gap_by not in VALID_GAPS:
raise FacetingError("The gap_by ('%s') must be one of the following: %s." % (gap_by, ', '.join(VALID_GAPS)))
details = {
'start_date': start_date,
'end_date': end_date,
'gap_by': gap_by,
'gap_amount': gap_amount,
}
self.date_facets[connections[self._using].get_unified_index().get_facet_fieldname(field)] = details
def add_query_facet(self, field, query):
"""Adds a query facet on a field."""
from haystack import connections
self.query_facets.append((connections[self._using].get_unified_index().get_facet_fieldname(field), query))
def add_narrow_query(self, query):
"""
Narrows a search to a subset of all documents per the query.
Generally used in conjunction with faceting.
"""
self.narrow_queries.add(query)
def set_result_class(self, klass):
"""
Sets the result class to use for results.
Overrides any previous usages. If ``None`` is provided, Haystack will
revert back to the default ``SearchResult`` object.
"""
if klass is None:
klass = SearchResult
self.result_class = klass
def post_process_facets(self, results):
# Handle renaming the facet fields. Undecorate and all that.
from haystack import connections
revised_facets = {}
field_data = connections[self._using].get_unified_index().all_searchfields()
for facet_type, field_details in results.get('facets', {}).items():
temp_facets = {}
for field, field_facets in field_details.items():
fieldname = field
if field in field_data and hasattr(field_data[field], 'get_facet_for_name'):
fieldname = field_data[field].get_facet_for_name()
temp_facets[fieldname] = field_facets
revised_facets[facet_type] = temp_facets
return revised_facets
def using(self, using=None):
"""
Allows for overriding which connection should be used. This
disables the use of routers when performing the query.
If ``None`` is provided, it has no effect on what backend is used.
"""
return self._clone(using=using)
def _reset(self):
"""
Resets the instance's internal state to appear as though no query has
been run before. Only need to tweak a few variables we check.
"""
self._results = None
self._hit_count = None
self._facet_counts = None
self._spelling_suggestion = SPELLING_SUGGESTION_HAS_NOT_RUN
def _clone(self, klass=None, using=None):
if using is None:
using = self._using
else:
from haystack import connections
klass = connections[using].query
if klass is None:
klass = self.__class__
clone = klass(using=using)
clone.query_filter = deepcopy(self.query_filter)
clone.order_by = self.order_by[:]
clone.models = self.models.copy()
clone.boost = self.boost.copy()
clone.highlight = self.highlight
clone.stats = self.stats.copy()
clone.facets = self.facets.copy()
clone.date_facets = self.date_facets.copy()
clone.query_facets = self.query_facets[:]
clone.narrow_queries = self.narrow_queries.copy()
clone.start_offset = self.start_offset
clone.end_offset = self.end_offset
clone.result_class = self.result_class
clone.within = self.within.copy()
clone.dwithin = self.dwithin.copy()
clone.distance_point = self.distance_point.copy()
clone._raw_query = self._raw_query
clone._raw_query_params = self._raw_query_params
clone.spelling_query = self.spelling_query
clone._more_like_this = self._more_like_this
clone._mlt_instance = self._mlt_instance
return clone
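# Hedged usage sketch (not part of the original module): the typical call
# pattern a SearchQuerySet-style caller follows against a concrete SearchQuery.
# Assumes `query` came from a configured haystack connection, e.g.
# connections['default'].get_query(); the field names are illustrative only.
def _example_query_usage(query):
    query.add_filter(SQ(content='hello'))
    query.add_filter(SQ(title__startswith='w'), use_or=True)
    query.add_order_by('-pub_date')
    query.set_limits(low=0, high=10)
    return query.get_results()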
class BaseEngine(object):
backend = BaseSearchBackend
query = BaseSearchQuery
unified_index = UnifiedIndex
def __init__(self, using=None):
if using is None:
using = DEFAULT_ALIAS
self.using = using
self.options = settings.HAYSTACK_CONNECTIONS.get(self.using, {})
self.queries = []
self._index = None
self._backend = None
def get_backend(self):
if self._backend is None:
self._backend = self.backend(self.using, **self.options)
return self._backend
def reset_sessions(self):
"""Reset any transient connections, file handles, etc."""
self._backend = None
def get_query(self):
return self.query(using=self.using)
def reset_queries(self):
del self.queries[:]
def get_unified_index(self):
if self._index is None:
self._index = self.unified_index(self.options.get('EXCLUDED_INDEXES', []))
return self._index
|
|
"""
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
from __future__ import unicode_literals
import inspect
from collections import namedtuple
from django.core.exceptions import FieldDoesNotExist
from django.db.backends import utils
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct')
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
pass
class QueryWrapper(object):
"""
A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
contains_aggregate = False
def __init__(self, sql, params):
self.data = sql, list(params)
def as_sql(self, compiler=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
& and |).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + list(kwargs.items()))
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def clone(self):
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(self, reuse, allow_joins=allow_joins, split_subq=False)
query.promote_joins(joins)
return clause
@classmethod
def _refs_aggregate(cls, obj, existing_aggregates):
if not isinstance(obj, tree.Node):
aggregate, aggregate_lookups = refs_aggregate(obj[0].split(LOOKUP_SEP), existing_aggregates)
if not aggregate and hasattr(obj[1], 'refs_aggregate'):
return obj[1].refs_aggregate(existing_aggregates)
return aggregate, aggregate_lookups
for c in obj.children:
aggregate, aggregate_lookups = cls._refs_aggregate(c, existing_aggregates)
if aggregate:
return aggregate, aggregate_lookups
return False, ()
def refs_aggregate(self, existing_aggregates):
if not existing_aggregates:
return False
return self._refs_aggregate(self, existing_aggregates)
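# Hedged usage sketch (not part of this module): Q objects are combined with
# &, | and ~ and handed to QuerySet.filter(). The queryset and its field names
# here are hypothetical, for illustration only.
def _example_q_filter(queryset):
    # adults whose name starts with 'A'
    return queryset.filter(Q(name__startswith='A') & ~Q(age__lt=18))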
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
def __get__(self, instance, cls=None):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
non_deferred_model = instance._meta.proxy_for_model
opts = non_deferred_model._meta
assert instance is not None
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
f = opts.get_field(self.field_name)
except FieldDoesNotExist:
f = [f for f in opts.fields if f.attname == self.field_name][0]
name = f.name
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance, name)
if val is None:
instance.refresh_from_db(fields=[self.field_name])
val = getattr(instance, self.field_name)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
def _check_parent_chain(self, instance, name):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be-fetched
field is a primary key field.
"""
opts = instance._meta
f = opts.get_field(name)
link_field = opts.get_ancestor_link(f.model)
if f.primary_key and f != link_field:
return getattr(instance, link_field.attname)
return None
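# Illustrative sketch (not part of the original module): DeferredAttribute is
# installed as a descriptor on the dynamically created deferred class, so the
# first read of a deferred field issues one extra query via refresh_from_db().
# The model name `Entry` below is hypothetical.
#
#     entry = Entry.objects.only('headline').get(pk=1)  # 'body' is deferred
#     entry.body  # triggers refresh_from_db(fields=['body']) and caches the value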
class RegisterLookupMixin(object):
def _get_lookup(self, lookup_name):
try:
return self.class_lookups[lookup_name]
except KeyError:
# To allow for inheritance, check parent class' class_lookups.
for parent in inspect.getmro(self.__class__):
if 'class_lookups' not in parent.__dict__:
continue
if lookup_name in parent.class_lookups:
return parent.class_lookups[lookup_name]
except AttributeError:
# This class didn't have any class_lookups
pass
return None
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@classmethod
def register_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if 'class_lookups' not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
return lookup
@classmethod
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
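# Illustrative sketch (not part of the original module): register_lookup() is how
# custom lookups and transforms become usable in filter() expressions. Assuming a
# Lookup subclass `NotEqual` with lookup_name = 'ne' and a hypothetical `Author`
# model:
#
#     from django.db.models import Field
#     Field.register_lookup(NotEqual)
#     Author.objects.filter(name__ne='Jack')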
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
     * restricted - a boolean indicating whether the field list has been
       manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
raise InvalidQuery("Field %s.%s cannot be both deferred"
" and traversed using select_related"
" at the same time." %
(field.model._meta.object_name, field.name))
return False
return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
"""
Returns a class object that is a copy of "model" with the specified "attrs"
    being replaced with DeferredAttribute objects.
"""
if not attrs:
return model
opts = model._meta
    # Never create deferred models based on a deferred model.
if model._deferred:
# Deferred models are proxies for the non-deferred model. We never
# create chains of defers => proxy_for_model is the non-deferred
# model.
model = opts.proxy_for_model
# The app registry wants a unique name for each model, otherwise the new
# class won't be created (we get an exception). Therefore, we generate
# the name using the passed in attrs. It's OK to reuse an existing class
# object if the attrs are identical.
name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(attrs)))
name = utils.truncate_name(name, 80, 32)
try:
return opts.apps.get_model(model._meta.app_label, name)
except LookupError:
class Meta:
proxy = True
apps = opts.apps
app_label = opts.app_label
overrides = {attr: DeferredAttribute(attr, model) for attr in attrs}
overrides["Meta"] = Meta
overrides["__module__"] = model.__module__
overrides["_deferred"] = True
return type(str(name), (model,), overrides)
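# Illustrative sketch (not part of the original module): the factory returns a
# proxy subclass whose deferred attnames are replaced by DeferredAttribute
# descriptors; identical attrs reuse the same generated class. `Entry` is a
# hypothetical model.
#
#     DeferredEntry = deferred_class_factory(Entry, {'body', 'rating'})
#     DeferredEntry._deferred                  # True
#     DeferredEntry.__dict__['body']           # a DeferredAttribute instance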
def refs_aggregate(lookup_parts, aggregates):
"""
    A helper function to check if the lookup_parts contain references
to the given aggregates set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in aggregates and aggregates[level_n_lookup].contains_aggregate:
return aggregates[level_n_lookup], lookup_parts[n:]
return False, ()
def refs_expression(lookup_parts, annotations):
"""
    A helper function to check if the lookup_parts contain references
to the given annotations set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
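# Illustrative sketch (not part of the original helpers): every prefix of the
# lookup parts is checked because annotation names may themselves contain
# LOOKUP_SEP. With a hypothetical annotations = {'max_id': Max('id')}:
#
#     refs_expression(['max_id', 'gt'], annotations)
#     # -> (annotations['max_id'], ['gt'])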
def check_rel_lookup_compatibility(model, target_opts, field):
"""
Check that self.model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model or
opts.concrete_model in model._meta.get_parent_list() or
model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
    #     place = OneToOneField(Place, primary_key=True)
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return (
check(target_opts) or
(getattr(field, 'primary_key', False) and check(field.model._meta))
)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Volume Code."""
import time
import ddt
import mock
import os_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import imageutils
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
import cinder.volume
from cinder.volume import api as volume_api
from cinder.volume.flows.manager import create_volume as create_volume_manager
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volutils
from cinder.volume import volume_types
QUOTAS = quota.QUOTAS
CONF = cfg.CONF
def create_snapshot(volume_id, size=1, metadata=None, ctxt=None,
**kwargs):
"""Create a snapshot object."""
metadata = metadata or {}
snap = objects.Snapshot(ctxt or context.get_admin_context())
snap.volume_size = size
snap.user_id = kwargs.get('user_id', fake.USER_ID)
snap.project_id = kwargs.get('project_id', fake.PROJECT_ID)
snap.volume_id = volume_id
snap.status = fields.SnapshotStatus.CREATING
    snap.metadata = metadata
snap.update(kwargs)
snap.create()
return snap
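# Illustrative sketch (not part of the original tests): typical use of the helper
# from a test method, assuming self.context and a volume made via tests_utils.
#
#     volume = tests_utils.create_volume(self.context, size=1)
#     snap = create_snapshot(volume.id, size=volume.size, ctxt=self.context)
#     self.assertEqual(fields.SnapshotStatus.CREATING, snap.status)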
@ddt.ddt
class VolumeMigrationTestCase(base.BaseVolumeTestCase):
def setUp(self):
super(VolumeMigrationTestCase, self).setUp()
self._clear_patch = mock.patch('cinder.volume.utils.clear_volume',
autospec=True)
self._clear_patch.start()
self.expected_status = 'available'
def tearDown(self):
super(VolumeMigrationTestCase, self).tearDown()
self._clear_patch.stop()
def test_migrate_volume_driver(self):
"""Test volume migration done by driver."""
# Mock driver and rpc functions
self.mock_object(self.volume.driver, 'migrate_volume',
lambda x, y, z, new_type_id=None: (
True, {'user_id': fake.USER_ID}))
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host,
migration_status='migrating')
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.migrate_volume(self.context, volume, host_obj, False)
# check volume properties
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('newhost', volume.host)
self.assertEqual('success', volume.migration_status)
def _fake_create_volume(self, ctxt, volume, req_spec, filters,
allow_reschedule=True):
return db.volume_update(ctxt, volume['id'],
{'status': self.expected_status})
def test_migrate_volume_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume') as \
mock_migrate,\
mock.patch.object(self.volume.driver, 'create_export') as \
mock_create_export:
# Exception case at self.driver.migrate_volume and create_export
mock_migrate.side_effect = processutils.ProcessExecutionError
mock_create_export.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume,
host_obj,
False)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
self.assertEqual('available', volume.status)
@mock.patch('cinder.compute.API')
@mock.patch('cinder.volume.manager.VolumeManager.'
'migrate_volume_completion')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_migrate_volume_generic(self, volume_get,
migrate_volume_completion,
nova_api):
fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
new_volume_obj = fake_volume.fake_volume_obj(self.context,
**fake_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
volume_get.return_value = fake_new_volume
update_server_volume = nova_api.return_value.update_server_volume
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
with mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume:
self.volume._migrate_volume_generic(self.context, volume,
host_obj, None)
mock_copy_volume.assert_called_with(self.context, volume,
new_volume_obj,
remote='dest')
migrate_volume_completion.assert_called_with(
self.context, volume, new_volume_obj, error=False)
self.assertFalse(update_server_volume.called)
@mock.patch('cinder.compute.API')
@mock.patch('cinder.volume.manager.VolumeManager.'
'migrate_volume_completion')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_migrate_volume_generic_attached_volume(self, volume_get,
migrate_volume_completion,
nova_api):
attached_host = 'some-host'
fake_volume_id = fake.VOLUME_ID
fake_db_new_volume = {'status': 'available', 'id': fake_volume_id}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
fake_uuid = fakes.get_fake_uuid()
update_server_volume = nova_api.return_value.update_server_volume
volume_get.return_value = fake_new_volume
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
volume_attach = tests_utils.attach_volume(
self.context, volume['id'], fake_uuid, attached_host, '/dev/vda')
self.assertIsNotNone(volume_attach['volume_attachment'][0]['id'])
self.assertEqual(
fake_uuid, volume_attach['volume_attachment'][0]['instance_uuid'])
self.assertEqual('in-use', volume_attach['status'])
self.volume._migrate_volume_generic(self.context, volume,
host_obj, None)
self.assertFalse(migrate_volume_completion.called)
update_server_volume.assert_called_with(self.context, fake_uuid,
volume['id'], fake_volume_id)
@mock.patch('cinder.objects.volume.Volume.save')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.create_volume')
@mock.patch('cinder.compute.API')
@mock.patch('cinder.volume.manager.VolumeManager.'
'migrate_volume_completion')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_migrate_volume_generic_volume_from_snap(self, volume_get,
migrate_volume_completion,
nova_api, create_volume,
save):
def fake_create_volume(*args, **kwargs):
context, volume, request_spec, filter_properties = args
fake_db = mock.Mock()
task = create_volume_manager.ExtractVolumeSpecTask(fake_db)
specs = task.execute(context, volume, {})
self.assertEqual('raw', specs['type'])
def fake_copy_volume_data_with_chk_param(*args, **kwargs):
context, src, dest = args
self.assertEqual(src['snapshot_id'], dest['snapshot_id'])
fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
volume_get.return_value = fake_new_volume
volume_from_snap = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
volume_from_snap['snapshot_id'] = fake.SNAPSHOT_ID
create_volume.side_effect = fake_create_volume
with mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume:
mock_copy_volume.side_effect = fake_copy_volume_data_with_chk_param
self.volume._migrate_volume_generic(self.context, volume_from_snap,
host_obj, None)
@mock.patch('cinder.objects.volume.Volume.save')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.create_volume')
@mock.patch('cinder.compute.API')
@mock.patch('cinder.volume.manager.VolumeManager.'
'migrate_volume_completion')
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_migrate_volume_generic_for_clone(self, volume_get,
migrate_volume_completion,
nova_api, create_volume, save):
def fake_create_volume(*args, **kwargs):
context, volume, request_spec, filter_properties = args
fake_db = mock.Mock()
task = create_volume_manager.ExtractVolumeSpecTask(fake_db)
specs = task.execute(context, volume, {})
self.assertEqual('raw', specs['type'])
def fake_copy_volume_data_with_chk_param(*args, **kwargs):
context, src, dest = args
self.assertEqual(src['source_volid'], dest['source_volid'])
fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
volume_get.return_value = fake_new_volume
clone = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
clone['source_volid'] = fake.VOLUME2_ID
create_volume.side_effect = fake_create_volume
with mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume:
mock_copy_volume.side_effect = fake_copy_volume_data_with_chk_param
self.volume._migrate_volume_generic(self.context, clone,
host_obj, None)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'update_migrated_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')
def test_migrate_volume_for_volume_generic(self, create_volume,
rpc_delete_volume,
update_migrated_volume):
fake_volume = tests_utils.create_volume(self.context, size=1,
previous_status='available',
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
with mock.patch.object(self.volume.driver, 'migrate_volume') as \
mock_migrate_volume,\
mock.patch.object(self.volume, '_copy_volume_data'),\
mock.patch.object(self.volume.driver, 'delete_volume') as \
delete_volume:
create_volume.side_effect = self._fake_create_volume
self.volume.migrate_volume(self.context, fake_volume, host_obj,
True)
volume = objects.Volume.get_by_id(context.get_admin_context(),
fake_volume.id)
self.assertEqual('newhost', volume.host)
self.assertEqual('success', volume.migration_status)
self.assertFalse(mock_migrate_volume.called)
self.assertFalse(delete_volume.called)
self.assertTrue(rpc_delete_volume.called)
self.assertTrue(update_migrated_volume.called)
def test_migrate_volume_generic_copy_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion'),\
mock.patch.object(self.volume.driver, 'create_export'):
# Exception case at migrate_volume_generic
# source_volume['migration_status'] is 'migrating'
mock_create_volume.side_effect = self._fake_create_volume
mock_copy_volume.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume,
host_obj,
True)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
self.assertEqual('available', volume.status)
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_migrate_volume_with_glance_metadata(self, mock_qemu_info):
volume = self._create_volume_from_image(clone_image_volume=True)
glance_metadata = volume.glance_metadata
        # We imitate the behavior of rpcapi by serializing and then
        # deserializing the volume object we created earlier.
serializer = objects.base.CinderObjectSerializer()
serialized_volume = serializer.serialize_entity(self.context, volume)
volume = serializer.deserialize_entity(self.context, serialized_volume)
image_info = imageutils.QemuImgInfo()
image_info.virtual_size = '1073741824'
mock_qemu_info.return_value = image_info
host_obj = {'host': 'newhost', 'capabilities': {}}
with mock.patch.object(self.volume.driver,
'migrate_volume') as mock_migrate_volume:
mock_migrate_volume.side_effect = (
lambda x, y, z, new_type_id=None: (
True, {'user_id': fake.USER_ID}))
self.volume.migrate_volume(self.context, volume, host_obj,
False)
self.assertEqual('newhost', volume.host)
self.assertEqual('success', volume.migration_status)
self.assertEqual(glance_metadata, volume.glance_metadata)
@mock.patch('cinder.db.volume_update')
def test_update_migrated_volume(self, volume_update):
fake_host = 'fake_host'
fake_new_host = 'fake_new_host'
fake_update = {'_name_id': fake.VOLUME2_NAME_ID,
'provider_location': 'updated_location'}
fake_elevated = context.RequestContext(fake.USER_ID, self.project_id,
is_admin=True)
volume = tests_utils.create_volume(self.context, size=1,
status='available',
host=fake_host)
new_volume = tests_utils.create_volume(
self.context, size=1,
status='available',
provider_location='fake_provider_location',
_name_id=fake.VOLUME_NAME_ID,
host=fake_new_host)
new_volume._name_id = fake.VOLUME_NAME_ID
new_volume.provider_location = 'fake_provider_location'
fake_update_error = {'_name_id': new_volume._name_id,
'provider_location':
new_volume.provider_location}
expected_update = {'_name_id': volume._name_id,
'provider_location': volume.provider_location}
with mock.patch.object(self.volume.driver,
'update_migrated_volume') as migrate_update,\
mock.patch.object(self.context, 'elevated') as elevated:
migrate_update.return_value = fake_update
elevated.return_value = fake_elevated
self.volume.update_migrated_volume(self.context, volume,
new_volume, 'available')
volume_update.assert_has_calls((
mock.call(fake_elevated, new_volume.id, expected_update),
mock.call(fake_elevated, volume.id, fake_update)))
# Test the case for update_migrated_volume not implemented
# for the driver.
migrate_update.reset_mock()
volume_update.reset_mock()
# Reset the volume objects to their original value, since they
# were changed in the last call.
new_volume._name_id = fake.VOLUME_NAME_ID
new_volume.provider_location = 'fake_provider_location'
migrate_update.side_effect = NotImplementedError
self.volume.update_migrated_volume(self.context, volume,
new_volume, 'available')
volume_update.assert_has_calls((
mock.call(fake_elevated, new_volume.id, fake_update),
mock.call(fake_elevated, volume.id, fake_update_error)))
def test_migrate_volume_generic_create_volume_error(self):
self.expected_status = 'error'
with mock.patch.object(self.volume.driver, 'migrate_volume'), \
mock.patch.object(volume_rpcapi.VolumeAPI,
'create_volume') as mock_create_volume, \
mock.patch.object(self.volume, '_clean_temporary_volume') as \
clean_temporary_volume:
# Exception case at the creation of the new temporary volume
mock_create_volume.side_effect = self._fake_create_volume
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(exception.VolumeMigrationFailed,
self.volume.migrate_volume,
self.context,
volume,
host_obj,
True)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
self.assertTrue(clean_temporary_volume.called)
self.expected_status = 'available'
def test_migrate_volume_generic_timeout_error(self):
CONF.set_override("migration_create_volume_timeout_secs", 2)
with mock.patch.object(self.volume.driver, 'migrate_volume'), \
mock.patch.object(volume_rpcapi.VolumeAPI,
'create_volume') as mock_create_volume, \
mock.patch.object(self.volume, '_clean_temporary_volume') as \
clean_temporary_volume, \
mock.patch.object(time, 'sleep'):
# Exception case at the timeout of the volume creation
self.expected_status = 'creating'
mock_create_volume.side_effect = self._fake_create_volume
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(exception.VolumeMigrationFailed,
self.volume.migrate_volume,
self.context,
volume,
host_obj,
True)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
self.assertTrue(clean_temporary_volume.called)
self.expected_status = 'available'
def test_migrate_volume_generic_create_export_error(self):
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(self.volume, '_copy_volume_data') as \
mock_copy_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion'),\
mock.patch.object(self.volume.driver, 'create_export') as \
mock_create_export:
# Exception case at create_export
mock_create_volume.side_effect = self._fake_create_volume
mock_copy_volume.side_effect = processutils.ProcessExecutionError
mock_create_export.side_effect = processutils.ProcessExecutionError
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume,
host_obj,
True)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
def test_migrate_volume_generic_migrate_volume_completion_error(self):
def fake_migrate_volume_completion(ctxt, volume, new_volume,
error=False):
db.volume_update(ctxt, volume['id'],
{'migration_status': 'completing'})
raise processutils.ProcessExecutionError
with mock.patch.object(self.volume.driver, 'migrate_volume'),\
mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\
as mock_create_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'),\
mock.patch.object(self.volume, 'migrate_volume_completion')\
as mock_migrate_compl,\
mock.patch.object(self.volume.driver, 'create_export'), \
mock.patch.object(self.volume, '_attach_volume') \
as mock_attach, \
mock.patch.object(self.volume, '_detach_volume'), \
mock.patch.object(os_brick.initiator.connector,
'get_connector_properties') \
as mock_get_connector_properties, \
mock.patch.object(volutils, 'copy_volume') as mock_copy, \
mock.patch.object(volume_rpcapi.VolumeAPI,
'get_capabilities') \
as mock_get_capabilities:
# Exception case at delete_volume
# source_volume['migration_status'] is 'completing'
mock_create_volume.side_effect = self._fake_create_volume
mock_migrate_compl.side_effect = fake_migrate_volume_completion
mock_get_connector_properties.return_value = {}
mock_attach.side_effect = [{'device': {'path': 'bar'}},
{'device': {'path': 'foo'}}]
mock_get_capabilities.return_value = {'sparse_copy_volume': True}
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.assertRaises(processutils.ProcessExecutionError,
self.volume.migrate_volume,
self.context,
volume,
host_obj,
True)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEqual('error', volume['migration_status'])
self.assertEqual('available', volume['status'])
mock_copy.assert_called_once_with('foo', 'bar', 0, '1M',
sparse=True)
def fake_attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
tests_utils.attach_volume(ctxt, volume.id,
instance_uuid, host_name,
'/dev/vda')
def _test_migrate_volume_completion(self, status='available',
instance_uuid=None, attached_host=None,
retyping=False,
previous_status='available'):
        initial_status = 'retyping' if retyping else status
old_volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host,
status=initial_status,
migration_status='migrating',
previous_status=previous_status)
attachment = None
if status == 'in-use':
vol = tests_utils.attach_volume(self.context, old_volume.id,
instance_uuid, attached_host,
'/dev/vda')
self.assertEqual('in-use', vol['status'])
attachment = vol['volume_attachment'][0]
target_status = 'target:%s' % old_volume.id
new_host = CONF.host + 'new'
new_volume = tests_utils.create_volume(self.context, size=0,
host=new_host,
migration_status=target_status)
with mock.patch.object(self.volume, 'detach_volume') as \
mock_detach_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI,
'delete_volume') as mock_delete_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI,
'attach_volume') as mock_attach_volume,\
mock.patch.object(volume_rpcapi.VolumeAPI,
'update_migrated_volume'),\
mock.patch.object(self.volume.driver, 'attach_volume'):
mock_attach_volume.side_effect = self.fake_attach_volume
old_volume_host = old_volume.host
new_volume_host = new_volume.host
self.volume.migrate_volume_completion(self.context, old_volume,
new_volume)
after_new_volume = objects.Volume.get_by_id(self.context,
new_volume.id)
after_old_volume = objects.Volume.get_by_id(self.context,
old_volume.id)
if status == 'in-use':
mock_detach_volume.assert_called_with(self.context,
old_volume.id,
attachment['id'])
attachments = db.volume_attachment_get_all_by_instance_uuid(
self.context, instance_uuid)
mock_attach_volume.assert_called_once_with(
self.context,
old_volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw'
)
self.assertIsNotNone(attachments)
self.assertEqual(attached_host,
attachments[0]['attached_host'])
self.assertEqual(instance_uuid,
attachments[0]['instance_uuid'])
else:
self.assertFalse(mock_detach_volume.called)
self.assertTrue(mock_delete_volume.called)
# NOTE(sborkows): the migrate_volume_completion method alters
# old and new volume objects, so we need to check the equality
# between the former host value and the actual one.
self.assertEqual(old_volume_host, after_new_volume.host)
self.assertEqual(new_volume_host, after_old_volume.host)
def test_migrate_volume_completion_retype_available(self):
self._test_migrate_volume_completion('available', retyping=True)
def test_migrate_volume_completion_retype_in_use(self):
self._test_migrate_volume_completion(
'in-use',
'83c969d5-065e-4c9c-907d-5394bc2e98e2',
'some-host',
retyping=True,
previous_status='in-use')
def test_migrate_volume_completion_migrate_available(self):
self._test_migrate_volume_completion()
def test_migrate_volume_completion_migrate_in_use(self):
self._test_migrate_volume_completion(
'in-use',
'83c969d5-065e-4c9c-907d-5394bc2e98e2',
'some-host',
retyping=False,
previous_status='in-use')
@ddt.data(False, True)
def test_api_migrate_volume_completion_from_swap_with_no_migration(
self, swap_error):
        # This test validates that Cinder properly finishes the swap volume
        # status updates for the case where no migration has occurred.
instance_uuid = '83c969d5-065e-4c9c-907d-5394bc2e98e2'
attached_host = 'attached-host'
orig_attached_vol = tests_utils.create_volume(self.context, size=0)
orig_attached_vol = tests_utils.attach_volume(
self.context, orig_attached_vol['id'], instance_uuid,
attached_host, '/dev/vda')
new_volume = tests_utils.create_volume(self.context, size=0)
@mock.patch.object(volume_rpcapi.VolumeAPI, 'detach_volume')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume')
def _run_migration_completion(rpc_attach_volume,
rpc_detach_volume):
attachment = orig_attached_vol['volume_attachment'][0]
attachment_id = attachment['id']
rpc_attach_volume.side_effect = self.fake_attach_volume
vol_id = volume_api.API().migrate_volume_completion(
self.context, orig_attached_vol, new_volume, swap_error)
if swap_error:
# When swap failed, we don't want to finish attachment
self.assertFalse(rpc_detach_volume.called)
self.assertFalse(rpc_attach_volume.called)
else:
# When no error, we should be finishing the attachment
rpc_detach_volume.assert_called_with(self.context,
orig_attached_vol,
attachment_id)
rpc_attach_volume.assert_called_with(
self.context, new_volume, attachment['instance_uuid'],
attachment['attached_host'], attachment['mountpoint'],
'rw')
self.assertEqual(new_volume['id'], vol_id)
_run_migration_completion()
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
def test_retype_setup_fail_volume_is_available(self, mock_notify):
"""Verify volume is still available if retype prepare failed."""
elevated = context.get_admin_context()
project_id = self.context.project_id
db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}})
old_vol_type = db.volume_type_get_by_name(elevated, 'old')
db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}})
new_vol_type = db.volume_type_get_by_name(elevated, 'new')
db.quota_create(elevated, project_id, 'volumes_new', 0)
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host, status='available',
volume_type_id=old_vol_type['id'])
api = cinder.volume.api.API()
self.assertRaises(exception.VolumeLimitExceeded, api.retype,
self.context, volume, new_vol_type['id'])
volume = db.volume_get(elevated, volume.id)
mock_notify.assert_not_called()
self.assertEqual('available', volume['status'])
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
def _retype_volume_exec(self, driver, mock_notify,
snap=False, policy='on-demand',
migrate_exc=False, exc=None, diff_equal=False,
replica=False, reserve_vol_type_only=False,
encryption_changed=False,
replica_new=None):
elevated = context.get_admin_context()
project_id = self.context.project_id
if replica:
rep_status = 'enabled'
extra_specs = {'replication_enabled': '<is> True'}
else:
rep_status = 'disabled'
extra_specs = {}
if replica_new is None:
replica_new = replica
new_specs = {'replication_enabled': '<is> True'} if replica_new else {}
db.volume_type_create(elevated, {'name': 'old',
'extra_specs': extra_specs})
old_vol_type = db.volume_type_get_by_name(elevated, 'old')
db.volume_type_create(elevated, {'name': 'new',
'extra_specs': new_specs})
vol_type = db.volume_type_get_by_name(elevated, 'new')
db.quota_create(elevated, project_id, 'volumes_new', 10)
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host, status='retyping',
volume_type_id=old_vol_type['id'],
replication_status=rep_status)
volume.previous_status = 'available'
volume.save()
if snap:
create_snapshot(volume.id, size=volume.size,
user_id=self.user_context.user_id,
project_id=self.user_context.project_id,
ctxt=self.user_context)
if driver or diff_equal:
host_obj = {'host': CONF.host, 'capabilities': {}}
else:
host_obj = {'host': 'newhost', 'capabilities': {}}
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(self.context,
reserve_opts,
vol_type['id'])
if reserve_vol_type_only:
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
try:
usage = db.quota_usage_get(elevated, project_id, 'volumes')
total_volumes_in_use = usage.in_use
usage = db.quota_usage_get(elevated, project_id, 'gigabytes')
total_gigabytes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
total_volumes_in_use = 0
total_gigabytes_in_use = 0
reservations = QUOTAS.reserve(self.context,
project_id=project_id,
**reserve_opts)
old_reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(self.context,
old_reserve_opts,
old_vol_type['id'])
old_reservations = QUOTAS.reserve(self.context,
project_id=project_id,
**old_reserve_opts)
with mock.patch.object(self.volume.driver, 'retype') as _retype,\
mock.patch.object(volume_types, 'volume_types_diff') as _diff,\
mock.patch.object(self.volume, 'migrate_volume') as _mig,\
mock.patch.object(db.sqlalchemy.api, 'volume_get') as _vget,\
mock.patch.object(context.RequestContext, 'elevated') as _ctx:
_vget.return_value = volume
_retype.return_value = driver
_ctx.return_value = self.context
returned_diff = {
'encryption': {},
'qos_specs': {},
'extra_specs': {},
}
if replica != replica_new:
returned_diff['extra_specs']['replication_enabled'] = (
extra_specs.get('replication_enabled'),
new_specs.get('replication_enabled'))
expected_replica_status = 'enabled' if replica_new else 'disabled'
if encryption_changed:
returned_diff['encryption'] = 'fake'
_diff.return_value = (returned_diff, diff_equal)
if migrate_exc:
_mig.side_effect = KeyError
else:
_mig.return_value = True
if not exc:
self.volume.retype(self.context, volume,
vol_type['id'], host_obj,
migration_policy=policy,
reservations=reservations,
old_reservations=old_reservations)
else:
self.assertRaises(exc, self.volume.retype,
self.context, volume,
vol_type['id'], host_obj,
migration_policy=policy,
reservations=reservations,
old_reservations=old_reservations)
if host_obj['host'] != CONF.host:
_retype.assert_not_called()
# get volume/quota properties
volume = objects.Volume.get_by_id(elevated, volume.id)
try:
usage = db.quota_usage_get(elevated, project_id, 'volumes_new')
volumes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
volumes_in_use = 0
        # Get new in_use after retype; it should not be changed.
if reserve_vol_type_only:
try:
usage = db.quota_usage_get(elevated, project_id, 'volumes')
new_total_volumes_in_use = usage.in_use
usage = db.quota_usage_get(elevated, project_id, 'gigabytes')
new_total_gigabytes_in_use = usage.in_use
except exception.QuotaUsageNotFound:
new_total_volumes_in_use = 0
new_total_gigabytes_in_use = 0
self.assertEqual(total_volumes_in_use, new_total_volumes_in_use)
self.assertEqual(total_gigabytes_in_use,
new_total_gigabytes_in_use)
# check properties
if driver or diff_equal:
self.assertEqual(vol_type['id'], volume.volume_type_id)
self.assertEqual('available', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(1, volumes_in_use)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.retype'],))
elif not exc:
self.assertEqual(old_vol_type['id'], volume.volume_type_id)
self.assertEqual('retyping', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(1, volumes_in_use)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.retype'],))
else:
self.assertEqual(old_vol_type['id'], volume.volume_type_id)
self.assertEqual('available', volume.status)
self.assertEqual(CONF.host, volume.host)
self.assertEqual(0, volumes_in_use)
mock_notify.assert_not_called()
if encryption_changed:
self.assertTrue(_mig.called)
self.assertEqual(expected_replica_status, volume.replication_status)
def test_retype_volume_driver_success(self):
self._retype_volume_exec(True)
@ddt.data((False, False), (False, True), (True, False), (True, True))
@ddt.unpack
def test_retype_volume_replica(self, replica, replica_new):
self._retype_volume_exec(True, replica=replica,
replica_new=replica_new)
def test_retype_volume_migration_bad_policy(self):
        # Test volume retype that requires migration but is not allowed
self._retype_volume_exec(False, policy='never',
exc=exception.VolumeMigrationFailed)
def test_retype_volume_migration_with_replica(self):
self._retype_volume_exec(False,
replica=True,
exc=exception.InvalidVolume)
def test_retype_volume_migration_with_snaps(self):
self._retype_volume_exec(False, snap=True, exc=exception.InvalidVolume)
def test_retype_volume_migration_failed(self):
self._retype_volume_exec(False, migrate_exc=True, exc=KeyError)
def test_retype_volume_migration_success(self):
self._retype_volume_exec(False, migrate_exc=False, exc=None)
def test_retype_volume_migration_equal_types(self):
self._retype_volume_exec(False, diff_equal=True)
def test_retype_volume_with_type_only(self):
self._retype_volume_exec(True, reserve_vol_type_only=True)
def test_retype_volume_migration_encryption(self):
self._retype_volume_exec(False, encryption_changed=True)
def test_migrate_driver_not_initialized(self):
volume = tests_utils.create_volume(self.context, size=0,
host=CONF.host)
host_obj = {'host': 'newhost', 'capabilities': {}}
self.volume.driver._initialized = False
self.assertRaises(exception.DriverNotInitialized,
self.volume.migrate_volume,
self.context, volume, host_obj, True)
volume = objects.Volume.get_by_id(context.get_admin_context(),
volume.id)
self.assertEqual('error', volume.migration_status)
        # Let's clean up the mess.
self.volume.driver._initialized = True
self.volume.delete_volume(self.context, volume)
def test_delete_source_volume_in_migration(self):
"""Test deleting a source volume that is in migration."""
self._test_delete_volume_in_migration('migrating')
def test_delete_destination_volume_in_migration(self):
"""Test deleting a destination volume that is in migration."""
self._test_delete_volume_in_migration('target:vol-id')
def _test_delete_volume_in_migration(self, migration_status):
"""Test deleting a volume that is in migration."""
volume = tests_utils.create_volume(self.context, host=CONF.host,
migration_status=migration_status)
self.volume.delete_volume(self.context, volume=volume)
# The volume is successfully removed during the volume delete
# and won't exist in the database any more.
self.assertRaises(exception.VolumeNotFound, volume.refresh)
|
|
"""The tests for the hassio component."""
import asyncio
import os
from unittest.mock import patch, Mock
import pytest
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.setup import async_setup_component
from homeassistant.components.hassio import STORAGE_KEY
from homeassistant.components import frontend
from tests.common import mock_coro
MOCK_ENVIRON = {
'HASSIO': '127.0.0.1',
'HASSIO_TOKEN': 'abcdefgh',
}
@pytest.fixture(autouse=True)
def mock_all(aioclient_mock):
"""Mock all setup requests."""
aioclient_mock.post(
"http://127.0.0.1/homeassistant/options", json={'result': 'ok'})
aioclient_mock.get(
"http://127.0.0.1/supervisor/ping", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/supervisor/options", json={'result': 'ok'})
aioclient_mock.get(
"http://127.0.0.1/homeassistant/info", json={
'result': 'ok', 'data': {'last_version': '10.0'}})
aioclient_mock.get(
"http://127.0.0.1/ingress/panels", json={
'result': 'ok', 'data': {'panels': {}}})
@asyncio.coroutine
def test_setup_api_ping(hass, aioclient_mock):
"""Test setup with API ping."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = yield from async_setup_component(hass, 'hassio', {})
assert result
assert aioclient_mock.call_count == 4
assert hass.components.hassio.get_homeassistant_version() == "10.0"
assert hass.components.hassio.is_hassio()
async def test_setup_api_panel(hass, aioclient_mock):
"""Test setup with API ping."""
assert await async_setup_component(hass, 'frontend', {})
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, 'hassio', {})
assert result
panels = hass.data[frontend.DATA_PANELS]
assert panels.get('hassio').to_response() == {
'component_name': 'custom',
'icon': 'hass:home-assistant',
'title': 'Hass.io',
'url_path': 'hassio',
'require_admin': True,
'config': {'_panel_custom': {'embed_iframe': True,
'js_url': '/api/hassio/app/entrypoint.js',
'name': 'hassio-main',
'trust_external': False}},
}
@asyncio.coroutine
def test_setup_api_push_api_data(hass, aioclient_mock):
"""Test setup with API push."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = yield from async_setup_component(hass, 'hassio', {
'http': {
'server_port': 9999
},
'hassio': {}
})
assert result
assert aioclient_mock.call_count == 4
assert not aioclient_mock.mock_calls[1][2]['ssl']
assert aioclient_mock.mock_calls[1][2]['port'] == 9999
assert aioclient_mock.mock_calls[1][2]['watchdog']
@asyncio.coroutine
def test_setup_api_push_api_data_server_host(hass, aioclient_mock):
"""Test setup with API push with active server host."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = yield from async_setup_component(hass, 'hassio', {
'http': {
'server_port': 9999,
'server_host': "127.0.0.1"
},
'hassio': {}
})
assert result
assert aioclient_mock.call_count == 4
assert not aioclient_mock.mock_calls[1][2]['ssl']
assert aioclient_mock.mock_calls[1][2]['port'] == 9999
assert not aioclient_mock.mock_calls[1][2]['watchdog']
async def test_setup_api_push_api_data_default(hass, aioclient_mock,
hass_storage):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, 'hassio', {
'http': {},
'hassio': {}
})
assert result
assert aioclient_mock.call_count == 4
assert not aioclient_mock.mock_calls[1][2]['ssl']
assert aioclient_mock.mock_calls[1][2]['port'] == 8123
refresh_token = aioclient_mock.mock_calls[1][2]['refresh_token']
hassio_user = await hass.auth.async_get_user(
hass_storage[STORAGE_KEY]['data']['hassio_user']
)
assert hassio_user is not None
assert hassio_user.system_generated
assert len(hassio_user.groups) == 1
assert hassio_user.groups[0].id == GROUP_ID_ADMIN
for token in hassio_user.refresh_tokens.values():
if token.token == refresh_token:
break
else:
assert False, 'refresh token not found'
async def test_setup_adds_admin_group_to_user(hass, aioclient_mock,
hass_storage):
"""Test setup with API push default data."""
# Create user without admin
user = await hass.auth.async_create_system_user('Hass.io')
assert not user.is_admin
await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {
'data': {'hassio_user': user.id},
'key': STORAGE_KEY,
'version': 1
}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, 'hassio', {
'http': {},
'hassio': {}
})
assert result
assert user.is_admin
async def test_setup_api_existing_hassio_user(hass, aioclient_mock,
hass_storage):
"""Test setup with API push default data."""
user = await hass.auth.async_create_system_user('Hass.io test')
token = await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {
'version': 1,
'data': {
'hassio_user': user.id
}
}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, 'hassio', {
'http': {},
'hassio': {}
})
assert result
assert aioclient_mock.call_count == 4
assert not aioclient_mock.mock_calls[1][2]['ssl']
assert aioclient_mock.mock_calls[1][2]['port'] == 8123
assert aioclient_mock.mock_calls[1][2]['refresh_token'] == token.token
@asyncio.coroutine
def test_setup_core_push_timezone(hass, aioclient_mock):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = yield from async_setup_component(hass, 'hassio', {
'hassio': {},
'homeassistant': {
'time_zone': 'testzone',
},
})
assert result
assert aioclient_mock.call_count == 5
assert aioclient_mock.mock_calls[2][2]['timezone'] == "testzone"
@asyncio.coroutine
def test_setup_hassio_no_additional_data(hass, aioclient_mock):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON), \
patch.dict(os.environ, {'HASSIO_TOKEN': "123456"}):
result = yield from async_setup_component(hass, 'hassio', {
'hassio': {},
})
assert result
assert aioclient_mock.call_count == 4
assert aioclient_mock.mock_calls[-1][3]['X-Hassio-Key'] == "123456"
@asyncio.coroutine
def test_fail_setup_without_environ_var(hass):
"""Fail setup if no environ variable set."""
with patch.dict(os.environ, {}, clear=True):
result = yield from async_setup_component(hass, 'hassio', {})
assert not result
@asyncio.coroutine
def test_warn_when_cannot_connect(hass, caplog):
"""Fail warn when we cannot connect."""
with patch.dict(os.environ, MOCK_ENVIRON), \
patch('homeassistant.components.hassio.HassIO.is_connected',
Mock(return_value=mock_coro(None))):
result = yield from async_setup_component(hass, 'hassio', {})
assert result
assert hass.components.hassio.is_hassio()
assert "Not connected with Hass.io / system to busy!" in caplog.text
@asyncio.coroutine
def test_service_register(hassio_env, hass):
"""Check if service will be setup."""
assert (yield from async_setup_component(hass, 'hassio', {}))
assert hass.services.has_service('hassio', 'addon_start')
assert hass.services.has_service('hassio', 'addon_stop')
assert hass.services.has_service('hassio', 'addon_restart')
assert hass.services.has_service('hassio', 'addon_stdin')
assert hass.services.has_service('hassio', 'host_shutdown')
    assert hass.services.has_service('hassio', 'host_reboot')
assert hass.services.has_service('hassio', 'snapshot_full')
assert hass.services.has_service('hassio', 'snapshot_partial')
assert hass.services.has_service('hassio', 'restore_full')
assert hass.services.has_service('hassio', 'restore_partial')
@asyncio.coroutine
def test_service_calls(hassio_env, hass, aioclient_mock):
"""Call service and check the API calls behind that."""
assert (yield from async_setup_component(hass, 'hassio', {}))
aioclient_mock.post(
"http://127.0.0.1/addons/test/start", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/addons/test/stop", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/addons/test/restart", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/addons/test/stdin", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/host/shutdown", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/host/reboot", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/snapshots/new/full", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/snapshots/new/partial", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/snapshots/test/restore/full", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/snapshots/test/restore/partial",
json={'result': 'ok'})
yield from hass.services.async_call(
'hassio', 'addon_start', {'addon': 'test'})
yield from hass.services.async_call(
'hassio', 'addon_stop', {'addon': 'test'})
yield from hass.services.async_call(
'hassio', 'addon_restart', {'addon': 'test'})
yield from hass.services.async_call(
'hassio', 'addon_stdin', {'addon': 'test', 'input': 'test'})
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 6
assert aioclient_mock.mock_calls[-1][2] == 'test'
yield from hass.services.async_call('hassio', 'host_shutdown', {})
yield from hass.services.async_call('hassio', 'host_reboot', {})
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 8
yield from hass.services.async_call('hassio', 'snapshot_full', {})
yield from hass.services.async_call('hassio', 'snapshot_partial', {
'addons': ['test'],
'folders': ['ssl'],
'password': "123456",
})
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 10
assert aioclient_mock.mock_calls[-1][2] == {
'addons': ['test'], 'folders': ['ssl'], 'password': "123456"}
yield from hass.services.async_call('hassio', 'restore_full', {
'snapshot': 'test',
})
yield from hass.services.async_call('hassio', 'restore_partial', {
'snapshot': 'test',
'homeassistant': False,
'addons': ['test'],
'folders': ['ssl'],
'password': "123456",
})
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 12
assert aioclient_mock.mock_calls[-1][2] == {
'addons': ['test'], 'folders': ['ssl'], 'homeassistant': False,
'password': "123456"
}
@asyncio.coroutine
def test_service_calls_core(hassio_env, hass, aioclient_mock):
"""Call core service and check the API calls behind that."""
assert (yield from async_setup_component(hass, 'hassio', {}))
aioclient_mock.post(
"http://127.0.0.1/homeassistant/restart", json={'result': 'ok'})
aioclient_mock.post(
"http://127.0.0.1/homeassistant/stop", json={'result': 'ok'})
yield from hass.services.async_call('homeassistant', 'stop')
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 3
yield from hass.services.async_call('homeassistant', 'check_config')
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 3
with patch(
'homeassistant.config.async_check_ha_config_file',
return_value=mock_coro()
) as mock_check_config:
yield from hass.services.async_call('homeassistant', 'restart')
yield from hass.async_block_till_done()
assert mock_check_config.called
assert aioclient_mock.call_count == 4
|
|
# -*- coding: utf-8 -*-
"""
Tests parsers' ability to read and parse non-local files,
which therefore require a network connection to be read.
"""
import logging
import pytest
import numpy as np
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import DataFrame
from pandas.io.parsers import read_csv
from pandas.compat import BytesIO, StringIO
@pytest.mark.network
@pytest.mark.parametrize(
"compress_type, extension", [
('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'),
pytest.param('xz', '.xz', marks=td.skip_if_no_lzma)
]
)
@pytest.mark.parametrize('mode', ['explicit', 'infer'])
@pytest.mark.parametrize('engine', ['python', 'c'])
def test_compressed_urls(salaries_table, compress_type, extension, mode,
engine):
check_compressed_urls(salaries_table, compress_type, extension, mode,
engine)
@tm.network
def check_compressed_urls(salaries_table, compression, extension, mode,
engine):
# test reading compressed urls with various engines and
# extension inference
base_url = ('https://github.com/pandas-dev/pandas/raw/master/'
'pandas/tests/io/parser/data/salaries.csv')
url = base_url + extension
if mode != 'explicit':
compression = mode
url_table = read_csv(url, sep='\t', compression=compression, engine=engine)
tm.assert_frame_equal(url_table, salaries_table)
@pytest.fixture
def tips_df(datapath):
"""DataFrame with the tips dataset."""
return read_csv(datapath('io', 'parser', 'data', 'tips.csv'))
@pytest.mark.usefixtures("s3_resource")
@td.skip_if_not_us_locale()
class TestS3(object):
def test_parse_public_s3_bucket(self, tips_df):
pytest.importorskip('s3fs')
# more of an integration test due to the not-public contents portion
# can probably mock this though.
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
# Read public file from bucket with not-public contents
df = read_csv('s3://cant_get_it/tips.csv')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_parse_public_s3n_bucket(self, tips_df):
# Read from AWS s3 as "s3n" URL
df = read_csv('s3n://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3a_bucket(self, tips_df):
# Read from AWS s3 as "s3a" URL
df = read_csv('s3a://pandas-test/tips.csv', nrows=10)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3_bucket_nrows(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' +
ext, nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_parse_public_s3_bucket_chunked(self, tips_df):
# Read with a chunksize
chunksize = 5
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp)
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them
# properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = tips_df.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_chunked_python(self, tips_df):
# Read with a chunksize using the Python parser
chunksize = 5
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df_reader = read_csv('s3://pandas-test/tips.csv' + ext,
chunksize=chunksize, compression=comp,
engine='python')
assert df_reader.chunksize == chunksize
for i_chunk in [0, 1, 2]:
# Read a couple of chunks and make sure we see them properly.
df = df_reader.get_chunk()
assert isinstance(df, DataFrame)
assert not df.empty
true_df = tips_df.iloc[
chunksize * i_chunk: chunksize * (i_chunk + 1)]
tm.assert_frame_equal(true_df, df)
def test_parse_public_s3_bucket_python(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_infer_s3_compression(self, tips_df):
for ext in ['', '.gz', '.bz2']:
df = read_csv('s3://pandas-test/tips.csv' + ext,
engine='python', compression='infer')
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(df, tips_df)
def test_parse_public_s3_bucket_nrows_python(self, tips_df):
for ext, comp in [('', None), ('.gz', 'gzip'), ('.bz2', 'bz2')]:
df = read_csv('s3://pandas-test/tips.csv' + ext, engine='python',
nrows=10, compression=comp)
assert isinstance(df, DataFrame)
assert not df.empty
tm.assert_frame_equal(tips_df.iloc[:10], df)
def test_s3_fails(self):
with pytest.raises(IOError):
read_csv('s3://nyqpug/asdf.csv')
# Receive a permission error when trying to read a private bucket.
# It's irrelevant here that this isn't actually a table.
with pytest.raises(IOError):
read_csv('s3://cant_get_it/')
def test_read_csv_handles_boto_s3_object(self,
s3_resource,
tips_file):
# see gh-16135
s3_object = s3_resource.meta.client.get_object(
Bucket='pandas-test',
Key='tips.csv')
result = read_csv(BytesIO(s3_object["Body"].read()), encoding='utf8')
assert isinstance(result, DataFrame)
assert not result.empty
expected = read_csv(tips_file)
tm.assert_frame_equal(result, expected)
def test_read_csv_chunked_download(self, s3_resource, caplog):
        # 8 MB; s3fs uses 5 MB chunks
df = DataFrame(np.random.randn(100000, 4), columns=list('abcd'))
buf = BytesIO()
str_buf = StringIO()
df.to_csv(str_buf)
buf = BytesIO(str_buf.getvalue().encode('utf-8'))
s3_resource.Bucket("pandas-test").put_object(
Key="large-file.csv",
Body=buf)
with caplog.at_level(logging.DEBUG, logger='s3fs.core'):
read_csv("s3://pandas-test/large-file.csv", nrows=5)
# log of fetch_range (start, stop)
assert ((0, 5505024) in {x.args[-2:] for x in caplog.records})
|
|
"""
The MIT License (MIT)
Copyright (c) 2014 Melissa Gymrek <mgymrek@mit.edu>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from itertools import chain
import hashlib
import pysam
import pyfaidx
import random
import sys
from .constants import ENDCHAR, GAPCHAR, DELCHAR
from .constants import BAM_CMATCH, BAM_CINS, BAM_CDEL, BAM_CREF_SKIP,\
BAM_CSOFT_CLIP, BAM_CHARD_CLIP, BAM_CPAD, BAM_CEQUAL, BAM_CDIFF
def CheckBam(bamfile):
"""Return True if bamfile can be opened as a BAM file, else False."""
try:
br = pysam.Samfile(bamfile, "rb")
br.close()
return True
except Exception: return False
def GetSamplesFromBamFiles(bamfiles):
""" Return dictionary of sample -> list of bam files """
samplesToBam = {}
for bam in bamfiles:
try:
br = pysam.Samfile(bam, "rb")
except:
sys.stderr.write("ERROR: Could not open %s. Is this a valid bam file?\n"%bam)
continue
for r in br.header.get("RG", []):
ident = r["ID"]
sample = r.get("SM", ident)
if bam not in samplesToBam.get(sample, []):
samplesToBam[sample] = samplesToBam.get(sample, []) + [bam]
return samplesToBam
def GetDefaultLocation(bamfiles):
"""
Return default location to jump to if no location given.
Look at the first read we see and go there.
If no reads aligned, return 'error'
Args:
bamfiles (list): A list with paths to bamfiles
Returns:
position (string): A string with chromosome and position
"""
default_chrom = None
default_pos = None
aligned = False
position = 'error'
for bam in bamfiles:
try:
br = pysam.Samfile(bam, "rb")
except:
sys.stderr.write("ERROR: Could not open %s. Is this a valid bam file?\n"%bam)
continue
# Peek at the first hundred reads
read_count = 0
while not (aligned or read_count > 100):
try:
aligned_read = br.next()
except StopIteration:
break  # no more reads in this file
if not aligned_read.is_unmapped:
default_chrom = br.getrname(aligned_read.tid)
default_pos = str(aligned_read.pos)
aligned = True
position = ':'.join([default_chrom, default_pos])
break
else:
read_count += 1
return position
def HashSample(sample):
"""
Return sample hash
"""
return hashlib.sha256(sample.encode()).hexdigest()
def ParseCigar(cigar, nucs):
"""
Return list of strings, each item corresponding to a single reference position
"""
rep = []
currentpos = 0
wasinsert = False
for c in cigar:
if c[0] in [BAM_CMATCH, BAM_CEQUAL, BAM_CDIFF]: # match (M, X, =)
for i in range(c[1]):
if wasinsert:
rep[-1] = rep[-1] + nucs[currentpos]
else: rep.append(nucs[currentpos])
wasinsert = False
currentpos += 1
elif c[0] == BAM_CINS: # put insertion in next base position (I)
if wasinsert:
rep[-1] = rep[-1] + nucs[currentpos:currentpos+c[1]]
else:
rep.append(nucs[currentpos:currentpos+c[1]])
currentpos = currentpos+c[1]
wasinsert = True
elif c[0] in [BAM_CDEL, BAM_CREF_SKIP]: # deletion (D) or skipped region from the reference (N)
for i in range(c[1]):
if wasinsert:
rep[-1] = rep[-1] + DELCHAR
else: rep.append(DELCHAR)
wasinsert = False
elif c[0] in [BAM_CSOFT_CLIP, BAM_CHARD_CLIP]: # hard clipping or soft clipping
pass # do nothing
elif c[0] == BAM_CPAD: # padding (silent deletion from padded reference) (P)
if wasinsert:
rep[-1] = rep[-1] + DELCHAR*c[1]
else: rep.append(DELCHAR*c[1])
wasinsert = True
else:
sys.stderr.write("ERROR: Invalid CIGAR operation (%s) in read %s \n"%(c[0], read.qname))
return rep
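# Illustrative worked example (not part of the original module): with
# BAM_CMATCH == 0 and BAM_CINS == 1, a 3M2I2M read "ACGTACG" yields one string
# per reference position, the insertion being glued onto the following base:
#
#   ParseCigar([(0, 3), (1, 2), (0, 2)], "ACGTACG")
#   # -> ['A', 'C', 'G', 'TAC', 'G']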
def AddInsertionLocations(all_locs, new_locs):
for item in new_locs:
pos = item[0]
size = item[1]
all_locs[pos] = max(all_locs.get(pos, 0), size)
return all_locs
class AlignmentGrid(object):
"""
Class for storing a grid of alignments
"""
def __init__(self, _bamreaders, _read_groups, _ref, _chrom, _pos, _samples=[], _settings={}):
self.bamreaders = _bamreaders
self.read_groups = _read_groups
self.ref = _ref
# Keep track of shortened chromosome names in case the fasta entry has a longer chrom string
# e.g. "1 dna:chromosome" -> "1"
if self.ref:
self.refkeys = dict([(key.split()[0], key) for key in self.ref.keys()])
else: self.refkeys = {}
self.chrom = _chrom
self.startpos = _pos
self.settings = _settings
self.pos = self.startpos-int(self.settings["LOADCHAR"]*0.5)
if self.pos < 0: self.pos = 0
self.usesamples = _samples
self.samples = list(set(
chain.from_iterable(rg.itervalues() for rg in _read_groups)))
for item in _samples:
if item not in self.samples: sys.stderr.write("WARNING: %s not in BAM\n"%item)
if len(_samples) > 0:
self.samples = [item for item in _samples if item in self.samples]
self.grid_by_sample = dict([(sample, {}) for sample in self.samples])
self.alnkeys_by_sample = dict([(sample, []) for sample in self.samples])
self.LoadGrid()
def GetSamples(self):
"""
Return list of samples
"""
return self.samples
def GetSampleHashes(self):
"""
Return list of sample hashes
"""
return map(HashSample, self.samples)
def LoadGrid(self):
"""
Load grid of alignments with buffer around start pos
"""
# Get reference
if self.ref is None or self.refkeys.get(self.chrom,"") not in self.ref.keys():
reference = ["N"]*self.settings["LOADCHAR"]
else:
refchrom = self.refkeys[self.chrom]
chromlen = len(self.ref[refchrom])
if chromlen <= self.pos:
return
elif chromlen <= self.pos+self.settings["LOADCHAR"]:
reference = self.ref[refchrom][self.pos:]
else: reference = self.ref[refchrom][self.pos:self.pos+self.settings["LOADCHAR"]]
reference = [reference[i] for i in range(len(reference))]
griddict = {"position": range(self.pos, self.pos+len(reference)), "reference": reference}
# Get reads
region=str("%s:%s-%s"%(self.chrom, max(1, int(self.pos)), int(self.pos+self.settings["LOADCHAR"])))
aligned_reads = []
for bi, br in enumerate(self.bamreaders):
try:
reads = list(br.fetch(region=region))
pileup = br.pileup(region=region)
maxcov = 0
for pcol in br.pileup(region=region):
if pcol.n > maxcov: maxcov = pcol.n
if maxcov > self.settings["DOWNSAMPLE"]:
reads = [item for item in reads if random.random() < self.settings["DOWNSAMPLE"]/float(maxcov)]
aligned_reads.extend((bi, read) for read in reads)
except: pass
readindex = 0
read_properties = []
insertion_locations = {}
maxreadlength = 0
for bamindex, read in aligned_reads:
# get reference position
position = read.pos
# get nucleotides
nucs = read.query
# get CIGAR
cigar = read.cigar
# get strand
strand = not read.is_reverse
if not strand: nucs = nucs.lower()
# get sample
rg = self.read_groups[bamindex].get(
dict(read.tags).get("RG",""),"")
if rg not in self.usesamples: continue
# get representation
if cigar is None:
sys.stderr.write("WARNING: read %s has no CIGAR string. It will not be displayed.\n"%read.qname)
continue
read_properties.append({"pos": position,"sample":rg})
rep = ParseCigar(cigar, nucs)
readlen = len(rep)
if readlen > maxreadlength: maxreadlength = readlen
# Fix boundaries
ins_locs = [(i, len(rep[i])) for i in range(len(rep)) if len(rep[i])>1]
if position < self.pos:
rep = rep[self.pos-position:]
ins_locs = [(item[0] - (self.pos-position), item[1]) for item in ins_locs]
else:
rep = [ENDCHAR]*(position-self.pos)+rep
ins_locs = [(item[0]+(position-self.pos), item[1]) for item in ins_locs]
if len(rep) > len(reference):
rep = rep[0:len(reference)]
ins_locs = set([item for item in ins_locs if item[0] >= 0 and item[0] < len(reference)])
insertion_locations = AddInsertionLocations(insertion_locations, ins_locs)
rep = rep + [ENDCHAR]*(len(reference)-len(rep))
griddict["aln%s"%readindex] = rep
readindex += 1
# Fix insertions
alnkeys = [item for item in griddict.keys() if item != "position"]
for i in insertion_locations:
maxchars = insertion_locations[i]
for ak in alnkeys:
if i != 0: prev = griddict[ak][i-1]
else: prev = ENDCHAR
val = griddict[ak][i]
if len(val) < maxchars:
if ENDCHAR in val or prev[-1] == ENDCHAR: c = ENDCHAR
else: c = GAPCHAR
griddict[ak][i] = c*(maxchars-len(val))+val
# Split by sample
for sample in self.samples:
# if self.settings.get("SORT","bypos") == "bypos": # plan on adding other sort methods later
# Get keys in sorted order
alnkeys = [(read_properties[i]["pos"], "aln%s"%i) for i in range(readindex) if read_properties[i]["sample"] == sample]
alnkeys.sort()
alnkeys = [item[1] for item in alnkeys]
# Get columns we need for the grid
sample_dict = dict([(x, griddict[x]) for x in alnkeys+["position","reference"]])
# Read stacking
sample_dict_collapsed = self.CollapseGridByPosition(sample_dict, alnkeys, maxreadlength=maxreadlength)
self.alnkeys_by_sample[sample] = [item for item in alnkeys if item in sample_dict_collapsed.keys()]
self.grid_by_sample[sample] = sample_dict_collapsed
def MergeRows(self, row1, row2, start, end):
""" merge row2 into row1. row2 spans start-end """
return row1[0:start] + row2[start:]
def CollapseGridByPosition(self, griddict, alncols, maxreadlength=10000):
"""
If more than one read can fit on the same line, put it there
"""
cols_to_delete = set()
col_to_ends = {"dummy":float("inf")}
minend = col_to_ends["dummy"]
prevstart = 0
for col in alncols:
track = griddict[col]
start = prevstart
while start<len(track) and (track[start][0] == ENDCHAR or track[start][0] == GAPCHAR):
start = start + 1
if start >= len(track):
start = 0
end = 0
else:
x = [i for i in range(start, min(start+maxreadlength, len(track))) if track[i][0] != ENDCHAR and track[i][0] != GAPCHAR]
end = x[-1]
if start > minend:
# Find the first column we can add it to
for c in alncols:
if col_to_ends.get(c, float("inf")) < start:
mincol = c
break
# Reset that column with merged alignments
griddict[mincol] = self.MergeRows(griddict[mincol], griddict[col], start, end)
# Set that column for deletion and clear it in case we use it later
griddict[col][start:end+1] = [ENDCHAR*len(griddict[col][i]) for i in range(start, end+1)]
cols_to_delete.add(col)
# Reset end
t = griddict[mincol]
col_to_ends[mincol] = end
minend = min(col_to_ends.values())
col_to_ends[col] = 0
# Make sure we're not deleting mincol
cols_to_delete.discard(mincol)
else: col_to_ends[col] = end
if end < minend: minend = end
prevstart = start
for col in cols_to_delete: del griddict[col]
return griddict
def GetReferenceTrack(self, _pos):
"""
Return string for the reference track
"""
if len(self.grid_by_sample.keys()) == 0: return "N"*self.settings["LOADCHAR"]
refseries = self.grid_by_sample.values()[0]["reference"]
reference = ""
for i in range(len(refseries)):
reference = reference + refseries[i]
return reference.upper()
def GetPositions(self, _pos):
positions = []
if len(self.grid_by_sample.keys()) == 0: return range(self.pos, self.pos+self.settings["LOADCHAR"])
refseries = self.grid_by_sample.values()[0]["reference"]
for i in range(len(refseries)):
positions.extend([self.pos+i]*len(refseries[i]))
return positions
def GetAlignmentTrack(self, _pos):
"""
Return list of strings for the alignment track
"""
alignments_by_sample = {}
for sample in self.grid_by_sample:
grid = self.grid_by_sample[sample]
alignments = []
for col in self.alnkeys_by_sample[sample]:
alignments.append("".join(grid[col]))
alignments_by_sample[HashSample(sample)] = alignments
return alignments_by_sample
def __str__(self):
return "[AlignmentGrid: %s:%s]"%(self.chrom, self.pos)
class BamView(object):
"""
Class for storing view of Bam Alignments
"""
def __init__(self, _bamfiles, _reffile):
self.bamfiles = _bamfiles
self.bamreaders = []
for bam in self.bamfiles:
try:
br = pysam.Samfile(bam, "rb")
self.bamreaders.append(br)
except:
sys.stderr.write("ERROR: could not open %s. Is this a valid bam file?\n"%bam)
if _reffile != "":
try:
self.reference = pyfaidx.Fasta(_reffile, as_raw=True)
except:
self.reference = None
else: self.reference = None
self.alignment_grid = None
self.read_groups = self.LoadRGDictionary()
def GetSamples(self):
"""
Get list of samples
"""
return self.alignment_grid.GetSamples()
def GetSampleHashes(self):
"""
Get list of sample hashes
"""
return self.alignment_grid.GetSampleHashes()
def LoadRGDictionary(self):
return [
dict([(r["ID"], r.get("SM", r["ID"])) for r in br.header.get("RG", [])])
for br in self.bamreaders]
def GetPositions(self, start_pos):
"""
Get vector of positions for columns
"""
return self.alignment_grid.GetPositions(start_pos)
def GetIndex(self, coord):
"""
Get index into positions vector for a given coordinate
Return -1 if coord not in range
"""
positions = self.GetPositions(0)
if coord < positions[0] or coord > positions[-1]: return -1
return positions.index(coord)
def LoadAlignmentGrid(self, _chrom, _pos, _samples=[], _settings={}):
"""
Load an alignment grid for a view at a specific chr:pos
"""
self.alignment_grid = AlignmentGrid(self.bamreaders, self.read_groups, self.reference, \
_chrom, _pos, _samples=_samples, _settings=_settings)
def GetReferenceTrack(self, start_pos):
"""
Return string for the reference track
"""
return self.alignment_grid.GetReferenceTrack(start_pos)
def GetAlignmentTrack(self, start_pos):
"""
Return list of strings for the alignment tracks
"""
return self.alignment_grid.GetAlignmentTrack(start_pos)
def __str__(self):
return "[BamView: %s]"%self.bamfile
|
|
"""Helper to deal with YAML + storage."""
from abc import ABC, abstractmethod
import asyncio
import logging
from typing import Any, Awaitable, Callable, Dict, List, Optional, cast
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.components import websocket_api
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import slugify
STORAGE_VERSION = 1
SAVE_DELAY = 10
CHANGE_ADDED = "added"
CHANGE_UPDATED = "updated"
CHANGE_REMOVED = "removed"
ChangeListener = Callable[
[
# Change type
str,
# Item ID
str,
# New or removed config
dict,
],
Awaitable[None],
]
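# Illustrative sketch (not part of the original helper): a coroutine matching the
# ChangeListener signature above; it only logs the change it receives.
async def _example_change_listener(change_type: str, item_id: str, config: dict) -> None:
    """Log every change made to a collection."""
    logging.getLogger(__name__).debug("collection %s %s: %s", change_type, item_id, config)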
class CollectionError(HomeAssistantError):
"""Base class for collection related errors."""
class ItemNotFound(CollectionError):
"""Raised when an item is not found."""
def __init__(self, item_id: str):
"""Initialize item not found error."""
super().__init__(f"Item {item_id} not found.")
self.item_id = item_id
class IDManager:
"""Keep track of IDs across different collections."""
def __init__(self) -> None:
"""Initiate the ID manager."""
self.collections: List[Dict[str, Any]] = []
def add_collection(self, collection: Dict[str, Any]) -> None:
"""Add a collection to check for ID usage."""
self.collections.append(collection)
def has_id(self, item_id: str) -> bool:
"""Test if the ID exists."""
return any(item_id in collection for collection in self.collections)
def generate_id(self, suggestion: str) -> str:
"""Generate an ID."""
base = slugify(suggestion)
proposal = base
attempt = 1
while self.has_id(proposal):
attempt += 1
proposal = f"{base}_{attempt}"
return proposal
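# Illustrative worked example (not part of the original helper): generate_id
# slugifies the suggestion and bumps a numeric suffix until the ID is free, e.g.
#
#   manager = IDManager()
#   manager.add_collection({"living_room": {}})
#   manager.generate_id("Living Room")  # -> "living_room_2"
#   manager.generate_id("Kitchen")      # -> "kitchen"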
class ObservableCollection(ABC):
"""Base collection type that can be observed."""
def __init__(self, logger: logging.Logger, id_manager: Optional[IDManager] = None):
"""Initialize the base collection."""
self.logger = logger
self.id_manager = id_manager or IDManager()
self.data: Dict[str, dict] = {}
self.listeners: List[ChangeListener] = []
self.id_manager.add_collection(self.data)
@callback
def async_items(self) -> List[dict]:
"""Return list of items in collection."""
return list(self.data.values())
@callback
def async_add_listener(self, listener: ChangeListener) -> None:
"""Add a listener.
Will be called with (change_type, item_id, updated_config).
"""
self.listeners.append(listener)
async def notify_change(self, change_type: str, item_id: str, item: dict) -> None:
"""Notify listeners of a change."""
self.logger.debug("%s %s: %s", change_type, item_id, item)
await asyncio.gather(
*[listener(change_type, item_id, item) for listener in self.listeners]
)
class YamlCollection(ObservableCollection):
"""Offer a collection based on static data."""
async def async_load(self, data: List[dict]) -> None:
"""Load the YAML collection. Overrides existing data."""
old_ids = set(self.data)
tasks = []
for item in data:
item_id = item[CONF_ID]
if item_id in old_ids:
old_ids.remove(item_id)
event = CHANGE_UPDATED
elif self.id_manager.has_id(item_id):
self.logger.warning("Duplicate ID '%s' detected, skipping", item_id)
continue
else:
event = CHANGE_ADDED
self.data[item_id] = item
tasks.append(self.notify_change(event, item_id, item))
for item_id in old_ids:
tasks.append(
self.notify_change(CHANGE_REMOVED, item_id, self.data.pop(item_id))
)
if tasks:
await asyncio.gather(*tasks)
class StorageCollection(ObservableCollection):
"""Offer a CRUD interface on top of JSON storage."""
def __init__(
self,
store: Store,
logger: logging.Logger,
id_manager: Optional[IDManager] = None,
):
"""Initialize the storage collection."""
super().__init__(logger, id_manager)
self.store = store
@property
def hass(self) -> HomeAssistant:
"""Home Assistant object."""
return self.store.hass
async def _async_load_data(self) -> Optional[dict]:
"""Load the data."""
return cast(Optional[dict], await self.store.async_load())
async def async_load(self) -> None:
"""Load the storage Manager."""
raw_storage = await self._async_load_data()
if raw_storage is None:
raw_storage = {"items": []}
for item in raw_storage["items"]:
self.data[item[CONF_ID]] = item
await asyncio.gather(
*[
self.notify_change(CHANGE_ADDED, item[CONF_ID], item)
for item in raw_storage["items"]
]
)
@abstractmethod
async def _process_create_data(self, data: dict) -> dict:
"""Validate the config is valid."""
@callback
@abstractmethod
def _get_suggested_id(self, info: dict) -> str:
"""Suggest an ID based on the config."""
@abstractmethod
async def _update_data(self, data: dict, update_data: dict) -> dict:
"""Return a new updated data object."""
async def async_create_item(self, data: dict) -> dict:
"""Create a new item."""
item = await self._process_create_data(data)
item[CONF_ID] = self.id_manager.generate_id(self._get_suggested_id(item))
self.data[item[CONF_ID]] = item
self._async_schedule_save()
await self.notify_change(CHANGE_ADDED, item[CONF_ID], item)
return item
async def async_update_item(self, item_id: str, updates: dict) -> dict:
"""Update item."""
if item_id not in self.data:
raise ItemNotFound(item_id)
if CONF_ID in updates:
raise ValueError("Cannot update ID")
current = self.data[item_id]
updated = await self._update_data(current, updates)
self.data[item_id] = updated
self._async_schedule_save()
await self.notify_change(CHANGE_UPDATED, item_id, updated)
return self.data[item_id]
async def async_delete_item(self, item_id: str) -> None:
"""Delete item."""
if item_id not in self.data:
raise ItemNotFound(item_id)
item = self.data.pop(item_id)
self._async_schedule_save()
await self.notify_change(CHANGE_REMOVED, item_id, item)
@callback
def _async_schedule_save(self) -> None:
"""Schedule saving the area registry."""
self.store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> dict:
"""Return data of area registry to store in a file."""
return {"items": list(self.data.values())}
class IDLessCollection(ObservableCollection):
"""A collection without IDs."""
counter = 0
async def async_load(self, data: List[dict]) -> None:
"""Load the collection. Overrides existing data."""
await asyncio.gather(
*[
self.notify_change(CHANGE_REMOVED, item_id, item)
for item_id, item in list(self.data.items())
]
)
self.data.clear()
for item in data:
self.counter += 1
item_id = f"fakeid-{self.counter}"
self.data[item_id] = item
await asyncio.gather(
*[
self.notify_change(CHANGE_ADDED, item_id, item)
for item_id, item in self.data.items()
]
)
@callback
def attach_entity_component_collection(
entity_component: EntityComponent,
collection: ObservableCollection,
create_entity: Callable[[dict], Entity],
) -> None:
"""Map a collection to an entity component."""
entities = {}
async def _collection_changed(change_type: str, item_id: str, config: dict) -> None:
"""Handle a collection change."""
if change_type == CHANGE_ADDED:
entity = create_entity(config)
await entity_component.async_add_entities([entity])
entities[item_id] = entity
return
if change_type == CHANGE_REMOVED:
entity = entities.pop(item_id)
await entity.async_remove()
return
# CHANGE_UPDATED
await entities[item_id].async_update_config(config) # type: ignore
collection.async_add_listener(_collection_changed)
@callback
def attach_entity_registry_cleaner(
hass: HomeAssistantType,
domain: str,
platform: str,
collection: ObservableCollection,
) -> None:
"""Attach a listener to clean up entity registry on collection changes."""
async def _collection_changed(change_type: str, item_id: str, config: Dict) -> None:
"""Handle a collection change: clean up entity registry on removals."""
if change_type != CHANGE_REMOVED:
return
ent_reg = await entity_registry.async_get_registry(hass)
ent_to_remove = ent_reg.async_get_entity_id(domain, platform, item_id)
if ent_to_remove is not None:
ent_reg.async_remove(ent_to_remove)
collection.async_add_listener(_collection_changed)
class StorageCollectionWebsocket:
"""Class to expose storage collection management over websocket."""
def __init__(
self,
storage_collection: StorageCollection,
api_prefix: str,
model_name: str,
create_schema: dict,
update_schema: dict,
):
"""Initialize a websocket CRUD."""
self.storage_collection = storage_collection
self.api_prefix = api_prefix
self.model_name = model_name
self.create_schema = create_schema
self.update_schema = update_schema
assert self.api_prefix[-1] != "/", "API prefix should not end in /"
@property
def item_id_key(self) -> str:
"""Return item ID key."""
return f"{self.model_name}_id"
@callback
def async_setup(self, hass: HomeAssistant, *, create_list: bool = True) -> None:
"""Set up the websocket commands."""
if create_list:
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/list",
self.ws_list_item,
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): f"{self.api_prefix}/list"}
),
)
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/create",
websocket_api.require_admin(
websocket_api.async_response(self.ws_create_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
**self.create_schema,
vol.Required("type"): f"{self.api_prefix}/create",
}
),
)
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/update",
websocket_api.require_admin(
websocket_api.async_response(self.ws_update_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
**self.update_schema,
vol.Required("type"): f"{self.api_prefix}/update",
vol.Required(self.item_id_key): str,
}
),
)
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/delete",
websocket_api.require_admin(
websocket_api.async_response(self.ws_delete_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): f"{self.api_prefix}/delete",
vol.Required(self.item_id_key): str,
}
),
)
def ws_list_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""List items."""
connection.send_result(msg["id"], self.storage_collection.async_items())
async def ws_create_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Create a item."""
try:
data = dict(msg)
data.pop("id")
data.pop("type")
item = await self.storage_collection.async_create_item(data)
connection.send_result(msg["id"], item)
except vol.Invalid as err:
connection.send_error(
msg["id"],
websocket_api.const.ERR_INVALID_FORMAT,
humanize_error(data, err),
)
except ValueError as err:
connection.send_error(
msg["id"], websocket_api.const.ERR_INVALID_FORMAT, str(err)
)
async def ws_update_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Update a item."""
data = dict(msg)
msg_id = data.pop("id")
item_id = data.pop(self.item_id_key)
data.pop("type")
try:
item = await self.storage_collection.async_update_item(item_id, data)
connection.send_result(msg_id, item)
except ItemNotFound:
connection.send_error(
msg["id"],
websocket_api.const.ERR_NOT_FOUND,
f"Unable to find {self.item_id_key} {item_id}",
)
except vol.Invalid as err:
connection.send_error(
msg["id"],
websocket_api.const.ERR_INVALID_FORMAT,
humanize_error(data, err),
)
except ValueError as err:
connection.send_error(
msg_id, websocket_api.const.ERR_INVALID_FORMAT, str(err)
)
async def ws_delete_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Delete a item."""
try:
await self.storage_collection.async_delete_item(msg[self.item_id_key])
except ItemNotFound:
connection.send_error(
msg["id"],
websocket_api.const.ERR_NOT_FOUND,
f"Unable to find {self.item_id_key} {msg[self.item_id_key]}",
)
return
connection.send_result(msg["id"])
|
|
from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_crush_rule
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_name = 'foo'
fake_bucket_root = 'default'
fake_bucket_type = 'host'
fake_device_class = 'ssd'
fake_profile = 'default'
fake_user = 'client.admin'
fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)
class TestCephCrushRuleModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_without_parameters(self, m_fail_json):
ca_test_common.set_module_args({})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert result['msg'] == 'missing required arguments: name'
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_with_name_only(self, m_fail_json):
ca_test_common.set_module_args({
'name': fake_name
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert result['msg'] == 'state is present but all of the following are missing: rule_type'
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'replicated',
'bucket_root': fake_bucket_root,
'bucket_type': fake_bucket_type,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert not result['changed']
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_create_non_existing_replicated_rule(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'replicated',
'bucket_root': fake_bucket_root,
'bucket_type': fake_bucket_type
})
m_exit_json.side_effect = ca_test_common.exit_json
get_rc = 2
get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
get_stdout = ''
create_rc = 0
create_stderr = ''
create_stdout = ''
m_run_command.side_effect = [
(get_rc, get_stdout, get_stderr),
(create_rc, create_stdout, create_stderr)
]
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'create-replicated', fake_name, fake_bucket_root, fake_bucket_type]
assert result['rc'] == create_rc
assert result['stderr'] == create_stderr
assert result['stdout'] == create_stdout
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_create_existing_replicated_rule(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'replicated',
'bucket_root': fake_bucket_root,
'bucket_type': fake_bucket_type
})
m_exit_json.side_effect = ca_test_common.exit_json
rc = 0
stderr = ''
stdout = '{{"rule_name":"{}","type":1,"steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'dump', fake_name, '--format=json']
assert result['rc'] == 0
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_create_non_existing_replicated_rule_device_class(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'replicated',
'bucket_root': fake_bucket_root,
'bucket_type': fake_bucket_type,
'device_class': fake_device_class
})
m_exit_json.side_effect = ca_test_common.exit_json
get_rc = 2
get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
get_stdout = ''
create_rc = 0
create_stderr = ''
create_stdout = ''
m_run_command.side_effect = [
(get_rc, get_stdout, get_stderr),
(create_rc, create_stdout, create_stderr)
]
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'create-replicated', fake_name, fake_bucket_root, fake_bucket_type, fake_device_class]
assert result['rc'] == create_rc
assert result['stderr'] == create_stderr
assert result['stdout'] == create_stdout
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_create_existing_replicated_rule_device_class(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'replicated',
'bucket_root': fake_bucket_root,
'bucket_type': fake_bucket_type,
'device_class': fake_device_class
})
m_exit_json.side_effect = ca_test_common.exit_json
rc = 0
stderr = ''
stdout = '{{"rule_name":"{}","type":1,"steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'dump', fake_name, '--format=json']
assert result['rc'] == 0
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_create_non_existing_erasure_rule(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'erasure',
'profile': fake_profile
})
m_exit_json.side_effect = ca_test_common.exit_json
get_rc = 2
get_stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
get_stdout = ''
create_rc = 0
create_stderr = ''
create_stdout = 'created rule {} at 1'.format(fake_name)
m_run_command.side_effect = [
(get_rc, get_stdout, get_stderr),
(create_rc, create_stdout, create_stderr)
]
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'create-erasure', fake_name, fake_profile]
assert result['rc'] == create_rc
assert result['stderr'] == create_stderr
assert result['stdout'] == create_stdout
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_create_existing_erasure_rule(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'erasure',
'profile': fake_profile
})
m_exit_json.side_effect = ca_test_common.exit_json
rc = 0
stderr = ''
stdout = '{{"type":3,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'dump', fake_name, '--format=json']
assert result['rc'] == 0
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_update_existing_replicated_rule(self, m_run_command, m_fail_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'replicated',
'bucket_root': fake_bucket_root,
'bucket_type': fake_bucket_type,
'device_class': fake_device_class
})
m_fail_json.side_effect = ca_test_common.fail_json
rc = 0
stderr = ''
stdout = '{{"type":3,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
print(result)
assert not result['changed']
assert result['msg'] == 'Can not convert crush rule {} to replicated'.format(fake_name)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_update_existing_erasure_rule(self, m_run_command, m_fail_json):
ca_test_common.set_module_args({
'name': fake_name,
'rule_type': 'erasure',
'profile': fake_profile
})
m_fail_json.side_effect = ca_test_common.fail_json
rc = 0
stderr = ''
stdout = '{{"type":1,"rule_name":"{}","steps":[{{"item_name":"default"}},{{"type":"host"}}]}}'.format(fake_name)
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
print(result)
assert not result['changed']
assert result['msg'] == 'Can not convert crush rule {} to erasure'.format(fake_name)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_remove_non_existing_rule(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'state': 'absent'
})
m_exit_json.side_effect = ca_test_common.exit_json
rc = 2
stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
stdout = ''
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'dump', fake_name, '--format=json']
assert result['rc'] == 0
assert result['stderr'] == stderr
assert result['stdout'] == "Crush Rule {} doesn't exist".format(fake_name)
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_remove_existing_rule(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'state': 'absent'
})
m_exit_json.side_effect = ca_test_common.exit_json
get_rc = 0
get_stderr = ''
get_stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
remove_rc = 0
remove_stderr = ''
remove_stdout = ''
m_run_command.side_effect = [
(get_rc, get_stdout, get_stderr),
(remove_rc, remove_stdout, remove_stderr)
]
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'rm', fake_name]
assert result['rc'] == remove_rc
assert result['stderr'] == remove_stderr
assert result['stdout'] == remove_stdout
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_get_non_existing_rule(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'state': 'info'
})
m_exit_json.side_effect = ca_test_common.exit_json
rc = 2
stderr = 'Error ENOENT: unknown crush rule \'{}\''.format(fake_name)
stdout = ''
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'dump', fake_name, '--format=json']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_get_existing_rule(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'state': 'info'
})
m_exit_json.side_effect = ca_test_common.exit_json
rc = 0
stderr = ''
stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush', 'rule',
'dump', fake_name, '--format=json']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_container(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_name,
'state': 'info'
})
m_exit_json.side_effect = ca_test_common.exit_json
rc = 0
stderr = ''
stdout = '{{"rule_name":"{}","steps":[{{"item_name":"{}"}},{{"type":"{}"}}]}}'.format(fake_name, fake_bucket_root, fake_bucket_type)
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_crush_rule.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph', fake_container_image,
'-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'crush',
'rule', 'dump', fake_name, '--format=json']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
|
|
"""
@package mi.dataset.driver.FLORT_KN.STC_IMODEM.test.test_driver
@file marine-integrations/mi/dataset/driver/FLORT_KN/STC_IMODEM/driver.py
@author Emily Hahn
@brief Test cases for FLORT_KN__STC_IMODEM driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
from mi.idk.exceptions import SampleTimeout, IDKConfigMissing, IDKException
import os
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.dataset_driver import DriverParameter
from mi.dataset.driver.FLORT_KN.STC_IMODEM.driver import \
FLORT_KN_STC_IMODEM_DataSetDriver
from mi.dataset.parser.flort_kn__stc_imodem import \
Flort_kn_stc_imodemParserDataParticleTelemetered, \
Flort_kn_stc_imodemParserDataParticleRecovered, \
DataParticleType
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
DIR_FLORT_KN_LIVE = '/tmp/flort/dsatest1'
DIR_FLORT_KN_INSTRUMENT_RECOVERED = '/tmp/flort/dsatest2'
# Fill in driver details
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.FLORT_KN.STC_IMODEM.driver',
driver_class='FLORT_KN_STC_IMODEM_DataSetDriver',
agent_resource_id = '123xyz',
agent_name = 'Agent007',
agent_packet_config = FLORT_KN_STC_IMODEM_DataSetDriver.stream_config(),
startup_config = {
DataSourceConfigKey.RESOURCE_ID: 'flort_kn_stc_imodem',
DataSourceConfigKey.HARVESTER:
{
DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED:
{
DataSetDriverConfigKeys.DIRECTORY: DIR_FLORT_KN_LIVE,
DataSetDriverConfigKeys.PATTERN: 'E*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED:
{
DataSetDriverConfigKeys.DIRECTORY: DIR_FLORT_KN_INSTRUMENT_RECOVERED,
DataSetDriverConfigKeys.PATTERN: 'E*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
}
},
DataSourceConfigKey.PARSER: {
DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED: {},
DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED: {}
}
}
)
###############################################################################
# INTEGRATION TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
def test_get(self):
"""
Test that we can get data from files. Verify that the driver
sampling can be started and stopped
"""
self.clear_sample_data()
# Start sampling and watch for an exception
self.driver.start_sampling()
#self.clear_async_data()
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_LIVE, "E0000001.DAT")
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000001.DAT")
self.assert_data(Flort_kn_stc_imodemParserDataParticleTelemetered, 'first.result.yml', count=1, timeout=10)
self.assert_data(Flort_kn_stc_imodemParserDataParticleRecovered, 'firstRecovered.result.yml', count=1, timeout=10)
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_LIVE, "E0000002.DAT")
self.assert_data(Flort_kn_stc_imodemParserDataParticleTelemetered, 'second.result.yml', count=4, timeout=10)
self.create_sample_data_set_dir('E0000303.DAT', DIR_FLORT_KN_LIVE, "E0000303.DAT")
# start is the same particle here, just use the same results
self.assert_data( Flort_kn_stc_imodemParserDataParticleTelemetered, count=32, timeout=10)
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000002.DAT")
self.assert_data(Flort_kn_stc_imodemParserDataParticleRecovered, 'secondRecovered.result.yml', count=4, timeout=10)
self.create_sample_data_set_dir('E0000303.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000303.DAT")
self.assert_data(Flort_kn_stc_imodemParserDataParticleRecovered, count=32, timeout=10)
def test_stop_resume(self):
"""
Test the ability to stop and restart the process
"""
path_1 = self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_LIVE, "E0000001.DAT")
path_2 = self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_LIVE, "E0000002.DAT")
path_3 = self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000003.DAT")
path_4 = self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000004.DAT")
# Create and store the new driver state
state = {DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED:
{'E0000001.DAT': self.get_file_state(path_1, True, 50),
'E0000002.DAT': self.get_file_state(path_2, False, 76)},
DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED:
{'E0000003.DAT': self.get_file_state(path_3, True, 50),
'E0000004.DAT': self.get_file_state(path_4, False, 76)},
}
self.driver = self._get_driver_object(memento=state)
self.clear_async_data()
self.driver.start_sampling()
self.assert_data( Flort_kn_stc_imodemParserDataParticleTelemetered, 'partial_second.result.yml', count=2, timeout=10)
self.assert_data(Flort_kn_stc_imodemParserDataParticleRecovered, 'partial_second_recovered.result.yml',
count=2, timeout=10)
def test_stop_start_ingest(self):
"""
Test the ability to stop and restart sampling, and that files are ingested in the correct order
"""
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_LIVE, "E0000002.DAT")
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_LIVE, "E0000001.DAT")
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000004.DAT")
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000003.DAT")
self.assert_data(Flort_kn_stc_imodemParserDataParticleTelemetered, 'first.result.yml', count=1, timeout=10)
self.assert_data(Flort_kn_stc_imodemParserDataParticleRecovered, 'firstRecovered.result.yml',
count=1, timeout=10)
self.assert_file_ingested("E0000001.DAT", DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED)
self.assert_file_not_ingested("E0000002.DAT")
self.assert_file_ingested("E0000003.DAT", DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED)
self.assert_file_not_ingested("E0000004.DAT")
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data(Flort_kn_stc_imodemParserDataParticleTelemetered, 'second.result.yml', count=4, timeout=10)
self.assert_data(Flort_kn_stc_imodemParserDataParticleRecovered, 'secondRecovered.result.yml',
count=4, timeout=10)
self.assert_file_ingested("E0000002.DAT", DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED)
self.assert_file_ingested("E0000004.DAT", DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED)
def test_get_any_order(self):
"""
Test that we can get data from files for all harvesters / parsers.
"""
log.info("=========== START INTEG TEST GET ANY ORDER ================")
# Start sampling.
self.clear_sample_data()
self.driver.start_sampling()
self.clear_async_data()
self.create_sample_data_set_dir(
'second.DAT', DIR_FLORT_KN_LIVE, 'E0000002.DAT')
self.create_sample_data_set_dir(
'first.DAT', DIR_FLORT_KN_LIVE, 'E0000001.DAT')
self.create_sample_data_set_dir(
'second.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, 'E0000004.DAT')
self.create_sample_data_set_dir(
'first.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, 'E0000003.DAT')
# get the first particle from the live directory
self.assert_data( Flort_kn_stc_imodemParserDataParticleTelemetered, 'first.result.yml',
count=1, timeout=10)
# get the first particle from the recovered directory
self.assert_data(Flort_kn_stc_imodemParserDataParticleRecovered, 'firstRecovered.result.yml',
count=1, timeout=10)
# get the next 4 particles from the live directory
self.assert_data( Flort_kn_stc_imodemParserDataParticleTelemetered, 'second.result.yml',
count=4, timeout=10)
# get the next 4 particles from the recovered directory
self.assert_data(Flort_kn_stc_imodemParserDataParticleRecovered, 'secondRecovered.result.yml',
count=4, timeout=10)
def test_sample_exception(self):
"""
test that a file is marked as parsed if it has a sample exception (which will happen with an empty file)
"""
self.clear_async_data()
config = self._driver_config()['startup_config']['harvester'][DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED]['pattern']
filename = config.replace("*", "foo")
self.create_sample_data_set_dir(filename, DIR_FLORT_KN_LIVE)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(filename, DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED)
config2 = self._driver_config()['startup_config']['harvester'][DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED]['pattern']
filename2 = config2.replace("*", "foo")
self.create_sample_data_set_dir(filename2, DIR_FLORT_KN_INSTRUMENT_RECOVERED)
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
def setUp(self):
super(QualificationTest, self).setUp()
def create_data_dir(self):
"""
Verify the test data directories are created and exist.
@return: list of paths to the data directories
@raise: IDKConfigMissing no harvester config
@raise: IDKException if data_dir exists, but not a directory
"""
startup_config = self._driver_config().get('startup_config')
if not startup_config:
raise IDKConfigMissing("Driver config missing 'startup_config'")
harvester_config = startup_config.get('harvester')
if not harvester_config:
raise IDKConfigMissing("Startup config missing 'harvester' config")
data_dir = []
for key in harvester_config:
data_dir_key = harvester_config[key].get("directory")
if not data_dir_key:
raise IDKConfigMissing("Harvester config missing 'directory'")
if not os.path.exists(data_dir_key):
log.debug("Creating data dir: %s", data_dir_key)
os.makedirs(data_dir_key)
elif not os.path.isdir(data_dir_key):
raise IDKException("%s is not a directory" % data_dir_key)
data_dir.append(data_dir_key)
return data_dir
def test_publish_path(self):
"""
Setup an agent/driver/harvester/parser and verify that data is
published out the agent
"""
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_LIVE, 'E0000001.DAT')
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, 'E0000002.DAT')
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# NOTE: If the processing is not slowed down here, the engineering samples are
# returned in the wrong order
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
#self.assert_initialize(
try:
resulta = self.data_subscribers.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 4)
resultb = self.data_subscribers.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 4)
log.debug("RESULT: %s", resulta)
log.debug("RESULT: %s", resultb)
# Verify values
self.assert_data_values(resulta, 'second.result.yml')
self.assert_data_values(resultb, 'secondRecovered.result.yml')
except Exception as e:
log.error("Exception trapped: %s", e)
self.fail("Sample timeout.")
def test_large_import(self):
"""
Test importing a large number of samples from the file at once
"""
self.create_sample_data_set_dir('E0000303.DAT', DIR_FLORT_KN_LIVE)
self.create_sample_data_set_dir('E0000427.DAT', DIR_FLORT_KN_LIVE)
self.create_sample_data_set_dir('E0000303.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED)
self.create_sample_data_set_dir('E0000427.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED)
self.assert_initialize()
# get results for each of the data particle streams
self.data_subscribers.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED,64,40)
self.data_subscribers.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED,64,40)
def test_status_in_middle(self):
"""
This file has status particles in the middle and at the end
"""
self.create_sample_data_set_dir('E0000039.DAT', DIR_FLORT_KN_LIVE)
self.create_sample_data_set_dir('E0000039.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED)
self.assert_initialize()
# get results for each of the data particle streams
self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 53, 40)
self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 53, 40)
def test_stop_start(self):
"""
Test the agent's ability to start data flowing, stop, then restart
at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_LIVE, "E0000001.DAT")
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000001.DAT")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
# Verify we get one sample
try:
# Read the first file and verify the data
resulta = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 1, 5)
resultb = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 1, 5)
log.debug("RESULT: %s", resulta)
log.debug("RESULT: %s", resultb)
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 0)
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_LIVE, "E0000002.DAT")
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000002.DAT")
# Now read the first two records of the second file then stop
resulta = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 2)
resultb = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 2)
log.debug("got result 1 %s", resulta)
log.debug("got result 1 %s", resultb)
self.assert_stop_sampling()
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 0)
self.assert_start_sampling()
result2a = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 2)
result2b = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 2)
log.debug("got result 2 %s", result2a)
resulta.extend(result2a)
resultb.extend(result2b)
self.assert_data_values(resulta, 'second.result.yml')
self.assert_data_values(resultb, 'secondRecovered.result.yml')
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 0)
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_shutdown_restart(self):
"""
Test the agent's ability to start data flowing, shut down and restart
the agent, then continue at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_LIVE, "E0000001.DAT")
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000001.DAT")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
try:
# Read the first file and verify the data
resulta = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED)
resultb = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED)
log.debug("RESULT: %s", resulta)
log.debug("RESULT: %s", resultb)
# Verify values
self.assert_data_values(resulta, 'first.result.yml')
self.assert_data_values(resultb, 'firstRecovered.result.yml')
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 0)
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_LIVE, "E0000002.DAT")
self.create_sample_data_set_dir('second.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, "E0000002.DAT")
# Now read the first two records of the second file then stop
resulta = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 2)
resultb = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 2)
log.debug("RESULT: %s", resulta)
log.debug("RESULT: %s", resultb)
self.assert_stop_sampling()
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 0)
# stop the agent
self.stop_dataset_agent_client()
# re-start the agent
self.init_dataset_agent_client()
#re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Restart sampling and ensure we get the last 2 records of the file
self.assert_start_sampling()
result2a = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 2)
result2b = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 2)
log.debug("got result 2 %s", result2a)
resulta.extend(result2a)
log.debug("got result 2 %s", result2b)
resultb.extend(result2b)
self.assert_data_values(resulta, 'second.result.yml')
self.assert_data_values(resultb, 'secondRecovered.result.yml')
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 0)
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_parser_exception(self):
"""
        Test that an exception is raised after the driver is started during
record parsing.
"""
self.clear_sample_data()
self.create_sample_data_set_dir('bad.DAT', DIR_FLORT_KN_LIVE, 'E0000001.DAT')
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_LIVE, 'E0000002.DAT')
self.create_sample_data_set_dir('bad.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, 'E0000001.DAT')
self.create_sample_data_set_dir('first.DAT', DIR_FLORT_KN_INSTRUMENT_RECOVERED, 'E0000002.DAT')
self.assert_initialize()
self.event_subscribers.clear_events()
resulta = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 1)
resultb = self.get_samples(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 1)
self.assert_data_values(resulta, 'first.result.yml')
self.assert_data_values(resultb, 'firstRecovered.result.yml')
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_TELEMETERED, 0)
self.assert_sample_queue_size(DataParticleType.FLORT_KN_INSTRUMENT_RECOVERED, 0)
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 10)
self.assert_state_change(ResourceAgentState.STREAMING, 10)
|
|
# ===============================================================================
# Copyright 2018 Stephen Cox
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import struct
import time
from threading import Thread
from pychron.hardware.core.core_device import CoreDevice
try:
import LabJackPython
import u3
except ImportError:
print("Error loading LabJackPython driver")
class LamontFurnaceControl(CoreDevice):
_device = None
scl_pin = None
sda_pin = None
a_slope = None
a_offset = None
b_slope = None
b_offset = None
def __init__(self, *args, **kw):
super(LamontFurnaceControl, self).__init__(*args, **kw)
self.tc1_pin = 0 # 0 for AIN0, 2 for AIN2, etc
self.tc2_pin = 1
self.dac_pin = 2 # 0 for FIO4/5, 2 for FIO6/7 for steppers
self.furnace = 1 # defaults to furnace output 1
def to_double(self, buf):
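        # Convert 8 little-endian calibration bytes from the LJTick-DAC into a float:
        # the first 4 bytes are the unsigned 32-bit fractional part and the next 4 the
        # signed 32-bit integer part of a 64-bit fixed-point value.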
right, left = struct.unpack("<Ii", struct.pack("B" * 8, *buf[0:8]))
return float(left) + float(right) / (2 ** 32)
def return_sn(self):
return self._device.serialNumber
def load(self, *args, **kw):
return True
def open(self, *args, **kw):
try:
self._device = u3.U3()
except BaseException:
self.warning("failed to create U3 device")
return
return True
def initialize(self, *args, **kw):
self.scl_pin = self.dac_pin + 4
self.sda_pin = self.scl_pin + 1
self._device.getFeedback(
u3.BitStateWrite(4, 0)
) # write both sleep lines low to prevent stepper from moving on load
self._device.getFeedback(
u3.BitStateWrite(5, 0)
) # write both sleep lines low to prevent stepper from moving on load
self._device.configIO(
FIOAnalog=15, NumberOfTimersEnabled=2, TimerCounterPinOffset=8
)
self._device.configTimerClock(
TimerClockBase=3, TimerClockDivisor=50
) # 3 = 1 Mhz; 50 ==> 1/50 = 20 kHz
self._device.getFeedback(
u3.Timer0Config(TimerMode=7, Value=100)
        ) # FreqOut mode; Value 100 gives (20 kHz)/(2*100) = 100 Hz
self._device.getFeedback(
u3.Timer1Config(TimerMode=7, Value=100)
        ) # FreqOut mode; Value 100 gives (20 kHz)/(2*100) = 100 Hz
print("device SN is ", self._device.serialNumber)
data = self._i2c(0x50, [64], NumI2CBytesToReceive=36)
response = data["I2CBytes"]
print(response[0:8])
self.a_slope = self.to_double(response[0:8])
self.a_offset = self.to_double(response[8:16])
self.b_slope = self.to_double(response[16:24])
self.b_offset = self.to_double(response[24:32])
self.test_connection()
def test_connection(self):
sn = self.return_sn()
if 256 <= sn <= 2147483647:
self.info("Labjack loaded")
ret = True
err = ""
else:
self.warning("Invalid Labjack serial number: check Labjack connection")
ret = False
err = "Invalid Labjack serial number: check Labjack connection"
return ret, err
def read_analog_in(self, pin):
v = self._device.getAIN(pin)
return v
def readTC(self, number):
temp = (
self.read_analog_in(number - 1) / 0.00004
) # replace with actual TC table obviously
return temp
def extract(self, value, units=None, furnace=1):
self.furnace = furnace
print(units)
        if units not in ("volts", "temperature"):
            units = "percent"
self.info("set furnace output to {} {}".format(value, units))
if units == "percent":
value = value / 10
if value < 0:
self.warning(
"Consider changing you calibration curve. "
"{} percent converted to {}volts. Voltage must be positive".format(
value * 10, value
)
)
value = 0
elif units == "volts":
if value > 10:
self.warning(
"Did you mean to use percent units? "
"{} volts will set furnace to {}% output power.".format(
value, value * 10
)
)
value = 0
elif units == "temperature":
minvalue = 100
if value < minvalue:
self.warning(
"Did you mean to use power control? "
"{} degrees is too low for the furnace. Set to at least {} degrees.".format(
value, minvalue
)
)
value = 0
self.warning("Temperature control not implemented")
# Some PID control will be added later
self.set_furnace_setpoint(value)
def set_furnace_setpoint(self, value, furnace=1):
# this function can be called separately from extract if another script is performing the units logic
self.furnace = furnace
if self.furnace == 1:
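            # 48 selects DACA on the LJTick-DAC (49 below selects DACB);
            # the DAC itself answers at I2C address 0x12.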
v = self._map_voltage(48, value, self.a_slope, self.a_offset)
self._i2c(0x12, v)
elif self.furnace == 2:
v = self._map_voltage(49, value, self.b_slope, self.b_offset)
self._i2c(0x12, v)
else:
self.warning("Invalid furnace number. Only outputs 1 and 2 available.")
def _i2c(self, address, value, **kw):
return self._device.i2c(
address, value, SDAPinNum=self.sda_pin, SCLPinNum=self.scl_pin, **kw
)
def _map_voltage(self, tag, value, slope, offset):
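        # Scale the requested voltage to a 16-bit DAC code with the stored calibration
        # and split it into the high/low bytes that follow the register byte.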
m = value * slope + offset
a = int(m / 256)
b = int(m % 256)
return [tag, a, b]
# def _unmap_voltage(self, tag, a, b, slope, offset):
# m = a*256 + b
# value = (m - offset)/slope
# return [tag, value]
def drop_ball(self, position):
def func():
self.goto_ball(position)
time.sleep(5)
self.returnfrom_ball(position)
t = Thread(target=func)
t.start()
def goto_ball(self, position):
positions = [
[1, 5],
[1, 10],
[1, 15],
[1, 20],
[1, 25],
[1, 30],
[1, 200],
[1, 230],
[1, 260],
[1, 290],
[1, 320],
[1, 350],
]
stepper_number, runtime = positions[position - 1]
self.info(
"Going to position {}; running for {} seconds".format(position, runtime)
)
if position == 0: # position command zero does nothing
runtime = 0
if stepper_number == 1:
a, b = 5, 4
elif stepper_number == 2:
a, b = 4, 5
self._run_stepper(runtime, "forward", a, b)
time.sleep(runtime)
def returnfrom_ball(self, position):
positions = [
[1, 5],
[1, 10],
[1, 15],
[1, 20],
[1, 25],
[1, 30],
[1, 200],
[1, 230],
[1, 260],
[1, 290],
[1, 320],
[1, 350],
]
stepper_number, runtime = positions[position - 1]
if position == 0: # position command zero returns all the way
runtime = max([t for motor, t in positions])
if stepper_number == 1:
a, b = 5, 4
elif stepper_number == 2:
a, b = 4, 5
self._run_stepper(runtime + 3, "backward", a, b)
time.sleep(runtime)
def get_process_value(self):
# note it is not possible to read the current setting for the LJTick-DAC, so we must measure voltage
if self.furnace == 1:
pv = self.read_analog_in(
2
) # assumes LJTick-DAC first channel is wired to AIN 2
elif self.furnace == 2:
pv = self.read_analog_in(
3
) # assumes LJTick-DAC first channel is wired to AIN 3
else:
self.warning("Invalid furnace number. Only outputs 1 and 2 available.")
return pv
def get_summary(self):
summary = {
"time": time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()),
"OP1": self.read_analog_in(2),
"TC1": self.readTC(1),
"OP2": self.read_analog_in(3),
"TC2": self.readTC(2),
}
return summary
def _run_stepper(self, runtime, direction, a_id, b_id):
def func():
dev = self._device
if direction == "forward":
dev.getFeedback(u3.BitStateWrite(a_id, 0))
else:
dev.getFeedback(u3.BitStateWrite(a_id, 1))
# st = time.time()
dev.getFeedback(u3.BitStateWrite(b_id, 1))
time.sleep(runtime)
self.info("adjk;fdsajf ", runtime)
# while time.time() - st < runtime:
# time.sleep(1)
dev.getFeedback(u3.BitStateWrite(b_id, 0))
t = Thread(target=func)
t.start()
if __name__ == "__main__":
testDev = LamontFurnaceControl()
testDev.drop_ball(1)
testDev.extract(3.1, units="volts", furnace=1)
print(testDev.readTC(1))
# ============= EOF =============================================
|
|
# -*- coding: utf_8 -*-
#
# scrollpanel.py
#
# Scrollable widget which contains other widgets, for albow
#
from palette_view import PaletteView
from layout import Column
from utils import blit_in_rect
from pygame import event, Surface, SRCALPHA, Rect, draw
#-----------------------------------------------------------------------------
class ScrollRow(PaletteView):
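    # A single-column PaletteView used by ScrollPanel: each item is one table row,
    # drawn cell by cell from the parent's column_info(), with optional horizontal
    # scroll buttons in addition to the usual vertical ones.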
def __init__(self, cell_size, nrows, **kwargs):
self.draw_zebra = kwargs.pop('draw_zebra', True)
scrolling = kwargs.pop('scrolling', True)
self.hscrolling = kwargs.pop('hscrolling', True)
self.hscroll = 0
self.virtual_width = 1000
PaletteView.__init__(self, cell_size, nrows, 1, scrolling=scrolling)
if self.hscrolling:
self.height += self.scroll_button_size
def draw_item(self, surf, row, row_rect):
if self.hscrolling:
if row_rect.bottom > self.scroll_left_rect().top:
return
row_data = self.row_data(row)
table = self.parent
height = row_rect.height
for i, x, width, column, cell_data in table.column_info(row_data):
cell_rect = Rect(x + self.margin - self.hscroll, row_rect.top, width, height)
self.draw_table_cell(surf, row, cell_data, cell_rect, column)
def draw_item_and_highlight(self, surface, i, rect, highlight):
if self.draw_zebra and i % 2:
surface.fill(self.zebra_color, rect)
if highlight:
self.draw_prehighlight(surface, i, rect)
if highlight and self.highlight_style == 'reverse':
fg = self.inherited('bg_color') or self.sel_color
else:
fg = self.fg_color
self.draw_item_with(surface, i, rect, fg)
if highlight:
self.draw_posthighlight(surface, i, rect)
def draw_table_cell(self, surf, i, data, cell_rect, column):
self.parent.draw_tree_cell(surf, i, data, cell_rect, column)
def item_is_selected(self, n):
return n == self.parent.selected_item_index
def num_items(self):
return self.parent.num_rows()
def num_rows(self):
return max(0, PaletteView.num_rows(self) - 1)
def row_data(self, row):
return self.parent.row_data(row)
def click_item(self, n, e):
self.parent.click_item(n, e)
def scroll_up_rect(self):
d = self.scroll_button_size
r = Rect(0, 0, d, d)
m = self.margin
r.top = m
r.right = self.width - m
r.inflate_ip(-4, -4)
return r
def scroll_down_rect(self):
d = self.scroll_button_size
r = Rect(0, 0, d, d)
m = self.margin
r.right = self.width - m
if self.hscrolling:
m += d
r.bottom = self.height - m
r.inflate_ip(-4, -4)
return r
def scroll_left_rect(self):
d = self.scroll_button_size
r = Rect(0, 0, d, d)
m = self.margin
r.bottom = self.height - m
r.left = m
r.inflate_ip(-4, -4)
return r
def scroll_right_rect(self):
d = self.scroll_button_size
r = Rect(0, 0, d, d)
m = self.margin
r.bottom = self.height - m
if self.scrolling:
m += d
r.right = self.width - m
r.inflate_ip(-4, -4)
return r
def can_scroll_left(self):
return self.hscrolling and self.hscroll > 0
def can_scroll_right(self):
return self.hscrolling and self.hscroll + self.width < self.virtual_width
def draw_scroll_left_button(self, surface):
r = self.scroll_left_rect()
c = self.scroll_button_color
draw.polygon(surface, c, [r.midleft, r.topright, r.bottomright])
def draw_scroll_right_button(self, surface):
r = self.scroll_right_rect()
c = self.scroll_button_color
draw.polygon(surface, c, [r.topleft, r.midright, r.bottomleft])
def draw(self, surface):
for row in xrange(self.num_rows()):
for col in xrange(self.num_cols()):
r = self.cell_rect(row, col)
self.draw_cell(surface, row, col, r)
if self.can_scroll_up():
self.draw_scroll_up_button(surface)
if self.can_scroll_down():
self.draw_scroll_down_button(surface)
if self.can_scroll_left():
self.draw_scroll_left_button(surface)
if self.can_scroll_right():
self.draw_scroll_right_button(surface)
def scroll_left(self):
if self.can_scroll_left():
self.hscroll -= self.cell_size[1]
def scroll_right(self):
if self.can_scroll_right():
self.hscroll += self.cell_size[1]
def mouse_down(self, event):
if event.button == 1:
if self.hscrolling:
p = event.local
if self.scroll_left_rect().collidepoint(p):
self.scroll_left()
return
elif self.scroll_right_rect().collidepoint(p):
self.scroll_right()
return
elif event.button == 6:
if self.hscrolling:
self.scroll_left()
elif event.button == 7:
if self.hscrolling:
self.scroll_right()
PaletteView.mouse_down(self, event)
def cell_rect(self, row, col):
w, h = self.cell_size
d = self.margin
x = col * w + d - self.hscroll
y = row * h + d
return Rect(x, y, w, h)
#-----------------------------------------------------------------------------
class ScrollPanel(Column):
column_margin = 2
def __init__(self, *args, **kwargs):
kwargs['margin'] = kwargs.get('margin', 0)
self.selected_item_index = None
self.rows = kwargs.pop('rows', [])
self.align = kwargs.get('align', 'l')
self.spacing = kwargs.get('spacing', 4)
self.draw_zebra = kwargs.pop('draw_zebra', True)
# self.row_height = kwargs.pop('row_height', max([a.height for a in self.rows] + [self.font.size(' ')[1],]))
self.row_height = kwargs.pop('row_height', max([a.height for a in self.rows] + [self.font.get_linesize(),]))
self.inner_width = kwargs.pop('inner_width', 500)
self.scrolling = kwargs.get('scrolling', True)
self.hscrolling = kwargs.get('hscrolling', True)
self.scrollRow = scrollRow = ScrollRow((self.inner_width, self.row_height), 10, draw_zebra=self.draw_zebra, spacing=0,
scrolling=self.scrolling, hscrolling=self.hscrolling)
self.selected = None
Column.__init__(self, [scrollRow,], **kwargs)
self.shrink_wrap()
def draw_tree_cell(self, surf, i, data, cell_rect, column):
"""..."""
if self.align.lower() == 'r':
cell_rect.right = self.right - self.margin
if self.scrollRow.can_scroll_up() or self.scrollRow.can_scroll_down():
cell_rect.right -= self.scrollRow.scroll_button_size
elif self.align.lower() == 'c':
cell_rect.left = self.centerx - (cell_rect.width / 2)
if type(data) in (str, unicode):
self.draw_text_cell(surf, i, data, cell_rect, self.align, self.font)
else:
self.draw_image_cell(surf, i, data, cell_rect, column)
def draw_image_cell(self, surf, i, data, cell_rect, column):
"""..."""
blit_in_rect(surf, data, cell_rect, self.align, self.margin)
def draw_text_cell(self, surf, i, data, cell_rect, align, font):
buf = font.render(unicode(data), True, self.fg_color)
blit_in_rect(surf, buf, cell_rect, align)
def num_rows(self):
return len(self.rows)
def row_data(self, row):
return self.rows[row]
def column_info(self, row_data):
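        # Yield (index, x offset, cell width, column id, rendered surface) for each
        # subwidget of the row; ScrollRow.draw_item consumes these tuples to blit the cells.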
m = self.column_margin
d = 2 * m
x = 0
width = 0
subs = row_data.subwidgets
for i in range(len(subs)):
sub = subs[i]
width += sub.width
surf = Surface((sub.width, sub.height), SRCALPHA)
sub.draw_all(surf)
yield i, x + m, sub.width - d, i, surf
x += width
def click_item(self, n, e):
if n < len(self.rows):
for sub in self.rows[n].subwidgets:
if sub:
x = e.local[0] - self.margin - self.rows[n].rect.left - self.rows[n].margin - self.scrollRow.cell_rect(n, 0).left - sub.rect.left
y = e.local[1] - self.margin - self.rows[n].rect.top - self.rows[n].margin - self.scrollRow.cell_rect(n, 0).top - sub.rect.top
if sub.left <= x <= sub.right:
_e = event.Event(e.type, {'alt': e.alt, 'meta': e.meta, 'ctrl': e.ctrl, 'shift': e.shift, 'button': e.button, 'cmd': e.cmd, 'num_clicks': e.num_clicks,
'local': (x, y), 'pos': e.local})
self.focus_on(sub)
if self.selected:
self.selected.is_modal = False
sub.is_modal = True
sub.mouse_down(_e)
self.selected = sub
break
|
|
# coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import naarad.utils
import logging
import os
from naarad.metrics.metric import Metric
logger = logging.getLogger('naarad.metrics.INNOMetric')
class INNOMetric(Metric):
C_MAX_COMMANDS = 10
graph_lib = None
def __init__(self, metric_type, infile, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics,
anomaly_detection_metrics, **other_options):
Metric.__init__(self, metric_type, infile, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics,
anomaly_detection_metrics)
for (key, val) in other_options.iteritems():
setattr(self, key, val.split())
def get_csv_C(self, command, column):
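    # Build the per-command, per-column CSV path for innotop output and record which
    # metric it holds in csv_column_map.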
outcsv = os.path.join(self.resource_directory, "{0}.{1}.{2}.csv".format(self.metric_type, command, column))
self.csv_column_map[outcsv] = command + '.' + column
return outcsv
def parse(self):
logger.info("Working on innotop metric: %s", self.infile)
if self.metric_type == "INNOTOP-C":
return self.parse_innotop_mode_c()
elif self.metric_type == "INNOTOP-M":
return self.parse_innotop_mode_m()
else:
return self.parse_innotop_mode_b()
def parse_innotop_mode_c(self):
with open(self.infile, 'r') as infh:
headerline = infh.readline()
columns = headerline.split()[2:]
outfilehandlers = {}
for line in infh:
l = line.strip().split(' ', 1)
if len(l) <= 1:
continue
ts = l[0].strip().replace('T', ' ')
try:
nameval = l[1].strip().split('\t', 1)
except IndexError:
logger.warn("Badly formatted line: %s", line)
logger.warn("Expected tab separated values")
continue
command = nameval[0]
if command not in outfilehandlers:
# Only looking at top N commands
if len(outfilehandlers) > self.C_MAX_COMMANDS:
continue
# TODO(rmaheshw) : Use collections.defaultdict instead to avoid initializing dicts
outfilehandlers[command] = {}
words = nameval[1].split('\t')
for i in range(len(words)):
if self.options and columns[i] not in self.options:
continue
if columns[i] not in outfilehandlers[command]:
outfilehandlers[command][columns[i]] = open(self.get_csv_C(command, columns[i]), 'w')
self.csv_files.append(self.get_csv_C(command, columns[i]))
ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
outfilehandlers[command][columns[i]].write(ts + ',')
outfilehandlers[command][columns[i]].write(words[i])
outfilehandlers[command][columns[i]].write('\n')
for command in outfilehandlers:
for column in outfilehandlers[command]:
outfilehandlers[command][column].close()
return True
def parse_innotop_mode_b(self):
""" Generic parsing method for all other modes """
with open(self.infile, 'r') as infh:
# Pre processing to figure out different headers
max_row_quot = 0
valrow = -1
thisrowcolumns = {}
data = {}
while True:
line1 = infh.readline()
words = line1.split()
# special case for -I (iostat) option
# skipping all the 'thread' lines
if words[1] == "thread" and self.metric_type == "INNOTOP-I":
while True:
line1 = infh.readline()
words = line1.split()
if naarad.utils.is_number(words[1]):
line1 = infh.readline()
else:
break
if words[1] == "thread" and self.metric_type == "INNOTOP-R":
break
# Skip next line
infh.readline()
last_ts = words[0].strip().replace('T', ' ')
if not naarad.utils.is_number(words[1]):
thisrowcolumns[max_row_quot] = words[1:]
for column in words[1:]:
if self.options and column not in self.options:
continue
data[column] = []
if self.metric_type == "INNOTOP-I":
data["check_pt_age"] = []
max_row_quot += 1
else:
break
# infh.seek(0)
# Real Processing
for line in infh:
l = line.strip().split(' ', 1)
if len(l) <= 1:
continue
ts = l[0].strip().replace('T', ' ')
if not ts == last_ts:
last_ts = ts
valrow = -1
try:
words = l[1].strip().split('\t')
except IndexError:
logger.warn("Bad line: %s", line)
continue
# special case for -I (iostat) option
# skipping all the 'thread' lines
if words[0] == "thread" or (naarad.utils.is_number(words[0]) and "thread" in words[1]):
continue
if naarad.utils.is_number(words[0]):
valrow += 1
quot = valrow % max_row_quot
# Special case for -R, skipping all 'thread' value lines
if quot >= len(thisrowcolumns):
continue
columns = thisrowcolumns[quot]
if len(words) > len(columns):
continue
for i in range(len(words)):
if self.options and columns[i] not in self.options:
continue
column = columns[i]
# Converting -- to 0, seen this for buf_pool_hit_rate
if words[i] == "--":
words[i] = "0"
ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
# Calculating check point age
if self.metric_type == "INNOTOP-I":
if column == "log_seq_no":
log_seq_no = int(words[i])
elif column == "log_flushed_to":
check_pt_age = log_seq_no - int(words[i])
tup = [ts, str(check_pt_age)]
data["check_pt_age"].append(tup)
tup = [ts, words[i]]
data[column].append(tup)
# Post Proc, writing the different out files
for column in data:
csvfile = self.get_csv(column)
self.csv_files.append(csvfile)
with open(csvfile, 'w') as outfh:
for tup in data[column]:
outfh.write(','.join(tup))
outfh.write('\n')
return True
def parse_innotop_mode_m(self):
""" Special parsing method for Innotop "Replication Status" results (innotop --mode M)"""
with open(self.infile, 'r') as infh:
# Pre processing to figure out different headers
max_row_quot = 0
valrow = -1
thisrowcolumns = {}
data = {}
last_ts = None
while True:
# 2012-05-11T00:00:02 master_host slave_sql_running time_behind_master slave_catchup_rate slave_open_temp_tables relay_log_pos last_error
line1 = infh.readline()
words = line1.split()
# Skip next line
infh.readline()
is_header = True
for word in words:
if naarad.utils.is_number(word):
last_ts = words[0].strip().replace('T', ' ')
is_header = False
break # from this loop
if len(words) > 2 and is_header:
thisrowcolumns[max_row_quot] = words[2:]
for column in thisrowcolumns[max_row_quot]:
data[column] = []
max_row_quot += 1
else:
break
# from pre-processing. All headers accounted for
# Real Processing
if not last_ts:
logger.warn("last_ts not set, looks like there is no data in file %s", self.infile)
return True
infh.seek(0)
is_bad_line = False
outfilehandlers = {}
for line in infh:
l = line.strip().split(' ', 1)
# Blank line
if len(l) <= 1:
continue
ts = l[0].strip().replace('T', ' ')
if ts != last_ts:
last_ts = ts
valrow = -1
nameval = l[1].strip().split('\t', 1)
try:
words = nameval[1].split('\t')
except IndexError:
logger.warn("Bad line: %s", line)
continue
valrow += 1
command = nameval[0]
if command not in outfilehandlers:
outfilehandlers[command] = {}
quot = valrow % max_row_quot
columns = thisrowcolumns[quot]
for i in range(len(words)):
if len(words) > len(columns):
logger.warn("Mismatched number of columns: %s", line)
logger.warn("%d %d", len(words), len(columns))
break
if words[i] in columns:
logger.warn("Skipping line: %s", line)
valrow -= 1
break
if self.options and columns[i] not in self.options:
continue
if columns[i] not in outfilehandlers[command]:
outfilehandlers[command][columns[i]] = open(self.get_csv_C(command, columns[i]), 'w')
self.csv_files.append(self.get_csv_C(command, columns[i]))
ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
outfilehandlers[command][columns[i]].write(ts + ',')
outfilehandlers[command][columns[i]].write(words[i])
outfilehandlers[command][columns[i]].write('\n')
for command in outfilehandlers:
for column in outfilehandlers[command]:
outfilehandlers[command][column].close()
return True
|
|
#! /usr/bin/env python
import os
import sys
sys.path.append(os.path.abspath('../../'))
from src.util.FileSystem import FileSystem
from src.util.MLClass import MLClass
import MySQLdb as mdb
import logging
from operator import itemgetter
import warnings
import cPickle as pk
# The assumption is that the table that is being written to is cleared!
tardir = os.path.join(FileSystem.getDataDir(), 'ast')
logdir = os.path.join(FileSystem.getLogDir(), 'populatedb')
USESKIPLIST = True
#problemList = [(1,1),(1,2),(1,3),(2,6),(4,4)]
maxEntries = 300
MAXQUEUESIZE = 100
dbread = {}
dbread['Server'] = 'evariste'
dbread['User'] = 'codewebdb'
dbread['Pwd'] = 'n3gr0n1'
dbread['Name'] = 'codewebdb'
dbread['TableName'] = 'original_submissions'
dbwrite = {}
dbwrite['Server'] = 'evariste'
dbwrite['User'] = 'codewebdb'
dbwrite['Pwd'] = 'n3gr0n1'
dbwrite['Name'] = 'codewebdb'
dbwrite['TableName'] = 'octave'
class MultiInserter(object):
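    # Buffers row tuples in memory and bulk-inserts them with executemany() whenever
    # the queue reaches maxQueueSize; flush() is also called explicitly and from __del__.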
def __init__(self,db,maxQueueSize):
self.queue = []
print('Opening database: ' + db['Name'] + '.')
self.con = mdb.connect(db['Server'],db['User'],db['Pwd'],db['Name'])
self.cur = self.con.cursor()
self.db = db
self.maxQueueSize = maxQueueSize
def __del__(self):
print('Flushing...')
self.flush()
print('Closing database: ' + self.db['Name'] + '.')
if self.con:
self.con.close()
self.cur.close()
def add(self, dbentry):
dbentryTuple = (dbentry['hw_id'],dbentry['part_id'],dbentry['ast_id'],dbentry['codestr'],str(dbentry['idlist']),dbentry['jsonstr'],dbentry['mapstr'],dbentry['output'],dbentry['correct'])
self.queue.append(dbentryTuple)
if len(self.queue) == self.maxQueueSize:
self.flush()
def flush(self):
with warnings.catch_warnings():
warnings.simplefilter('error', mdb.Warning)
try:
self.cur.executemany("""INSERT INTO """ + self.db['TableName'] + """ (homework_id,part_id,ast_id,code,coursera_submission_ids,json,map,output,correct) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)""",self.queue)
self.con.commit()
            except mdb.Error as e:
raise e
self.queue = []
def opendb(db):
print('Opening database: ' + db['Name'] + '.')
con = mdb.connect(db['Server'],db['User'],db['Pwd'],db['Name'])
cur = con.cursor()
db['connection'] = (con,cur)
def closedb(db):
print('Closing database: ' + db['Name'] + '.')
(con,cur) = db['connection']
if con:
con.close()
cur.close()
def grabOutput(db, submissionids):
(con,cur) = db['connection']
corrects = [0, 0]
outputs = {}
for subid in submissionids:
cur.execute("SELECT output, raw_score FROM " + db['TableName'] + " WHERE id = %s", (subid,))
r = cur.fetchone()
try:
outputs[r[0]] += 1
except KeyError:
outputs[r[0]] = 1
corrects[int(int(r[1])>0)] += 1
correct = int(corrects[0] < corrects[1])
output = max(outputs.iteritems(), key = itemgetter(1))[0]
count = correct
return output, correct, count
def printEntry(dbentry):
print('Homework id: ' + str(dbentry['hw_id']))
print('Part id: ' + str(dbentry['part_id']))
print('Correct: ' + str(dbentry['correct']))
print('Number of submissions: ' + str(len(dbentry['idlist'])))
def loadSubmissionsFile(fname):
submissionids = []
fid = open(fname)
rows = fid.readlines()
for r in rows[2:]:
tmp = r.split(':')
astindex = int(tmp[0])
numsubmissions = int(tmp[1])
idlist = [int(x) for x in tmp[2].split(',')[:-1]]
submissionids.append(idlist)
fid.close()
return submissionids
def loadTextFile(fname):
return open(fname).read()
def loadSkipList(fname):
try:
fid = open(fname,'r')
except IOError:
fid = open(fname,'wt')
fid.write('')
fid.close()
return []
rows = fid.readlines()
fid.close()
pList = []
for r in rows:
pList.append(tuple([int(x) for x in r.rstrip(' \n').split()]))
return pList
def logHwPart(hw_id,part_id,fname):
fid = open(fname,'a')
fid.write(str(hw_id) + ' ' + str(part_id) + '\n')
fid.close()
def report(dbentry,ast_id,numUniqueAST):
rpt = '\n+------------------------------------------------\n' \
+ 'Homework: ' + str(dbentry['hw_id']) + ', Part: ' + str(dbentry['part_id']) + '\n' \
+ 'On AST #' + str(ast_id) + ' of ' + str(numUniqueAST) + '\n' \
+ 'Number of matching submissions: ' + str(len(dbentry['idlist'])) + '\n'
return rpt
def run(writeToTestDB):
logfilename = os.path.join(logdir,'log')
logging.basicConfig(filename = logfilename, format='%(asctime)s %(message)s',\
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
coarselogfilename = os.path.join(logdir,'coarselog')
skipList = loadSkipList(coarselogfilename)
#set up problems and allfiles
allfiles = []
for (h,p) in MLClass.allProblems():
tarfilepath = 'ast_' + str(h) + '_' + str(p) + '.tar.gz'
allfiles.append(os.path.join(tardir, tarfilepath))
# open database connections
Inserter = MultiInserter(dbwrite,MAXQUEUESIZE)
opendb(dbread)
# iterate through files
for tarfile, prob in zip(allfiles,MLClass.allProblems()):
# filter out problems that we don't want to expand
(hw_id,part_id) = [int(x) for x in prob]
if USESKIPLIST and (hw_id,part_id) in skipList:
continue
print('Untarring Homework: ' + str(hw_id) + ', Problem: ' + str(part_id))
dirname = tarfile[:(-7)]
if not os.path.isdir(os.path.join(tardir,dirname)):
os.system('tar -xzf ' + tarfile + ' -C ' + tardir)
submissionsfile = os.path.join(dirname, 'submissionids.dat')
submissionIDs = loadSubmissionsFile(submissionsfile)
# iterate through each ast id
for idlist,ast_id in zip(submissionIDs,range(len(submissionIDs))):
if ast_id % 100 == 0:
print(str(ast_id) + ' of ' + str(len(submissionIDs)))
if writeToTestDB == True and ast_id >= maxEntries:
break
# load json, map and code files
fname_prefix = os.path.join(dirname,'ast_' + str(ast_id))
fname_json = fname_prefix + '.json'
fname_map = fname_prefix + '.map'
fname_code = fname_prefix + '.code'
# output and correct (grab this from other database)
dbentry = {}
dbentry['output'],dbentry['correct'],count = grabOutput(dbread, idlist)
dbentry['jsonstr'] = loadTextFile(fname_json)
dbentry['mapstr'] = loadTextFile(fname_map)
dbentry['codestr'] = loadTextFile(fname_code)
dbentry['hw_id'] = hw_id
dbentry['part_id'] = part_id
dbentry['idlist'] = idlist
dbentry['ast_id'] = ast_id
# write to db and log entry
Inserter.add(dbentry)
if ast_id % 20 == 0:
logging.debug(report(dbentry,ast_id,len(submissionIDs)))
# delete the folder
# os.system('rm -rf ' + dirname)
logHwPart(hw_id,part_id,coarselogfilename)
# close database connections
closedb(dbread)
if len(sys.argv) == 2:
argstr = sys.argv[1]
if len(sys.argv) != 2 or (argstr != '-test' and argstr != '-full'):
print('Usage: python populateDB.py [-test, -full]')
sys.exit(1)
print(argstr)
testoption = False
if argstr == '-test':
testoption = True
dbwrite['Name'] = 'codewebdb_test'
run(testoption)
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import calendar
import traceback
import six
import requests
from st2client import models
from st2client.config_parser import CLIConfigParser
from st2client.config_parser import ST2_CONFIG_DIRECTORY
from st2client.config_parser import ST2_CONFIG_PATH
from st2client.client import Client
from st2client.config import get_config
from st2client.utils.date import parse as parse_isotime
from st2client.utils.misc import merge_dicts
__all__ = [
'BaseCLIApp'
]
# How many seconds before the token's actual expiration date we should consider the token as
# expired. This is used to prevent the operation from failing during the API request because the
# token was just about to expire.
TOKEN_EXPIRATION_GRACE_PERIOD_SECONDS = 15
CONFIG_OPTION_TO_CLIENT_KWARGS_MAP = {
'base_url': ['general', 'base_url'],
'auth_url': ['auth', 'url'],
'api_url': ['api', 'url'],
'api_version': ['general', 'api_version'],
'api_key': ['credentials', 'api_key'],
'cacert': ['general', 'cacert'],
'debug': ['cli', 'debug']
}
class BaseCLIApp(object):
"""
Base class for StackStorm CLI apps.
"""
LOG = None # logger instance to use
client = None # st2client instance
# A list of command classes for which automatic authentication should be skipped.
SKIP_AUTH_CLASSES = []
def get_client(self, args, debug=False):
ST2_CLI_SKIP_CONFIG = os.environ.get('ST2_CLI_SKIP_CONFIG', 0)
ST2_CLI_SKIP_CONFIG = int(ST2_CLI_SKIP_CONFIG)
skip_config = args.skip_config
skip_config = skip_config or ST2_CLI_SKIP_CONFIG
# Note: Options provided as the CLI argument have the highest precedence
# Precedence order: cli arguments > environment variables > rc file variables
cli_options = ['base_url', 'auth_url', 'api_url', 'api_version', 'cacert']
cli_options = {opt: getattr(args, opt, None) for opt in cli_options}
config_file_options = self._get_config_file_options(args=args)
kwargs = {}
if not skip_config:
# Config parsing is not skipped
kwargs = merge_dicts(kwargs, config_file_options)
kwargs = merge_dicts(kwargs, cli_options)
kwargs['debug'] = debug
client = Client(**kwargs)
if skip_config:
# Config parsing is skipped
self.LOG.info('Skipping parsing CLI config')
return client
# Ok to use config at this point
rc_config = get_config()
# Silence SSL warnings
silence_ssl_warnings = rc_config.get('general', {}).get('silence_ssl_warnings', False)
if silence_ssl_warnings:
requests.packages.urllib3.disable_warnings()
# We skip automatic authentication for some commands such as auth
try:
command_class_name = args.func.im_class.__name__
except Exception:
command_class_name = None
if command_class_name in self.SKIP_AUTH_CLASSES:
return client
# We also skip automatic authentication if token is provided via the environment variable
# or as a command line argument
env_var_token = os.environ.get('ST2_AUTH_TOKEN', None)
cli_argument_token = getattr(args, 'token', None)
env_var_api_key = os.environ.get('ST2_API_KEY', None)
cli_argument_api_key = getattr(args, 'api_key', None)
if env_var_token or cli_argument_token or env_var_api_key or cli_argument_api_key:
return client
# If credentials are provided in the CLI config use them and try to authenticate
credentials = rc_config.get('credentials', {})
username = credentials.get('username', None)
password = credentials.get('password', None)
cache_token = rc_config.get('cli', {}).get('cache_token', False)
if username and password:
            # Credentials are provided, try to authenticate against the API
try:
token = self._get_auth_token(client=client, username=username, password=password,
cache_token=cache_token)
except requests.exceptions.ConnectionError as e:
self.LOG.warn('Auth API server is not available, skipping authentication.')
self.LOG.exception(e)
return client
except Exception as e:
print('Failed to authenticate with credentials provided in the config.')
raise e
client.token = token
# TODO: Hack, refactor when splitting out the client
os.environ['ST2_AUTH_TOKEN'] = token
return client
def _get_config_file_options(self, args):
"""
Parse the config and return kwargs which can be passed to the Client
constructor.
:rtype: ``dict``
"""
rc_options = self._parse_config_file(args=args)
result = {}
for kwarg_name, (section, option) in six.iteritems(CONFIG_OPTION_TO_CLIENT_KWARGS_MAP):
result[kwarg_name] = rc_options.get(section, {}).get(option, None)
return result
def _parse_config_file(self, args):
config_file_path = self._get_config_file_path(args=args)
parser = CLIConfigParser(config_file_path=config_file_path, validate_config_exists=False)
result = parser.parse()
return result
def _get_config_file_path(self, args):
"""
Retrieve path to the CLI configuration file.
:rtype: ``str``
"""
path = os.environ.get('ST2_CONFIG_FILE', ST2_CONFIG_PATH)
if args.config_file:
path = args.config_file
path = os.path.abspath(path)
if path != ST2_CONFIG_PATH and not os.path.isfile(path):
raise ValueError('Config "%s" not found' % (path))
return path
def _get_auth_token(self, client, username, password, cache_token):
"""
Retrieve a valid auth token.
If caching is enabled, we will first try to retrieve cached token from a
file system. If cached token is expired or not available, we will try to
authenticate using the provided credentials and retrieve a new auth
token.
:rtype: ``str``
"""
if cache_token:
token = self._get_cached_auth_token(client=client, username=username,
password=password)
else:
token = None
if not token:
# Token is either expired or not available
token_obj = self._authenticate_and_retrieve_auth_token(client=client,
username=username,
password=password)
self._cache_auth_token(token_obj=token_obj)
token = token_obj.token
return token
def _get_cached_auth_token(self, client, username, password):
"""
Retrieve cached auth token from the file in the config directory.
:rtype: ``str``
"""
if not os.path.isdir(ST2_CONFIG_DIRECTORY):
os.makedirs(ST2_CONFIG_DIRECTORY)
cached_token_path = self._get_cached_token_path_for_user(username=username)
if not os.access(ST2_CONFIG_DIRECTORY, os.R_OK):
# We don't have read access to the file with a cached token
message = ('Unable to retrieve cached token from "%s" (user %s doesn\'t have read '
'access to the parent directory). Subsequent requests won\'t use a '
'cached token meaning they may be slower.' % (cached_token_path,
os.getlogin()))
self.LOG.warn(message)
return None
if not os.path.isfile(cached_token_path):
return None
if not os.access(cached_token_path, os.R_OK):
# We don't have read access to the file with a cached token
message = ('Unable to retrieve cached token from "%s" (user %s doesn\'t have read '
'access to this file). Subsequent requests won\'t use a cached token '
'meaning they may be slower.' % (cached_token_path, os.getlogin()))
self.LOG.warn(message)
return None
# Safety check for too permissive permissions
file_st_mode = oct(os.stat(cached_token_path).st_mode & 0777)
others_st_mode = int(file_st_mode[-1])
if others_st_mode >= 4:
# Every user has access to this file which is dangerous
            message = ('Permissions (%s) for cached token file "%s" are too permissive. Please '
'restrict the permissions and make sure only your own user can read '
'from the file' % (file_st_mode, cached_token_path))
self.LOG.warn(message)
with open(cached_token_path) as fp:
data = fp.read()
try:
data = json.loads(data)
token = data['token']
expire_timestamp = data['expire_timestamp']
except Exception as e:
msg = ('File "%s" with cached token is corrupted or invalid (%s). Please delete '
' this file' % (cached_token_path, str(e)))
raise ValueError(msg)
now = int(time.time())
if (expire_timestamp - TOKEN_EXPIRATION_GRACE_PERIOD_SECONDS) < now:
self.LOG.debug('Cached token from file "%s" has expired' % (cached_token_path))
# Token has expired
return None
self.LOG.debug('Using cached token from file "%s"' % (cached_token_path))
return token
def _cache_auth_token(self, token_obj):
"""
Cache auth token in the config directory.
:param token_obj: Token object.
:type token_obj: ``object``
"""
if not os.path.isdir(ST2_CONFIG_DIRECTORY):
os.makedirs(ST2_CONFIG_DIRECTORY)
username = token_obj.user
cached_token_path = self._get_cached_token_path_for_user(username=username)
if not os.access(ST2_CONFIG_DIRECTORY, os.W_OK):
# We don't have write access to the file with a cached token
message = ('Unable to write token to "%s" (user %s doesn\'t have write '
'access to the parent directory). Subsequent requests won\'t use a '
'cached token meaning they may be slower.' % (cached_token_path,
os.getlogin()))
self.LOG.warn(message)
return None
if os.path.isfile(cached_token_path) and not os.access(cached_token_path, os.W_OK):
# We don't have write access to the file with a cached token
message = ('Unable to write token to "%s" (user %s doesn\'t have write '
'access to this file). Subsequent requests won\'t use a '
'cached token meaning they may be slower.' % (cached_token_path,
os.getlogin()))
self.LOG.warn(message)
return None
token = token_obj.token
expire_timestamp = parse_isotime(token_obj.expiry)
expire_timestamp = calendar.timegm(expire_timestamp.timetuple())
data = {}
data['token'] = token
data['expire_timestamp'] = expire_timestamp
data = json.dumps(data)
        # Note: We explicitly use fdopen instead of open + chmod to avoid a security issue.
        # open + chmod are two separate operations, which leaves a short time frame (between
        # open and chmod) during which the file can potentially be read by other users if the
        # default permissions used during create allow that.
fd = os.open(cached_token_path, os.O_WRONLY | os.O_CREAT, 0600)
with os.fdopen(fd, 'w') as fp:
fp.write(data)
self.LOG.debug('Token has been cached in "%s"' % (cached_token_path))
return True
def _authenticate_and_retrieve_auth_token(self, client, username, password):
manager = models.ResourceManager(models.Token, client.endpoints['auth'],
cacert=client.cacert, debug=client.debug)
instance = models.Token()
instance = manager.create(instance, auth=(username, password))
return instance
def _get_cached_token_path_for_user(self, username):
"""
Retrieve cached token path for the provided username.
"""
file_name = 'token-%s' % (username)
result = os.path.abspath(os.path.join(ST2_CONFIG_DIRECTORY, file_name))
return result
def _print_config(self, args):
config = self._parse_config_file(args=args)
for section, options in six.iteritems(config):
print('[%s]' % (section))
for name, value in six.iteritems(options):
print('%s = %s' % (name, value))
def _print_debug_info(self, args):
# Print client settings
self._print_client_settings(args=args)
# Print exception traceback
traceback.print_exc()
def _print_client_settings(self, args):
client = self.client
if not client:
return
config_file_path = self._get_config_file_path(args=args)
print('CLI settings:')
print('----------------')
print('Config file path: %s' % (config_file_path))
print('Client settings:')
print('----------------')
print('ST2_BASE_URL: %s' % (client.endpoints['base']))
print('ST2_AUTH_URL: %s' % (client.endpoints['auth']))
print('ST2_API_URL: %s' % (client.endpoints['api']))
print('ST2_AUTH_TOKEN: %s' % (os.environ.get('ST2_AUTH_TOKEN')))
print('')
print('Proxy settings:')
print('---------------')
print('HTTP_PROXY: %s' % (os.environ.get('HTTP_PROXY', '')))
print('HTTPS_PROXY: %s' % (os.environ.get('HTTPS_PROXY', '')))
print('')
|
|
#!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains functions for handling requests in relation to security
groups
"""
from ec2stack import helpers, errors
from ec2stack.providers import cloudstack
from ec2stack.core import Ec2stackError
from ec2stack.providers.cloudstack import requester
@helpers.authentication_required
def authenticate_security_group_egress():
"""
Add egress rules to a security group.
@return: Response.
"""
rule_type = 'egress'
response = _authenticate_security_group_request(rule_type)
return _authenticate_security_group_response(response, rule_type)
def _authenticate_security_group_request(rule_type):
"""
    Request to add an ingress or egress rule to a security group.
@param rule_type: The type of rule to add.
@return: Response.
"""
args = _parse_security_group_request()
if rule_type == 'egress':
args['command'] = 'authorizeSecurityGroupEgress'
elif rule_type == 'ingress':
args['command'] = 'authorizeSecurityGroupIngress'
response = requester.make_request_async(args)
return response
def _authenticate_security_group_response(response, rule_type):
"""
Generate a response for authenticate security group request.
@param response: Cloudstack response.
@param rule_type: The type of rule to add.
@raise Ec2stackError: If authorize security group fails.
@return: Response
"""
if 'errortext' in response:
if 'Failed to authorize security group' in response['errortext']:
cidrlist = str(helpers.get('CidrIp'))
protocol = str(helpers.get('IpProtocol'))
from_port = str(helpers.get('FromPort'))
            to_port = str(helpers.get('ToPort'))
raise Ec2stackError(
'400',
'InvalidPermission.Duplicate',
'the specified rule "peer: ' + cidrlist + ', ' + protocol +
', from port: ' + from_port + ', to port: ' + to_port +
', ALLOW" already exists'
)
elif 'Unable to find security group' in response['errortext']:
errors.invalid_security_group()
else:
errors.invalid_request(response['errortext'])
else:
if rule_type == 'ingress':
rule_type = 'AuthorizeSecurityGroupIngressResponse'
elif rule_type == 'egress':
rule_type = 'AuthorizeSecurityGroupEgressResponse'
return {
'template_name_or_list': 'status.xml',
'response_type': rule_type,
'return': 'true'
}
@helpers.authentication_required
def create_security_group():
"""
Create a security group.
@return: Response.
"""
helpers.require_parameters(['GroupName', 'GroupDescription'])
response = _create_security_group_request()
return _create_security_group_response(response)
def _create_security_group_request():
"""
Request to create a security group.
@return: response.
"""
args = {'command': 'createSecurityGroup', 'name': helpers.get('GroupName'),
'description': helpers.get('GroupDescription')}
response = requester.make_request(args)
response = response['createsecuritygroupresponse']
return response
def _create_security_group_response(response):
"""
Generate a response for create security group request.
@param response: Cloudstack response.
@return: Response.
"""
if 'errortext' in response:
errors.duplicate_security_group()
else:
response = response['securitygroup']
return {
'template_name_or_list': 'create_security_group.xml',
'response_type': 'CreateSecurityGroupResponse',
'id': response['id'],
'return': 'true'
}
@helpers.authentication_required
def delete_security_group():
"""
Deletes a specified security group.
@return: Response.
"""
_delete_security_group_request()
return _delete_security_group_response()
def _delete_security_group_request():
"""
Request to delete a security group.
@return: Response.
"""
args = {}
helpers.require_atleast_one_parameter(['GroupName', 'GroupId'])
if helpers.contains_parameter('GroupName'):
args['name'] = helpers.get('GroupName')
elif helpers.contains_parameter('GroupId'):
args['id'] = helpers.get('GroupId')
args['command'] = 'deleteSecurityGroup'
response = requester.make_request(args)
return response
def _delete_security_group_response():
"""
Generate a response for delete security group request.
@return: response
"""
return {
'template_name_or_list': 'status.xml',
'response_type': 'DeleteSecurityGroupResponse',
'return': 'true'
}
@helpers.authentication_required
def describe_security_groups():
"""
Describe one or more security groups.
@return: Response
"""
args = {'command': 'listSecurityGroups'}
response = cloudstack.describe_item(
args, 'securitygroup', errors.invalid_security_group, 'Group'
)
return _describe_security_groups_response(
response
)
def _describe_security_groups_response(response):
"""
Generates a response for describe security group request.
@param response: Cloudstack response.
@return: Response.
"""
return {
'template_name_or_list': 'securitygroups.xml',
'response_type': 'DescribeSecurityGroupsResponse',
'response': response
}
@helpers.authentication_required
def authenticate_security_group_ingress():
"""
Add one or more ingress rules to a security group.
@return: Response.
"""
rule_type = 'ingress'
response = _authenticate_security_group_request(rule_type)
return _authenticate_security_group_response(response, rule_type)
@helpers.authentication_required
def revoke_security_group_ingress():
"""
Removes one or more ingress rules from a security group.
@return: Response.
"""
rule_type = 'ingress'
_revoke_security_group_request(rule_type)
return _revoke_security_group_response(rule_type)
@helpers.authentication_required
def revoke_security_group_egress():
"""
Removes one or more egress rules from a security group.
@return: Response.
"""
rule_type = 'egress'
_revoke_security_group_request(rule_type)
return _revoke_security_group_response(rule_type)
def _revoke_security_group_request(rule_type):
"""
Request to remove rule from security group
@param rule_type: The type of rule to remove.
@return: Response.
"""
args = {}
rules = _parse_security_group_request()
if rule_type == 'ingress':
args['command'] = 'revokeSecurityGroupIngress'
args['id'] = _find_rule(rules, 'ingressrule')
elif rule_type == 'egress':
args['command'] = 'revokeSecurityGroupEgress'
args['id'] = _find_rule(rules, 'egressrule')
response = requester.make_request_async(args)
return response
def _revoke_security_group_response(rule_type):
"""
Generate a response for revoke security group requests.
@param rule_type: The type of rule
@return: Response.
"""
if rule_type == 'ingress':
rule_type = 'RevokeSecurityGroupIngressResponse'
elif rule_type == 'egress':
rule_type = 'RevokeSecurityGroupEgressResponse'
return {
'template_name_or_list': 'status.xml',
'response_type': rule_type,
'return': 'true'
}
def _find_rule(rule, rule_type):
"""
Searches a Cloudstack response for a rule and returns its Id.
@param rule: Rule to be found.
@param rule_type: Type of rule.
@return: Id of the rule.
"""
security_group = _get_security_group(rule)
if rule_type in security_group:
found_rules = security_group[rule_type]
for found_rule in found_rules:
if _compare_rules(rule, found_rule):
return found_rule['ruleid']
errors.invalid_permission()
def _compare_rules(left, right):
"""
Compares two rules to see if they are the same.
@param left: rule to be compared.
@param right: rule to compare with.
@return: Boolean
"""
protocol_match = str(left['protocol']) == str(right['protocol'])
cidr_match = str(left['cidrlist']) == str(right['cidr'])
if 'startport' in left and 'startport' in right:
startport_match = str(left['startport']) == str(right['startport'])
elif 'icmptype' in left and 'icmptype' in right:
startport_match = str(left['icmptype']) == str(right['icmptype'])
else:
startport_match = False
if 'endport' in left and 'endport' in right:
endport_match = str(left['endport']) == str(right['endport'])
elif 'icmpcode' in left and 'icmpcode' in right:
endport_match = str(left['icmpcode']) == str(right['icmpcode'])
else:
endport_match = False
return protocol_match and cidr_match and startport_match and endport_match
def _get_security_group(args):
"""
Get the security group with the specified name.
@param args: Arguments to pass to request.
@return: Response.
"""
args['command'] = 'listSecurityGroups'
response = cloudstack.describe_item_request(
args, 'securitygroup', errors.invalid_security_group
)
return response
def _parse_security_group_request(args=None):
"""
Parse the request parameters into a Cloudstack request payload.
@param args: Arguments to include in the request.
@return: Request payload.
"""
if args is None:
args = {}
helpers.require_atleast_one_parameter(['GroupName', 'GroupId'])
if helpers.contains_parameter('GroupName'):
args['securityGroupName'] = helpers.get('GroupName')
args['name'] = helpers.get('GroupName')
elif helpers.contains_parameter('GroupId'):
args['securityGroupId'] = helpers.get('GroupId')
args['id'] = helpers.get('GroupId')
helpers.require_parameters(['IpProtocol'])
args['protocol'] = helpers.get('IpProtocol')
helpers.require_parameters(['FromPort', 'ToPort'])
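    # For ICMP, the EC2 API reuses FromPort/ToPort as the ICMP type and code,
    # so map them to CloudStack's icmptype/icmpcode.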
if args['protocol'] in ['icmp']:
args['icmptype'] = helpers.get('FromPort')
args['icmpcode'] = helpers.get('ToPort')
else:
args['startport'] = helpers.get('FromPort')
args['endport'] = helpers.get('ToPort')
if helpers.get('CidrIp') is None:
args['cidrlist'] = '0.0.0.0/0'
else:
args['cidrlist'] = helpers.get('CidrIp')
return args
|
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#======================================================================
#
# fzf_cd.py - Change Directory in Total Commander with fzf
#
# Created by skywind on 2020/10/31
# Last Modified: 2020/10/31 22:15:03
#
#======================================================================
from __future__ import print_function, unicode_literals
import sys
import time
import array
import struct
import os
import ctypes
#----------------------------------------------------------------------
# 2/3 compatible
#----------------------------------------------------------------------
if sys.version_info[0] >= 3:
unicode = str
xrange = range
long = int
#----------------------------------------------------------------------
# Win32API
#----------------------------------------------------------------------
class Win32API (object):
def __init__ (self):
import ctypes
self.kernel32 = ctypes.windll.LoadLibrary('kernel32.dll')
self.user32 = ctypes.windll.LoadLibrary('user32.dll')
self._query_interface()
self._guess_encoding()
self._setup_struct()
def _query_interface (self):
import ctypes
import ctypes.wintypes
self.ctypes = ctypes
self.wintypes = ctypes.wintypes
wintypes = ctypes.wintypes
HWND, LONG, BOOL = wintypes.HWND, wintypes.LONG, wintypes.BOOL
UINT, DWORD, c_int = wintypes.UINT, wintypes.DWORD, ctypes.c_int
WPARAM, LPARAM = wintypes.WPARAM, wintypes.LPARAM
self.WNDENUMPROC = ctypes.WINFUNCTYPE(
wintypes.BOOL,
wintypes.HWND, # _In_ hWnd
wintypes.LPARAM,) # _In_ lParam
self.user32.EnumThreadWindows.argtypes = (
wintypes.DWORD,
self.WNDENUMPROC,
wintypes.LPARAM)
self.user32.EnumThreadWindows.restype = wintypes.BOOL
self.user32.GetParent.argtypes = (wintypes.HWND,)
self.user32.GetParent.restype = wintypes.HWND
self.kernel32.GetConsoleWindow.argtypes = []
self.kernel32.GetConsoleWindow.restype = wintypes.HWND
self.user32.GetWindowLongA.argtypes = (HWND, ctypes.c_int)
self.user32.GetWindowLongA.restype = LONG
self.user32.SetWindowLongA.argtypes = (HWND, ctypes.c_int, LONG)
self.user32.SetWindowLongA.restype = LONG
self.kernel32.GetCurrentThreadId.argtypes = []
self.kernel32.GetCurrentThreadId.restype = wintypes.DWORD
self.user32.SendMessageA.argtypes = (HWND, UINT, WPARAM, LPARAM)
self.user32.SendMessageA.restype = wintypes.LONG
self.user32.SendMessageW.argtypes = (HWND, UINT, WPARAM, LPARAM)
self.user32.SendMessageW.restype = wintypes.LONG
self.user32.PostMessageA.argtypes = (HWND, UINT, WPARAM, LPARAM)
self.user32.PostMessageA.restype = wintypes.LONG
self.user32.PostMessageW.argtypes = (HWND, UINT, WPARAM, LPARAM)
self.user32.PostMessageW.restype = wintypes.LONG
self.user32.FindWindowA.argtypes = (ctypes.c_char_p, ctypes.c_char_p)
self.user32.FindWindowA.restype = HWND
self.user32.FindWindowW.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p)
self.user32.FindWindowW.restype = HWND
args = (HWND, HWND, c_int, c_int, c_int, c_int, UINT)
self.user32.SetWindowPos.argtypes = args
self.user32.SetWindowPos.restype = LONG
args = (HWND, wintypes.COLORREF, wintypes.BYTE, DWORD)
self.user32.SetLayeredWindowAttributes.argtypes = args
self.user32.SetLayeredWindowAttributes.restype = BOOL
self.user32.GetAsyncKeyState.argtypes = (c_int,)
self.user32.GetAsyncKeyState.restype = wintypes.SHORT
self.user32.GetActiveWindow.argtypes = []
self.user32.GetActiveWindow.restype = HWND
args = [ ctypes.c_char_p, ctypes.c_char_p, DWORD ]
self.kernel32.GetShortPathNameA.argtypes = args
self.kernel32.GetShortPathNameA.restype = DWORD
self.kernel32.GetLongPathNameA.argtypes = args
self.kernel32.GetLongPathNameA.restype = DWORD
args = [ ctypes.c_wchar_p, ctypes.c_wchar_p, DWORD ]
self.kernel32.GetShortPathNameW.argtypes = args
self.kernel32.GetShortPathNameW.restype = DWORD
self.kernel32.GetLongPathNameW.argtypes = args
self.kernel32.GetLongPathNameW.restype = DWORD
self.kernel32.GetFileAttributesA.argtypes = [ctypes.c_char_p]
self.kernel32.GetFileAttributesA.restype = DWORD
self.kernel32.GetFileAttributesW.argtypes = [ctypes.c_wchar_p]
self.kernel32.GetFileAttributesW.restype = DWORD
return 0
def _guess_encoding (self):
guess = []
try:
import locale
guess.append(locale.getpreferredencoding())
except:
pass
		guess.append(sys.getdefaultencoding())
for fp in (sys.stdout, sys.stdin):
if fp and hasattr(fp, 'encoding'):
if fp.encoding:
guess.append(fp.encoding)
guess.append('utf-8')
self.encoding = guess[0]
return self.encoding
def _setup_struct (self):
import ctypes
self.cbuffer = ctypes.create_string_buffer(8192)
self.wbuffer = ctypes.create_unicode_buffer(8192)
return 0
def EnumThreadWindows (self, id, proc, lparam):
return self.user32.EnumThreadWindows(id, proc, lparam)
def GetWindowLong (self, hwnd, index):
return self.user32.GetWindowLongA(hwnd, index)
def SetWindowLong (self, hwnd, index, value):
return self.user32.SetWindowLongA(hwnd, index, value)
def GetCurrentThreadId (self):
return self.kernel32.GetCurrentThreadId()
def GetConsoleWindow (self):
return self.kernel32.GetConsoleWindow()
def GetParent (self, hwnd):
return self.user32.GetParent(hwnd)
def ConvertToParam (self, data):
if data is None:
return 0
if isinstance(data, int):
return data
if isinstance(data, array.array):
address, size = data.buffer_info()
return address
if isinstance(data, ctypes.Array):
return ctypes.addressof(data)
return data
def SendMessageA (self, hwnd, msg, wparam, lparam):
wparam = self.ConvertToParam(wparam)
lparam = self.ConvertToParam(lparam)
return self.user32.SendMessageA(hwnd, msg, wparam, lparam)
def SendMessageW (self, hwnd, msg, wparam, lparam):
wparam = self.ConvertToParam(wparam)
lparam = self.ConvertToParam(lparam)
return self.user32.SendMessageW(hwnd, msg, wparam, lparam)
def PostMessageA (self, hwnd, msg, wparam, lparam):
wparam = self.ConvertToParam(wparam)
lparam = self.ConvertToParam(lparam)
return self.user32.PostMessageA(hwnd, msg, wparam, lparam)
def PostMessageW (self, hwnd, msg, wparam, lparam):
wparam = self.ConvertToParam(wparam)
lparam = self.ConvertToParam(lparam)
return self.user32.PostMessageW(hwnd, msg, wparam, lparam)
def SetWindowPos (self, hwnd, after, x, y, cx, cy, flags):
return self.user32.SetWindowPos(hwnd, after, x, y, cx, cy, flags)
def SetLayeredWindowAttributes (self, hwnd, cc, alpha, flag):
return self.user32.SetLayeredWindowAttributes(hwnd, cc, alpha, flag)
def GetAsyncKeyState (self, keycode):
if isinstance(keycode, str):
keycode = keycode and ord(keycode[0]) or 0
return self.user32.GetAsyncKeyState(keycode)
def GetActiveWindow (self):
return self.user32.GetActiveWindow()
def ConvertToWide (self, text):
if text is None:
return None
if isinstance(text, bytes):
for enc in (self.encoding, 'utf-8', 'gbk'):
try:
p = text.decode(enc)
text = p
break
except:
pass
if isinstance(text, bytes):
text = text.decode('utf-8', 'ignore')
return text
def ConvertToAnsi (self, text):
if text is None:
return None
if isinstance(text, str):
for enc in (self.encoding, 'utf-8', 'gbk'):
try:
p = text.encode(enc)
text = p
break
except:
pass
if isinstance(text, str):
text = text.encode('utf-8', 'ignore')
return text
def GetShortPathNameA (self, path):
path = self.ConvertToAnsi(path)
hr = self.kernel32.GetShortPathNameA(path, self.cbuffer, 4097)
if hr <= 0:
return None
value = bytes(self.cbuffer[:hr])
x = self.ConvertToWide(value)
return x
def GetShortPathNameW (self, path):
path = self.ConvertToWide(path)
hr = self.kernel32.GetShortPathNameW(path, self.wbuffer, 4097)
if hr <= 0:
return None
value = str(self.wbuffer[:hr])
return value
def GetLongPathNameA (self, path):
path = self.ConvertToAnsi(path)
hr = self.kernel32.GetLongPathNameA(path, self.cbuffer, 4097)
if hr <= 0:
return None
value = bytes(self.cbuffer[:hr])
return self.ConvertToWide(value)
def GetLongPathNameW (self, path):
path = self.ConvertToWide(path)
hr = self.kernel32.GetLongPathNameW(path, self.wbuffer, 4097)
if hr <= 0:
return None
value = str(self.wbuffer[:hr])
return value
def FindWindowA (self, ClassName, WindowName):
ClassName = self.ConvertToAnsi(ClassName)
WindowName = self.ConvertToAnsi(WindowName)
return self.user32.FindWindowA(ClassName, WindowName)
def FindWindowW (self, ClassName, WindowName):
ClassName = self.ConvertToWide(ClassName)
WindowName = self.ConvertToWide(WindowName)
return self.user32.FindWindowW(ClassName, WindowName)
def GetFileAttributes (self, name):
if isinstance(name, bytes):
return self.kernel32.GetFileAttributesA(name)
return self.kernel32.GetFileAttributesW(name)
def CopyData (self, hwnd, msg, payload, source = None):
payload = self.ConvertToAnsi(payload)
data_size = 0
data_address = 0
data_ptr = None
if payload:
data_ptr = array.array('B', payload)
data_address, data_size = data_ptr.buffer_info()
copy_struct = struct.pack('PLP', msg, data_size, data_address)
p1 = array.array('B', copy_struct)
return self.SendMessageA(hwnd, 74, source, p1)
def GetRightPathCase (self, path):
path = self.GetShortPathNameW(path)
path = self.GetLongPathNameW(path)
if len(path) > 2:
if path[1] == ':' and path[0].isalpha():
path = path[0].upper() + path[1:]
return path
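# Illustrative note (assumes a Windows host and an existing path): the
# GetRightPathCase() helper above normalizes on-disk letter case by doing a
# short-path -> long-path round trip, e.g. something like
#   Win32API().GetRightPathCase('c:\\program files')  ->  'C:\\Program Files'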
#----------------------------------------------------------------------
# Globals
#----------------------------------------------------------------------
DIRNAME = os.path.dirname(os.path.abspath(__file__))
SRCNAME = os.path.abspath(__file__)
#----------------------------------------------------------------------
# Configure
#----------------------------------------------------------------------
class Configure (object):
def __init__ (self):
self.dirname = DIRNAME
self.cmdhome = None
self._cache = {}
self._guess_encoding()
self._load_config()
self.cmdhome = self._search_home()
self.cmdconf = self._search_conf()
self.origin = {}
self.origin['path'] = os.environ.get('COMMANDER_PATH', '')
self.origin['ini'] = os.environ.get('COMMANDER_INI', '')
if self.cmdhome:
os.environ['COMMANDER_PATH'] = self.cmdhome
if self.cmdconf:
os.environ['COMMANDER_INI'] = self.cmdconf
self.ghisler = self._setup_dir()
self.database = os.path.join(self.ghisler, 'fzfmru.txt')
def _guess_encoding (self):
guess = []
try:
import locale
guess.append(locale.getpreferredencoding())
except:
pass
		guess.append(sys.getdefaultencoding())
for fp in (sys.stdout, sys.stdin):
if fp and hasattr(fp, 'encoding'):
if fp.encoding:
guess.append(fp.encoding)
guess.append('utf-8')
self.encoding = guess[0]
return self.encoding
def replace_file (self, srcname, dstname):
import sys, os
if sys.platform[:3] != 'win':
			try:
				os.rename(srcname, dstname)
			except OSError:
				return False
			return True
else:
import ctypes.wintypes
kernel32 = ctypes.windll.kernel32
wp, vp, cp = ctypes.c_wchar_p, ctypes.c_void_p, ctypes.c_char_p
DWORD, BOOL = ctypes.wintypes.DWORD, ctypes.wintypes.BOOL
kernel32.ReplaceFileA.argtypes = [ cp, cp, cp, DWORD, vp, vp ]
kernel32.ReplaceFileW.argtypes = [ wp, wp, wp, DWORD, vp, vp ]
kernel32.ReplaceFileA.restype = BOOL
kernel32.ReplaceFileW.restype = BOOL
kernel32.GetLastError.argtypes = []
kernel32.GetLastError.restype = DWORD
success = False
try:
os.rename(srcname, dstname)
success = True
except OSError:
pass
if success:
return True
if sys.version_info[0] < 3 and isinstance(srcname, str):
hr = kernel32.ReplaceFileA(dstname, srcname, None, 2, None, None)
else:
hr = kernel32.ReplaceFileW(dstname, srcname, None, 2, None, None)
if not hr:
return False
return True
# load content
def load_file_content (self, filename, mode = 'r'):
		if hasattr(filename, 'read'):
			content = None
			try: content = filename.read()
			except: pass
			return content
try:
fp = open(filename, mode)
content = fp.read()
fp.close()
except:
content = None
return content
# save content
def save_file_content (self, filename, content, mode = 'w'):
try:
fp = open(filename, mode)
fp.write(content)
fp.close()
except:
return False
return True
# load file and guess encoding
def load_file_text (self, filename, encoding = None):
content = self.load_file_content(filename, 'rb')
if content is None:
return None
if content[:3] == b'\xef\xbb\xbf':
text = content[3:].decode('utf-8')
elif encoding is not None:
text = content.decode(encoding, 'ignore')
else:
text = None
guess = [sys.getdefaultencoding(), 'utf-8']
if sys.stdout and sys.stdout.encoding:
guess.append(sys.stdout.encoding)
try:
import locale
guess.append(locale.getpreferredencoding())
except:
pass
visit = {}
for name in guess + ['gbk', 'ascii', 'latin1']:
if name in visit:
continue
visit[name] = 1
try:
text = content.decode(name)
break
except:
pass
if text is None:
text = content.decode('utf-8', 'ignore')
return text
# save file text
def save_file_text (self, filename, content, encoding = None):
import codecs
if encoding is None:
encoding = 'utf-8'
if (not isinstance(content, unicode)) and isinstance(content, bytes):
return self.save_file_content(filename, content)
with codecs.open(filename, 'w',
encoding = encoding,
errors = 'ignore') as fp:
fp.write(content)
return True
# load ini without ConfigParser
def load_ini (self, filename, encoding = None):
text = self.load_file_text(filename, encoding)
config = {}
sect = 'default'
if text is None:
return None
for line in text.split('\n'):
line = line.strip('\r\n\t ')
if not line:
continue
elif line[:1] in ('#', ';'):
continue
elif line.startswith('['):
if line.endswith(']'):
sect = line[1:-1].strip('\r\n\t ')
if sect not in config:
config[sect] = {}
else:
pos = line.find('=')
if pos >= 0:
key = line[:pos].rstrip('\r\n\t ')
val = line[pos + 1:].lstrip('\r\n\t ')
if sect not in config:
config[sect] = {}
config[sect][key] = val
return config
def read_ini (self, name, encoding = None):
obj = self.load_ini(name, encoding)
if not obj:
obj = {}
else:
newobj = {}
for sect in obj:
section = {}
for k, v in obj[sect].items():
section[k.lower()] = v
newobj[sect.lower()] = section
obj = newobj
return obj
# get ini file
def read_config (self, ininame, encoding):
ininame = os.path.abspath(ininame)
ininame = os.path.normcase(ininame)
if ininame in self._cache:
return self._cache[ininame]
if not os.path.exists(ininame):
return None
obj = self.read_ini(ininame, encoding)
self._cache[ininame] = obj
return obj
def reset (self):
self._cache = {}
return True
def _load_config (self):
name = os.path.abspath(__file__)
main = os.path.splitext(name)[0] + '.ini'
obj = self.read_ini(main)
if 'default' not in obj:
obj['default'] = {}
self.config = obj
return obj
def option (self, section, key, default = None):
section = section.lower()
if section not in self.config:
return default
sect = self.config[section]
key = key.lower()
if key not in sect:
return default
return sect[key]
def tmpname (self, filename, fill = 5):
import time, os, random
while 1:
name = '.' + str(int(time.time() * 1000000))
for i in range(fill):
k = random.randint(0, 51)
name += (k < 26) and chr(ord('A') + k) or chr(ord('a') + k - 26)
test = filename + name + str(os.getpid())
if not os.path.exists(test):
return test
return None
def save_atomic (self, filename, content):
if isinstance(content, list):
content = '\n'.join(content)
temp = self.tmpname(filename)
self.save_file_text(temp, content, 'utf-8')
return self.replace_file(temp, filename)
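	# Note on save_atomic() above: the content is first written to a randomly
	# named temporary file next to the target (see tmpname()), then swapped
	# into place with replace_file(), which falls back to kernel32.ReplaceFileW
	# when a plain os.rename() over an existing target fails on Windows.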
# find root
def find_root (self, path, markers = None, fallback = False):
if markers is None:
markers = ('.git', '.svn', '.hg', '.project', '.root')
if path is None:
path = os.getcwd()
path = os.path.abspath(path)
base = path
while True:
parent = os.path.normpath(os.path.join(base, '..'))
for marker in markers:
test = os.path.join(base, marker)
if os.path.exists(test):
return base
if os.path.normcase(parent) == os.path.normcase(base):
break
base = parent
if fallback:
return path
return None
def _check_home (self, home):
if not home:
return False
if not os.path.exists(home):
return False
if os.path.exists(os.path.join(home, 'totalcmd.exe')):
return True
if os.path.exists(os.path.join(home, 'totalcmd64.exe')):
return True
return False
def _search_home (self):
path = self.option('default', 'commander_path')
if path:
if self._check_home(path) and 1:
return path
if 'COMMANDER_PATH' in os.environ:
path = os.environ['COMMANDER_PATH']
if self._check_home(path):
return path
test = self.dirname
while 1:
if self._check_home(test):
return test
next = os.path.abspath(os.path.join(test, '..'))
if os.path.normcase(next) == os.path.normcase(test):
break
test = next
return None
def _search_conf (self):
if not self.cmdhome:
return None
path = self.option('default', 'commander_ini')
if path:
if os.path.exists(path):
return os.path.abspath(path)
if 'COMMANDER_INI' in os.environ:
path = os.environ['COMMANDER_INI']
if os.path.exists(path):
return path
path = os.path.join(self.cmdhome, 'wincmd.ini')
if os.path.exists(path):
config = self.read_ini(path, self.encoding)
section = config.get('configuration', {})
value = section.get('useiniinprogramdir', None)
if value and isinstance(value, str):
if value in ('t', 'true', 'y', 'yes', '1'):
return path
if value.isdigit():
try:
value = int(value, 0)
if value != 0:
return path
except:
pass
		path = os.path.expandvars(r'%AppData%\Ghisler\wincmd.ini')
if os.path.exists(path):
return path
		path = os.path.expandvars(r'%WinDir%\wincmd.ini')
if os.path.exists(path):
return path
return None
def _load_section (self, name, section):
config = self.read_config(name, self.encoding)
if config is None:
return None
obj = config.get(section, None)
if obj is None:
return None
if 'redirectsection' in obj:
redirect = obj.get('redirectsection', None)
if redirect:
path = os.path.expandvars(redirect)
if not os.path.exists(path):
return None
x = self._load_section(path, section)
return x
return obj
def load_history (self):
if not self.cmdconf:
return None
config = self.read_config(self.cmdconf, self.encoding)
if not config:
return None
if 'lefthistory' not in config:
if 'righthistory' not in config:
return None
history = [None, None]
fetch = [[], []]
fetch[0] = self._load_section(self.cmdconf, 'lefthistory')
fetch[1] = self._load_section(self.cmdconf, 'righthistory')
for i in range(2):
obj = fetch[i]
items = []
if obj is not None:
for ii in range(len(obj)):
key = str(ii)
if key in obj:
value = obj[key].strip()
path, _, _ = value.partition('#')
path = path.strip()
if path:
items.append(path)
history[i] = items
return history
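	# The loader above expects wincmd.ini sections with numbered keys, e.g.
	# (illustrative values only):
	#   [lefthistory]
	#   0=C:\Projects
	#   1=D:\Downloads
	# Anything after a '#' in an entry is stripped before the path is kept.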
def _setup_dir (self):
path = os.environ.get('USERPROFILE', '')
if 'AppData' in os.environ:
			path = os.path.expandvars(r'%AppData%\Ghisler')
else:
if path:
path = os.path.join(path, 'AppData/Roaming/Ghisler')
else:
return None
if not os.path.exists(path):
os.makedirs(path)
return path
def mru_load (self, dbname):
content = self.load_file_text(dbname)
if content is None:
return []
lines = []
for line in content.split('\n'):
line = line.strip('\r\n\t ')
if line:
lines.append(line)
return lines
def mru_save (self, dbname, lines):
content = '\n'.join(lines)
self.save_atomic(dbname, content)
return 0
# which file
def which (self, name, prefix = None, postfix = None):
if not prefix:
prefix = []
if not postfix:
postfix = []
unix = sys.platform[:3] != 'win' and True or False
PATH = os.environ.get('PATH', '').split(unix and ':' or ';')
search = prefix + PATH + postfix
for path in search:
fullname = os.path.join(path, name)
if os.path.exists(fullname):
return fullname
return None
#----------------------------------------------------------------------
# TotalCommander
#----------------------------------------------------------------------
class TotalCommander (object):
def __init__ (self):
self.config = Configure()
self.win32 = Win32API()
self.hwnd = self.FindTC()
self.source = None
self.MSG_EM = ord('E') + ord('M') * 256
self.MSG_CD = ord('C') + ord('D') * 256
self.mode = None
self.exec = {}
self.ConfigFinder()
def FindTC (self):
return self.win32.FindWindowW('TTOTAL_CMD', None)
def CheckTC (self):
if self.config.cmdhome is None:
			print('cannot locate TC home, please set %COMMANDER_PATH%')
return -1
if self.config.cmdconf is None:
			print('cannot locate TC ini, please set %COMMANDER_INI%')
return -2
if not self.FindTC():
print('TC is not running')
return -3
return 0
def ConfigFinder (self):
mode = self.config.option('default', 'mode', '')
support = ('peco', 'gof', 'fzf')
for name in support:
path = self.config.option('default', name, None)
if path:
test = path.lower()
ends = False
for ext in ('.exe', '.cmd', '.bat', '.ps1'):
if test.endswith(ext):
ends = True
if not ends:
path = path + '.exe'
if not os.path.exists(path):
					print('config error, cannot find executable: %s'%path)
continue
if not path:
test = os.path.join(DIRNAME, name + '.exe')
if os.path.exists(test):
path = test
if not path:
test = self.config.which(name + '.exe', [], [])
if test:
path = test
self.exec[name] = path
if mode:
mode = mode.strip().lower()
if mode:
				if mode not in support:
					print('config error, unsupported mode %s'%mode)
					return False
				if not self.exec[mode]:
					print('config error, cannot find executable for %s'%mode)
return False
self.mode = mode
return True
for mode in support:
test = os.path.join(DIRNAME, mode + '.exe')
if os.path.exists(test):
self.exec[mode] = test
self.mode = mode
return True
for mode in support:
if self.exec[mode]:
self.mode = mode
return True
return False
def SendMessage (self, msg, text):
text = self.win32.ConvertToAnsi(text)
code = 0
if isinstance(msg, str):
for i, ch in enumerate(msg):
				code += ord(ch) << (i * 8)
elif isinstance(msg, int):
code = msg
return self.win32.CopyData(self.hwnd, code, text, self.source)
def SendUserCommand (self, command):
return self.SendMessage(self.MSG_EM, command)
def SendChangeDirectory (self, first, second, flag):
params = []
if (not first) and (not second):
return -1
for param in (first, second, flag):
params.append(self.win32.ConvertToAnsi(param))
output = b''
first, second, flag = params
if first:
output = first
output += b'\r'
if second:
output += second
output += b'\x00'
if flag:
output += flag
return self.SendMessage(self.MSG_CD, output)
def ChangeDirectory (self, path):
return self.SendChangeDirectory(path, None, 'S')
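	# For example, ChangeDirectory('d:/temp') builds the WM_COPYDATA payload
	# b'd:/temp\r\x00S' ("<left>\r<right>\x00<flags>") and sends it with the
	# two-letter message id 'CD' (see SendMessage / CopyData above).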
def Filter (self, text):
for mark in ('.git', '.svn', '.ssh'):
if '/' + mark in text:
return False
if '\\' + mark in text:
return False
return True
	def LoadHistory (self):
		ini = self.config.load_history()
		if ini is None:
			ini = [[], []]
		mru = self.config.mru_load(self.config.database)
history = []
for i in range(max(len(ini[0]), len(ini[1]))):
if i < len(ini[0]):
history.append(ini[0][i])
if i < len(ini[1]):
history.append(ini[1][i])
skips = {}
for n in mru:
history.append(n)
skips[os.path.normcase(n)] = 1
unique = []
exists = {}
for path in history:
path = os.path.abspath(path)
key = os.path.normcase(path)
if key not in exists:
if path.startswith('\\\\'):
continue
if len(path) < 2:
continue
elif not path[0].isalpha():
continue
elif path[1] != ':':
continue
if path[0].islower():
path = path[0].upper() + path[1:]
if len(path) == 3 and path.endswith('\\'):
continue
if not self.Filter(path):
continue
if not os.path.isdir(path):
continue
if os.path.exists(path):
if key not in skips:
path = self.win32.GetRightPathCase(path)
if not path:
continue
unique.append(path)
exists[key] = 1
return unique
def SaveHistory (self, history):
return self.config.mru_save(self.config.database, history)
def StartFuzzy (self, input, args = None, fzf = None):
import tempfile
code = 0
output = None
args = args is not None and args or ''
fzf = fzf is not None and fzf or 'fzf'
with tempfile.TemporaryDirectory(prefix = 'fzf.') as dirname:
outname = os.path.join(dirname, 'output.txt')
if isinstance(input, list):
inname = os.path.join(dirname, 'input.txt')
with open(inname, 'wb') as fp:
content = '\n'.join([ str(n) for n in input ])
fp.write(content.encode('utf-8'))
cmd = '%s %s < "%s" > "%s"'%(fzf, args, inname, outname)
elif isinstance(input, str):
cmd = '%s | %s %s > "%s"'%(input, fzf, args, outname)
code = os.system(cmd)
if os.path.exists(outname):
with open(outname, 'rb') as fp:
output = fp.read()
if output is not None:
output = output.decode('utf-8')
if code != 0:
return None
output = output.strip('\r\n')
return output
def CheckExe (self, exename):
cmd = 'where %s > nul 2> nul'%exename
code = os.system(cmd)
if code == 0:
return True
return False
def _SearchFZF (self, input):
args = '--reverse --height 95% --inline-info --border'
if not self.exec['fzf']:
			print('cannot find fzf executable in %PATH%')
sys.exit(1)
return None
return self.StartFuzzy(input, args, self.exec['fzf'])
def _SearchPeco (self, input):
rc = os.path.join(self.config.ghisler, 'peco.json')
if not self.exec['peco']:
			print('cannot find peco executable in %PATH%')
sys.exit(1)
return None
		if not os.path.exists(rc) or True:   # 'or True': always rewrite the rc file
config = {}
config['Keymap'] = {}
config['Keymap']['C-j'] = 'peco.SelectDown'
config['Keymap']['C-k'] = 'peco.SelectUp'
config['Keymap']['C-['] = 'peco.Cancel'
import json
text = json.dumps(config)
self.config.save_file_text(rc, text)
args = '--rcfile "%s"'%rc
return self.StartFuzzy(input, args, self.exec['peco'])
def _SearchGof (self, input):
args = ''
if not self.exec['gof']:
			print('cannot find gof executable in %PATH%')
sys.exit(1)
return None
return self.StartFuzzy(input, args, self.exec['gof'])
def FuzzySearch (self, input):
if self.mode == 'peco':
return self._SearchPeco(input)
elif self.mode == 'fzf':
return self._SearchFZF(input)
elif self.mode == 'gof':
return self._SearchGof(input)
print('invalid fuzzy searcher:', self.mode)
return None
#----------------------------------------------------------------------
# getopt: returns (options, args)
#----------------------------------------------------------------------
def getopt (argv):
args = []
options = {}
if argv is None:
argv = sys.argv[1:]
index = 0
count = len(argv)
while index < count:
arg = argv[index]
if arg != '':
head = arg[:1]
if head != '-':
break
if arg == '-':
break
name = arg.lstrip('-')
key, _, val = name.partition('=')
options[key.strip()] = val.strip()
index += 1
while index < count:
args.append(argv[index])
index += 1
return options, args
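# A minimal sketch of how getopt() above behaves (hypothetical arguments,
# defined for documentation only and never called automatically):
def _getopt_example ():
	# '-m' becomes an empty-valued option, '-limit=5' a key/value option,
	# and the first non-dash token ends option parsing.
	opts, args = getopt(['-m', '-limit=5', 'some/dir'])
	assert opts == {'m': '', 'limit': '5'}
	assert args == ['some/dir']
	return opts, args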
#----------------------------------------------------------------------
# list dirs
#----------------------------------------------------------------------
def list_directory (root):
import os
exclude = ('.git', '.svn', '.ssh')
win32 = Win32API()
fp = os.fdopen(1, 'wb')
for root, dirs, files in os.walk(root):
newdirs = []
for dir in dirs:
if dir in exclude:
continue
elif dir.startswith('.'):
continue
path = os.path.join(root, dir)
attr = win32.GetFileAttributes(path)
			if attr & 6:   # skip hidden (2) or system (4) directories
continue
newdirs.append(dir)
dirs[:] = newdirs
for dir in dirs:
path = os.path.join(root, dir)
if path.startswith('.\\'):
path = path[2:]
elif path.startswith('./'):
path = path[2:]
text = path.encode('utf-8')
fp.write(text + b'\n')
return 0
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
def main (argv = None):
argv = argv and argv or sys.argv
argv = [n for n in argv]
opts, args = getopt(argv[1:])
mode = ''
if 'm' in opts:
mode = 'history'
elif 'f' in opts:
mode = 'forward'
elif 'b' in opts:
mode = 'backward'
elif 'p' in opts:
mode = 'project'
elif 'r' in opts:
mode = 'root'
elif 'l' in opts:
list_directory('.')
return 0
# print(argv, opts, args)
if not mode:
prog = os.path.split(__file__)[-1]
print('usage: python %s <operation>'%prog)
print('available operations:')
		print('  -m    cd from mru history')
		print('  -f    cd forward')
		print('  -b    cd backward')
		print('  -p    cd in project')
		print('  -r    back to project root')
		print('  -l    list sub-directories (used internally)')
		print()
return 0
tc = TotalCommander()
hr = tc.CheckTC()
if hr != 0:
return 1
os.environ['PATH'] = os.environ.get('PATH', '') + ';' + tc.config.dirname
if mode == 'history':
print('Searching in history ...')
tc.SendUserCommand('cm_ConfigSaveDirHistory')
time.sleep(0.1)
tc.config.reset()
mru = tc.LoadHistory()
tc.SaveHistory(mru)
path = tc.FuzzySearch(mru)
if path:
tc.ChangeDirectory(path)
return 0
elif mode == 'forward':
cmd = sys.executable + ' "%s" -l '%SRCNAME
print('Searching in subdirectories ...')
path = tc.FuzzySearch(cmd)
if path:
path = os.path.join(os.getcwd(), path)
tc.ChangeDirectory(path)
return 0
elif mode == 'backward':
parents = []
pwd = os.getcwd()
while 1:
parents.append(pwd)
next = os.path.abspath(os.path.join(pwd, '..'))
if os.path.normcase(next) == os.path.normcase(pwd):
break
pwd = next
# print(parents)
size = len(str(len(parents))) + 1
inputs = []
for i, path in enumerate(parents):
name = os.path.split(path)[-1]
if name == '':
name = path
text = str(i + 1).rjust(size) + ': ' + name
inputs.append(text)
path = tc.FuzzySearch(inputs)
if path:
p1 = path.find(':')
if p1 > 0:
index = path[:p1].strip('\r\n\t ')
if index.isdigit():
target = parents[int(index) - 1]
tc.ChangeDirectory(target)
return 0
elif mode == 'root':
root = tc.config.find_root('.', None, True)
if os.path.exists(root):
tc.ChangeDirectory(root)
return 0
elif mode == 'project':
root = tc.config.find_root('.', None, True)
os.chdir(root)
cmd = sys.executable + ' "%s" -l '%SRCNAME
print('Searching in project ...')
path = tc.FuzzySearch(cmd)
if path:
path = os.path.join(os.getcwd(), path)
tc.ChangeDirectory(path)
return 0
return 0
#----------------------------------------------------------------------
# testing suite
#----------------------------------------------------------------------
if __name__ == '__main__':
def test1():
print(win32.GetActiveWindow())
print(win32.FindWindowW('TTOTAL_CMD', None))
return 0
def test2():
tc = TotalCommander()
hr = tc.SendUserCommand('em_calc')
hr = tc.SendUserCommand('cm_ConfigSaveDirHistory')
print(hr)
def test3():
tc = TotalCommander()
# hr = tc.SendChangeDirectory('d:/temp', None, None)
hr = tc.SendChangeDirectory('d:/acm', 'e:/Lab', 'S')
print(hr)
print(tc.config.cmdhome)
print(tc.config.cmdconf)
# print(tc.config.load_history())
# import pprint
# pprint.pprint(tc.LoadHistory())
# print(tc.win32.GetRightPathCase('d:\\program files'))
return 0
def test4():
os.chdir(os.path.expandvars('%USERPROFILE%'))
tc = TotalCommander()
tc.mode = 'gof'
hr = tc.FuzzySearch(['1234', '5678', ''])
# hr = tc.FuzzySearch('dir /A:d /b /s')
print(hr)
return 0
def test5():
os.chdir(os.path.expandvars('%USERPROFILE%'))
os.chdir('d:/acm/github/collection/vintage/apihook')
# args = ['', '-l']
# args = ['', '-f']
args = ['', '-b']
args = ['', '-p']
main(args)
return 0
def test6():
args = ['', '']
args = ['', '-m']
# args = ['', '-l']
# args = ['', '-r']
main(args)
return 0
def test7():
tc = TotalCommander()
return 0
# test6()
main()
|
|
# Copyright 2017 Norman Heckscher. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train MNIST using multiple GPU's with synchronous updates.
Accuracy:
Done training with 2 GPUs, for 20 epochs, 11000 steps.
Total Duration (327.396 sec)
2017-04-21 20:46:18.466392: precision = 9848.000
Done training with 1 GPUs, for 20 epochs, 22000 steps.
Total Duration (500.122 sec)
2017-04-21 20:56:40.639580: precision = 9884.000
Speed: With batch_size 50.
System | Step Time (sec/batch) | Accuracy
-------------------------------------------------------------------------
1 GTX 1080 | 258.136 sec | ~94.58% at 11K steps
2 GTX 1080 | 189.572 sec | ~94.59% at 11K steps
Usage:
Please see the TensorFlow website for how to download the MNIST
data set, compile and train models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import re
import time
import numpy as np
from datetime import datetime
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import sonnet as snt
# Constants used for dealing with the files, matches convert_to_records.
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
IMAGE_PIXELS = 784
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 20.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# Global constants describing the MNIST data set.
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('batch_size', 50,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/home/norman/MNIST_data',
"""Path to the MNIST data directory.""")
tf.app.flags.DEFINE_string('train_dir', '/home/norman/MNIST_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('num_gpus', 2,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_boolean('tb_logging', False,
"""Whether to log to Tensorboard.""")
tf.app.flags.DEFINE_integer('num_epochs', 20,
"""Number of epochs to run trainer.""")
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
})
# Convert from a scalar string tensor (whose single string has
# length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
# [mnist.IMAGE_PIXELS].
image = tf.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([IMAGE_PIXELS])
# OPTIONAL: Could reshape into a 28x28 image and apply distortions
# here. Since we are not applying any distortions in this
# example, and the next step expects the image to be flattened
# into a vector, we don't bother.
# Convert from [0, 255] -> [-0.5, 0.5] floats.
image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
return image, label
def inputs(train, batch_size, num_epochs):
"""Reads input data num_epochs times.
Args:
train: Selects between the training (True) and validation (False) data.
batch_size: Number of examples per returned batch.
num_epochs: Number of times to read the input data, or 0/None to
train forever.
Returns:
A tuple (images, labels), where:
* images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
in the range [-0.5, 0.5].
* labels is an int32 tensor with shape [batch_size] with the true label,
a number in the range [0, mnist.NUM_CLASSES).
    Note that a tf.train.QueueRunner is added to the graph, which
must be run using e.g. tf.train.start_queue_runners().
"""
if not num_epochs: num_epochs = None
filename = os.path.join(FLAGS.data_dir,
TRAIN_FILE if train else VALIDATION_FILE)
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(
[filename], num_epochs=num_epochs)
# Even when reading in multiple threads, share the filename
# queue.
image, label = read_and_decode(filename_queue)
# Shuffle the examples and collect them into batch_size batches.
# (Internally uses a RandomShuffleQueue.)
# We run this in two threads to avoid being a bottleneck.
images, sparse_labels = tf.train.shuffle_batch(
[image, label], batch_size=batch_size, num_threads=2,
capacity=1000 + 3 * batch_size,
# Ensures a minimum amount of shuffling of examples.
min_after_dequeue=1000)
return images, sparse_labels
def custom_build(inputs, is_training, keep_prob):
    """A custom build method to wrap into a sonnet Module."""
    x_inputs = tf.reshape(inputs, [-1, 28, 28, 1])
outputs = snt.Conv2D(output_channels=32, kernel_shape=4, stride=2)(x_inputs)
outputs = snt.BatchNorm()(outputs, is_training=is_training)
outputs = tf.nn.relu(outputs)
outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
outputs = snt.Conv2D(output_channels=64, kernel_shape=4, stride=2)(outputs)
outputs = snt.BatchNorm()(outputs, is_training=is_training)
outputs = tf.nn.relu(outputs)
outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
outputs = snt.Conv2D(output_channels=1024, kernel_shape=1, stride=1)(outputs)
outputs = snt.BatchNorm()(outputs, is_training=is_training)
outputs = tf.nn.relu(outputs)
outputs = snt.BatchFlatten()(outputs)
outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
outputs = snt.Linear(output_size=10)(outputs)
# _activation_summary(outputs)
return outputs
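# Shape trace for custom_build() above with 28x28 MNIST inputs (assuming the
# snt.Conv2D default of SAME padding): 28x28x1 -> conv s2 -> 14x14x32 ->
# pool s2 -> 7x7x32 -> conv s2 -> 4x4x64 -> pool s2 -> 2x2x64 ->
# 1x1 conv -> 2x2x1024 -> flatten -> 4096 -> linear -> 10 logits.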
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
if FLAGS.tb_logging:
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity',
tf.nn.zero_fraction(x))
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
# labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def average_gradients(tower_grads):
"""Calculate average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
"""
# for m in xrange(len(tower_grads)):
# for n in xrange(len(tower_grads[m])):
# print(type(tower_grads[0][n][0]))
# for gg in tower_grads:
# for x in gg:
# print(type(x[0]))
# print(tower_grads)
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
            if g is not None:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
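# Worked example of the averaging above (illustrative only): with two towers
# and a single shared variable v, tower_grads is [[(g0, v)], [(g1, v)]];
# zip(*tower_grads) yields ((g0, v), (g1, v)), the gradients are stacked on a
# new leading 'tower' axis and reduced with reduce_mean, so the result is
# [((g0 + g1) / 2, v)].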
def tower_loss(scope):
"""Calculate the total loss on a single tower running the MNIST model.
Args:
scope: unique prefix string identifying the MNIST tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Input images and labels.
images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                            num_epochs=(FLAGS.num_epochs // FLAGS.num_gpus))
# Build inference Graph.
# The line below takes custom_build and
# wraps it to construct a sonnet Module.
module_with_build_args = snt.Module(custom_build, name='simple_net')
train_model_outputs = module_with_build_args(images, is_training=True,
keep_prob=tf.constant(0.5))
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = loss(train_model_outputs, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Attach a scalar summary to all individual losses and the total loss; do
# the same for the averaged version of the losses.
if FLAGS.tb_logging:
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
# training session. This helps the clarity of presentation on
# tensorboard.
loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)
tf.summary.scalar(loss_name, l)
return total_loss
def train():
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals
# the number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
(FLAGS.batch_size * FLAGS.num_gpus))
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(learning_rate=INITIAL_LEARNING_RATE,
global_step=global_step,
decay_steps=decay_steps,
decay_rate=LEARNING_RATE_DECAY_FACTOR,
staircase=True)
opt = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True,
use_locking=True)
#opt = tf.train.AdamOptimizer(1e-4)
# opt = tf.train.ProximalAdagradOptimizer(1e-2)
# Calculate the gradients for each model tower.
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
            for i in range(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope(
'%s_%d' % (TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model.
# This function constructs the entire CIFAR model but
# shares the variables across all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
# No need for this with Sonnet?
#tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
scope)
# Calculate the gradients for the batch of data on this
# MNIST tower.
grads = opt.compute_gradients(loss, gate_gradients=2)
# for x in grads:
# print(type(x[0]))
# print (grads)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add histograms for gradients.
if FLAGS.tb_logging:
for grad, var in grads:
if grad is not None:
summaries.append(
tf.summary.histogram(var.op.name + '/gradients', grad))
# Add a summary to track the learning rate.
summaries.append(tf.summary.scalar('learning_rate', lr))
train_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
if FLAGS.tb_logging:
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# Create a saver.
saver = tf.train.Saver(tf.global_variables(), sharded=True)
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
# Build an initialization operation to run below.
# init = tf.global_variables_initializer()
# The op for initializing the variables.
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
# Start running operations on the Graph. allow_soft_placement must be
# set to True to build towers on GPU, as some of the ops do not have GPU
# implementations.
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement,
gpu_options=gpu_options))
sess.run(init_op)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model. The return values are
# the activations from the `train_op` (which is
# discarded) and the `loss` op. To inspect the values
# of your ops or variables, you may include them in
# the list passed to sess.run() and the value tensors
# will be returned in the tuple from the call.
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(
loss_value), 'Model diverged with loss = NaN'
# Print an overview fairly often.
if step % 100 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = (
'%s: step %d, epochs %d, loss = %.3f '
'(%.1f examples/sec; %.3f sec/batch)')
print(format_str % (datetime.now(), step,
# step * gpu / batchsize * gpu = 100
FLAGS.num_epochs,
loss_value,
examples_per_sec, sec_per_batch))
if FLAGS.tb_logging:
if step % 10 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (
step + 1) == FLAGS.num_epochs * FLAGS.batch_size:
checkpoint_path = os.path.join(FLAGS.train_dir,
'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
step += 1
except tf.errors.OutOfRangeError:
print('Done training with %d GPUs, for %d epochs, %d steps.' % (
FLAGS.num_gpus, FLAGS.num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def evaluate():
"""Eval MNIST for a number of steps."""
with tf.Graph().as_default():
# Get images and labels for MNIST.
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
images = mnist.test.images
labels = mnist.test.labels
# Build a Graph that computes the logits predictions from the
# inference model.
# The line below takes custom_build and wraps it to construct a sonnet Module.
module_with_build_args = snt.Module(custom_build, name='simple_net')
test_model_outputs = module_with_build_args(images, is_training=False,
keep_prob=tf.constant(1.0))
# Calculate predictions.
top_k_op = tf.nn.in_top_k(predictions=test_model_outputs, targets=labels, k=1)
# Create saver to restore the learned variables for eval.
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
return
predictions = np.sum(sess.run([top_k_op]))
# Compute precision.
print('%s: precision = %.3f' % (datetime.now(), predictions))
def main(argv=None): # pylint: disable=unused-argument
start_time = time.time()
train()
duration = time.time() - start_time
print('Total Duration (%.3f sec)' % duration)
evaluate()
if __name__ == '__main__':
tf.app.run()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpretty
from keystoneclient import exceptions
from keystoneclient.tests.v2_0 import utils
from keystoneclient.v2_0 import tenants
class TenantTests(utils.TestCase):
def setUp(self):
super(TenantTests, self).setUp()
self.TEST_TENANTS = {
"tenants": {
"values": [
{
"enabled": True,
"description": "A description change!",
"name": "invisible_to_admin",
"id": 3,
},
{
"enabled": True,
"description": "None",
"name": "demo",
"id": 2,
},
{
"enabled": True,
"description": "None",
"name": "admin",
"id": 1,
},
{
"extravalue01": "metadata01",
"enabled": True,
"description": "For testing extras",
"name": "test_extras",
"id": 4,
}
],
"links": [],
},
}
@httpretty.activate
def test_create(self):
req_body = {
"tenant": {
"name": "tenantX",
"description": "Like tenant 9, but better.",
"enabled": True,
"extravalue01": "metadata01",
},
}
resp_body = {
"tenant": {
"name": "tenantX",
"enabled": True,
"id": 4,
"description": "Like tenant 9, but better.",
"extravalue01": "metadata01",
}
}
self.stub_url(httpretty.POST, ['tenants'], json=resp_body)
tenant = self.client.tenants.create(
req_body['tenant']['name'],
req_body['tenant']['description'],
req_body['tenant']['enabled'],
extravalue01=req_body['tenant']['extravalue01'],
name="dont overwrite priors")
self.assertIsInstance(tenant, tenants.Tenant)
self.assertEqual(tenant.id, 4)
self.assertEqual(tenant.name, "tenantX")
self.assertEqual(tenant.description, "Like tenant 9, but better.")
self.assertEqual(tenant.extravalue01, "metadata01")
self.assertRequestBodyIs(json=req_body)
@httpretty.activate
def test_duplicate_create(self):
req_body = {
"tenant": {
"name": "tenantX",
"description": "The duplicate tenant.",
"enabled": True
},
}
resp_body = {
"error": {
"message": "Conflict occurred attempting to store project.",
"code": 409,
"title": "Conflict",
}
}
self.stub_url(httpretty.POST, ['tenants'], status=409, json=resp_body)
def create_duplicate_tenant():
self.client.tenants.create(req_body['tenant']['name'],
req_body['tenant']['description'],
req_body['tenant']['enabled'])
self.assertRaises(exceptions.Conflict, create_duplicate_tenant)
@httpretty.activate
def test_delete(self):
self.stub_url(httpretty.DELETE, ['tenants', '1'], status=204)
self.client.tenants.delete(1)
@httpretty.activate
def test_get(self):
resp = {'tenant': self.TEST_TENANTS['tenants']['values'][2]}
self.stub_url(httpretty.GET, ['tenants', '1'], json=resp)
t = self.client.tenants.get(1)
self.assertIsInstance(t, tenants.Tenant)
self.assertEqual(t.id, 1)
self.assertEqual(t.name, 'admin')
@httpretty.activate
def test_list(self):
self.stub_url(httpretty.GET, ['tenants'], json=self.TEST_TENANTS)
tenant_list = self.client.tenants.list()
[self.assertIsInstance(t, tenants.Tenant) for t in tenant_list]
@httpretty.activate
def test_list_limit(self):
self.stub_url(httpretty.GET, ['tenants'], json=self.TEST_TENANTS)
tenant_list = self.client.tenants.list(limit=1)
self.assertQueryStringIs('limit=1')
[self.assertIsInstance(t, tenants.Tenant) for t in tenant_list]
@httpretty.activate
def test_list_marker(self):
self.stub_url(httpretty.GET, ['tenants'], json=self.TEST_TENANTS)
tenant_list = self.client.tenants.list(marker=1)
self.assertQueryStringIs('marker=1')
[self.assertIsInstance(t, tenants.Tenant) for t in tenant_list]
@httpretty.activate
def test_list_limit_marker(self):
self.stub_url(httpretty.GET, ['tenants'], json=self.TEST_TENANTS)
tenant_list = self.client.tenants.list(limit=1, marker=1)
self.assertQueryStringIs('marker=1&limit=1')
[self.assertIsInstance(t, tenants.Tenant) for t in tenant_list]
@httpretty.activate
def test_update(self):
req_body = {
"tenant": {
"id": 4,
"name": "tenantX",
"description": "I changed you!",
"enabled": False,
"extravalue01": "metadataChanged",
#"extraname": "dontoverwrite!",
},
}
resp_body = {
"tenant": {
"name": "tenantX",
"enabled": False,
"id": 4,
"description": "I changed you!",
"extravalue01": "metadataChanged",
},
}
self.stub_url(httpretty.POST, ['tenants', '4'], json=resp_body)
tenant = self.client.tenants.update(
req_body['tenant']['id'],
req_body['tenant']['name'],
req_body['tenant']['description'],
req_body['tenant']['enabled'],
extravalue01=req_body['tenant']['extravalue01'],
name="dont overwrite priors")
self.assertIsInstance(tenant, tenants.Tenant)
self.assertRequestBodyIs(json=req_body)
self.assertEqual(tenant.id, 4)
self.assertEqual(tenant.name, "tenantX")
self.assertEqual(tenant.description, "I changed you!")
self.assertFalse(tenant.enabled)
self.assertEqual(tenant.extravalue01, "metadataChanged")
@httpretty.activate
def test_update_empty_description(self):
req_body = {
"tenant": {
"id": 4,
"name": "tenantX",
"description": "",
"enabled": False,
},
}
resp_body = {
"tenant": {
"name": "tenantX",
"enabled": False,
"id": 4,
"description": "",
},
}
self.stub_url(httpretty.POST, ['tenants', '4'], json=resp_body)
tenant = self.client.tenants.update(req_body['tenant']['id'],
req_body['tenant']['name'],
req_body['tenant']['description'],
req_body['tenant']['enabled'])
self.assertIsInstance(tenant, tenants.Tenant)
self.assertRequestBodyIs(json=req_body)
self.assertEqual(tenant.id, 4)
self.assertEqual(tenant.name, "tenantX")
self.assertEqual(tenant.description, "")
self.assertFalse(tenant.enabled)
@httpretty.activate
def test_add_user(self):
self.stub_url(httpretty.PUT, ['tenants', '4', 'users', 'foo', 'roles',
'OS-KSADM', 'barrr'], status=204)
self.client.tenants.add_user('4', 'foo', 'barrr')
@httpretty.activate
def test_remove_user(self):
self.stub_url(httpretty.DELETE, ['tenants', '4', 'users', 'foo',
'roles', 'OS-KSADM', 'barrr'],
status=204)
self.client.tenants.remove_user('4', 'foo', 'barrr')
@httpretty.activate
def test_tenant_add_user(self):
self.stub_url(httpretty.PUT, ['tenants', '4', 'users', 'foo', 'roles',
'OS-KSADM', 'barrr'],
status=204)
req_body = {
"tenant": {
"id": 4,
"name": "tenantX",
"description": "I changed you!",
"enabled": False,
},
}
# make tenant object with manager
tenant = self.client.tenants.resource_class(self.client.tenants,
req_body['tenant'])
tenant.add_user('foo', 'barrr')
self.assertIsInstance(tenant, tenants.Tenant)
@httpretty.activate
def test_tenant_remove_user(self):
self.stub_url(httpretty.DELETE, ['tenants', '4', 'users', 'foo',
'roles', 'OS-KSADM', 'barrr'],
status=204)
req_body = {
"tenant": {
"id": 4,
"name": "tenantX",
"description": "I changed you!",
"enabled": False,
},
}
# make tenant object with manager
tenant = self.client.tenants.resource_class(self.client.tenants,
req_body['tenant'])
tenant.remove_user('foo', 'barrr')
self.assertIsInstance(tenant, tenants.Tenant)
|