input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
(self.oldest is None) or (mjd1 < self._entries[self.oldest]['mjd1'])
entry = (mjd1, mjd2, value, error) + aux_data
if self.full:
assert self.oldest is not None
if is_oldest:
# Ignore this since it is older than all existing entries.
return
self.last = self.oldest
self._entries[self.last] = entry
# Update the index of the oldest entry, which might be us.
self.oldest = np.argmin(self.entries['mjd1'])
else:
self.last = self.len
if is_oldest:
# This is now the oldest entry.
self.oldest = self.last
self.len += 1
self.full = (self.len == self._entries.size)
self._entries[self.last] = entry
def set_last(self, **kwargs):
    """Update fields of the most recently added measurement in place.

    Unknown field names are silently ignored, and nothing happens when
    no measurement has been added yet (``self.last is None``).
    """
    if self.last is None:
        return
    last_entry = self._entries[self.last]
    for name in kwargs:
        # Only touch fields this buffer actually stores.
        if name in self.names:
            last_entry[name] = kwargs[name]
def inside(self, mjd1, mjd2):
    """Return a boolean mask selecting entries that overlap (mjd1, mjd2).

    Pass ``mjd2=None`` to select every entry that ends after ``mjd1``.
    """
    ends_after_start = self.entries['mjd2'] > mjd1
    if mjd2 is None:
        return ends_after_start
    return ends_after_start & (self.entries['mjd1'] < mjd2)
def average(self, mjd, interval_secs, min_values, field='value'):
    """Average `field` over measurements within interval_secs before mjd.

    Returns None unless at least min_values measurements fall inside
    the interval.
    """
    window_start = mjd - interval_secs / self.SECS_PER_DAY
    selected = self.inside(window_start, mjd)
    if np.count_nonzero(selected) < min_values:
        return None
    return np.mean(self.entries[selected][field])
def sample_grid(self, mjd_grid, field='value'):
    """Sample measurements of `field` on the specified MJD grid.

    Uses measurements that lie outside the grid by up to self.padding
    (NOTE(review): self.padding is applied directly in MJD units here,
    i.e. days, although it is described elsewhere as seconds — confirm
    the stored units).
    Returns an array of self.default_value when no measurements overlap
    the padded grid.  Uses constant extrapolation of the first/last
    measurement beyond the endpoints.
    """
    mjd1, mjd2 = mjd_grid[0], mjd_grid[-1]
    # Select measurements that span the padded input grid.
    sel = self.inside(mjd1 - self.padding, mjd2 + self.padding)
    if not np.any(sel):
        return np.full_like(mjd_grid, self.default_value)
    # Represent each measurement by its interval midpoint and duration.
    mjd_sel = 0.5 * (self.entries[sel]['mjd1'] + self.entries[sel]['mjd2'])
    dmjd_sel = self.entries[sel]['mjd2'] - self.entries[sel]['mjd1']
    value_sel = self.entries[sel][field]
    # Values might not be recorded in time order so fix that now.
    iorder = np.argsort(mjd_sel)
    mjd_sel = mjd_sel[iorder]
    dmjd_sel = dmjd_sel[iorder]
    value_sel = value_sel[iorder]
    # The measurements are integrals over each exposure with some deadtime between them.
    # Correct for this deadtime by calculating a piece-wise linear approximation to
    # the instantaneous value that matches the measured integrals.
    try:
        value_sel_corrected = pwlinear_solve(mjd_sel, dmjd_sel, value_sel * dmjd_sel)
    except Exception as e:
        # Fall back to the raw (uncorrected) values if the solve fails.
        print(f'pwlinear_solve failed: {e}')
        value_sel_corrected = value_sel
    # Use linear interpolation with constant extrapolation beyond the endpoints.
    return np.interp(mjd_grid, mjd_sel, value_sel_corrected)
def trend(self, mjd):
    """Return (offset, slope) describing the trend in values at mjd.

    Currently the slope is always zero: the offset is a weighted average
    of the values recorded during (mjd - self.recent, mjd), and
    self.default_value is returned when that window is empty.
    """
    recent = self.inside(mjd - self.recent, mjd)
    if not np.any(recent):
        return self.default_value, 0
    selected = self.entries[recent]
    weights = selected['error'] ** -0.5
    offset = np.sum(weights * selected['value']) / np.sum(weights)
    return offset, 0
def forecast_grid(self, mjd_grid):
    """Forecast our trend on the specified MJD grid.

    Fits the trend at the start of the grid, then evaluates the
    resulting linear model across the full grid.
    """
    # Only the grid start is needed to anchor the trend (the previous
    # version also computed the unused grid end).
    mjd1 = mjd_grid[0]
    offset, slope = self.trend(mjd1)
    # Evaluate the linear trend on our grid.
    return offset + slope * (mjd_grid - mjd1)
def save(self, mjd1, mjd2=None):
    """Return a JSON-friendly dict of our entries spanning (mjd1, mjd2).

    Use mjd2=None to include all entries after mjd1.  Auxiliary fields,
    when present, are saved too.  The mjd1,2 fields are replaced by
    float32 offsets dt1,2 in seconds from the earliest mjd1 (stored as
    mjd0).  Note that the values are numpy types, which are not JSON
    serializable by default, so the caller should use
    :class:`NumpyEncoder` or equivalent.
    """
    selected = self.entries[self.inside(mjd1, mjd2)]
    if len(selected) == 0:
        return {}
    # Sort the selected entries by their start time.
    selected = selected[np.argsort(selected['mjd1'])]
    # Copy every field except the absolute times.
    output = {}
    for name in selected.dtype.fields:
        if name not in ('mjd1', 'mjd2'):
            output[name] = selected[name]
    # Record the earliest start time as the time origin.
    mjd0 = selected['mjd1'][0]
    output['mjd0'] = mjd0
    # Store start/stop offsets in seconds; float32 so JSON output is rounded.
    output['dt1'] = np.float32((selected['mjd1'] - mjd0) * self.SECS_PER_DAY)
    output['dt2'] = np.float32((selected['mjd2'] - mjd0) * self.SECS_PER_DAY)
    return output
def mjd_to_date(mjd, utc_offset):
    """Convert an MJD value to a datetime using the specified UTC offset in hours.

    Use utc_offset of -7 for local time at Kitt Peak.
    Use :func:`date_to_mjd` to invert this calculation.
    """
    # MJD 58484.0 corresponds to 2019-01-01 00:00 UTC.
    REF_MJD = 58484.0
    REF_DATE = datetime.datetime(2019, 1, 1)
    return REF_DATE + datetime.timedelta(days=mjd - REF_MJD, hours=utc_offset)
def date_to_mjd(date, utc_offset):
    """Convert a datetime using the specified UTC offset in hours to an MJD value.

    Use utc_offset of -7 for local time at Kitt Peak.
    Use :func:`mjd_to_date` to invert this calculation.
    """
    # Shift to UTC first, then measure from 2019-01-01 (MJD 58484).
    elapsed = (date - datetime.timedelta(hours=utc_offset)) - datetime.datetime(2019, 1, 1)
    frac_day = (elapsed.seconds + 1e-6 * elapsed.microseconds) / 86400
    return 58484 + elapsed.days + frac_day
def mjd_to_night(mjd):
    """Convert MJD to NIGHT for KPNO in the format YYYYMMDD.

    Uses the convention that the night rollover occurs at local (UTC-7) noon.
    """
    local = mjd_to_date(mjd, utc_offset=-7)
    # Before local noon, the observation belongs to the previous night.
    if local.hour < 12:
        local -= datetime.timedelta(days=1)
    return int(local.strftime('%Y%m%d'))
def night_to_midnight(night, utc_offset):
    """Convert YYYYMMDD into a datetime representing midnight of that night.

    Use utc_offset=0 for a result with midnight.hour==0 or utc_offset=-7
    for the KPNO local time.  Raises ValueError unless `night` has
    exactly eight digits.
    """
    digits = str(night)
    if len(digits) != 8:
        raise ValueError('night_to_midnight: expected an integer of the form YYYYMMDD.')
    # Midnight of the night YYYYMMDD falls on the *following* calendar
    # date, so start from noon and add 12 (+offset) hours.
    noon = datetime.datetime(int(digits[0:4]), int(digits[4:6]), int(digits[6:8]), 12)
    return noon + datetime.timedelta(hours=12 + utc_offset)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder to use with numpy data with rounding of float32 values.

    float32 scalars and arrays are rounded to FLOAT32_DECIMALS decimal
    places before serialization; other numpy scalar types are converted
    to their native Python equivalents, and structured arrays become
    dictionaries of per-field arrays.
    """
    # Number of decimal places kept when serializing float32 data.
    FLOAT32_DECIMALS = 6

    def default(self, obj):
        """Convert numpy objects to JSON-serializable values.

        Falls back to the base-class default (which raises TypeError)
        for anything that is not a recognized numpy type.
        """
        if isinstance(obj, np.float32):
            # Convert to 64-bit float before rounding.
            return float(np.round(np.float64(obj), self.FLOAT32_DECIMALS))
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.ndarray):
            if obj.dtype.fields is not None:
                # convert a recarray to a dictionary.
                new_obj = {}
                # NOTE(review): dtype.fields values are (dtype, offset[, title])
                # tuples, so `size` here is actually the field's byte offset;
                # it is unpacked but never used.
                for (name, (dtype, size)) in obj.dtype.fields.items():
                    if dtype.base == np.float32:
                        new_obj[name] = np.round(obj[name], self.FLOAT32_DECIMALS)
                    else:
                        new_obj[name] = obj[name]
                # The returned dict's array values are encoded recursively,
                # re-entering this method via the ndarray branch below.
                return new_obj
            else:
                if obj.dtype == np.float32:
                    # tolist converts to 64-bit native float so apply rounding first.
                    obj = np.round(obj.astype(np.float64), self.FLOAT32_DECIMALS)
                return obj.tolist()
        else:
            return super().default(obj)
def is_datetime(time, oldest=datetime.datetime(2019, 1, 1)):
    """Test for a valid datetime more than a day after `oldest`.

    Returns True when `time` supports datetime subtraction and lies at
    least one full day after `oldest`; returns False for any value that
    cannot be subtracted from a datetime (None, strings, numbers, ...).
    """
    try:
        return (time - oldest).days > 0
    except Exception:
        # Not a datetime, or not comparable with one.
        return False
def load_guider_centroids(path, expid):
    """Attempt to read the centroids json file produced by the guider.

    Extracts numbers from the json file into numpy arrays. Note that
    the json file uses "x" for rows and "y" for columns, which we map
    to indices 0 and 1, respectively.

    Parameters
    ----------
    path
        Directory (pathlib.Path) containing the guider output file.
    expid
        Exposure id used to build the file name centroids-{expid}.json.

    Returns
    -------
    tuple
        Tuple (expected, combined, centroid) where expected gives the
        expected position of each star with shape (nstars, 2), combined
        gives the combined guider move after each frame with shape (2, nframes),
        and centroid gives the centroid of each star for each frame with
        shape (nstars, 2, nframes). If a star is not measured in a frame,
        the centroid values are np.nan.

    Raises
    ------
    ValueError
        If the json file does not exist.
    """
    cameras = ('GUIDE0', 'GUIDE2', 'GUIDE3', 'GUIDE5', 'GUIDE7', 'GUIDE8')
    # Read the json file of guider outputs.
    jsonpath = path / 'centroids-{0}.json'.format(expid)
    if not jsonpath.exists():
        raise ValueError('Non-existent path: {0}.'.format(jsonpath))
    with open(jsonpath) as f:
        D = json.load(f)
    assert D['expid'] == int(expid)
    nframes = D['summary']['frames']
    # Use the first frame to lookup the guide stars for each camera.
    frame0 = D['frames']['1']
    stars = {G: len([K for K in frame0.keys() if K.startswith(G)]) for G in cameras}
    expected = {G: np.zeros((stars[G], 2)) for G in cameras}
    combined = {G: np.zeros((2, nframes)) for G in cameras}
    centroid = {G: np.zeros((stars[G], 2, nframes)) for G in cameras}
    for camera in cameras:
        # Get the expected position for each guide star.
        for istar in range(stars[camera]):
            S = frame0.get(camera + f'_{istar}')
            expected[camera][istar, 0] = S['y_expected']
            expected[camera][istar, 1] = S['x_expected']
        # Get the combined centroid sent to the telescope for each frame.
        for iframe in range(nframes):
            F = D['frames'].get(str(iframe + 1))
            if F is None:
                logging.warning('Missing frame {0}/{1} in {2}'.format(iframe + 1, nframes, jsonpath))
                continue
            combined[camera][0, iframe] = F['combined_y']
            combined[camera][1, iframe] = F['combined_x']
            # Get the measured centroids for each guide star in this frame.
            for istar in range(stars[camera]):
                S = F.get(camera + '_{0}'.format(istar))
                if S is None:
                    # Star not measured in this frame: record NaN as
                    # documented (previously this crashed with an
                    # AttributeError on S.get).
                    centroid[camera][istar, 0, iframe] = np.nan
                    centroid[camera][istar, 1, iframe] = np.nan
                else:
                    centroid[camera][istar, 0, iframe] = S.get('y_centroid', np.nan)
                    centroid[camera][istar, 1, iframe] = S.get('x_centroid', np.nan)
    return expected, combined, centroid
def git_describe():
"""Return a string describing the git origin of the package where this function is defined.
The result is | |
libX11.XBlackPixelOfScreen
XBlackPixelOfScreen.restype = c_ulong
XBlackPixelOfScreen.argtypes = [POINTER(Screen)]
#~ extern unsigned long XWhitePixelOfScreen(
#~ Screen* /* screen */
#~ );
XWhitePixelOfScreen = libX11.XWhitePixelOfScreen
XWhitePixelOfScreen.restype = c_ulong
XWhitePixelOfScreen.argtypes = [POINTER(Screen)]
#~ extern unsigned long XNextRequest(
#~ Display* /* display */
#~ );
XNextRequest = libX11.XNextRequest
XNextRequest.restype = c_ulong
XNextRequest.argtypes = [POINTER(Display)]
#~ extern unsigned long XLastKnownRequestProcessed(
#~ Display* /* display */
#~ );
XLastKnownRequestProcessed = libX11.XLastKnownRequestProcessed
XLastKnownRequestProcessed.restype = c_ulong
XLastKnownRequestProcessed.argtypes = [POINTER(Display)]
#~ extern char *XServerVendor(
#~ Display* /* display */
#~ );
XServerVendor = libX11.XServerVendor
XServerVendor.restype = c_char_p
XServerVendor.argtypes = [POINTER(Display)]
#~ extern char *XDisplayString(
#~ Display* /* display */
#~ );
XDisplayString = libX11.XDisplayString
XDisplayString.restype = c_char_p
XDisplayString.argtypes = [POINTER(Display)]
#~ extern Colormap XDefaultColormap(
#~ Display* /* display */,
#~ int /* screen_number */
#~ );
XDefaultColormap = libX11.XDefaultColormap
XDefaultColormap.restype = Colormap
XDefaultColormap.argtypes = [POINTER(Display), c_int]
#~ extern Colormap XDefaultColormapOfScreen(
#~ Screen* /* screen */
#~ );
XDefaultColormapOfScreen = libX11.XDefaultColormapOfScreen
XDefaultColormapOfScreen.restype = Colormap
XDefaultColormapOfScreen.argtypes = [POINTER(Screen)]
#~ extern Display *XDisplayOfScreen(
#~ Screen* /* screen */
#~ );
XDisplayOfScreen = libX11.XDisplayOfScreen
XDisplayOfScreen.restype = POINTER(Display)
XDisplayOfScreen.argtypes = [POINTER(Screen)]
#~ extern Screen *XScreenOfDisplay(
#~ Display* /* display */,
#~ int /* screen_number */
#~ );
XScreenOfDisplay = libX11.XScreenOfDisplay
XScreenOfDisplay.restype = POINTER(Screen)
XScreenOfDisplay.argtypes = [POINTER(Display), c_int]
#~ extern Screen *XDefaultScreenOfDisplay(
#~ Display* /* display */
#~ );
XDefaultScreenOfDisplay = libX11.XDefaultScreenOfDisplay
XDefaultScreenOfDisplay.restype = POINTER(Screen)
XDefaultScreenOfDisplay.argtypes = [POINTER(Display)]
#~ extern long XEventMaskOfScreen(
#~ Screen* /* screen */
#~ );
XEventMaskOfScreen = libX11.XEventMaskOfScreen
XEventMaskOfScreen.restype = c_long
XEventMaskOfScreen.argtypes = [POINTER(Screen)]
#~ extern int XScreenNumberOfScreen(
#~ Screen* /* screen */
#~ );
XScreenNumberOfScreen = libX11.XScreenNumberOfScreen
XScreenNumberOfScreen.restype = c_int
XScreenNumberOfScreen.argtypes = [POINTER(Screen)]
#~ typedef int (*XErrorHandler) ( /* WARNING, this type not in Xlib spec */
#~ Display* /* display */,
#~ XErrorEvent* /* error_event */
#~ );
XErrorHandler = c_void_p
#~ extern XErrorHandler XSetErrorHandler (
#~ XErrorHandler /* handler */
#~ );
XSetErrorHandler = libX11.XSetErrorHandler
XSetErrorHandler.restype = XErrorHandler
XSetErrorHandler.argtypes = [XErrorHandler]
#~ typedef int (*XIOErrorHandler) ( /* WARNING, this type not in Xlib spec */
#~ Display* /* display */
#~ );
XIOErrorHandler = c_void_p
#~ extern XIOErrorHandler XSetIOErrorHandler (
#~ XIOErrorHandler /* handler */
#~ );
XSetIOErrorHandler = libX11.XSetIOErrorHandler
XSetIOErrorHandler.restype = XIOErrorHandler
XSetIOErrorHandler.argtypes = [XIOErrorHandler]
#~ extern XPixmapFormatValues *XListPixmapFormats(
#~ Display* /* display */,
#~ int* /* count_return */
#~ );
XListPixmapFormats = libX11.XListPixmapFormats
XListPixmapFormats.restype = POINTER(XPixmapFormatValues)
XListPixmapFormats.argtypes = [POINTER(Display), POINTER(c_int)]
#~ extern int *XListDepths(
#~ Display* /* display */,
#~ int /* screen_number */,
#~ int* /* count_return */
#~ );
XListDepths = libX11.XListDepths
XListDepths.restype = POINTER(c_int)
XListDepths.argtypes = [POINTER(Display), c_int, POINTER(c_int)]
#~ #/* ICCCM routines for things that don't require special include files; */
#~ #/* other declarations are given in Xutil.h */
#~ extern Status XReconfigureWMWindow(
#~ Display* /* display */,
#~ Window /* w */,
#~ int /* screen_number */,
#~ unsigned int /* mask */,
#~ XWindowChanges* /* changes */
#~ );
XReconfigureWMWindow = libX11.XReconfigureWMWindow
XReconfigureWMWindow.restype = Status
XReconfigureWMWindow.argtypes = [POINTER(Display), Window, c_int, c_uint, POINTER(XWindowChanges)]
#~ extern Status XGetWMProtocols(
#~ Display* /* display */,
#~ Window /* w */,
#~ Atom** /* protocols_return */,
#~ int* /* count_return */
#~ );
XGetWMProtocols = libX11.XGetWMProtocols
XGetWMProtocols.restype = Status
XGetWMProtocols.argtypes = [POINTER(Display), Window, POINTER(POINTER(Atom)), POINTER(c_int)]
#~ extern Status XSetWMProtocols(
#~ Display* /* display */,
#~ Window /* w */,
#~ Atom* /* protocols */,
#~ int /* count */
#~ );
XSetWMProtocols = libX11.XSetWMProtocols
XSetWMProtocols.restype = Status
XSetWMProtocols.argtypes = [POINTER(Display), Window, POINTER(Atom), c_int]
#~ extern Status XIconifyWindow(
#~ Display* /* display */,
#~ Window /* w */,
#~ int /* screen_number */
#~ );
XIconifyWindow = libX11.XIconifyWindow
XIconifyWindow.restype = Status
XIconifyWindow.argtypes = [POINTER(Display), Window, c_int]
#~ extern Status XWithdrawWindow(
#~ Display* /* display */,
#~ Window /* w */,
#~ int /* screen_number */
#~ );
XWithdrawWindow = libX11.XWithdrawWindow
XWithdrawWindow.restype = Status
XWithdrawWindow.argtypes = [POINTER(Display), Window, c_int]
#~ extern Status XGetCommand(
#~ Display* /* display */,
#~ Window /* w */,
#~ char*** /* argv_return */,
#~ int* /* argc_return */
#~ );
XGetCommand = libX11.XGetCommand
XGetCommand.restype = Status
XGetCommand.argtypes = [POINTER(Display), Window, POINTER(POINTER(c_char_p)), POINTER(c_int)]
#~ extern Status XGetWMColormapWindows(
#~ Display* /* display */,
#~ Window /* w */,
#~ Window** /* windows_return */,
#~ int* /* count_return */
#~ );
XGetWMColormapWindows = libX11.XGetWMColormapWindows
XGetWMColormapWindows.restype = Status
XGetWMColormapWindows.argtypes = [POINTER(Display), Window, POINTER(POINTER(Window)), POINTER(c_int)]
#~ extern Status XSetWMColormapWindows(
#~ Display* /* display */,
#~ Window /* w */,
#~ Window* /* colormap_windows */,
#~ int /* count */
#~ );
XSetWMColormapWindows = libX11.XSetWMColormapWindows
XSetWMColormapWindows.restype = Status
XSetWMColormapWindows.argtypes = [POINTER(Display), Window, POINTER(Window), c_int]
#~ extern void XFreeStringList(
#~ char** /* list */
#~ );
XFreeStringList = libX11.XFreeStringList
XFreeStringList.argtypes = [POINTER(c_char_p)]
#~ extern int XSetTransientForHint(
#~ Display* /* display */,
#~ Window /* w */,
#~ Window /* prop_window */
#~ );
XSetTransientForHint = libX11.XSetTransientForHint
XSetTransientForHint.restype = c_int
XSetTransientForHint.argtypes = [POINTER(Display), Window, Window]
#~ /* The following are given in alphabetical order */
#~ extern int XActivateScreenSaver(
#~ Display* /* display */
#~ );
XActivateScreenSaver = libX11.XActivateScreenSaver
XActivateScreenSaver.restype = c_int
XActivateScreenSaver.argtypes = [POINTER(Display)]
#~ extern int XAddHost(
#~ Display* /* display */,
#~ XHostAddress* /* host */
#~ );
XAddHost = libX11.XAddHost
XAddHost.restype = c_int
XAddHost.argtypes = [POINTER(Display), POINTER(XHostAddress)]
#~ extern int XAddHosts(
#~ Display* /* display */,
#~ XHostAddress* /* hosts */,
#~ int /* num_hosts */
#~ );
XAddHosts = libX11.XAddHosts
XAddHosts.restype = c_int
XAddHosts.argtypes = [POINTER(Display), POINTER(XHostAddress), c_int]
#~ extern int XAddToExtensionList(
#~ struct _XExtData** /* structure */,
#~ XExtData* /* ext_data */
#~ );
XAddToExtensionList = libX11.XAddToExtensionList
XAddToExtensionList.restype = c_int
XAddToExtensionList.argtypes = [POINTER(POINTER(_XExtData)), POINTER(XExtData)]
#~ extern int XAddToSaveSet(
#~ Display* /* display */,
#~ Window /* w */
#~ );
XAddToSaveSet = libX11.XAddToSaveSet
XAddToSaveSet.restype = c_int
XAddToSaveSet.argtypes = [POINTER(Display), Window]
#~ extern Status XAllocColor(
#~ Display* /* display */,
#~ Colormap /* colormap */,
#~ XColor* /* screen_in_out */
#~ );
XAllocColor = libX11.XAllocColor
XAllocColor.restype = Status
XAllocColor.argtypes = [POINTER(Display), Colormap,POINTER(XColor)]
#~ extern Status XAllocColorCells(
#~ Display* /* display */,
#~ Colormap /* colormap */,
#~ Bool /* contig */,
#~ unsigned long* /* plane_masks_return */,
#~ unsigned int /* nplanes */,
#~ unsigned long* /* pixels_return */,
#~ unsigned int /* npixels */
#~ );
XAllocColorCells = libX11.XAllocColorCells
XAllocColorCells.restype = Status
# nplanes and npixels are declared `unsigned int` in the prototype above,
# so bind them as c_uint (matching the c_uint convention used for other
# unsigned int parameters in this file) rather than c_int.
XAllocColorCells.argtypes = [POINTER(Display), Colormap, Bool, POINTER(c_ulong), c_uint, POINTER(c_ulong), c_uint]
#~ extern Status XAllocColorPlanes(
#~ Display* /* display */,
#~ Colormap /* colormap */,
#~ Bool /* contig */,
#~ unsigned long* /* pixels_return */,
#~ int /* ncolors */,
#~ int /* nreds */,
#~ int /* ngreens */,
#~ int /* nblues */,
#~ unsigned long* /* rmask_return */,
#~ unsigned long* /* gmask_return */,
#~ unsigned long* /* bmask_return */
#~ );
XAllocColorPlanes = libX11.XAllocColorPlanes
XAllocColorPlanes.restype = Status
XAllocColorPlanes.argtypes = [POINTER(Display), Colormap, Bool, POINTER(c_ulong), c_int, c_int, c_int, c_int, POINTER(c_ulong), POINTER(c_ulong), POINTER(c_ulong)]
#~ extern Status XAllocNamedColor(
#~ Display* /* display */,
#~ Colormap /* colormap */,
#~ _Xconst char* /* color_name */,
#~ XColor* /* screen_def_return */,
#~ XColor* /* exact_def_return */
#~ );
XAllocNamedColor = libX11.XAllocNamedColor
XAllocNamedColor.restype = Status
XAllocNamedColor.argtypes = [POINTER(Display), Colormap, c_char_p, POINTER(XColor), POINTER(XColor)]
#~ extern int XAllowEvents(
#~ Display* /* display */,
#~ int /* event_mode */,
#~ Time /* time */
#~ );
XAllowEvents = libX11.XAllowEvents
XAllowEvents.restype = c_int
XAllowEvents.argtypes = [POINTER(Display), c_int, Time]
#~ extern int XAutoRepeatOff(
#~ Display* /* display */
#~ );
XAutoRepeatOff = libX11.XAutoRepeatOff
XAutoRepeatOff.restype = c_int
XAutoRepeatOff.argtypes = [POINTER(Display)]
#~ extern int XAutoRepeatOn(
#~ Display* /* display */
#~ );
XAutoRepeatOn = libX11.XAutoRepeatOn
XAutoRepeatOn.restype = c_int
XAutoRepeatOn.argtypes = [POINTER(Display)]
#~ extern int XBell(
#~ Display* /* display */,
#~ int /* percent */
#~ );
XBell = libX11.XBell
XBell.restype = c_int
XBell.argtypes = [POINTER(Display), c_int]
#~ extern int XBitmapBitOrder(
#~ Display* /* display */
#~ );
XBitmapBitOrder = libX11.XBitmapBitOrder
XBitmapBitOrder.restype = c_int
XBitmapBitOrder.argtypes = [POINTER(Display)]
#~ extern int XBitmapPad(
#~ Display* /* display */
#~ );
XBitmapPad = libX11.XBitmapPad
XBitmapPad.restype = c_int
XBitmapPad.argtypes = [POINTER(Display)]
#~ extern int XBitmapUnit(
#~ Display* /* display */
#~ );
XBitmapUnit = libX11.XBitmapUnit
XBitmapUnit.restype = c_int
XBitmapUnit.argtypes = [POINTER(Display)]
#~ extern int XCellsOfScreen(
#~ Screen* /* screen */
#~ );
XCellsOfScreen = libX11.XCellsOfScreen
XCellsOfScreen.restype = c_int
XCellsOfScreen.argtypes = [POINTER(Screen)]
#~ extern int XChangeActivePointerGrab(
#~ Display* /* display */,
#~ unsigned int /* event_mask */,
#~ Cursor /* cursor */,
#~ Time /* time */
#~ );
XChangeActivePointerGrab = libX11.XChangeActivePointerGrab
XChangeActivePointerGrab.restype = c_int
XChangeActivePointerGrab.argtypes = [POINTER(Display), c_uint, Cursor, Time]
#~ extern int XChangeGC(
#~ Display* /* display */,
#~ GC /* gc */,
#~ unsigned long /* valuemask */,
#~ XGCValues* /* values */
#~ );
XChangeGC = libX11.XChangeGC
XChangeGC.restype = c_int
XChangeGC.argtypes = [POINTER(Display), GC, c_ulong, POINTER(XGCValues)]
#~ extern int XChangeKeyboardControl(
#~ Display* /* display */,
#~ unsigned long /* value_mask */,
#~ XKeyboardControl* /* values */
#~ );
XChangeKeyboardControl = libX11.XChangeKeyboardControl
XChangeKeyboardControl.restype = c_int
XChangeKeyboardControl.argtypes = [POINTER(Display), c_ulong, POINTER(XKeyboardControl)]
#~ extern int XChangeKeyboardMapping(
#~ Display* /* display */,
#~ int /* first_keycode */,
#~ int /* keysyms_per_keycode */,
#~ KeySym* /* keysyms */,
#~ int /* num_codes */
#~ );
XChangeKeyboardMapping = libX11.XChangeKeyboardMapping
XChangeKeyboardMapping.restype = c_int
XChangeKeyboardMapping.argtypes = [POINTER(Display), c_int, c_int, POINTER(KeySym), c_int]
#~ extern int XChangePointerControl(
#~ Display* /* display */,
#~ Bool /* do_accel */,
#~ Bool /* do_threshold */,
#~ int /* accel_numerator */,
#~ int /* accel_denominator */,
#~ int /* threshold */
#~ );
XChangePointerControl = libX11.XChangePointerControl
XChangePointerControl.restype = c_int
XChangePointerControl.argtypes = [POINTER(Display), Bool, Bool, c_int, c_int, c_int]
#~ extern | |
<reponame>mb0rt/OpenCue<gh_stars>0
# Copyright Contributors to the OpenCue Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tree widget to display a list of monitored jobs."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import map
import time
from PySide2 import QtCore
from PySide2 import QtGui
from PySide2 import QtWidgets
import opencue
import cuegui.AbstractTreeWidget
import cuegui.AbstractWidgetItem
import cuegui.Constants
import cuegui.ItemDelegate
import cuegui.Logger
import cuegui.MenuActions
import cuegui.Style
import cuegui.Utils
# Module-level logger for this widget.
logger = cuegui.Logger.getLogger(__file__)

# Indices of the tree columns addressed directly elsewhere in this module
# (must match the order the columns are declared in JobMonitorTree.__init__).
COLUMN_NAME = 0
COLUMN_COMMENT = 1
COLUMN_AUTOEAT = 2
COLUMN_STATE = 3
def displayState(job):
    """Returns the string to display in the status for the given job
    @type job: job
    @param job: The job to check the status of
    @rtype: string
    @return: The status of the job for display"""
    data = job.data
    stats = data.job_stats
    # Check the terminal state first, then the more specific live states.
    if data.state == opencue.api.job_pb2.FINISHED:
        return "Finished"
    if data.is_paused:
        return "Paused"
    if stats.dead_frames > 0:
        return "Failing"
    waiting_on_depends = (
        stats.depend_frames and
        stats.depend_frames == stats.pending_frames and
        stats.running_frames == 0)
    if waiting_on_depends:
        return "Dependency"
    return "In Progress"
class JobMonitorTree(cuegui.AbstractTreeWidget.AbstractTreeWidget):
    """Tree widget to display a list of monitored jobs."""

    # Whether the user's own jobs should be loaded automatically
    # (toggled via setLoadMine).
    __loadMine = True
    # Emitted to request that a job be opened in a viewer.
    view_object = QtCore.Signal(object)
def __init__(self, parent):
    """Declare every tree column, then initialize the base tree widget,
    drag/drop support, click handlers, and the periodic update timer.

    @type  parent: QWidget
    @param parent: The parent widget."""
    # Counts update ticks since the last real refresh (see tick()).
    self.ticksWithoutUpdate = 0
    # Column declarations must happen before the base-class __init__.
    self.startColumnsForType(cuegui.Constants.TYPE_JOB)
    self.addColumn("Job", 470, id=1,
                   data=lambda job: job.data.name,
                   tip="The name of the job: show-shot-user_uniqueName")
    self.addColumn("_Comment", 20, id=2,
                   sort=lambda job: job.data.has_comment,
                   tip="A comment icon will appear if a job has a comment. You\n"
                       "may click on it to view the comments.")
    self.addColumn("_Autoeat", 20, id=3,
                   sort=lambda job: job.data.auto_eat,
                   tip="If the job has auto eating enabled, a pac-man icon\n"
                       "will appear here and all frames that become dead will\n"
                       "automatically be eaten.")
    # pylint: disable=unnecessary-lambda
    self.addColumn("State", 80, id=4,
                   data=lambda job: displayState(job),
                   tip="The state of each job.\n"
                       "In Progress \t The job is on the queue\n"
                       "Failing \t The job has dead frames\n"
                       "Paused \t The job has been paused\n"
                       "Finished \t The job has finished and is no longer in the queue")
    self.addColumn("Done/Total", 90, id=5,
                   data=lambda job: "%d of %d" % (job.data.job_stats.succeeded_frames,
                                                  job.data.job_stats.total_frames),
                   sort=lambda job: job.data.job_stats.succeeded_frames,
                   tip="The number of succeeded frames vs the total number\n"
                       "of frames in each job.")
    self.addColumn("Running", 60, id=6,
                   data=lambda job: job.data.job_stats.running_frames,
                   sort=lambda job: job.data.job_stats.running_frames,
                   tip="The number of running frames in each job,")
    self.addColumn("Dead", 50, id=7,
                   data=lambda job: job.data.job_stats.dead_frames,
                   sort=lambda job: job.data.job_stats.dead_frames,
                   tip="Total number of dead frames in each job.")
    self.addColumn("Eaten", 50, id=8,
                   data=lambda job: job.data.job_stats.eaten_frames,
                   sort=lambda job: job.data.job_stats.eaten_frames,
                   tip="Total number of eaten frames in each job.")
    self.addColumn("Wait", 60, id=9,
                   data=lambda job: job.data.job_stats.waiting_frames,
                   sort=lambda job: job.data.job_stats.waiting_frames,
                   tip="The number of waiting frames in each job,")
    self.addColumn("MaxRss", 55, id=10,
                   data=lambda job: cuegui.Utils.memoryToString(job.data.job_stats.max_rss),
                   sort=lambda job: job.data.job_stats.max_rss,
                   tip="The maximum memory used any single frame in each job.")
    # Age falls back to the current time while the job is still running
    # (stop_time is falsy until the job ends).
    self.addColumn("Age", 50, id=11,
                   data=lambda job: (cuegui.Utils.secondsToHHHMM((job.data.stop_time or
                                                                  time.time()) - job.data.start_time)),
                   sort=lambda job: ((job.data.stop_time or time.time()) - job.data.start_time),
                   tip="The HOURS:MINUTES that the job has spent in the queue.")
    self.addColumn("Launched", 100, id=12,
                   data=lambda job: cuegui.Utils.dateToMMDDHHMM(job.data.start_time),
                   sort=lambda job: job.data.start_time,
                   tip="The time when the job was launched.")
    self.addColumn("Finished", 100, id=13,
                   data=lambda job: (job.data.stop_time > 0
                                     and cuegui.Utils.dateToMMDDHHMM(job.data.stop_time)
                                     or ""),
                   sort=lambda job: job.data.stop_time,
                   tip="The time when the job ended.")
    self.addColumn("Progress", 0, id=14,
                   delegate=cuegui.ItemDelegate.JobProgressBarDelegate,
                   tip="A visual overview of the progress of each job.\n"
                       "Green \t is succeeded\n"
                       "Yellow \t is running\n"
                       "Red \t is dead\n"
                       "Purple \t is waiting on a dependency\n"
                       "Light Blue \t is waiting to be booked")
    cuegui.AbstractTreeWidget.AbstractTreeWidget.__init__(self, parent)
    # Maps object key -> time the job was added to the monitor.
    self.__jobTimeLoaded = {}
    self.__userColors = {}
    # Used to build right click context menus
    self.__menuActions = cuegui.MenuActions.MenuActions(
        self, self.updateSoon, self.selectedObjects)
    self.setAcceptDrops(True)
    self.setDropIndicatorShown(True)
    self.setDragEnabled(True)
    self.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop)
    self.itemClicked.connect(self.__itemSingleClickedCopy)
    self.itemClicked.connect(self.__itemSingleClickedComment)
    # Jobs queued by addJob(), flushed by tick() on the next update.
    self.__load = {}
    # NOTE(review): arguments presumably (interval, updateNow, maxTicks)
    # — confirm against AbstractTreeWidget.startTicksUpdate.
    self.startTicksUpdate(20, False, 60)
def tick(self):
    """Periodic update hook: flush any jobs queued by addJob(), then
    refresh the tree if an update is due."""
    if self.__load:
        pending = self.__load.copy()
        self.__load.clear()
        self._processUpdate(None, pending)
    if not self.tickNeedsUpdate():
        self.ticksWithoutUpdate += 1
        return
    self.ticksWithoutUpdate = 0
    self._update()
def __itemSingleClickedCopy(self, item, col):
    """Called when an item is clicked on. Copies selected object names to
    the middle click selection clip board.
    @type  item: QTreeWidgetItem
    @param item: The item clicked on (unused)
    @type  col: int
    @param col: The column clicked on (unused)"""
    del item
    del col
    names = [job.data.name
             for job in self.selectedObjects()
             if cuegui.Utils.isJob(job)]
    if names:
        QtWidgets.QApplication.clipboard().setText(
            " ".join(names), QtGui.QClipboard.Selection)
def __itemSingleClickedComment(self, item, col):
    """If the comment column is clicked on, and there is a comment on the
    job, this pops up the comments dialog
    @type  item: QTreeWidgetItem
    @param item: The item clicked on
    @type  col: int
    @param col: The column clicked on"""
    if col != COLUMN_COMMENT:
        return
    job = item.rpcObject
    if job.isCommented():
        self.__menuActions.jobs().viewComments([job])
def startDrag(self, dropActions):
    """Delegate drag initiation to cuegui.Utils with the selected jobs."""
    cuegui.Utils.startDrag(self, dropActions, self.selectedObjects())

def dragEnterEvent(self, event):
    """Delegate drag-enter handling to cuegui.Utils."""
    cuegui.Utils.dragEnterEvent(event)

def dragMoveEvent(self, event):
    """Delegate drag-move handling to cuegui.Utils."""
    cuegui.Utils.dragMoveEvent(event)

def dropEvent(self, event):
    """Monitor each job name cuegui.Utils extracts from the drop event."""
    for job_name in cuegui.Utils.dropEvent(event):
        self.addJob(job_name)
def setLoadMine(self, value):
    """Enables or disables the autoloading of the user's jobs
    @param value: New loadMine state
    @type value: boolean or QtCore.Qt.Checked or QtCore.Qt.Unchecked"""
    # Accept either a plain boolean or a Qt check-state constant.
    self.__loadMine = (value is True or value == QtCore.Qt.Checked)
def addJob(self, job):
    """Adds a job to the list. With locking.
    @param job: Job can be None, a job object, or a job name.
    @type job: job, string, None"""
    newJobObj = cuegui.Utils.findJob(job)
    # Guard __load/__jobTimeLoaded against the periodic tick() running
    # concurrently.
    self.ticksLock.lock()
    try:
        if newJobObj:
            objectKey = cuegui.Utils.getObjectKey(newJobObj)
            # Queue the job for tick() to process on the next update.
            self.__load[objectKey] = newJobObj
            self.__jobTimeLoaded[objectKey] = time.time()
    finally:
        self.ticksLock.unlock()
def getJobProxies(self):
    """Return the object keys of all currently monitored jobs."""
    return [proxy for proxy in self._items]
def _removeItem(self, item):
    """Removes an item from the TreeWidget without locking.

    Also notifies other widgets that the job is no longer monitored and
    drops its load-time bookkeeping entry.

    @param item: A tree widget item
    @type item: AbstractTreeWidgetItem"""
    # pylint: disable=no-member
    QtGui.qApp.unmonitor.emit(item.rpcObject)
    # pylint: enable=no-member
    cuegui.AbstractTreeWidget.AbstractTreeWidget._removeItem(self, item)
    # __jobTimeLoaded is keyed by object key (see addJob), not by the
    # rpcObject itself, so translate before popping — the previous
    # pop(item.rpcObject, ...) never matched and left stale entries.
    self.__jobTimeLoaded.pop(cuegui.Utils.getObjectKey(item.rpcObject), None)
def removeAllItems(self):
    """Notifies the other widgets of each item being unmonitored, then calls
    the the AbstractTreeWidget.removeAllItems like normal"""
    for proxy in list(self._items.keys()):
        # pylint: disable=no-member
        QtGui.qApp.unmonitor.emit(proxy)
        # pylint: enable=no-member
        self.__jobTimeLoaded.pop(proxy, None)
    cuegui.AbstractTreeWidget.AbstractTreeWidget.removeAllItems(self)
def removeFinishedItems(self):
    """Removes finished jobs"""
    # Match on the displayed state text in the state column.
    for item in self.findItems("Finished", QtCore.Qt.MatchFixedString, COLUMN_STATE):
        self.removeItem(item)
def contextMenuEvent(self, e):
"""Creates a context menu when an item is right clicked.
@param e: Right click QEvent
@type e: QEvent"""
menu = QtWidgets.QMenu()
__selectedObjects = self.selectedObjects()
__count = len(__selectedObjects)
jobType = cuegui.Utils.countJobTypes(__selectedObjects)
self.__menuActions.jobs().addAction(menu, "unmonitor")
self.__menuActions.jobs().addAction(menu, "view")
self.__menuActions.jobs().addAction(menu, "emailArtist")
self.__menuActions.jobs().addAction(menu, "viewComments")
self.__menuActions.jobs().addAction(menu, "useLocalCores")
depend_menu = QtWidgets.QMenu("&Dependencies",self)
self.__menuActions.jobs().addAction(depend_menu, "viewDepends")
self.__menuActions.jobs().addAction(depend_menu, "dependWizard")
depend_menu.addSeparator()
self.__menuActions.jobs().addAction(depend_menu, "dropExternalDependencies")
self.__menuActions.jobs().addAction(depend_menu, "dropInternalDependencies")
menu.addMenu(depend_menu)
color_menu = QtWidgets.QMenu("&Set user color",self)
self.__menuActions.jobs().addAction(color_menu, "setUserColor1")
self.__menuActions.jobs().addAction(color_menu, "setUserColor2")
self.__menuActions.jobs().addAction(color_menu, "setUserColor3")
self.__menuActions.jobs().addAction(color_menu, "setUserColor4")
self.__menuActions.jobs().addAction(color_menu, "clearUserColor")
menu.addMenu(color_menu)
menu.addSeparator()
self.__menuActions.jobs().addAction(menu, "setMaxRetries")
if __count == 1:
self.__menuActions.jobs().addAction(menu, "reorder")
self.__menuActions.jobs().addAction(menu, "stagger")
menu.addSeparator()
if jobType["unpaused"]:
self.__menuActions.jobs().addAction(menu, "pause")
if jobType["paused"]:
self.__menuActions.jobs().addAction(menu, "resume")
menu.addSeparator()
if jobType["hasDead"]:
self.__menuActions.jobs().addAction(menu, "retryDead")
self.__menuActions.jobs().addAction(menu, "eatDead")
if jobType["notEating"]:
self.__menuActions.jobs().addAction(menu, "autoEatOn")
if jobType["autoEating"]:
self.__menuActions.jobs().addAction(menu, "autoEatOff")
menu.addSeparator()
self.__menuActions.jobs().addAction(menu, "kill")
menu.exec_(e.globalPos())
def actionRemoveSelectedItems(self):
"""Unmonitors selected items"""
for item in self.selectedItems():
self.removeItem(item)
def actionSetUserColor(self, color):
"""Set selected items to have provided background color"""
for item in self.selectedItems():
objectKey = cuegui.Utils.getObjectKey(item.rpcObject)
if color is None and objectKey in self.__userColors:
self.__userColors.pop(objectKey)
elif color is not None:
self.__userColors[objectKey] = color
item.setUserColor(color)
def actionEatSelectedItems(self):
"""Eats all dead frames for selected jobs"""
self.__menuActions.jobs().eatDead()
def actionRetrySelectedItems(self):
"""Retries all dead frames for selected jobs"""
self.__menuActions.jobs().retryDead()
def actionKillSelectedItems(self):
"""Removes selected jobs from cue"""
self.__menuActions.jobs().kill()
def actionPauseSelectedItems(self):
"""Pause selected jobs"""
self.__menuActions.jobs().pause()
def actionResumeSelectedItems(self):
"""Resume selected jobs"""
self.__menuActions.jobs().resume()
    def updateRequest(self):
        """If sufficient time has passed since last update, call _update"""
        # Push the idle-tick counter far past any update interval so the next
        # tick triggers a refresh immediately.
        self.ticksWithoutUpdate = 999
def _getUpdate(self):
"""Gets the currently monitored jobs from the cuebot. Will also load
any of the users jobs if self.__loadMine is True
@return: dict of updated jobs
@rtype: dict<class.id: job>"""
try:
jobs = {}
# TODO: When getJobs is fixed to allow MatchAny, this can be updated to use one call
monitored_proxies = []
for item in list(self._items.values()):
objectKey = cuegui.Utils.getObjectKey(item.rpcObject)
if item.rpcObject.data.state == opencue.api.job_pb2.FINISHED:
# Reuse the old object if job is finished
jobs[objectKey] = item.rpcObject
else:
# Gather list of all other jobs to update
monitored_proxies.append(objectKey)
if self.__loadMine:
# This auto-loads all the users jobs
for job in opencue.api.getJobs(user=[cuegui.Utils.getUsername()]):
objectKey = cuegui.Utils.getObjectKey(job)
jobs[objectKey] = job
# Prune the users jobs from the remaining proxies to update
for proxy, job in list(jobs.items()):
if proxy in monitored_proxies:
monitored_proxies.remove(proxy)
if monitored_proxies:
for job in opencue.api.getJobs(
id=[proxyId.split('.')[-1] for proxyId in monitored_proxies],
include_finished=True):
objectKey = cuegui.Utils.getObjectKey(job)
jobs[objectKey] = job
except opencue.exception.CueException as e:
| |
# will trigger even if single echo data is in medata folder. Should still
# be okay
for subdir in subdirs[1:]: # not including parent folder or /medata, run dcm2niix on non me data
try:
fobj = dicom.read_file(os.path.join(subdir, list(os.walk(subdir))[0][2][0]),
force=True) # first dicom file of the scan
scan_num = str(int(os.path.basename(subdir))).zfill(2)
except ValueError:
continue
firstfile = [x[2] for x in os.walk(subdir)][0][0]
# print(str(fobj[0x20, 0x11].value), runlist)
# running dcm2niix,
if str(fobj[0x20, 0x11].value) in runlist:
proc = subprocess.Popen(
"dcm2niix -z y -f run{SCAN_NUM}_%p_%t_sub{SUB_NUM} -o {OUTPUT_DIR} -s y -b y {DATA_DIR}".format(
OUTPUT_DIR=sub_dir, SUB_NUM=sub_num, DATA_DIR=os.path.join(subdir, firstfile),
SCAN_NUM=scan_num), shell=True, stdout=subprocess.PIPE)
# output = proc.stdout.read()
outs, errs = proc.communicate()
prefix = re.match(".*/sub-{SUB_NUM}/(run{SCAN_NUM}".format(SUB_NUM=sub_num,
SCAN_NUM=scan_num) + r"[^ \(\"\\n\.]*).*",
str(outs)).group(1)
for file in os.listdir(sub_dir):
mefile = re.match(r"run{SCAN_NUM}(\.e\d\d)\.nii".format(SCAN_NUM=scan_num), file)
if re.match(r"run{SCAN_NUM}\.e\d\d.nii".format(SCAN_NUM=scan_num), file):
shutil.move(os.path.join(sub_dir, file),
os.path.join(sub_dir, prefix + mefile.group(1) + ".nii"))
shutil.copy(os.path.join(sub_dir, prefix + ".json"),
os.path.join(sub_dir, prefix + mefile.group(1) + ".json"))
os.remove(os.path.join(sub_dir, prefix + ".nii.gz"))
os.remove(os.path.join(sub_dir, prefix + ".json"))
else:
proc = subprocess.Popen(
"dcm2niix -z y -f run{SCAN_NUM}_%p_%t_sub{SUB_NUM} -o {OUTPUT_DIR} -b y {DATA_DIR}".format(
OUTPUT_DIR=sub_dir, SUB_NUM=sub_num, DATA_DIR=subdir, SCAN_NUM=scan_num), shell=True,
stdout=subprocess.PIPE)
outs, errs = proc.communicate()
sys.stdout.write(outs.decode("utf-8"))
self._multi_echo = runlist
self._data_dir = os.path.join(os.path.dirname(self._bids_dir), "sub-{SUB_NUM}".format(SUB_NUM=sub_num))
self._DICOM_path = ddir
    def get_data_dir(self):
        """Return the input data directory (may be None in DICOM mode — see set_data_dir)."""
        return self._data_dir
def set_data_dir(self, data_dir, DICOM): # check if input dir is listed
if DICOM is None:
if data_dir is None:
self._data_dir = os.getcwd()
else:
self._data_dir = data_dir
self._dataset_name = os.path.basename(self._data_dir)
else:
self._data_dir = None
    def get_config(self):
        """Return the loaded configuration dictionary."""
        return self._config
    def get_config_path(self):
        """Return the path of the JSON configuration file in use."""
        return self._config_path
def _set_config(self):
with open(self._config_path, 'r') as fst:
self._config = json.load(fst)
    def set_config(self, config):
        """Replace the configuration dictionary wholesale."""
        self._config = config
def set_config_path(self, config_path):
if config_path is None:
# Checking if a config.json is present
if os.path.isfile(os.path.join(os.getcwd(), "config.json")):
self._config_path = os.path.join(os.getcwd(), "config.json")
# Otherwise taking the default config
else:
self._config_path = os.path.join(os.path.dirname(__file__), "config.json")
else:
self._config_path = config_path
self._set_config()
    def get_bids_dir(self):
        """Return the BIDS output directory chosen by set_bids_dir."""
        return self._bids_dir
def set_bids_dir(self, bids_dir):
if bids_dir is None:
# Creating a new directory for BIDS
try:
newdir = self._data_dir + "/BIDS"
except TypeError:
print("Error: Please provide input data directory if no BIDS directory...")
# deleting old BIDS to make room for new
elif not os.path.basename(bids_dir) == "BIDS":
newdir = os.path.join(bids_dir, "BIDS")
else:
newdir = bids_dir
if not os.path.isdir(newdir):
os.mkdir(newdir)
elif self._is_overwrite:
force_remove(newdir)
os.mkdir(newdir)
self._bids_dir = newdir
self._ignore.append(newdir)
# as of BIDS ver 1.6.0, CT is not a part of BIDS, so check for CT files and add to .bidsignore
self.bidsignore("*_CT.*")
    def get_bids_version(self):
        """Return the stored BIDS specification version string."""
        return self._bids_version
    def bids_validator(self):
        """Run the external ``bids-validator`` CLI on the BIDS directory.

        Requires bids-validator to be installed and on the PATH; raises
        AssertionError when no BIDS directory has been set, and
        CalledProcessError when validation fails.
        """
        assert self._bids_dir is not None, "Cannot launch bids-validator without specifying bids directory !"
        # try:
        subprocess.check_call(['bids-validator', self._bids_dir])
        # except FileNotFoundError:
        #     print("bids-validator does not appear to be installed")
def generate_names(self, src_file_path, filename=None,
# function to run through name text and generate metadata
part_match=None,
sess_match=None,
ce_match=None,
acq_match=None,
echo_match=None,
data_type_match=None,
task_label_match=None,
run_match=None,
verbose=None,
debug=False):
if filename is None:
filename = os.path.basename(src_file_path)
if part_match is None:
part_match = match_regexp(self._config["partLabel"], filename)
if verbose is None:
verbose = self._is_verbose
try:
if re.match(r"^[^\d]{1,3}", part_match):
part_matches = re.split(r"([^\d]{1,3})", part_match, 1)
part_match_z = part_matches[1] + str(int(part_matches[2])).zfill(self._config["partLabel"]["fill"])
else:
part_match_z = str(int(part_match)).zfill(self._config["partLabel"]["fill"])
except KeyError:
pass
dst_file_path = self._bids_dir + "/sub-" + part_match_z
new_name = "/sub-" + part_match_z
SeqType = None
# Matching the session
try:
if sess_match is None:
sess_match = match_regexp(self._config["sessLabel"], filename)
dst_file_path = dst_file_path + "/ses-" + sess_match
new_name = new_name + "_ses-" + sess_match
except AssertionError:
if verbose:
print("No session found for %s" % src_file_path)
# Matching the run number
try:
if run_match is None:
run_match = match_regexp(self._config["runIndex"], filename)
try:
if re.match(r"^[^\d]{1,3}", run_match):
run_matches = re.split(r"([^\d]{1,3})", run_match, 1)
run_match = run_matches[1] + str(int(run_matches[2])).zfill(self._config["runIndex"]["fill"])
else:
run_match = str(int(run_match)).zfill(self._config["runIndex"]["fill"])
except KeyError:
pass
except AssertionError:
pass
# Matching the anat/fmri data type and task
try:
if data_type_match is None:
data_type_match = match_regexp(self._config["anat"]
, filename
, subtype=True)
dst_file_path = dst_file_path + "/anat"
self._data_types["anat"] = True
except (AssertionError, KeyError) as e:
# If no anatomical, trying functionnal
try:
if data_type_match is None:
data_type_match = match_regexp(self._config["func"]
, filename
, subtype=True)
dst_file_path = dst_file_path + "/func"
self._data_types["func"] = True
# Now trying to match the task
try:
if task_label_match is None:
task_label_match = match_regexp(self._config["func.task"]
, filename
, subtype=True)
new_name = new_name + "_task-" + task_label_match
except AssertionError as e:
print("No task found for %s" % src_file_path)
if debug:
raise e
return
except (AssertionError, KeyError) as e:
# no functional or anatomical, try ieeg
try:
if data_type_match is None:
data_type_match = match_regexp(self._config["ieeg"]
, filename
, subtype=True)
dst_file_path = dst_file_path + "/ieeg"
self._data_types["ieeg"] = True
# Now trying to match the task
try:
if task_label_match is None:
task_label_match = match_regexp(self._config["ieeg.task"]
, filename
, subtype=True)
new_name = new_name + "_task-" + task_label_match
except AssertionError as e:
print("No task found for %s" % src_file_path)
if debug:
raise e
return
except AssertionError as e:
if verbose:
print("No anat, func, or ieeg data type found for %s" % src_file_path)
if debug:
raise e
return
except KeyError as e:
print("No anat, func, or ieeg data type found in config file, one of these data types is required")
if debug:
raise e
return
# if is an MRI
if dst_file_path.endswith("/func") or dst_file_path.endswith("/anat"):
try:
SeqType = str(match_regexp(self._config["pulseSequenceType"], filename, subtype=True))
except AssertionError:
if verbose:
print("No pulse sequence found for %s" % src_file_path)
except KeyError:
if verbose:
print("pulse sequence not listed for %s, will look for in file header" % src_file_path)
try:
if echo_match is None:
echo_match = match_regexp(self._config["echo"], filename)
new_name = new_name + "_echo-" + echo_match
except AssertionError:
if verbose:
print("No echo found for %s" % src_file_path)
# check for optional labels
try:
if acq_match is None:
acq_match = match_regexp(self._config["acq"], filename)
try:
if re.match(r"^[^\d]{1,3}", acq_match):
acq_matches = re.split(r"([^\d]{1,3})", acq_match, 1)
acq_match = acq_matches[1] + str(int(acq_matches[2])).zfill(self._config["acq"]["fill"])
else:
acq_match = str(int(acq_match)).zfill(self._config["acq"]["fill"])
except KeyError:
pass
new_name = new_name + "_acq-" + acq_match
except (AssertionError, KeyError) as e:
if verbose:
print("no optional labels for %s" % src_file_path)
try:
if ce_match is None:
ce_match = match_regexp(self._config["ce"]
, filename)
new_name = new_name + "_ce-" + ce_match
except (AssertionError, KeyError) as e:
if verbose:
print("no special contrast labels for %s" % src_file_path)
if run_match is not None:
new_name = new_name + "_run-" + run_match
# Adding the modality to the new filename
new_name = new_name + "_" + data_type_match
return (new_name, dst_file_path, part_match, run_match,
acq_match, echo_match, sess_match, ce_match,
data_type_match, task_label_match, SeqType)
def multi_echo_check(self, runnum, src_file=""): # check to see if run is multi echo based on input
if self.is_multi_echo:
if int(runnum) in self._multi_echo:
return (True)
else:
if self._multi_echo == 0:
try:
match_regexp(self._config["echo"], src_file)
except AssertionError:
return (False)
return (True)
else:
return (False)
else:
return (False)
def get_params(self, folder, echo_num, run_num): # function to run through DICOMs and get metadata
# threading?
if self.is_multi_echo and run_num in self._multi_echo:
vols_per_time = len(self._config['delayTimeInSec']) - 1
echo = self._config['delayTimeInSec'][echo_num]
else:
vols_per_time = 1
echo = None
for root, _, dfiles in os.walk(folder, topdown=True):
dfiles.sort()
for dfile in dfiles:
dcm_file_path = os.path.join(root, dfile)
fobj = dicom.read_file(str(dcm_file_path))
if echo is None:
try:
echo = float(fobj[0x18, 0x81].value) / 1000
except KeyError:
echo = self._config['delayTimeInSec'][0]
ImagesInAcquisition = int(fobj[0x20, 0x1002].value)
seqlist = []
for i in list(range(5)):
try:
seqlist.append(fobj[0x18, (32 + i)].value)
if seqlist[i] == 'NONE':
seqlist[i] = None
if isinstance(seqlist[i], dicom.multival.MultiValue):
seqlist[i] = list(seqlist[i])
if isinstance(seqlist[i], list):
seqlist[i] = ", ".join(seqlist[i])
except KeyError:
seqlist.append(None)
[ScanningSequence, SequenceVariant, SequenceOptions, AquisitionType, SequenceName] = seqlist
try:
timings = []
except NameError:
timings = [None] * int(ImagesInAcquisition / vols_per_time)
RepetitionTime = (
(float(fobj[0x18, 0x80].value) / 1000)) # TR value extracted in milliseconds, converted to seconds
try:
acquisition_series = self._config['series']
except KeyError:
print("default")
acquisition_series = "non-interleaved"
if acquisition_series == "even-interleaved":
InstackPositionNumber = 2
else:
InStackPositionNumber = 1
InstanceNumber = 0
while None in timings:
if timings[InStackPositionNumber - 1] is None:
timings[InStackPositionNumber - 1] = slice_time_calc(RepetitionTime, InstanceNumber,
int(ImagesInAcquisition / vols_per_time),
echo)
if acquisition_series == "odd-interleaved" or acquisition_series == "even-interleaved":
InStackPositionNumber += 2
if InStackPositionNumber > ImagesInAcquisition / vols_per_time and acquisition_series == "odd-interleaved":
InStackPositionNumber = 2
elif InStackPositionNumber > ImagesInAcquisition / vols_per_time and acquisition_series == "even-interleaved":
InStackPositionNumber = 1
else:
InStackPositionNumber += 1
InstanceNumber += 1
return (timings, echo, ScanningSequence, SequenceVariant, SequenceOptions, SequenceName)
def read_edf(self, file_name, channels=None, | |
AND datetime <= %s"""
if search_type is not None and search_type == 'stationvariable':
sql_string = """
SELECT station_id,
variable_id,
b.symbol,
b.name,
b.sampling_operation_id,
c.name,
c.symbol,
min_value,
max_value,
avg_value,
sum_value,
num_records,
datetime as data
FROM hourly_summary a
INNER JOIN wx_variable b ON a.variable_id=b.id
INNER JOIN wx_unit c ON b.unit_id=c.id
WHERE station_id=%s AND variable_id=%s AND datetime >= %s AND datetime <= %s"""
if sql_string:
sql_string += " ORDER BY datetime"
with connection.cursor() as cursor:
if search_type is not None and search_type == 'stationvariable':
cursor.execute(sql_string, [search_value, search_value2, search_date_start, search_date_end])
else:
cursor.execute(sql_string, [search_value, search_date_start, search_date_end])
rows = cursor.fetchall()
for row in rows:
value = None
if row[4] in [1, 2]:
value = row[9]
elif row[4] == 3:
value = row[7]
elif row[4] == 4:
value = row[8]
elif row[4] == 6:
value = row[10]
else:
value = row[10]
if value is None:
print('variable {} does not have supported sampling operation {}'.format(row[1], row[4]))
else:
obj = {
'station': row[0],
'date': row[12],
'value': round(value, 2),
'min': round(row[7], 2),
'max': round(row[8], 2),
'avg': round(row[9], 2),
'sum': round(row[10], 2),
'count': round(row[11], 2),
'variable': {
'symbol': row[2],
'name': row[3],
'unit_name': row[5],
'unit_symbol': row[6],
}
}
response['results'].append(obj)
if response['results']:
return JsonResponse(response, status=status.HTTP_200_OK)
return JsonResponse(data=response)
def daily_summary_list(request):
    """Return daily summary records as JSON.

    Query parameters:
        search_type       -- 'variable', 'station' or 'stationvariable'
        search_value      -- variable id, or station code/pk for station queries
        search_value2     -- variable id (only used with 'stationvariable')
        search_date_start -- YYYY-MM-DD (default: one year ago)
        search_date_end   -- YYYY-MM-DD (default: today)

    The queried interval is capped at 400 days. Rows whose sampling
    operation is unsupported are reported in response['messages'].
    """
    search_type = request.GET.get('search_type', None)
    search_value = request.GET.get('search_value', None)
    search_value2 = request.GET.get('search_value2', None)
    search_date_start = request.GET.get(
        'search_date_start',
        default=(datetime.datetime.now() - datetime.timedelta(days=365)).strftime('%Y-%m-%d')
    )
    search_date_end = request.GET.get(
        'search_date_end',
        default=datetime.datetime.now().strftime('%Y-%m-%d')
    )
    sql_string = ""
    response = {
        'results': [],
        'messages': [],
    }
    try:
        start_date = datetime.datetime.strptime(search_date_start, '%Y-%m-%d')
        end_date = datetime.datetime.strptime(search_date_end, '%Y-%m-%d')
    except ValueError:
        message = 'Invalid date format. Expected YYYY-MM-DD'
        return JsonResponse(data={"message": message}, status=status.HTTP_400_BAD_REQUEST)
    delta = end_date - start_date
    if delta.days > 400:  # Restrict queries to max 400 days
        message = 'Interval between start date and end date is greater than 13 months.'
        return JsonResponse(data={"message": message}, status=status.HTTP_400_BAD_REQUEST)
    if search_type in ['station', 'stationvariable']:
        # Accept either a station code or a primary key.
        try:
            station = Station.objects.get(code=search_value)
        except ObjectDoesNotExist:
            station = Station.objects.get(pk=search_value)
        finally:
            search_value = station.id
    if search_type is not None and search_type == 'variable':
        sql_string = """
            SELECT station_id,
                   variable_id,
                   b.symbol,
                   b.name,
                   b.sampling_operation_id,
                   c.name,
                   c.symbol,
                   min_value,
                   max_value,
                   avg_value,
                   sum_value,
                   day,
                   num_records
            FROM daily_summary a
            INNER JOIN wx_variable b ON a.variable_id=b.id
            INNER JOIN wx_unit c ON b.unit_id=c.id
            WHERE b.id = %s
              AND day >= %s
              AND day <= %s"""
    if search_type is not None and search_type == 'station':
        sql_string = """
            SELECT station_id,
                   variable_id,
                   b.symbol,
                   b.name,
                   b.sampling_operation_id,
                   c.name,
                   c.symbol,
                   min_value,
                   max_value,
                   avg_value,
                   sum_value,
                   day,
                   num_records
            FROM daily_summary a
            INNER JOIN wx_variable b ON a.variable_id=b.id
            INNER JOIN wx_unit c ON b.unit_id=c.id
            WHERE station_id=%s AND day >= %s AND day <= %s"""
    if search_type is not None and search_type == 'stationvariable':
        sql_string = """
            SELECT station_id,
                   variable_id,
                   b.symbol,
                   b.name,
                   b.sampling_operation_id,
                   c.name,
                   c.symbol,
                   min_value,
                   max_value,
                   avg_value,
                   sum_value,
                   day,
                   num_records
            FROM daily_summary a
            INNER JOIN wx_variable b ON a.variable_id=b.id
            INNER JOIN wx_unit c ON b.unit_id=c.id
            WHERE station_id=%s AND variable_id=%s AND day >= %s AND day <= %s"""
    if sql_string:
        sql_string += " ORDER BY day"
        with connection.cursor() as cursor:
            if search_type is not None and search_type == 'stationvariable':
                cursor.execute(sql_string, [search_value, search_value2, search_date_start, search_date_end])
            else:
                cursor.execute(sql_string, [search_value, search_date_start, search_date_end])
            rows = cursor.fetchall()
            for row in rows:
                # row[4] is the sampling operation id; pick the aggregate that
                # represents the "value" for that operation.
                value = None
                if row[4] in [1, 2]:
                    value = row[9]
                elif row[4] == 3:
                    value = row[7]
                elif row[4] == 4:
                    value = row[8]
                elif row[4] == 6:
                    value = row[10]
                else:
                    value = row[10]
                if value is not None:
                    obj = {
                        'station': row[0],
                        'date': row[11],
                        'value': round(value, 2),
                        'min': round(row[7], 2),
                        'max': round(row[8], 2),
                        'avg': round(row[9], 2),
                        'total': round(row[10], 2),
                        'count': row[12],
                        'variable': {
                            'symbol': row[2],
                            'name': row[3],
                            'unit_name': row[5],
                            'unit_symbol': row[6],
                        }
                    }
                    response['results'].append(obj)
                else:
                    # Bug fix: the original constructed a JsonResponse here and
                    # discarded it without returning, silently dropping the
                    # row. Record the problem in the response messages instead.
                    response['messages'].append(
                        'variable {} does not have supported sampling operation {}'.format(row[1], row[4]))
    if response['results']:
        return JsonResponse(response, status=status.HTTP_200_OK)
    return JsonResponse(data=response)
def monthly_summary_list(request):
    """Return monthly summary records as JSON.

    Query parameters:
        search_type       -- 'variable', 'station' or 'stationvariable'
        search_value      -- variable id, or station code/pk for station queries
        search_value2     -- variable id (only used with 'stationvariable')
        search_date_start -- YYYY-MM-DD (default: one year ago)
        search_date_end   -- YYYY-MM-DD (default: today)
    """
    search_type = request.GET.get('search_type', None)
    search_value = request.GET.get('search_value', None)
    search_value2 = request.GET.get('search_value2', None)
    search_date_start = request.GET.get(
        'search_date_start',
        default=(datetime.datetime.now() - datetime.timedelta(days=365)).strftime('%Y-%m-%d')
    )
    search_date_end = request.GET.get(
        'search_date_end',
        default=datetime.datetime.now().strftime('%Y-%m-%d')
    )
    sql_string = ""
    response = {
        'count': -999,
        'next': None,
        'previous': None,
        'results': []
    }
    try:
        start_date = datetime.datetime.strptime(search_date_start, '%Y-%m-%d')
        end_date = datetime.datetime.strptime(search_date_end, '%Y-%m-%d')
    except ValueError:
        message = 'Invalid date format. Expected YYYY-MM-DD'
        return JsonResponse(data={"message": message}, status=status.HTTP_400_BAD_REQUEST)
    if search_type in ['station', 'stationvariable']:
        # Accept either a station code or a primary key.
        try:
            station = Station.objects.get(code=search_value)
        except ObjectDoesNotExist:
            station = Station.objects.get(pk=search_value)
        finally:
            search_value = station.id
    if search_type is not None and search_type == 'variable':
        sql_string = """
            SELECT station_id,
                   variable_id,
                   b.symbol,
                   b.name,
                   b.sampling_operation_id,
                   c.name,
                   c.symbol,
                   min_value,
                   max_value,
                   avg_value,
                   sum_value,
                   date::date,
                   num_records
            FROM monthly_summary a
            JOIN wx_variable b ON a.variable_id=b.id
            JOIN wx_unit c ON b.unit_id=c.id
            WHERE b.id = %s
              AND date >= %s
              AND date <= %s
        """
    if search_type is not None and search_type == 'station':
        sql_string = """
            SELECT station_id,
                   variable_id,
                   b.symbol,
                   b.name,
                   b.sampling_operation_id,
                   c.name,
                   c.symbol,
                   min_value,
                   max_value,
                   avg_value,
                   sum_value,
                   date::date,
                   num_records
            FROM monthly_summary a
            JOIN wx_variable b ON a.variable_id=b.id
            JOIN wx_unit c ON b.unit_id=c.id
            WHERE station_id = %s
              AND date >= %s AND date <= %s
        """
    if search_type is not None and search_type == 'stationvariable':
        sql_string = """
            SELECT station_id,
                   variable_id,
                   b.symbol,
                   b.name,
                   b.sampling_operation_id,
                   c.name,
                   c.symbol,
                   min_value,
                   max_value,
                   avg_value,
                   sum_value,
                   date::date,
                   num_records
            FROM monthly_summary a
            JOIN wx_variable b ON a.variable_id=b.id
            JOIN wx_unit c ON b.unit_id=c.id
            WHERE station_id = %s
              AND variable_id = %s
              AND date >= %s AND date <= %s
        """
    if sql_string:
        # NOTE(review): the SELECT list exposes date::date — confirm the
        # monthly_summary table really has a "month" column for this ORDER BY.
        sql_string += " ORDER BY month"
        with connection.cursor() as cursor:
            if search_type is not None and search_type == 'stationvariable':
                cursor.execute(sql_string, [search_value, search_value2, start_date, end_date])
            else:
                cursor.execute(sql_string, [search_value, start_date, end_date])
            rows = cursor.fetchall()
            for row in rows:
                # row[4] is the sampling operation id; pick the aggregate that
                # represents the "value" for that operation.
                value = None
                if row[4] in [1, 2]:
                    value = row[9]
                elif row[4] == 3:
                    value = row[7]
                elif row[4] == 4:
                    value = row[8]
                elif row[4] == 6:
                    value = row[10]
                else:
                    value = row[10]
                if value is not None:
                    obj = {
                        'station': row[0],
                        'date': row[11],
                        'value': round(value, 2),
                        'min': round(row[7], 2),
                        'max': round(row[8], 2),
                        'avg': round(row[9], 2),
                        'total': round(row[10], 2),
                        'count': row[12],
                        'variable': {
                            'symbol': row[2],
                            'name': row[3],
                            'unit_name': row[5],
                            'unit_symbol': row[6],
                        }
                    }
                    response['results'].append(obj)
                else:
                    # Bug fix: the original constructed a JsonResponse here and
                    # discarded it without returning; log the skipped row the
                    # same way hourly_summary_list does.
                    print('variable {} does not have supported sampling operation {}'.format(row[1], row[4]))
    if response['results']:
        return JsonResponse(response, status=status.HTTP_200_OK)
    return JsonResponse(data=response)
def yearly_summary_list(request):
search_type = request.GET.get('search_type', None)
search_value = request.GET.get('search_value', None)
search_value2 = request.GET.get('search_value2', None)
search_date_start = request.GET.get(
'search_date_start',
default=(datetime.datetime.now() - datetime.timedelta(days=365)).strftime('%Y-%m-%d')
)
search_date_end = request.GET.get(
'search_date_end',
default=datetime.datetime.now().strftime('%Y-%m-%d')
)
sql_string = ""
response = {
'count': -999,
'next': None,
'previous': None,
'results': []
}
try:
start_date = datetime.datetime.strptime(search_date_start, '%Y-%m-%d')
end_date = datetime.datetime.strptime(search_date_end, '%Y-%m-%d')
except ValueError:
message = 'Invalid date format. Expected YYYY-MM-DD'
return JsonResponse(data={"message": message}, status=status.HTTP_400_BAD_REQUEST)
if search_type in ['station', 'stationvariable']:
try:
station = Station.objects.get(code=search_value)
except ObjectDoesNotExist:
station = Station.objects.get(pk=search_value)
finally:
search_value = station.id
if search_type is not None and search_type == 'variable':
sql_string = """
SELECT station_id,
variable_id,
b.symbol,
b.name,
b.sampling_operation_id,
c.name,
c.symbol,
min_value,
max_value,
avg_value,
sum_value,
date::date,
num_records
FROM yearly_summary a
JOIN wx_variable b ON a.variable_id=b.id
JOIN wx_unit c ON b.unit_id=c.id
WHERE b.id = %s
AND date >= %s
AND date <= %s
"""
if search_type is not None and search_type == 'station':
sql_string = """
SELECT station_id,
variable_id,
b.symbol,
b.name,
b.sampling_operation_id,
c.name,
c.symbol,
min_value,
max_value,
avg_value,
sum_value,
date::date,
num_records
FROM yearly_summary a
JOIN wx_variable b ON a.variable_id=b.id
JOIN wx_unit c ON b.unit_id=c.id
WHERE station_id = %s
AND date >= %s AND date <= %s
"""
if search_type is not None and search_type == 'stationvariable':
sql_string = """
SELECT station_id,
variable_id,
b.symbol,
b.name,
b.sampling_operation_id,
c.name,
c.symbol,
min_value,
max_value,
avg_value,
sum_value,
date::date,
num_records
FROM yearly_summary a
JOIN wx_variable b ON a.variable_id=b.id
JOIN wx_unit c ON b.unit_id=c.id
WHERE station_id = %s
AND variable_id = %s
AND date >= %s AND date <= %s
"""
if sql_string:
sql_string += " ORDER BY year"
with connection.cursor() as cursor:
if search_type is not None and search_type == 'stationvariable':
cursor.execute(sql_string, [search_value, search_value2, | |
= isinstance(dorp, list)
# If this is a plan panel, it only renders plans
if (self.type == 'plan' or self.type == 'plan_summary') and \
not isinstance(dorp,Plan):
if is_list:
if any(not isinstance(item, Plan) for item in dorp):
return ''
else:
return ''
# Given a plan, it will render using the districts within the plan
if self.type == 'district' and \
not isinstance(dorp,District):
if is_list:
if any(not isinstance(item, District) for item in dorp):
return ''
elif isinstance(dorp, Plan):
dorp = dorp.get_districts_at_version(
version if version is not None else dorp.version,
include_geom=True)
is_list = True
else:
return ''
# Keep track of whether we're using a parameter or the DB to populate our panel
function_override = components is not None
# Render an item for each plan and plan score
if self.type == 'plan' or self.type == 'plan_summary':
if is_list:
plans = dorp
else:
plans = [dorp]
planscores = []
for plan in plans:
plan_version = version if version is not None else plan.version
if function_override:
functions = map(lambda f: f[0], components)
else:
functions = self.score_functions.filter(
is_planscore=True).order_by('name')
for function in functions:
# Don't process this function if it isn't in the inclusion list
if function_ids and not function.id in function_ids:
continue
if function_override:
if len(function) > 1:
arguments = function[1:]
function = function[0]
score = function.score(
plans,
format='html',
version=plan_version,
score_arguments=arguments)
sort = score
else:
score = ComputedPlanScore.compute(
function,
plan,
format='html',
version=plan_version)
sort = ComputedPlanScore.compute(
function,
plan,
format='sort',
version=plan_version)
planscores.append({
'plan':
plan,
'name':
function.get_short_label(),
'label':
function.get_label(),
'description':
function.get_long_description(),
'score':
score,
'sort':
sort
})
if self.type == 'plan':
planscores.sort(
key=lambda x: x['sort'], reverse=not self.is_ascending)
return "" if len(planscores) == 0 else render_to_string(
self.template, {
'settings': settings,
'planscores': planscores,
'functions': functions,
'title': self.get_short_label(),
'cssclass': self.cssclass,
'position': self.position,
'description': self.get_long_description(),
'planname': '' if len(plans) == 0 else plans[0].name,
'context': context
})
# Render each district with multiple scores
elif self.type == 'district':
if is_list:
districts = dorp
else:
districts = [dorp]
districtscores = []
functions = []
for district in districts:
districtscore = {'district': district, 'scores': []}
if function_override:
district_functions = reduce(
lambda c: not c[0].is_planscore, components)
else:
district_functions = self.score_functions.filter(
is_planscore=False)
for function in district_functions:
# Don't process this function if it isn't in the inclusion list
if function_ids and not function.id in function_ids:
continue
if function_override:
if len(function) > 1:
arguments = function[1:]
function = function[0]
score = function.score(
district, format='html', score_arguments=arguments)
else:
if not function.get_label() in functions:
functions.append(function.get_label())
score = ComputedDistrictScore.compute(
function, district, format='html')
districtscore['scores'].append({
'district':
district,
'name':
function.get_short_label(),
'label':
function.get_label(),
'description':
function.get_long_description(),
'score':
score
})
if len(districtscore['scores']) > 0:
districtscores.append(districtscore)
return "" if len(districtscores) == 0 else render_to_string(
self.template, {
'districtscores': districtscores,
'functions': functions,
'title': self.__unicode__(),
'cssclass': self.cssclass,
'settings': settings,
'position': self.position,
'context': context
})
class ValidationCriteria(BaseModel):
    """
    Defines the required score functions to validate a legislative body.
    """

    # The score function this criteria is for
    function = models.ForeignKey(ScoreFunction)

    # Name of this validation criteria
    name = models.CharField(max_length=50)

    # The legislative body that this validation criteria is for
    legislative_body = models.ForeignKey(LegislativeBody)

    def __unicode__(self):
        return self.get_label()

    class Meta:
        """
        Additional properties about the ValidationCriteria model.
        """
        # Bug fix: "criterion" is singular; the plural form shown by the
        # Django admin changelist should be "criteria".
        verbose_name_plural = "Validation criteria"

        unique_together = ('name', )
class ComputedDistrictScore(models.Model):
    """
    A score generated by a score function for a district that can be
    saved for later.

    These computed scores do not store the version number, since each
    district has a unique version.
    """

    # The score function that computes this score
    function = models.ForeignKey(ScoreFunction)

    # The district that this score relates to
    district = models.ForeignKey(District)

    # The actual score value (a pickled python object)
    value = models.TextField()

    def __unicode__(self):
        """Return a "function / district / plan" label for display."""
        name = ''
        if self.district is not None:
            if self.district.plan is not None:
                name = '%s / %s' % (
                    self.district.long_label,
                    self.district.plan.name,
                )
            else:
                name = self.district.long_label

        if self.function is not None:
            name = '%s / %s' % (self.function.get_short_label(), name)
        else:
            name = 'None / %s' % name

        return name

    @staticmethod
    def compute(function, district, format='raw'):
        """
        Get the computed value. This method will leverage the cache when
        it is available, or it will populate the cache if it is not.

        If the cached score exists, its value is not changed.
        If the cached score does not exist, this method will create it.

        Parameters:
            function -- A ScoreFunction to compute with
            district -- A District to compute on
            format -- One of 'raw', 'html', 'json' or 'sort'; how the
                cached value should be rendered before returning.

        Returns:
            The cached value for the district in the requested format, or
            None on error or for an unrecognized format.
        """
        created = False
        try:
            defaults = {'value': ''}
            cache, created = ComputedDistrictScore.objects.get_or_create(
                function=function, district=district, defaults=defaults)
        except Exception as ex:
            logger.info(
                'Could not retrieve nor create computed district score for district %d.',
                district.id)
            # Bug fix: the exception was passed as a lazy logging argument
            # without a %s placeholder, which made the logging module raise a
            # formatting error instead of recording the cause.
            logger.debug('Reason: %s', ex)
            return None

        try:
            # Since we create the object blank, it's possible to hit a race condition where
            # the object already existed, but has no value saved. Try to load the value,
            # and if it fails then calculate it.
            # NOTE: unpickling is only safe because this column is written
            # exclusively by this code path; never load untrusted pickles.
            score = cPickle.loads(str(cache.value))
        except Exception:
            # The object didn't have an already cached value, so calculate it
            score = function.score(district, format='raw')

            # To avoid having multiple processes hitting the database with the same write,
            # only update the object if we were the one that created it initially
            if created:
                cache.value = cPickle.dumps(score)
                cache.save()

        if format != 'raw':
            calc = function.get_calculator()
            calc.result = score
            if format == 'html':
                return calc.html()
            elif format == 'json':
                return calc.json()
            elif format == 'sort':
                return calc.sortkey()
            else:
                # Unrecognized format!
                return None

        return score

    class Meta:
        unique_together = (('function', 'district'), )
class ComputedPlanScore(models.Model):
    """
    A score generated by a score function for a plan that can be saved
    for later.

    These computed scores contain version numbers, since a plan's version
    number is incremented each time, but scores need to exist for different
    plan version numbers, for history, etc.
    """
    # The score function that computes this score
    function = models.ForeignKey(ScoreFunction)
    # The plan that this score relates to
    plan = models.ForeignKey(Plan)
    # The version of the plan that this relates to
    version = models.PositiveIntegerField(default=0)
    # The actual score value (a pickled python object, stored as text)
    value = models.TextField()

    @staticmethod
    def compute(function, plan, version=None, format='raw'):
        """
        Get the computed value. This method will leverage the cache when
        it is available, or it will populate the cache if it is not.

        If the cached score exists, its value is not changed.
        If the cached score does not exist, this method will create it.

        Parameters:
            function -- A ScoreFunction to compute with
            plan -- A Plan to compute on
            version -- Optional; the version of the plan to compute.
            format -- One of 'raw', 'html', 'json' or 'sort'.

        Returns:
            The cached value for the plan, rendered in the requested
            format, or None on error or an unrecognized format.
        """
        created = False
        plan_version = version if version is not None else plan.version
        try:
            defaults = {'value': ''}
            cache, created = ComputedPlanScore.objects.get_or_create(
                function=function,
                plan=plan,
                version=plan_version,
                defaults=defaults)
        # BUG FIX: was a bare "except:", which also swallows SystemExit and
        # KeyboardInterrupt; only unexpected runtime errors should be caught.
        except Exception:
            logger.exception(
                'Could not retrieve nor create ComputedPlanScore for plan %d',
                plan.id)
            return None
        try:
            # Since we create the object blank, it's possible to hit a race condition where
            # the object already existed, but has no value saved. Try to load the value,
            # and if it fails then calculate it.
            score = cPickle.loads(str(cache.value))
        except Exception:
            # The object didn't have an already cached value, so calculate it
            score = function.score(plan, format='raw', version=plan_version)
            # To avoid having multiple processes hitting the database with the same write,
            # only update the object if we were the one that created it initially
            if created:
                cache.value = cPickle.dumps(score)
                cache.save()
        if format != 'raw':
            calc = function.get_calculator()
            calc.result = score
            if format == 'html':
                return calc.html()
            elif format == 'json':
                return calc.json()
            elif format == 'sort':
                return calc.sortkey()
            else:
                # Unrecognized format!
                return None
        return score

    def __unicode__(self):
        """Return '<function> / <plan>' for display."""
        name = ''
        if self.plan is not None:
            name = self.plan.name
        if self.function is not None:
            name = '%s / %s' % (self.function.get_short_label(), name)
        else:
            name = 'None / %s' % name
        return name
class ContiguityOverride(models.Model):
"""
Defines a relationship between two geounits in which special
behavior needs to be applied when calculating contiguity.
"""
# The geounit that | |
import time
import weakref
import logging
import numpy as np
import wgpu.backends.rs
from .. import Renderer
from ...linalg import Matrix4, Vector3
from ...objects import (
id_provider,
KeyboardEvent,
RootEventHandler,
PointerEvent,
WheelEvent,
WindowEvent,
WorldObject,
)
from ...cameras import Camera
from ...resources import Buffer, Texture, TextureView
from ...utils import array_from_shadertype, Color
from . import _blender as blender_module
from ._flusher import RenderFlusher
from ._pipelinebuilder import ensure_pipeline
from ._update import update_buffer, update_texture, update_texture_view
logger = logging.getLogger("pygfx")
# Uniform struct with standard transform-related info, provided to each
# shader as the uniform at slot 0.
# todo: a combined transform would be nice too, for performance
# todo: same for ndc_to_world transform (combined inv transforms)
stdinfo_uniform_type = {
    "cam_transform": "4x4xf4",
    "cam_transform_inv": "4x4xf4",
    "projection_transform": "4x4xf4",
    "projection_transform_inv": "4x4xf4",
    "physical_size": "2xf4",
    "logical_size": "2xf4",
    "flipped_winding": "i4",  # a bool, really
}
def _get_sort_function(camera: Camera):
    """Return a key function for sorting wobject-tuples by (render_order, depth)."""
    view_proj_matrix = Matrix4().multiply_matrices(
        camera.projection_matrix, camera.matrix_world_inverse
    )

    def sort_func(wobject_tuple: WorldObject):
        wobject = wobject_tuple[0]
        # Depth of the object's position in clip space.
        pos = Vector3().set_from_matrix_position(wobject.matrix_world)
        depth = pos.apply_matrix4(view_proj_matrix).z
        return wobject.render_order, depth

    return sort_func
class SharedData:
    """An object to store global data to share between multiple wgpu renderers.

    Since renderers don't render simultaneously, they can share certain
    resources. This saves memory, but more importantly, resources that
    get used in wobject pipelines should be shared to avoid having to
    constantly recompose the pipelines of wobjects that are rendered by
    multiple renderers.
    """

    def __init__(self, canvas):
        # Create adapter and device objects - there should be just one per canvas.
        # Having a global device provides the benefit that we can draw any object
        # anywhere.
        # We could pass the canvas to request_adapter(), so we get an adapter that is
        # at least compatible with the first canvas that a renderer is create for.
        # However, passing the object has been shown to prevent the creation of
        # a canvas (on Linux + wx), so, we never pass it for now.
        self.adapter = wgpu.request_adapter(
            canvas=None, power_preference="high-performance"
        )
        # One device per adapter; no special features/limits requested.
        self.device = self.adapter.request_device(
            required_features=[], required_limits={}
        )
        # Create a uniform buffer for std info
        # (shared across renderers; bound at slot 0 in every shader).
        self.stdinfo_buffer = Buffer(array_from_shadertype(stdinfo_uniform_type))
        self.stdinfo_buffer._wgpu_usage |= wgpu.BufferUsage.UNIFORM
        # A cache for shader objects
        self.shader_cache = {}
class WgpuRenderer(RootEventHandler, Renderer):
"""Object used to render scenes using wgpu.
The purpose of a renderer is to render (i.e. draw) a scene to a
canvas or texture. It also provides picking, defines the
anti-aliasing parameters, and any post processing effects.
A renderer is directly associated with its target and can only render
to that target. Different renderers can render to the same target though.
It provides a ``.render()`` method that can be called one or more
times to render scenes. This creates a visual representation that
is stored internally, and is finally rendered into its render target
(the canvas or texture).
__________
| blender |
[scenes] -- render() --> | state | -- flush() --> [target]
|__________|
The internal representation is managed by the blender object. The
internal render textures are typically at a higher resolution to
reduce aliasing (SSAA). The blender has auxilary buffers such as a
depth buffer, pick buffer, and buffers for transparent fragments.
Depending on the blend mode, a single render call may consist of
multiple passes (to deal with semi-transparent fragments).
The flush-step resolves the internal representation into the target
texture or canvas, averaging neighbouring fragments for anti-aliasing.
Parameters:
target (WgpuCanvas or Texture): The target to render to, and what
determines the size of the render buffer.
pixel_ratio (float, optional): How large the physical size of the render
buffer is in relation to the target's physical size, for antialiasing.
See the corresponding property for details.
show_fps (bool): Whether to display the frames per second. Beware that
depending on the GUI toolkit, the canvas may impose a frame rate limit.
"""
_shared = None
_wobject_pipelines_collection = weakref.WeakValueDictionary()
    def __init__(
        self,
        target,
        *args,
        pixel_ratio=None,
        show_fps=False,
        blend_mode="default",
        sort_objects=False,
        enable_events=True,
        **kwargs,
    ):
        """Set up the renderer for the given target (canvas or texture)."""
        super().__init__(*args, **kwargs)
        # Check and normalize inputs
        if not isinstance(target, (Texture, TextureView, wgpu.gui.WgpuCanvasBase)):
            raise TypeError(
                f"Render target must be a canvas or texture (view), not a {target.__class__.__name__}"
            )
        self._target = target
        # Process other inputs
        self.pixel_ratio = pixel_ratio
        self._show_fps = bool(show_fps)
        # Make sure we have a shared object (the first renderer creates it)
        canvas = target if isinstance(target, wgpu.gui.WgpuCanvasBase) else None
        if WgpuRenderer._shared is None:
            WgpuRenderer._shared = SharedData(canvas)
        # Init counter to auto-clear
        self._renders_since_last_flush = 0
        # Get target format
        if isinstance(target, wgpu.gui.WgpuCanvasBase):
            self._canvas_context = self._target.get_context()
            self._target_tex_format = self._canvas_context.get_preferred_format(
                self._shared.adapter
            )
            # Also configure the canvas
            self._canvas_context.configure(
                device=self._shared.device,
                format=self._target_tex_format,
                usage=wgpu.TextureUsage.RENDER_ATTACHMENT,
            )
        else:
            self._target_tex_format = self._target.format
            # Also enable the texture for render and display usage
            self._target._wgpu_usage |= wgpu.TextureUsage.RENDER_ATTACHMENT
            self._target._wgpu_usage |= wgpu.TextureUsage.TEXTURE_BINDING
        # Prepare render targets.
        self.blend_mode = blend_mode
        self.sort_objects = sort_objects
        # Prepare object that performs the final render step into a texture
        self._flusher = RenderFlusher(self._shared.device)
        # Initialize a small buffer to read pixel info into
        # Make it 256 bytes just in case (for bytes_per_row)
        # NOTE(review): comment mentions 256 bytes but size=16 is used --
        # confirm which is intended.
        self._pixel_info_buffer = self._shared.device.create_buffer(
            size=16,
            usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.MAP_READ,
        )
        if enable_events:
            self.enable_events()
    @property
    def device(self):
        """A reference to the used wgpu device (shared between all renderers)."""
        return self._shared.device
    @property
    def target(self):
        """The render target. Can be a canvas, texture or texture view.

        Read-only; set at construction time.
        """
        return self._target
    @property
    def pixel_ratio(self):
        """The ratio between the number of internal pixels versus the logical pixels on the canvas.

        This can be used to configure the size of the render texture
        relative to the canvas' logical size. By default (value is None) the
        used pixel ratio follows the screen's pixel ratio on high-res
        displays, and is 2 otherwise.

        If the used pixel ratio causes the render texture to be larger
        than the physical size of the canvas, SSAA is applied, resulting
        in a smoother final image with less jagged edges. Alternatively,
        this value can be set to e.g. 0.5 to lower the resolution (e.g.
        for performance during interaction).
        """
        return self._pixel_ratio
@pixel_ratio.setter
def pixel_ratio(self, value):
if value is None:
self._pixel_ratio = None
elif isinstance(value, (int, float)):
self._pixel_ratio = None if value <= 0 else float(value)
else:
raise TypeError(
f"Rendered.pixel_ratio expected None or number, not {value}"
)
    @property
    def rect(self):
        """The rectangular viewport for the renderer area, as (x, y, w, h) in logical pixels."""
        return (0, 0) + self.logical_size
@property
def logical_size(self):
"""The size of the render target in logical pixels."""
target = self._target
if isinstance(target, wgpu.gui.WgpuCanvasBase):
return target.get_logical_size()
elif isinstance(target, Texture):
return target.size[:2] # assuming pixel-ratio 1
elif isinstance(target, TextureView):
return target.texture.size[:2] # assuming pixel-ratio 1
else:
raise TypeError(f"Unexpected render target {target.__class__.__name__}")
@property
def physical_size(self):
"""The physical size of the internal render texture."""
# Get physical size of the target
target = self._target
if isinstance(target, wgpu.gui.WgpuCanvasBase):
target_psize = target.get_physical_size()
elif isinstance(target, Texture):
target_psize = target.size[:2]
elif isinstance(target, TextureView):
target_psize = target.texture.size[:2]
else:
raise TypeError(f"Unexpected render target {target.__class__.__name__}")
# Determine the pixel ratio of the render texture
if self._pixel_ratio:
pixel_ratio = self._pixel_ratio
else:
target_lsize = self.logical_size
pixel_ratio = target_psize[0] / target_lsize[0]
if pixel_ratio <= 1:
pixel_ratio = 2.0 # use 2 on non-hidpi displays
# Determine the physical size of the internal render textures
return tuple(max(1, int(pixel_ratio * x)) for x in target_lsize)
    @property
    def blend_mode(self):
        """The method for handling transparency:

        * "default" or None: Select the default: currently this is "ordered2".
        * "opaque": single-pass approach that considers every fragment opaque.
        * "ordered1": single-pass approach that blends fragments (using alpha blending).
          Can only produce correct results if fragments are drawn from back to front.
        * "ordered2": two-pass approach that first processes all opaque fragments and then
          blends transparent fragments (using alpha blending) with depth-write disabled. The
          visual results are usually better than ordered1, but still depend on the drawing order.
        * "weighted": two-pass approach for order independent transparency,
          using alpha weights.
        * "weighted_depth": two-pass approach for order independent transparency,
          with weights based on alpha and depth (McGuire 2013). Note that the depth
          range affects the (quality of the) visual result.
        * "weighted_plus": three-pass approach for order independent transparency,
          in which the front-most transparent layer is rendered correctly, while
          transparent layers behind it are blended using alpha weights.
        """
        return self._blend_mode
@blend_mode.setter
def blend_mode(self, value):
# Massage and check the | |
NUMBER):
return TRUE
elif term.type == OBJECT:
return self
else:
return FALSE
class StringOp(Expression):
    """Coerce a term to its string representation."""

    data_type = STRING

    def __init__(self, term):
        Expression.__init__(self, [term])
        self.term = term

    def __data__(self):
        return {"string": self.term.__data__()}

    def vars(self):
        return self.term.vars()

    def map(self, map_):
        return self.lang[StringOp(self.term.map(map_))]

    def missing(self):
        return self.term.missing()

    @simplified
    def partial_eval(self):
        if self.term.type is IS_NULL:
            return NULL
        simple = self.lang[FirstOp(self.term)].partial_eval()
        if is_op(simple, StringOp):
            # string-of-string is a no-op
            return simple.term.partial_eval()
        if is_op(simple, CoalesceOp):
            # push the string coercion into each coalesce branch
            return self.lang[CoalesceOp(
                [self.lang[StringOp(t)].partial_eval() for t in simple.terms]
            )]
        if is_literal(simple):
            if simple.type == STRING:
                return simple
            return self.lang[Literal(mo_json.value2json(simple.value))]
        return self
class IsStringOp(Expression):
    """Test whether a term evaluates to a string."""

    data_type = BOOLEAN

    def __init__(self, term):
        Expression.__init__(self, [term])
        self.term = term

    def __data__(self):
        return {"is_string": self.term.__data__()}

    def missing(self):
        # the test itself always produces a boolean
        return FALSE

    def vars(self):
        return self.term.vars()

    def map(self, map_):
        return self.lang[IsStringOp(self.term.map(map_))]
class CountOp(Expression):
    """Count the number of non-missing values among the given terms."""

    has_simple_form = False
    data_type = INTEGER

    def __init__(self, terms, **clauses):
        Expression.__init__(self, terms)
        if is_many(terms):
            # SHORTCUT: ASSUME AN ARRAY IS A TUPLE
            self.terms = self.lang[TupleOp(terms)]
        else:
            self.terms = terms

    def __data__(self):
        return {"count": self.terms.__data__()}

    def vars(self):
        return self.terms.vars()

    def map(self, map_):
        return self.lang[CountOp(self.terms.map(map_))]

    def missing(self):
        # a count always exists (it is zero when everything is missing)
        return FALSE

    def exists(self):
        # BUG FIX: previously returned the TrueOp class object itself;
        # sibling operators (RegExpOp, ExistsOp, MissingOp) return the TRUE
        # singleton instance.
        return TRUE
class MaxOp(Expression):
    """Maximum over a set of terms; null terms are ignored."""

    data_type = NUMBER

    def __init__(self, terms):
        Expression.__init__(self, terms)
        # "== None" / "!= None" are deliberate: mo-dots Null compares equal
        # to None without being the None singleton.
        if terms == None:
            self.terms = []
        elif is_many(terms):
            self.terms = [t for t in terms if t != None]
        else:
            self.terms = [terms]

    def __data__(self):
        return {"max": [t.__data__() for t in self.terms]}

    def vars(self):
        acc = set()
        for t in self.terms:
            acc |= t.vars()
        return acc

    def map(self, map_):
        return self.lang[MaxOp([t.map(map_) for t in self.terms])]

    def missing(self):
        return FALSE

    @simplified
    def partial_eval(self):
        # fold all literal operands into one value; keep the rest symbolic
        best = None
        remaining = []
        for t in self.terms:
            simple = t.partial_eval()
            if simple is NULL:
                continue
            if is_literal(simple):
                best = MAX([best, simple.value])
            else:
                remaining.append(simple)
        if not remaining:
            return NULL if best == None else Literal(best)
        if best == None:
            return self.lang[MaxOp(remaining)]
        return self.lang[MaxOp([Literal(best)] + remaining)]
class MinOp(Expression):
    """Minimum over a set of terms; null terms are ignored."""

    data_type = NUMBER

    def __init__(self, terms):
        Expression.__init__(self, terms)
        # "== None" is deliberate: mo-dots Null compares equal to None
        # without being the None singleton.
        if terms == None:
            self.terms = []
        elif is_many(terms):
            self.terms = terms
        else:
            self.terms = [terms]

    def __data__(self):
        return {"min": [t.__data__() for t in self.terms]}

    def vars(self):
        acc = set()
        for t in self.terms:
            acc |= t.vars()
        return acc

    def map(self, map_):
        return self.lang[MinOp([t.map(map_) for t in self.terms])]

    def missing(self):
        return FALSE

    @simplified
    def partial_eval(self):
        # fold all literal operands into one value; keep the rest symbolic
        best = None
        remaining = []
        for t in self.terms:
            simple = t.partial_eval()
            if is_op(simple, NullOp):
                continue
            if is_literal(simple):
                best = MIN([best, simple.value])
            else:
                remaining.append(simple)
        if not remaining:
            return NULL if best == None else Literal(best)
        if best == None:
            return self.lang[MinOp(remaining)]
        return self.lang[MinOp([Literal(best)] + remaining)]
# Identity element of each multi-operand arithmetic op; used by
# BaseMultiOp.partial_eval to coalesce missing operands.
_jx_identity = {
    "add": ZERO,
    "mul": ONE
}
class BaseMultiOp(Expression):
    """
    Base for n-ary arithmetic operators (add, mul).

    The ``nulls`` clause controls decisiveness: when truthy, the op returns
    null only if ALL operands are null; otherwise any null operand makes
    the result null (unless ``default`` supplies a fallback value).
    """
    has_simple_form = True
    data_type = NUMBER
    # Subclasses set the operator name ("add", "mul"); keys into
    # builtin_ops, operators and _jx_identity.
    op = None
    def __init__(self, terms, **clauses):
        Expression.__init__(self, terms)
        self.terms = terms
        # Value to return when the result would otherwise be missing.
        self.default = coalesce(clauses.get("default"), NULL)
        self.nulls = coalesce(clauses.get("nulls"), FALSE) # nulls==True WILL HAVE OP RETURN null ONLY IF ALL OPERANDS ARE null
    def __data__(self):
        return {self.op: [t.__data__() for t in self.terms], "default": self.default, "nulls": self.nulls}
    def vars(self):
        output = set()
        for t in self.terms:
            output |= t.vars()
        return output
    def map(self, map_):
        return self.__class__([t.map(map_) for t in self.terms], **{"default": self.default, "nulls": self.nulls})
    def missing(self):
        # Decisive mode: missing only when every operand is missing (and no default).
        if self.nulls:
            if self.default is NULL:
                return self.lang[AndOp([t.missing() for t in self.terms])]
            else:
                return TRUE
        # Conservative mode: missing when any operand is missing (and no default).
        else:
            if self.default is NULL:
                return self.lang[OrOp([t.missing() for t in self.terms])]
            else:
                return FALSE
    def exists(self):
        if self.nulls:
            return self.lang[OrOp([t.exists() for t in self.terms])]
        else:
            return self.lang[AndOp([t.exists() for t in self.terms])]
    @simplified
    def partial_eval(self):
        # Fold literal operands into a single accumulated value via the
        # builtin operator; keep non-literal operands for runtime evaluation.
        acc = None
        terms = []
        for t in self.terms:
            simple = t.partial_eval()
            if simple is NULL:
                pass
            elif is_literal(simple):
                if acc is None:
                    acc = simple.value
                else:
                    acc = builtin_ops[self.op](acc, simple.value)
            else:
                terms.append(simple)
        lang =self.lang
        if len(terms) == 0:
            # All operands were literal (or null): result is fully computed.
            if acc == None:
                return self.default.partial_eval()
            else:
                return lang[Literal(acc)]
        elif self.nulls:
            # DECISIVE: missing operands are replaced by the op's identity
            # element, so only an all-missing input falls back to default.
            if acc is not None:
                terms.append(Literal(acc))
            output = lang[WhenOp(
                AndOp([t.missing() for t in terms]),
                **{
                    "then": self.default,
                    "else": operators["basic." + self.op]([
                        CoalesceOp([t, _jx_identity[self.op]])
                        for t in terms
                    ])
                }
            )].partial_eval()
        else:
            # CONSERVATIVE: any missing operand yields the default.
            if acc is not None:
                terms.append(lang[Literal(acc)])
            output = lang[WhenOp(
                lang[OrOp([t.missing() for t in terms])],
                **{
                    "then": self.default,
                    "else": operators["basic." + self.op](terms)
                }
            )].partial_eval()
        return output
class AddOp(BaseMultiOp):
    """N-ary addition (see BaseMultiOp for null/default handling)."""
    op = "add"
class MulOp(BaseMultiOp):
    """N-ary multiplication (see BaseMultiOp for null/default handling)."""
    op = "mul"
class RegExpOp(Expression):
    """Test whether a variable's value matches a regular expression."""

    has_simple_form = True
    data_type = BOOLEAN

    def __init__(self, terms):
        Expression.__init__(self, terms)
        # terms is a (variable, pattern) pair
        self.var, self.pattern = terms

    def __data__(self):
        return {"regexp": {self.var.var: self.pattern}}

    def vars(self):
        return {self.var}

    def map(self, map_):
        return self.lang[RegExpOp([self.var.map(map_), self.pattern])]

    def exists(self):
        return TRUE

    def missing(self):
        # a regexp test always yields a boolean
        return FALSE
class CoalesceOp(Expression):
    """Return the first non-null value among the given terms."""

    has_simple_form = True

    def __init__(self, terms):
        Expression.__init__(self, terms)
        self.terms = terms

    def __data__(self):
        return {"coalesce": [t.__data__() for t in self.terms]}

    def __eq__(self, other):
        if not is_op(other, CoalesceOp):
            return False
        if len(self.terms) != len(other.terms):
            return False
        return all(ours == theirs for ours, theirs in zip(self.terms, other.terms))

    def missing(self):
        # RETURN true FOR RECORDS THE WOULD RETURN NULL
        return self.lang[AndOp([v.missing() for v in self.terms])]

    def vars(self):
        acc = set()
        for v in self.terms:
            acc |= v.vars()
        return acc

    def map(self, map_):
        return self.lang[CoalesceOp([v.map(map_) for v in self.terms])]

    @simplified
    def partial_eval(self):
        pruned = []
        for t in self.terms:
            simple = self.lang[FirstOp(t)].partial_eval()
            if simple is NULL:
                continue  # a null branch can never contribute
            pruned.append(simple)
            if is_literal(simple):
                break  # a literal always has a value; later branches are unreachable
        if not pruned:
            return NULL
        if len(pruned) == 1:
            return pruned[0]
        return self.lang[CoalesceOp(pruned)]
class MissingOp(Expression):
    """Test whether an expression evaluates to null."""

    data_type = BOOLEAN

    def __init__(self, term):
        Expression.__init__(self, term)
        self.expr = term

    def __data__(self):
        return {"missing": self.expr.__data__()}

    def __eq__(self, other):
        return is_op(other, MissingOp) and self.expr == other.expr

    def vars(self):
        return self.expr.vars()

    def map(self, map_):
        return self.lang[MissingOp(self.expr.map(map_))]

    def missing(self):
        # "is it missing?" always has an answer
        return FALSE

    def exists(self):
        return TRUE

    @simplified
    def partial_eval(self):
        output = self.lang[self.expr].partial_eval().missing()
        if is_op(output, MissingOp):
            return output  # already in simplest form
        return output.partial_eval()
class ExistsOp(Expression):
    """Test whether a field has a value."""

    data_type = BOOLEAN

    def __init__(self, term):
        Expression.__init__(self, [term])
        self.field = term

    def __data__(self):
        return {"exists": self.field.__data__()}

    def vars(self):
        return self.field.vars()

    def map(self, map_):
        return self.lang[ExistsOp(self.field.map(map_))]

    def missing(self):
        # the existence test itself always yields a boolean
        return FALSE

    def exists(self):
        return TRUE

    @simplified
    def partial_eval(self):
        # exists == not-missing
        return self.lang[NotOp(self.field.missing())].partial_eval()
class PrefixOp(Expression):
    """Test whether an expression's value starts with a given prefix."""

    has_simple_form = True
    data_type = BOOLEAN

    def __init__(self, term):
        Expression.__init__(self, term)
        if not term:
            self.expr = NULL
            self.prefix = NULL
        elif is_data(term):
            # {variable: prefix} simple form (py2-style .items() indexing)
            self.expr, self.prefix = term.items()[0]
        else:
            self.expr, self.prefix = term

    def __data__(self):
        if not self.expr:
            return {"prefix": {}}
        if is_op(self.expr, Variable) and is_literal(self.prefix):
            return {"prefix": {self.expr.var: self.prefix.value}}
        return {"prefix": [self.expr.__data__(), self.prefix.__data__()]}

    def vars(self):
        if self.expr is NULL:
            return set()
        return self.expr.vars() | self.prefix.vars()

    def map(self, map_):
        if not self.expr:
            return self
        return self.lang[PrefixOp([self.expr.map(map_), self.prefix.map(map_)])]

    def missing(self):
        # the prefix test always yields a boolean
        return FALSE

    @simplified
    def partial_eval(self):
        # null prefix matches everything; null expr matches nothing
        return self.lang[CaseOp([
            WhenOp(self.prefix.missing(), then=TRUE),
            WhenOp(self.expr.missing(), then=FALSE),
            BasicStartsWithOp([self.expr, self.prefix])
        ])].partial_eval()
class SuffixOp(Expression):
    """Test whether an expression's value ends with a given suffix."""

    has_simple_form = True
    data_type = BOOLEAN

    def __init__(self, term):
        Expression.__init__(self, term)
        if not term:
            self.expr = self.suffix = None
        elif is_data(term):
            # {variable: suffix} simple form (py2-style .items() indexing)
            self.expr, self.suffix = term.items()[0]
        else:
            self.expr, self.suffix = term

    def __data__(self):
        if self.expr is None:
            return {"suffix": {}}
        elif is_op(self.expr, Variable) and is_literal(self.suffix):
            return {"suffix": {self.expr.var: self.suffix.value}}
        else:
            return {"suffix": [self.expr.__data__(), self.suffix.__data__()]}

    def missing(self):
        # The suffix test always yields a boolean, never null.
        return FALSE

    def vars(self):
        if self.expr is None:
            return set()
        return self.expr.vars() | self.suffix.vars()

    def map(self, map_):
        if self.expr is None:
            return TRUE
        else:
            return self.lang[SuffixOp([self.expr.map(map_), self.suffix.map(map_)])]

    @simplified
    def partial_eval(self):
        if self.expr is None:
            return TRUE
        # NOTE(review): this guard only errors for a non-literal suffix of
        # STRING type; it likely intends to reject ANY non-literal suffix --
        # confirm before changing the condition.
        if not is_literal(self.suffix) and self.suffix.type == STRING:
            # BUG FIX: error message previously read "hanlde".
            Log.error("can only handle literal suffix ")
        # compile the suffix test down to an (escaped) regexp match
        return WhenOp(
            self.lang[AndOp([self.expr.exists(), self.suffix.exists()])],
            **{"then": self.lang[RegExpOp([self.expr, Literal(".*" + re.escape(self.suffix.value))])], "else": FALSE}
        ).partial_eval()
class ConcatOp(Expression):
has_simple_form = True
data_type = STRING
def __init__(self, terms, **clauses):
Expression.__init__(self, terms)
if is_data(terms):
self.terms = first(terms.items())
else:
self.terms = terms
self.separator = clauses.get(str("separator"), Literal(""))
self.default = clauses.get(str("default"), NULL)
if not is_literal(self.separator):
Log.error("Expecting a literal separator")
@classmethod
def define(cls, expr):
terms = expr['concat']
if is_data(terms):
k, v = first(terms.items())
terms = [Variable(k), Literal(v)]
else:
terms = [jx_expression(t) for t in terms]
return cls.lang[ConcatOp(
terms,
**{
k: Literal(v) if is_text(v) and not is_variable_name(v) else jx_expression(v)
for k, v in expr.items()
if k in ["default", "separator"]
}
)]
def __data__(self):
f, s = self.terms[0], self.terms[1]
if is_op(f, Variable) and is_literal(s):
output = {"concat": {f.var: s.value}}
else:
output = {"concat": [t.__data__() for t in self.terms]}
if self.separator.json != '""':
output["separator"] = self.separator.__data__()
return | |
hyperparameter_name)
elif hyperparameter_description['type'] == metadata_base.ArgumentType.PRIMITIVE:
if utils.is_sequence(hyperparameter_description['data']):
primitive_references = hyperparameter_description['data']
else:
primitive_references = typing.cast(typing.Sequence, [hyperparameter_description['data']])
primitives = []
for primitive_reference in primitive_references:
# We make an instance of a primitive which is almost the same as the pipeline primitive
# (see "_create_pipeline_primitive"), but with a different random seed because of a different
# "current_step". Then we clone it (using "_clone_primitive") in "_handle_primitive_hyperparams"
# which uses the final random seed. This way we are handling all primitives in hyper-parameters
# the same no matter the source (it could be somebody somehow passes a primitive instance through
# produce method's output or something).
# TODO: See if an optimization (no additional clone) here is needed and how hard is to implement it.
# TODO: Try to re-use existing primitive instances.
# We currently do not store primitive instances of prior steps, but we could those we know we
# will need in later steps and then just use them here, instead of creating them from scratch.
primitive = self._create_primitive_reference_primitive(primitive_reference, hyperparameter_name)
primitives.append(primitive)
if utils.is_sequence(hyperparameter_description['data']):
pipeline_hyperparams[hyperparameter_name] = primitives
else:
assert len(primitives) == 1
pipeline_hyperparams[hyperparameter_name] = primitives[0] # type: ignore
elif hyperparameter_description['type'] == metadata_base.ArgumentType.CONTAINER:
pipeline_hyperparams[hyperparameter_name] = self.data_values[hyperparameter_description['data']]
elif hyperparameter_description['type'] == metadata_base.ArgumentType.VALUE:
pipeline_hyperparams[hyperparameter_name] = hyperparameter_description['data']
else:
raise exceptions.UnexpectedValueError("Unknown hyper-parameter type: {hyperparameter_type}".format(hyperparameter_type=hyperparameter_description['type']))
return pipeline_hyperparams
def _prepare_primitive_hyperparams(self, step_index: int, step: pipeline_module.PrimitiveStep) -> typing.Tuple[hyperparams_module.Hyperparams, typing.Dict]:
default_hyperparams = self._get_default_hyperparams(step_index, step)
pipeline_hyperparams = self._get_pipeline_hyperparams(step_index, step)
runtime_hyperparams = self._get_runtime_hyperparams(step_index, step)
# Pipeline hyper-parameters should be disjoint with runtime hyper-parameters.
# We check this in "_check_hyperparams" call from the constructor.
assert set(pipeline_hyperparams.keys()).isdisjoint(set(runtime_hyperparams.keys())), (pipeline_hyperparams, runtime_hyperparams)
hyperparams = default_hyperparams.replace(pipeline_hyperparams).replace(runtime_hyperparams)
# We have to handle all primitive values present in hyper-parameters.
return self._handle_primitive_hyperparams(hyperparams, 0), pipeline_hyperparams
def _filter_arguments(self, primitive_class: typing.Type[base.PrimitiveBase], method_name: str, arguments: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:
"""
Primitive as a whole gets arguments for all its methods, so here we then filter out
only those arguments expected by a given method.
"""
method_arguments = primitive_class.metadata.query()['primitive_code'].get('instance_methods', {}).get(method_name, {}).get('arguments', [])
filtered_arguments = {}
for argument_name in method_arguments:
if argument_name in arguments:
filtered_arguments[argument_name] = arguments[argument_name]
return filtered_arguments
def _get_primitive_volumes(self, primitive_class: typing.Type[base.PrimitiveBase]) -> typing.Dict:
volumes = {}
for entry in primitive_class.metadata.get_volumes():
if self.volumes_dir is None:
raise exceptions.InvalidArgumentValueError(
"Primitive '{primitive_id}' of step {step_index} of pipeline '{pipeline_id}' requires static files (volumes) but volumes are not available.".format(
primitive_id=primitive_class.metadata.query()['id'],
step_index=self.current_step,
pipeline_id=self.pipeline.id,
),
)
volume_path = os.path.join(self.volumes_dir, entry['file_digest'])
if not os.path.exists(volume_path):
raise exceptions.InvalidArgumentValueError(
"Primitive '{primitive_id}' of step {step_index} of pipeline '{pipeline_id}' requires static files (volume) but volume for key '{key}' is not available.".format(
primitive_id=primitive_class.metadata.query()['id'],
step_index=self.current_step,
pipeline_id=self.pipeline.id,
key=entry['key'],
),
)
volumes[entry['key']] = volume_path
return volumes
    def _get_primitive_temporary_directory(self, primitive_class: typing.Type[base.PrimitiveBase]) -> str:
        """Create and return a fresh scratch directory for this primitive, under the runtime's base temp directory."""
        return tempfile.mkdtemp(dir=self._base_temporary_directory_path)
def _create_primitive_arguments(self, primitive_class: typing.Type[base.PrimitiveBase], hyperparams: hyperparams_module.Hyperparams, random_seed_offset: int) -> typing.Dict:
constructor_arguments = {
'hyperparams': hyperparams,
# We change the random seed in a deterministic way so that it does not matter in which order we run steps.
'random_seed': self.random_seed + self.current_step + random_seed_offset,
'volumes': self._get_primitive_volumes(primitive_class),
'temporary_directory': self._get_primitive_temporary_directory(primitive_class),
}
filtered_arguments = self._filter_arguments(primitive_class, '__init__', constructor_arguments)
return filtered_arguments
def _create_primitive(self, primitive_class: typing.Type[base.PrimitiveBase], hyperparams: hyperparams_module.Hyperparams, random_seed_offset: int) -> base.PrimitiveBase:
"""
Creates an instance of a non-pipeline primitive.
Constructor call is not recorded in pipeline run.
"""
arguments = self._create_primitive_arguments(primitive_class, hyperparams, random_seed_offset)
return primitive_class(**arguments)
def _clone_primitive(self, primitive: base.PrimitiveBase, random_seed_offset: int) -> base.PrimitiveBase:
"""
Clone a primitive. It reuses hyper-parameters and params, but provides a
potentially different random seed and other constructor arguments.
We are creating a new instance and not a deep copy because primitive instance might have
been created outside of the runtime and might not have valid constructor argument values.
"""
# We have to handle all primitive values present in hyper-parameters.
# They are all already an instance, but we have to make their copies.
hyperparams = self._handle_primitive_hyperparams(primitive.hyperparams, random_seed_offset + 1)
primitive_clone = self._create_primitive(type(primitive), hyperparams, random_seed_offset)
primitive_clone.set_params(params=primitive.get_params())
return primitive_clone
def _create_pipeline_primitive(self, primitive_class: typing.Type[base.PrimitiveBase], hyperparams: hyperparams_module.Hyperparams) -> base.PrimitiveBase:
"""
Creates an instance of a pipeline primitive.
Constructor call is recorded in pipeline run.
"""
assert self.pipeline_run is not None
arguments = self._create_primitive_arguments(primitive_class, hyperparams, 0)
if 'random_seed' in arguments:
self.pipeline_run.set_primitive_step_random_seed(self.current_step, arguments['random_seed'])
return self._call_primitive_method(primitive_class, arguments)
def _create_hyperparameter_primitive(self, primitive_class: typing.Type[base.PrimitiveBase], random_seed_offset: int) -> base.PrimitiveBase:
"""
Creates an instance of the non-pipeline primitive with default hyper-parameters.
"""
hyperparams_class = primitive_class.metadata.get_hyperparams()
return self._create_primitive(primitive_class, hyperparams_class.defaults(), random_seed_offset)
def _create_primitive_reference_primitive(self, primitive_reference: int, hyperparameter_name: str) -> base.PrimitiveBase:
"""
Creates an instance of a primitive based on its primitive reference (step index), meaning the instance
of a primitive is almost the same as the pipeline primitive (see "_create_pipeline_primitive") at that
step index, but with a different random seed because of a probably different "current_step".
Constructor call is not recorded in pipeline run.
"""
# It could point to a sub-pipeline and not primitive.
if not isinstance(self.pipeline.steps[primitive_reference], pipeline_module.PrimitiveStep):
raise exceptions.InvalidPipelineError(
"Hyper-parameter '{hyperparameter_name}' of step {step_index} of pipeline '{pipeline_id}' does not point to a primitive step (step {primitive_reference}).".format( # noqa
hyperparameter_name=hyperparameter_name,
step_index=self.current_step,
pipeline_id=self.pipeline.id,
primitive_reference=primitive_reference,
),
)
step = typing.cast(pipeline_module.PrimitiveStep, self.pipeline.steps[primitive_reference])
if step.primitive is None:
raise exceptions.InvalidStateError("Primitive has not been resolved.")
hyperparams, pipeline_hyperparams = self._prepare_primitive_hyperparams(primitive_reference, step)
# We use 0 for "random_seed_offset" because we are creating a primitive instance
# which should be the same as the pipeline primitive (see "_create_pipeline_primitive").
primitive = self._create_primitive(step.primitive, hyperparams, 0)
primitive.set_params(params=self.steps_state[primitive_reference])
return primitive
def _transform_primitive_hyperparameter(self, hyperparameter: hyperparams_module.Hyperparameter, value: typing.Any, index: int) -> typing.Any:
value_is_type = utils.is_type(value)
if value_is_type and issubclass(value, base.PrimitiveBase):
return self._create_hyperparameter_primitive(value, index)
elif not value_is_type and isinstance(value, base.PrimitiveBase):
return self._clone_primitive(value, index)
else:
# Not a primitive instance or a primitive class, do not do anything.
return value
def _handle_primitive_hyperparams(self, hyperparams: base.Hyperparams, random_seed_offset: int) -> base.Hyperparams:
"""
Handles a special case when the value is a primitive instance or a primitive class.
In this case we have to make sure we create a new instance reusing its hyper-parameters,
or create an instance from the class using default hyper-parameters.
"""
return hyperparams.transform_value(hyperparams, self._transform_primitive_hyperparameter, random_seed_offset)
    def _run_primitive(self, step: pipeline_module.PrimitiveStep) -> None:
        """Execute one primitive step: prepare arguments and hyper-parameters,
        instantiate the primitive, and run its fit/produce methods.

        (Method continues beyond this excerpt.)
        """
        assert self.pipeline_run is not None
        if step.primitive is None:
            raise exceptions.InvalidPipelineError("Primitive has not been resolved.")
        self.pipeline_run.add_primitive_step(step)
        arguments = self._prepare_primitive_arguments(step)
        hyperparams, pipeline_hyperparams = self._prepare_primitive_hyperparams(self.current_step, step)
        if self.phase == metadata_base.PipelineRunPhase.FIT:
            self.pipeline_run.set_primitive_step_hyperparams(self.current_step, hyperparams, pipeline_hyperparams)
        # We create a primitive just before it is being run. This assures that any primitives it depends on through its
        # hyper-parameters have already been run (because they are in prior steps). Similarly, any pipeline-based value
        # being passed to a hyper-parameter has already been computed.
        primitive = self._create_pipeline_primitive(step.primitive, hyperparams)
        # If primitive step has no arguments we do not fit or produce it. It is meant to be used as
        # unfitted primitive for another primitive's hyper-parameter.
        if not arguments:
            return
        if self.phase == metadata_base.PipelineRunPhase.FIT:
            # Fitting must start from a clean slate for this step.
            assert self.steps_state[self.current_step] is None
        else:
            # Non-FIT phases restore the params captured during fitting.
            primitive.set_params(params=self.steps_state[self.current_step])
        arguments_set = set(arguments.keys())
        # Required arguments are all arguments required by produce methods used in step outputs and "set_training_data".
        required_arguments = step._get_required_arguments()
        instance_methods = step.primitive.metadata.query()['primitive_code'].get('instance_methods', {})
        # This should already be checked by "PrimitiveStep.check_add", but we check it here as well,
        # to provide a more user friendly error if somebody is subclassing the runtime breaking this.
        missing_arguments = required_arguments - arguments_set
        if missing_arguments:
            raise exceptions.InvalidArgumentValueError(
                "Not all required arguments are provided for the primitive: {missing_arguments}".format(
                    missing_arguments=missing_arguments,
                )
            )
        # "multi_produce" and "fit_multi_produce" accept all possible arguments to the primitive.
        # But if not all produce methods are being called, some of those arguments are not
        # necessary and we pass "None" for them. We know that at this point "arguments" include
        # arguments for all produce methods used in step outputs and "set_training_data".
        # So we can iterate over all produce methods (which might include those not among step outputs)
        # and set value for any missing arguments to "None".
        for method_name, method_metadata in instance_methods.items():
            if method_metadata['kind'] == metadata_base.PrimitiveMethodKind.PRODUCE:
                for argument in method_metadata['arguments']:
                    if argument not in arguments:
                        arguments[argument] = None
        # Get the produce methods from the union of the step outputs and outputs to expose.
        produce_methods = list(step.outputs)
        step_reference_prefix = 'steps.{i}.'.format(i=step.index)
        for output_to_expose in self.outputs_to_expose:
            if output_to_expose.startswith(step_reference_prefix):
                produce_method = output_to_expose[len(step_reference_prefix):]
                # Produce method should not contain a dot.
# Source: TopDeveloper-333/opentoontownsrc - toontown/distributed/ToontownClientRepository.py (0 GitHub stars)
import types
import time
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import ivalMgr
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedSmoothNode
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
from direct.task import Task
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.showbase.PythonUtil import Functor, ScratchPad
from direct.showbase.InputStateGlobal import inputState
from otp.avatar import Avatar
from otp.avatar import DistributedAvatar
from otp.friends import FriendManager
from otp.login import TTAccount
from otp.login import AccountServerConstants
from otp.login import LoginScreen
from otp.login import LoginGSAccount
from otp.login import LoginGoAccount
from otp.login import LoginWebPlayTokenAccount
from otp.login import LoginTTAccount
from otp.login import HTTPUtil
from otp.distributed import OTPClientRepository
from otp.distributed import PotentialAvatar
from otp.distributed import PotentialShard
from otp.distributed import DistributedDistrict
from otp.distributed.OtpDoGlobals import *
from otp.distributed import OtpDoGlobals
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPLocalizer
from otp.otpbase import OTPLauncherGlobals
from otp.avatar.Avatar import teleportNotify
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase.ToontownGlobals import *
from toontown.launcher.DownloadForceAcknowledge import *
from toontown.distributed import DelayDelete
from toontown.friends import FriendHandle
from toontown.friends import FriendsListPanel
from toontown.friends import ToontownFriendSecret
from toontown.uberdog import TTSpeedchatRelay
from toontown.login import DateObject
from toontown.login import AccountServerDate
from toontown.login import AvatarChooser
from toontown.makeatoon import MakeAToon
from toontown.pets import DistributedPet, PetDetail, PetHandle
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
from toontown.toon import LocalToon
from toontown.toon import ToonDNA
from toontown.distributed import ToontownDistrictStats
from toontown.makeatoon import TTPickANamePattern
from toontown.parties import ToontownTimeManager
from toontown.toon import Toon, DistributedToon
from ToontownMsgTypes import *
import HoodMgr
import PlayGame
from toontown.toontowngui import ToontownLoadingBlocker
from toontown.hood import StreetSign
class ToontownClientRepository(OTPClientRepository.OTPClientRepository):
    # Client-side distributed-object repository for Toontown: extends the OTP
    # repository with Toontown-specific login, avatar and zone-interest handling.
    SupportTutorial = 1
    GameGlobalsId = OTP_DO_ID_TOONTOWN
    # Messenger event names used by the emulated set-zone / interest machinery.
    SetZoneDoneEvent = 'TCRSetZoneDone'
    EmuSetZoneDoneEvent = 'TCREmuSetZoneDone'
    SetInterest = 'Set'
    ClearInterest = 'Clear'
    ClearInterestDoneEvent = 'TCRClearInterestDone'
    KeepSubShardObjects = False
    def __init__(self, serverVersion, launcher = None):
        # Set up fonts, manager placeholders, global distributed objects and
        # the game FSM extensions used by the Toontown client.
        OTPClientRepository.OTPClientRepository.__init__(self, serverVersion, launcher, playGame=PlayGame.PlayGame)
        self._playerAvDclass = self.dclassesByName['DistributedToon']
        # Install localized fonts before any UI is created.
        setInterfaceFont(TTLocalizer.InterfaceFont)
        setSignFont(TTLocalizer.SignFont)
        setFancyFont(TTLocalizer.FancyFont)
        nameTagFontIndex = 0
        for font in TTLocalizer.NametagFonts:
            setNametagFont(nameTagFontIndex, TTLocalizer.NametagFonts[nameTagFontIndex])
            nameTagFontIndex += 1
        # doId -> DistributedToon for toons we know about.
        self.toons = {}
        if self.http.getVerifySsl() != HTTPClient.VSNoVerify:
            # Relax SSL date checking only (certificates are still verified).
            self.http.setVerifySsl(HTTPClient.VSNoDateCheck)
        #prepareAvatar(self.http)
        self.__forbidCheesyEffects = 0
        # Distributed manager references; populated as the relevant objects generate.
        self.friendManager = None
        self.speedchatRelay = None
        self.trophyManager = None
        self.bankManager = None
        self.catalogManager = None
        self.welcomeValleyManager = None
        self.newsManager = None
        self.streetSign = None
        self.distributedDistrict = None
        self.partyManager = None
        self.inGameNewsMgr = None
        self.whitelistMgr = None
        self.toontownTimeManager = ToontownTimeManager.ToontownTimeManager()
        # Global (always-available) distributed objects.
        self.avatarFriendsManager = self.generateGlobalObject(OtpDoGlobals.OTP_DO_ID_AVATAR_FRIENDS_MANAGER, 'AvatarFriendsManager')
        self.playerFriendsManager = self.generateGlobalObject(OtpDoGlobals.OTP_DO_ID_PLAYER_FRIENDS_MANAGER, 'TTPlayerFriendsManager')
        self.speedchatRelay = self.generateGlobalObject(OtpDoGlobals.OTP_DO_ID_TOONTOWN_SPEEDCHAT_RELAY, 'TTSpeedchatRelay')
        self.deliveryManager = self.generateGlobalObject(OtpDoGlobals.OTP_DO_ID_TOONTOWN_DELIVERY_MANAGER, 'DistributedDeliveryManager')
        if config.GetBool('want-code-redemption', 1):
            self.codeRedemptionManager = self.generateGlobalObject(OtpDoGlobals.OTP_DO_ID_TOONTOWN_CODE_REDEMPTION_MANAGER, 'TTCodeRedemptionMgr')
        self.streetSign = None
        self.furnitureManager = None
        self.objectManager = None
        # Friends bookkeeping.
        self.friendsMap = {}
        self.friendsOnline = {}
        self.friendsMapPending = 0
        self.friendsListError = 0
        self.friendPendingChatSettings = {}
        self.elderFriendsMap = {}
        # Outstanding avatar-details queries (see getAvatarDetails).
        self.__queryAvatarMap = {}
        self.dateObject = DateObject.DateObject()
        self.accountServerDate = AccountServerDate.AccountServerDate()
        self.hoodMgr = HoodMgr.HoodMgr(self)
        # Emulated set-zone state (interest-based zoning).
        self.setZonesEmulated = 0
        self.old_setzone_interest_handle = None
        self.setZoneQueue = Queue()
        self.accept(ToontownClientRepository.SetZoneDoneEvent, self._handleEmuSetZoneDone)
        self._deletedSubShardDoIds = set()
        self.toonNameDict = {}
        # Extend the game FSM with the tutorial-skip flow.
        self.gameFSM.addState(State.State('skipTutorialRequest', self.enterSkipTutorialRequest, self.exitSkipTutorialRequest, ['playGame', 'gameOff', 'tutorialQuestion']))
        state = self.gameFSM.getStateNamed('waitOnEnterResponses')
        state.addTransition('skipTutorialRequest')
        state = self.gameFSM.getStateNamed('playGame')
        state.addTransition('skipTutorialRequest')
        self.wantCogdominiums = base.config.GetBool('want-cogdominiums', 1)
        self.wantEmblems = base.config.GetBool('want-emblems', 0)
        if base.config.GetBool('tt-node-check', 0):
            # Debug sweep: try to build every toon DNA combination and report failures.
            for species in ToonDNA.toonSpeciesTypes:
                for head in ToonDNA.getHeadList(species):
                    for torso in ToonDNA.toonTorsoTypes:
                        for legs in ToonDNA.toonLegTypes:
                            for gender in ('m', 'f'):
                                print 'species: %s, head: %s, torso: %s, legs: %s, gender: %s' % (species, head, torso, legs, gender)
                                dna = ToonDNA.ToonDNA()
                                dna.newToon((head, torso, legs, gender))
                                toon = Toon.Toon()
                                try:
                                    toon.setDNA(dna)
                                except Exception, e:
                                    print e
        return
    def congratulations(self, avatarChoice):
        # Show the "name approved" congratulations screen for the chosen avatar.
        self.acceptedScreen = loader.loadModel('phase_3/models/gui/toon_council')
        self.acceptedScreen.setScale(0.667)
        self.acceptedScreen.reparentTo(aspect2d)
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        self.acceptedBanner = DirectLabel(parent=self.acceptedScreen, relief=None, text=OTPLocalizer.CRNameCongratulations, text_scale=0.18, text_fg=Vec4(0.6, 0.1, 0.1, 1), text_pos=(0, 0.05), text_font=getMinnieFont())
        newName = avatarChoice.approvedName
        self.acceptedText = DirectLabel(parent=self.acceptedScreen, relief=None, text=OTPLocalizer.CRNameAccepted % newName, text_scale=0.125, text_fg=Vec4(0, 0, 0, 1), text_pos=(0, -0.15))
        self.okButton = DirectButton(parent=self.acceptedScreen, image=(buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr')), relief=None, text='Ok', scale=1.5, text_scale=0.05, text_pos=(0.0, -0.1), pos=(0, 0, -1), command=self.__handleCongrats, extraArgs=[avatarChoice])
        # The button keeps its own copies of the geometry; drop our reference.
        buttons.removeNode()
        base.transitions.noFade()
        return
    def __handleCongrats(self, avatarChoice):
        # Tear down the congratulations screen, acknowledge the approved name
        # with the server, then continue to avatar selection.
        self.acceptedBanner.destroy()
        self.acceptedText.destroy()
        self.okButton.destroy()
        self.acceptedScreen.removeNode()
        del self.acceptedScreen
        del self.okButton
        del self.acceptedText
        del self.acceptedBanner
        if not self.astronSupport:
            # Legacy protocol: clear the wish-name flag ourselves (1 = accepted).
            datagram = PyDatagram()
            datagram.addUint16(CLIENT_SET_WISHNAME_CLEAR)
            datagram.addUint32(avatarChoice.id)
            datagram.addUint8(1)
            self.send(datagram)
            self.loginFSM.request('waitForSetAvatarResponse', [avatarChoice])
        else:
            # Astron: the login manager acknowledges the name, then we proceed.
            self.astronLoginManager.sendAcknowledgeAvatarName(avatarChoice.id,
                lambda: self.loginFSM.request('waitForSetAvatarResponse', [avatarChoice]))
    def betterlucknexttime(self, avList, index):
        # Show the "name rejected" dialog; __handleReject runs when dismissed.
        self.rejectDoneEvent = 'rejectDone'
        self.rejectDialog = TTDialog.TTGlobalDialog(doneEvent=self.rejectDoneEvent, message=TTLocalizer.NameShopNameRejected, style=TTDialog.Acknowledge)
        self.rejectDialog.show()
        self.acceptOnce(self.rejectDoneEvent, self.__handleReject, [avList, index])
        base.transitions.noFade()
def __handleReject(self, avList, index):
self.rejectDialog.cleanup()
if not self.astronSupport:
datagram = PyDatagram()
datagram.addUint16(CLIENT_SET_WISHNAME_CLEAR)
avid = 0
for k in avList:
if k.position == index:
avid = k.id
if avid == 0:
self.notify.error('Avatar rejected not found in avList. Index is: ' + str(index))
if not self.astronSupport:
datagram.addUint32(avid)
datagram.addUint8(0)
self.send(datagram)
self.loginFSM.request('waitForAvatarList')
else:
self.astronLoginManager.sendAcknowledgeAvatarName(avId, lambda: self.loginFSM.request('waitForAvatarList'))
    def enterChooseAvatar(self, avList):
        # Enter the avatar-chooser state: free unused assets, detach from any
        # previous avatar, start the theme music and bring up the chooser UI.
        ModelPool.garbageCollect()
        TexturePool.garbageCollect()
        self.sendSetAvatarIdMsg(0)  # 0 = no avatar selected on the server
        self.clearFriendState()
        if self.music == None and base.musicManagerIsValid:
            self.music = base.musicManager.getSound('phase_3/audio/bgm/tt_theme.mid')
            if self.music:
                self.music.setLoop(1)
                self.music.setVolume(0.9)
                self.music.play()
        # NOTE(review): the theme may already be playing from the block above;
        # presumably interrupt=None keeps this call from restarting it - confirm.
        base.playMusic(self.music, looping=1, volume=0.9, interrupt=None)
        self.handler = self.handleMessageType
        self.avChoiceDoneEvent = 'avatarChooserDone'
        self.avChoice = AvatarChooser.AvatarChooser(avList, self.loginFSM, self.avChoiceDoneEvent)
        self.avChoice.load(self.isPaid())
        self.avChoice.enter()
        self.accept(self.avChoiceDoneEvent, self.__handleAvatarChooserDone, [avList])
        if config.GetBool('want-gib-loader', 1):
            self.loadingBlocker = ToontownLoadingBlocker.ToontownLoadingBlocker(avList)
        return
    def __handleAvatarChooserDone(self, avList, doneStatus):
        # Dispatch on the chooser's exit mode: quit, pick an existing toon,
        # name a toon, create a new one, or delete one.
        done = doneStatus['mode']
        if done == 'exit':
            # On Vista non-dummy launchers, unpaid players see the upsell on exit.
            if not launcher.isDummy() and launcher.VISTA:
                if not self.isPaid():
                    self.loginFSM.request('shutdown', [OTPLauncherGlobals.ExitUpsell])
                else:
                    self.loginFSM.request('shutdown')
            else:
                self.loginFSM.request('shutdown')
            return
        index = self.avChoice.getChoice()
        # NOTE(review): if no av.position matches index, 'avatarChoice' stays
        # unbound and the branches below raise - presumably the chooser
        # guarantees a valid slot; confirm.
        for av in avList:
            if av.position == index:
                avatarChoice = av
                self.notify.info('================')
                self.notify.info('Chose avatar id: %s' % av.id)
                self.notify.info('Chose avatar name: %s' % av.name)
                dna = ToonDNA.ToonDNA()
                dna.makeFromNetString(av.dna)
                if base.logPrivateInfo:
                    self.notify.info('Chose avatar dna: %s' % (dna.asTuple(),))
                self.notify.info('Chose avatar position: %s' % av.position)
                self.notify.info('isPaid: %s' % self.isPaid())
                self.notify.info('freeTimeLeft: %s' % self.freeTimeLeft())
                self.notify.info('allowSecretChat: %s' % self.allowSecretChat())
                self.notify.info('================')
        if done == 'chose':
            self.avChoice.exit()
            if avatarChoice.approvedName != '':
                # A pending wish-name was approved; celebrate before playing.
                self.congratulations(avatarChoice)
                avatarChoice.approvedName = ''
            elif avatarChoice.rejectedName != '':
                avatarChoice.rejectedName = ''
                self.betterlucknexttime(avList, index)
            else:
                self.loginFSM.request('waitForSetAvatarResponse', [avatarChoice])
        elif done == 'nameIt':
            # Naming requires phase 4 content; force the download check first.
            self.accept('downloadAck-response', self.__handleDownloadAck, [avList, index])
            self.downloadAck = DownloadForceAcknowledge('downloadAck-response')
            self.downloadAck.enter(4)
        elif done == 'create':
            self.loginFSM.request('createAvatar', [avList, index])
        elif done == 'delete':
            self.loginFSM.request('waitForDeleteAvatarResponse', [avatarChoice])
def __handleDownloadAck(self, avList, index, doneStatus):
if doneStatus['mode'] == 'complete':
self.goToPickAName(avList, index)
else:
self.loginFSM.request('chooseAvatar', [avList])
self.downloadAck.exit()
self.downloadAck = None
self.ignore('downloadAck-response')
return
    def exitChooseAvatar(self):
        # Leave the avatar-chooser state: restore the default message handler
        # and fully unload the chooser UI.
        self.handler = None
        self.avChoice.exit()
        self.avChoice.unload()
        self.avChoice = None
        self.ignore(self.avChoiceDoneEvent)
        return
    def goToPickAName(self, avList, index):
        # Hand control from the chooser to the create-avatar (naming) state.
        self.avChoice.exit()
        self.loginFSM.request('createAvatar', [avList, index])
    def enterCreateAvatar(self, avList, index, newDNA = None):
        # Enter the make-a-toon state. When newDNA is given, a placeholder
        # PotentialAvatar is appended to avList for the new toon in progress.
        if self.music:
            self.music.stop()
            self.music = None
        if newDNA != None:
            self.newPotAv = PotentialAvatar.PotentialAvatar('deleteMe', ['', '', '', ''], newDNA.makeNetString(), index, 1)
            avList.append(self.newPotAv)
        base.transitions.noFade()
        self.avCreate = MakeAToon.MakeAToon(self.loginFSM, avList, 'makeAToonComplete', index, self.isPaid())
        self.avCreate.load()
        self.avCreate.enter()
        if not self.astronSupport:
            # Legacy protocol routes create/name replies through a custom handler.
            self.handler = self.handleCreateAvatar
        self.accept('makeAToonComplete', self.__handleMakeAToon, [avList, index])
        self.accept('nameShopCreateAvatar', self.sendCreateAvatarMsg)
        self.accept('nameShopPost', self.relayMessage)
        return
    def relayMessage(self, dg):
        # Forward a pre-built datagram (posted by the name shop) to the server.
        self.send(dg)
def handleCreateAvatar(self, msgType, di):
if msgType == CLIENT_CREATE_AVATAR_RESP or msgType == CLIENT_SET_NAME_PATTERN_ANSWER or msgType == CLIENT_SET_WISHNAME_RESP:
self.avCreate.ns.nameShopHandler(msgType, di)
else:
self.handleMessageType(msgType, di)
    def __handleMakeAToon(self, avList, avPosition):
        # Callback for the 'makeAToonComplete' event fired by MakeAToon.
        done = self.avCreate.getDoneStatus()
        if done == 'cancel':
            # Drop the placeholder avatar added by enterCreateAvatar, if any.
            if hasattr(self, 'newPotAv'):
                if self.newPotAv in avList:
                    avList.remove(self.newPotAv)
            self.avCreate.exit()
            self.loginFSM.request('chooseAvatar', [avList])
        elif done == 'created':
            self.avCreate.exit()
            if not base.launcher or base.launcher.getPhaseComplete(3.5):
                # NOTE(review): if no entry matches avPosition, 'newPotAv' is
                # unbound below - presumably a match is guaranteed; confirm.
                for i in avList:
                    if i.position == avPosition:
                        newPotAv = i
                self.loginFSM.request('waitForSetAvatarResponse', [newPotAv])
            else:
                # Phase 3.5 not downloaded yet; go back to the chooser.
                self.loginFSM.request('chooseAvatar', [avList])
        else:
            self.notify.error('Invalid doneStatus from MakeAToon: ' + str(done))
    def exitCreateAvatar(self):
        # Leave the make-a-toon state: drop event hooks, unload the UI and
        # clear the temporary placeholder avatar.
        self.ignore('makeAToonComplete')
        self.ignore('nameShopPost')
        self.ignore('nameShopCreateAvatar')
        self.avCreate.unload()
        self.avCreate = None
        self.handler = None
        if hasattr(self, 'newPotAv'):
            del self.newPotAv
        return
    # One of two variants of handleAvatarResponseMsg is defined at class-build
    # time, depending on whether the Astron server backend is in use.
    if not config.GetBool('astron-support', True):
        def handleAvatarResponseMsg(self, di):
            # Legacy (non-Astron) reply to "set avatar": read the id and
            # return code from the datagram, build the LocalToon and enter
            # the playingGame state.
            self.cleanupWaitingForDatabase()
            avatarId = di.getUint32()
            returnCode = di.getUint8()
            if returnCode == 0:
                dclass = self.dclassesByName['DistributedToon']
                NametagGlobals.setMasterArrowsOn(0)
                loader.beginBulkLoad('localAvatarPlayGame', OTPLocalizer.CREnteringToontown, 400, 1, TTLocalizer.TIP_GENERAL)
                localAvatar = LocalToon.LocalToon(self)
                localAvatar.dclass = dclass
                base.localAvatar = localAvatar
                # Expose localAvatar as a builtin for legacy code paths.
                __builtins__['localAvatar'] = base.localAvatar
                NametagGlobals.setToon(base.localAvatar)
                localAvatar.doId = avatarId
                self.localAvatarDoId = avatarId
                parentId = None
                zoneId = None
                localAvatar.setLocation(parentId, zoneId)
                localAvatar.generateInit()
                localAvatar.generate()
                # Populate all required fields straight from the datagram.
                localAvatar.updateAllRequiredFields(dclass, di)
                self.doId2do[avatarId] = localAvatar
                localAvatar.initInterface()
                self.sendGetFriendsListRequest()
                self.loginFSM.request('playingGame')
            else:
                self.notify.error('Bad avatar: return code %d' % returnCode)
            return
    else:
        def handleAvatarResponseMsg(self, avatarId, di):
            # Astron variant: avatarId arrives as a parameter and the fields
            # come as an owner-view required-broadcast update.
            self.cleanupWaitingForDatabase()
            dclass = self.dclassesByName['DistributedToon']
            NametagGlobals.setMasterArrowsOn(0)
            loader.beginBulkLoad('localAvatarPlayGame', OTPLocalizer.CREnteringToontown, 400, 1, TTLocalizer.TIP_GENERAL)
            localAvatar = LocalToon.LocalToon(self)
            localAvatar.dclass = dclass
            base.localAvatar = localAvatar
            # Expose localAvatar as a builtin for legacy code paths.
            __builtins__['localAvatar'] = base.localAvatar
            NametagGlobals.setToon(base.localAvatar)
            localAvatar.doId = avatarId
            self.localAvatarDoId = avatarId
            parentId = None
            zoneId = None
            localAvatar.setLocation(parentId, zoneId)
            localAvatar.generateInit()
            localAvatar.generate()
            dclass.receiveUpdateBroadcastRequiredOwner(localAvatar, di)
            localAvatar.announceGenerate()
            localAvatar.postGenerateMessage()
            self.doId2do[avatarId] = localAvatar
            localAvatar.initInterface()
            self.sendGetFriendsListRequest()
            self.loginFSM.request('playingGame')
def getAvatarDetails(self, avatar, func, *args):
pad = ScratchPad()
pad.func = func
pad.args = args
pad.avatar = avatar
pad.delayDelete = DelayDelete.DelayDelete(avatar, 'getAvatarDetails')
avId = avatar.doId
self.__queryAvatarMap[avId] = pad
self.__sendGetAvatarDetails(avId)
def cancelAvatarDetailsRequest(self, avatar):
avId = avatar.doId
if self.__queryAvatarMap.has_key(avId):
pad = self.__queryAvatarMap.pop(avId)
pad.delayDelete.destroy()
    def __sendGetAvatarDetails(self, avId):
        # Build and send the avatar-details query for a pending request.
        datagram = PyDatagram()
        avatar = self.__queryAvatarMap[avId].avatar
        # The message code comes from the avatar object itself (different
        # avatar kinds use different request IDs).
        datagram.addUint16(avatar.getRequestID())
        datagram.addUint32(avId)
        self.send(datagram)
def handleGetAvatarDetailsResp(self, di):
avId = di.getUint32()
returnCode = di.getUint8()
self.notify.info('Got query response for avatar %d, code = %d.' % (avId, returnCode))
try:
pad = self.__queryAvatarMap[avId]
except:
self.notify.warning('Received unexpected or outdated details for avatar %d.' % avId)
return
del self.__queryAvatarMap[avId]
gotData = 0
if returnCode != 0:
self.notify.warning('No information available for avatar %d.' % avId)
else:
dclassName = pad.args[0]
dclass = self.dclassesByName[dclassName]
pad.avatar.updateAllRequiredFields(dclass, di)
gotData = 1
if isinstance(pad.func, types.StringType):
messenger.send(pad.func, list((gotData, pad.avatar) + pad.args))
else:
apply(pad.func, (gotData, pad.avatar) + pad.args)
pad.delayDelete.destroy()
    def enterPlayingGame(self, *args, **kArgs):
        # Enter the playingGame state, then immediately request the
        # waitOnEnterResponses game sub-state targeting the avatar's home zone.
        OTPClientRepository.OTPClientRepository.enterPlayingGame(self, *args, **kArgs)
        self.gameFSM.request('waitOnEnterResponses', [None,
            base.localAvatar.defaultZone,
            base.localAvatar.defaultZone,
            -1])
        self._userLoggingOut = False
        if not self.streetSign:
            self.streetSign = StreetSign.StreetSign()
        return
    def exitPlayingGame(self):
        # Leave the playing-game state. (Method continues beyond this excerpt.)
        ivalMgr.interrupt()  # stop all running intervals
        if self.objectManager != None:
            self.objectManager.destroy()
            self.objectManager = None
| |
# Source: saloniagarwal0403/SSCNN
# Usage:
# conda activate SurvivalAnalysis_January2021
# python Siamese_with_conv_parallel_processing_average_pooling_multiple__magnifications.py>Epoch_4_using_average.txt
import sklearn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation, Add, BatchNormalization, Concatenate, Conv2D, Dense, Flatten, GlobalMaxPooling2D, Lambda, MaxPooling2D, Reshape, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import CreateInputFeatureMaps_average_pooling_multiple_magnifications
from sklearn.model_selection import train_test_split
from tensorflow.python.framework import ops
import datetime, os
import random
import matplotlib
import pickle
import csv
import xlrd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
import warnings
warnings.filterwarnings('ignore')  # NOTE(review): hides all warnings, including deprecations
# Headless backend: figures are only saved, never shown.
# NOTE(review): matplotlib.use() is called after pyplot was imported above;
# the backend switch may not take effect - confirm.
matplotlib.use('Agg')
random.seed(0)  # fix the stdlib RNG so file sampling/shuffles are reproducible
from sklearn.decomposition import PCA
import gc
from sklearn.base import clone
from multiprocessing import Pool
def plot_training(history, acc_val_image_filename):
    """Plot training/validation c-index and loss curves side by side and save the figure.

    Parameters:
        history: Keras History with 'c_index', 'val_c_index', 'loss' and
            'val_loss' series in history.history.
        acc_val_image_filename: path the image is written to.
    """
    acc = history.history['c_index']
    val_acc = history.history['val_c_index']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(acc))
    # Bug fix: the original called plt.figure() first (leaking an extra empty
    # figure) and a single plt.legend(), which attached a legend only to the
    # last axes, leaving ax1's labels unused.
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.plot(epochs, acc, 'b.', label='Training accuracy')
    ax1.plot(epochs, val_acc, 'r-', label='Validation accuracy')
    ax1.set_title('Training and validation accuracy')
    ax1.legend()
    ax2.plot(epochs, loss, 'b.', label='Training loss')
    ax2.plot(epochs, val_loss, 'r-', label='Validation loss')
    ax2.set_title('Training and validation loss')
    ax2.legend()
    fig.savefig(acc_val_image_filename)
    # Close the figure: this script trains many folds and would otherwise
    # accumulate open figures.
    plt.close(fig)
def loss4(y_true, y_pred):
    """Survival ranking loss: penalize predictions whose sign disagrees with
    the target, plus predictions that undershoot the target magnitude when
    the signs agree."""
    # True where the prediction and target have opposite signs.
    sign_mismatch = tf.math.greater(0.0, y_true * y_pred)
    # Term 1: full MSE on sign-mismatched pairs (zeros elsewhere contribute nothing extra).
    mismatched_pred = tf.where(sign_mismatch, y_pred, 0.0)
    mismatched_true = tf.where(sign_mismatch, y_true, 0.0)
    sign_loss = tf.keras.losses.MSE(mismatched_true, mismatched_pred)
    # Term 2: among sign-matched pairs, penalize |pred| falling short of |true|.
    matched_pred = tf.where(sign_mismatch, 0.0, y_pred)
    matched_true = tf.where(sign_mismatch, 0.0, y_true)
    undershoot = tf.math.greater(tf.math.abs(matched_true), tf.math.abs(matched_pred))
    capped_pred = tf.where(undershoot, tf.math.abs(matched_pred), 0.0)
    capped_true = tf.where(undershoot, tf.math.abs(matched_true), 0.0)
    magnitude_loss = tf.keras.losses.MSE(capped_pred, capped_true)
    return sign_loss + magnitude_loss
def loss5(y_true, y_pred):
    """MSE plus a reciprocal penalty pushing |prediction| away from values
    below one year (365 days)."""
    mse = tf.keras.losses.MSE(y_true, y_pred)
    magnitude = tf.math.abs(y_pred)
    near_zero = tf.math.greater(365.0, magnitude)
    # 1/|pred| grows as the prediction approaches zero; the epsilon avoids
    # division by zero.
    penalty = tf.where(near_zero, 1 / (magnitude + 0.00000001), 0.0)
    return mse + tf.math.reduce_sum(penalty)
def loss6(y_true, y_pred):
    """Same two terms as loss4, plus a log-reciprocal term discouraging
    near-zero predictions."""
    disagree = tf.math.greater(0.0, y_true * y_pred)  # True where signs differ
    # Term 1: MSE over sign-mismatched pairs.
    pred_disagree = tf.where(disagree, y_pred, 0.0)
    true_disagree = tf.where(disagree, y_true, 0.0)
    term1 = tf.keras.losses.MSE(true_disagree, pred_disagree)
    # Term 2: magnitude undershoot among sign-matched pairs.
    pred_agree = tf.where(disagree, 0.0, y_pred)
    true_agree = tf.where(disagree, 0.0, y_true)
    undershoot = tf.math.greater(tf.math.abs(true_agree), tf.math.abs(pred_agree))
    capped_pred = tf.where(undershoot, tf.math.abs(pred_agree), 0.0)
    capped_true = tf.where(undershoot, tf.math.abs(true_agree), 0.0)
    term2 = tf.keras.losses.MSE(capped_pred, capped_true)
    # Term 3: log(1/|pred|) is large when predictions sit near zero.
    term3 = tf.math.reduce_sum(tf.math.log(1 / (tf.math.abs(y_pred) + 0.00000001)))
    return term1 + term2 + term3
def get_X_Y_columns(this_df):
    """Split a dataframe into features X and survival targets Y ('Time', 'Occurence')."""
    target_columns = ['Time', 'Occurence']
    Y = this_df[target_columns]
    X = this_df.drop(columns=target_columns)
    return X, Y
def get_features(X):
    # Collect backbone-CNN feature vectors from a random sample of at most 100
    # rows of X; each row's 'filenames' entry holds one or more slide paths.
    # NOTE(review): sampling uses the module-level RNG seeded at import time,
    # so repeated calls draw different (but reproducible) samples.
    train_features = []
    X_filenames = X['filenames'].iloc[:]
    shuf = np.arange(0,len(X_filenames))
    random.shuffle(shuf)
    for i in shuf[0:100]:
        filepaths_i = X_filenames.iloc[i]
        for filepath_i in filepaths_i:
            # print("Working with file: ",i," with path ",filepath_i)
            train_features.extend(CreateInputFeatureMaps_average_pooling_multiple_magnifications.get_model_predictions(filepath_i))
    return train_features
def pca_features_extraction(X, pca, n_pca_f, tensors_size, saving_folder):
    """Generate and store PCA-reduced feature-map tensors for every slide file in X.

    Parameters:
        X: dataframe with a 'filenames' column; each row holds a list of file paths.
        pca: fitted PCA model applied to the raw feature maps.
        n_pca_f: number of PCA components to keep.
        tensors_size: spatial size of the output tensors.
        saving_folder: directory where the generated tensors are written.
    """
    X_filenames = X['filenames'].iloc[:]
    # Cleanup: the original also kept a dropped-column copy of X and the
    # locals 'arguments_for_pooling' and 'count', none of which were ever read.
    # Store the PCA-generated maps for every file of every sample.
    for i in range(len(X_filenames)):
        for filepath_i in X_filenames.iloc[i]:
            CreateInputFeatureMaps_average_pooling_multiple_magnifications.create_tensors(
                filepath_i, pca, n_pca_f, tensors_size, saving_folder)
def permissible_pairs(X, Y, DAYS_DIFF, tensors_size,saving_folder, count_i):
  """Build permissible (comparable) case pairs for Siamese survival ranking.

  For each case pair (i, j) and each combination of their image files (k, l),
  a pair is "permissible" when the earlier event is uncensored within the
  DAYS_DIFF slack.  With count_i == -1 all pairs are returned with their
  pickled image features loaded eagerly; otherwise pair metadata is collected
  first and only a random subset of count_i pairs has its features loaded.

  Returns (image_features_set1, image_features_set2, clinical_set1,
  clinical_set2, y_true, i_j_pairs) where y_true is Time[i] - Time[j].

  NOTE(review): `tensors_size` is unused; `arguments_for_pooling` and (in the
  count_i != -1 branch) `count` are dead locals.
  """
  permissible_pairs_set1 = []
  permissible_pairs_set2 = []
  image_features_set1 = []
  image_features_set2 = []
  y_true = []
  X_filenames = X['filenames'].iloc[:]
  X = X.drop(columns= ["filenames"])
  arguments_for_pooling = []
  count = 0
  i_j_pairs = []
  if count_i==-1:
    for i in range(len(X)):
      for j in range(i+1, len(X)):
        if Y["Occurence"].iloc[i]==True and (Y["Time"].iloc[i]<(Y["Time"].iloc[j]+DAYS_DIFF)):
          filepaths_i = X_filenames.iloc[i]
          filepaths_j = X_filenames.iloc[j]
          for k in filepaths_i:
            for l in filepaths_j:
              # Clinical feature vectors (the 'filenames' column was dropped above).
              img_a = tf.cast(X.iloc[i,:],tf.float32) ## retrieveing all the columns except last as it is for filename
              img_b = tf.cast(X.iloc[j,:],tf.float32)
              original_image_features_pickle_file_name = os.path.split(k)[-1]
              with open(os.path.join(saving_folder,original_image_features_pickle_file_name), 'rb') as handle:
                image_features_i = pickle.load(handle)
              image_features_set1.append(image_features_i)
              original_image_features_pickle_file_name = os.path.split(l)[-1]
              with open(os.path.join(saving_folder,original_image_features_pickle_file_name), 'rb') as handle:
                image_features_j = pickle.load(handle)
              image_features_set2.append(image_features_j)
              permissible_pairs_set1.append(img_a)
              permissible_pairs_set2.append(img_b)
              y_true.append(Y["Time"].iloc[i]-Y["Time"].iloc[j])
              i_j_pairs.append([i,j])
              count+=1
              # NOTE(review): this second condition is nested INSIDE the first
              # condition's (k, l) loop, so it is only evaluated when the first
              # condition held, and a pair satisfying both conditions is
              # appended twice.  Presumably intended as a sibling check at the
              # (i, j) level — confirm against the training pipeline.
              if Y["Occurence"].iloc[j]==True and ((Y["Time"].iloc[i]+DAYS_DIFF)>Y["Time"].iloc[j]):
                img_a = tf.cast(X.iloc[i,:],tf.float32)
                img_b = tf.cast(X.iloc[j,:],tf.float32)
                original_image_features_pickle_file_name = os.path.split(k)[-1]
                with open(os.path.join(saving_folder,original_image_features_pickle_file_name), 'rb') as handle:
                  image_features_i = pickle.load(handle)
                image_features_set1.append(image_features_i)
                original_image_features_pickle_file_name = os.path.split(l)[-1]
                with open(os.path.join(saving_folder,original_image_features_pickle_file_name), 'rb') as handle:
                  image_features_j = pickle.load(handle)
                image_features_set2.append(image_features_j)
                permissible_pairs_set1.append(img_a)
                permissible_pairs_set2.append(img_b)
                y_true.append(Y["Time"].iloc[i]-Y["Time"].iloc[j])
                i_j_pairs.append([i,j])
                count+=1
      # if count==1000:
        # return image_features_set1, image_features_set2, permissible_pairs_set1 , permissible_pairs_set2 , y_true
    return image_features_set1, image_features_set2, permissible_pairs_set1 , permissible_pairs_set2 , y_true, i_j_pairs
  else:
    # Lazy branch: only record which pickle files belong to each pair, then
    # load features for a random subset of count_i pairs at the end.
    valid_pairs = []
    for i in range(len(X)):
      for j in range(i+1, len(X)):
        if Y["Occurence"].iloc[i]==True and (Y["Time"].iloc[i]<(Y["Time"].iloc[j]+DAYS_DIFF)):
          filepaths_i = X_filenames.iloc[i]
          filepaths_j = X_filenames.iloc[j]
          for k in filepaths_i:
            for l in filepaths_j:
              img_a = tf.cast(X.iloc[i,:],tf.float32) ## retrieveing all the columns except last as it is for filename
              img_b = tf.cast(X.iloc[j,:],tf.float32)
              original_image_features_pickle_file_name_k = os.path.split(k)[-1]
              original_image_features_pickle_file_name_l = os.path.split(l)[-1]
              valid_pairs.append([original_image_features_pickle_file_name_k,original_image_features_pickle_file_name_l])
              permissible_pairs_set1.append(img_a)
              permissible_pairs_set2.append(img_b)
              y_true.append(Y["Time"].iloc[i]-Y["Time"].iloc[j])
              i_j_pairs.append([i,j])
              # NOTE(review): same nesting issue as the count_i == -1 branch —
              # this check runs per (k, l) and only when the first condition held.
              if Y["Occurence"].iloc[j]==True and ((Y["Time"].iloc[i]+DAYS_DIFF)>Y["Time"].iloc[j]):
                img_a = tf.cast(X.iloc[i,:],tf.float32)
                img_b = tf.cast(X.iloc[j,:],tf.float32)
                original_image_features_pickle_file_name_k = os.path.split(k)[-1]
                original_image_features_pickle_file_name_l = os.path.split(l)[-1]
                valid_pairs.append([original_image_features_pickle_file_name_k,original_image_features_pickle_file_name_l])
                permissible_pairs_set1.append(img_a)
                permissible_pairs_set2.append(img_b)
                y_true.append(Y["Time"].iloc[i]-Y["Time"].iloc[j])
                i_j_pairs.append([i,j])
    # Randomly sample count_i pairs and load their pickled image features.
    shuf = np.arange(0,len(valid_pairs))
    random.shuffle(shuf)
    image_features_set1_valid = []
    image_features_set2_valid = []
    permissible_pairs_set1_valid = []
    permissible_pairs_set2_valid= []
    y_true_valid = []
    i_j_pairs_valid = []
    for i in shuf[0:count_i]:
      permissible_pairs_set1_valid.append(permissible_pairs_set1[i])
      permissible_pairs_set2_valid.append(permissible_pairs_set2[i])
      y_true_valid.append(y_true[i])
      i_j_pairs_valid.append(i_j_pairs[i])
      with open(os.path.join(saving_folder,valid_pairs[i][0]), 'rb') as handle:
        image_features_i = pickle.load(handle)
      image_features_set1_valid.append(image_features_i)
      with open(os.path.join(saving_folder,valid_pairs[i][1]), 'rb') as handle:
        image_features_j = pickle.load(handle)
      image_features_set2_valid.append(image_features_j)
    return image_features_set1_valid, image_features_set2_valid, permissible_pairs_set1_valid , permissible_pairs_set2_valid , y_true_valid, i_j_pairs_valid
def model_def(number_channel,no_clinical_features,first_conv_layer_number_filers,second_conv_layer_number_filers,first_layer_neurons,second_layer_neurons):
    """Single-branch regression network: conv trunk over a variable-size image
    feature map, global-max pooled, concatenated with clinical covariates and
    passed through two dropout-regularised dense layers to one linear output.
    """
    image_input = Input(shape=(None, None, number_channel))
    clinical_input = Input(shape=(no_clinical_features))
    # Convolutional trunk over the (variable-size) image feature map.
    x = tf.keras.layers.Conv2D(first_conv_layer_number_filers, (3, 3), activation='relu')(image_input)
    x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1))(x)
    x = tf.keras.layers.Conv2D(second_conv_layer_number_filers, (3, 3), activation='relu')(x)
    x = tf.keras.layers.GlobalMaxPool2D()(x)
    # Fuse the pooled image embedding with the clinical feature vector.
    x = tf.keras.layers.concatenate([x, clinical_input])
    x = tf.keras.layers.Dense(first_layer_neurons, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(second_layer_neurons, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    # Linear scalar output (no activation): a relative risk/time score.
    score = tf.keras.layers.Dense(1)(x)
    return Model(inputs=[image_input, clinical_input], outputs=score)
def build_model(number_channel,no_clinical_features,first_conv_layer_number_filers,second_conv_layer_number_filers,first_layer_neurons,second_layer_neurons):
    """Assemble the Siamese ranking network.

    One shared branch (model_def) scores each (image, clinical) input; the
    network output is the difference of the two branch scores, whose sign
    orders the pair.
    """
    branch = model_def(number_channel,no_clinical_features,first_conv_layer_number_filers,second_conv_layer_number_filers,first_layer_neurons,second_layer_neurons)
    print(branch.summary())
    # Two input legs share the same branch weights.
    img_a = Input(shape=(None, None, number_channel))
    img_b = Input(shape=(None, None, number_channel))
    clinical_a = Input(shape=(no_clinical_features))
    clinical_b = Input(shape=(no_clinical_features))
    score_a = branch([img_a, clinical_a])
    score_b = branch([img_b, clinical_b])
    # Raw score difference; losses/metrics operate on its sign.
    difference = tf.keras.layers.Subtract()([score_a, score_b])
    return Model(inputs=[img_a, img_b, clinical_a, clinical_b], outputs=[difference])
def c_index_prediction(y_true, y_pred):
    """Fraction of pairs whose prediction has the same sign as the truth.

    Parameters:
        y_true: sequence of true pairwise differences.
        y_pred: sequence of predicted pairwise differences.

    Returns the concordance fraction as a float.  Fixes over the original:
    returns 0.0 for empty input instead of raising ZeroDivisionError, and
    zip() truncation avoids an IndexError when y_true is longer than y_pred.
    """
    if len(y_pred) == 0:
        return 0.0
    # A pair is concordant when true and predicted differences agree in sign.
    concordant = sum(1 for t, p in zip(y_true, y_pred) if t * p > 0)
    return float(concordant) / float(len(y_pred))
def c_index(y_true, y_pred):
    """Tensor version of the concordance metric: mean fraction of elements
    where the prediction and the truth agree in sign (product > 0).
    """
    agreement_mask = tf.math.greater(y_true * y_pred, 0.0)
    indicator = tf.where(agreement_mask, 1.0, 0.0)
    return tf.reduce_mean(indicator)
def case_wise_soft_voting(predictions, true_values, i_j_pairs):
    """Soft-vote tile-level predictions per (i, j) case pair.

    All predictions for the same case pair are summed; the summed score's
    sign is then compared against the pair's true value via
    c_index_prediction.  The first occurrence's true value is kept for each
    pair (all occurrences of a pair share the same truth).
    """
    aggregated = {}
    for idx, pair in enumerate(i_j_pairs):
        key = (pair[0], pair[1])
        if key in aggregated:
            aggregated[key][0] = aggregated[key][0] + predictions[idx]
        else:
            aggregated[key] = [predictions[idx], true_values[idx]]
    y_pred = [entry[0] for entry in aggregated.values()]
    y_true = [entry[1] for entry in aggregated.values()]
    return c_index_prediction(y_true, y_pred)
def case_wise_voting(predictions, true_values, i_j_pairs):
    """Hard (majority) vote of tile-level prediction signs per (i, j) case pair.

    Each tile prediction casts a +1 or -1 vote for its case pair; the pair's
    final prediction is +1 when positive votes strictly outnumber negative
    ones, otherwise -1 (ties go to -1, preserving the original behavior).
    """
    tallies = {}
    for idx, pair in enumerate(i_j_pairs):
        key = (pair[0], pair[1])
        positive = 1 if predictions[idx] > 0 else 0
        negative = 1 - positive
        if key in tallies:
            tallies[key][0] += positive
            tallies[key][1] += negative
        else:
            tallies[key] = [positive, negative, true_values[idx]]
    y_true = []
    y_pred = []
    for positive, negative, truth in tallies.values():
        y_pred.append(1 if positive > negative else -1)
        y_true.append(truth)
    return c_index_prediction(y_true, y_pred)
# TODO: Check how to insert the image feature maps in the dataframe
# Pin the process to GPU 3 before any framework initialisation.
os.environ["CUDA_VISIBLE_DEVICES"]='3'
# Tag the log output with the run/screen name.
print("Screen Name: Epoch_0_using_average")
# Training hyper-parameters.
EPOCHS = 10000
PATIENCE = 150  # presumably early-stopping patience in epochs — TODO confirm
BATCH_SIZE = 32
DAYS_DIFF = 365  # day slack used when forming permissible pairs (see permissible_pairs)
# # PLOTS_FOLDER = "Siamese_with_conv_PCA_loss4_2_b"
# # os.mkdir(PLOTS_FOLDER)
# Destination folder for model weights / output files.
output_files_folder = os.path.join(r"/home","sxa171531","images","TCGA-GBM","output_files")
# # f = open(MODEL_NAME+'.pickle',"x")
# # f.close()
# print("Loss: loss4")
# print("EPOCHS: ",EPOCHS)
# print("PATIENCE: ",PATIENCE)
# print("BATCH_SIZE: ",BATCH_SIZE)
# print("Permissible pairs difference: ",DAYS_DIFF)
# # print("Plots are stored in: ",PLOTS_FOLDER)
# print("No Last layer neuron activation")
# print("0.2 Dropouts between the FCLs")
# print("Saving weights in files: ",output_files_folder)
# print("Tile features: average pooling")
# print("Tiles at 20x, 5x, 1.25x, total 6144 features")
# print("WSI features: Feature map generation")
# df = pd.read_excel(os.path.join(r"/home","sxa171531","images","TCGA-GBM","clinical.xlsx"))
#
# df['filenames']=None
# image_features_dir = os.path.join(r"/home","sxa171531","images","TCGA-GBM","original_image_features")
# ids_in_dataframe = []
# for index, row in df.iterrows():
# if row['case_submitter_id'] in ids_in_dataframe:
# df.drop(index, inplace=True)
# else:
# ids_in_dataframe.append(row['case_submitter_id'])
#
# image_features_path_dic={}
# for filename in os.listdir(image_features_dir):
# case_submitter_id = "-".join(filename.split("-")[0:3])
# if case_submitter_id in image_features_path_dic:
# temp = image_features_path_dic[case_submitter_id]
# temp.append(os.path.join(image_features_dir,filename))
# image_features_path_dic[case_submitter_id]=temp
# else:
# image_features_path_dic[case_submitter_id]=[os.path.join(image_features_dir,filename)]
# # print(len(list(image_features_path_dic.keys())))
#
# for index, row in df.iterrows():
# if row['case_submitter_id'] in list(image_features_path_dic.keys()):
# df.at[index,'filenames'] = image_features_path_dic[row['case_submitter_id']]
#
# columns_of_interest = ['age_at_diagnosis','days_to_death','ethnicity','gender','race','days_to_last_follow_up','filenames']
# df = df[columns_of_interest]
#
# df['ethnicity'] = pd.Categorical(df['ethnicity'])
# df['ethnicity'] = df.ethnicity.cat.codes
# y = pd.get_dummies(df.ethnicity,prefix="ethnicity")
# y = y.drop(y.columns[-1],axis=1)
# df = df.drop(columns= ["ethnicity"])
# df = pd.concat([df, y], axis=1)
#
# df['gender'] = pd.Categorical(df['gender'])
# df['gender'] = df.gender.cat.codes
# y = pd.get_dummies(df.gender,prefix="gender")
# y = y.drop(y.columns[-1],axis=1)
# df = df.drop(columns= ["gender"])
# df = pd.concat([df, y], axis=1)
#
# df['race'] = pd.Categorical(df['race'])
# df['race'] = df.race.cat.codes
# y = pd.get_dummies(df.race,prefix="race")
# y = y.drop(y.columns[-1],axis=1)
# df = df.drop(columns= ["race"])
# df = pd.concat([df, y], axis=1)
#
# df_valid = df['filenames'].notnull()
# df = df[df_valid]
#
# df['Time'] = df['days_to_death'].replace("'--", np.nan, regex=True)
# df['Occurence'] = df['Time'].notnull()
# df['Time'][df['Time'].isnull()] = df['days_to_last_follow_up']
# df['Time'] = df['Time'].astype(np.int64)
# df = df.drop(columns=['days_to_death','days_to_last_follow_up'])
# np.random.seed(0)
#
# df_dev, df_test = train_test_split(df, test_size = | |
# coding: utf-8
import os
import sys
import pymongo as mongo
import json
import pandas as pd
import numpy as np
from datetime import date, datetime
import time
from time import strftime, localtime
from QUANTAXIS.QAFetch import QATdx as tdx
from easyquant.easydealutils.easytime import EasyTime
import re
class MongoIo(object):
"""Redis操作类"""
def __init__(self, host='mgdb', port=27017, database='quantaxis'):
# self.config = self.file2dict(conf)
client = mongo.MongoClient(host, port)
self.db = client[database]
self.st_start = '2018-01-01'
# self.st_end = '2030-12-31'
self.st_start_1min = '2020-01-01'
self.st_start_5min = '2020-01-01'
self.st_start_15min = '2020-01-01'
self.st_start_30min = '2020-01-01'
self.st_start_60min = '2020-01-01'
# self.st_end_day = '2030-12-31'
# if self.config['passwd'] is None:
# self.r = redis.Redis(host=self.config['redisip'], port=self.config['redisport'], db=self.config['db'])
# else:
# self.r = redis.Redis(host=self.config['redisip'], port=self.config['redisport'], db=self.config['db'], password = self.config['passwd'])
def dateStr2stamp(self, dateObj):
dateStr = str(dateObj)[0:10]
date = time.mktime(time.strptime(dateStr, '%Y-%m-%d'))
return date
def datetimeStr2stamp(self, dateObj):
dataTimeStr = str(dateObj)[0:19]
date = time.mktime(time.strptime(dataTimeStr, '%Y-%m-%d %H:%M:%S'))
return date
def _get_data_day(self, code, table, st_start, st_end):
cursor = self.db[table].find(
{
'code': {
'$in': code
},
"date_stamp":
{
"$lte": self.dateStr2stamp(st_end),
"$gte": self.dateStr2stamp(st_start)
}
},
{"_id": 0},
batch_size=10000
)
res = pd.DataFrame([item for item in cursor])
try:
res = res.assign(
volume=res.vol,
date=pd.to_datetime(res.date)
).drop_duplicates((['date',
'code'])).query('volume>1').set_index(
'date',
drop=False
)
res = res.loc[:,
[
'code',
'open',
'high',
'low',
'close',
'volume',
'amount',
'date'
]]
except:
res = None
# if format in ['P', 'p', 'pandas', 'pd']:
return res
# elif format in ['json', 'dict']:
# return QA_util_to_json_from_pandas(res)
# # 多种数据格式
# elif format in ['n', 'N', 'numpy']:
# return numpy.asarray(res)
# elif format in ['list', 'l', 'L']:
# return numpy.asarray(res).tolist()
# else:
# print(
# "QA Error QA_fetch_stock_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" "
# % format
# )
# return None
def _get_data_min(self, code, table, st_start, st_end, type):
cursor = self.db[table].find(
{
'code': {
'$in': code
}
, "time_stamp":
{
"$lte": self.dateStr2stamp(st_end),
"$gte": self.dateStr2stamp(st_start)
}
, 'type': type
},
{"_id": 0},
batch_size=10000
)
res = pd.DataFrame([item for item in cursor])
try:
res = res.assign(
volume=res.vol,
date=pd.to_datetime(res.date)
).drop_duplicates((['datetime',
'code'])).query('volume>1').set_index(
'datetime',
drop=False
)
res = res.loc[:,
[
'code',
'open',
'high',
'low',
'close',
'volume',
'amount',
'datetime'
]]
except:
res = None
# if format in ['P', 'p', 'pandas', 'pd']:
return res
# elif format in ['json', 'dict']:
# return QA_util_to_json_from_pandas(res)
# # 多种数据格式
# elif format in ['n', 'N', 'numpy']:
# return numpy.asarray(res)
# elif format in ['list', 'l', 'L']:
# return numpy.asarray(res).tolist()
# else:
# print(
# "QA Error QA_fetch_stock_day format parameter %s is none of \"P, p, pandas, pd , json, dict , n, N, numpy, list, l, L, !\" "
# % format
# )
# return None
def _get_data2(self, code, table, st_start, st_end, type='D'):
if st_end is None:
# st_end = "2030-12-31"
st_end = "2030-12-31 23:59:59"
# st_start = self.dateStr2stamp(st_start)
if type == 'D':
if isinstance(code, list):
dtd = self.db[table].find({
'code':
{'$in': code}
, 'date_stamp':
{'$gte': self.dateStr2stamp(st_start), "$lte": self.dateStr2stamp(st_end)}
}
, {"_id": 0})
else:
dtd = self.db[table].find({'code': code,
'date_stamp':
{'$gte': self.dateStr2stamp(st_start),
"$lte": self.dateStr2stamp(st_end)}
}
, {"_id": 0})
else:
if isinstance(code, list):
dtd = self.db[table].find({'code': {'$in': code}, 'date': {'$gte': self.dateStr2stamp(st_start),
"$lte": self.dateStr2stamp(st_end)},
'type': type}, {"_id": 0})
else:
dtd = self.db[table].find(
{'code': code, 'date': {'$gte': self.dateStr2stamp(st_start), "$lte": self.dateStr2stamp(st_end)},
'type': type}, {"_id": 0})
ptd = pd.DataFrame(list(dtd))
if len(ptd) > 0:
# del ptd['_id']
del ptd['date_stamp']
if type == 'D':
ptd.date = pd.to_datetime(ptd.date)
ptd = ptd.set_index(["date", "code"])
else:
ptd.date = pd.to_datetime(ptd.date)
ptd.datetime = pd.to_datetime(ptd.datetime)
ptd = ptd.set_index(["datetime", "code"])
# ptd.rename(columns={"vol":"volume"}, inplace=True)
return ptd
def _get_data(self, code, table, st_start, st_end, type='D'):
if st_end is None:
# st_end = "2030-12-31"
st_end = "2030-12-31 23:59:59"
# st_start = self.dateStr2stamp(st_start)
if type == 'D':
data = self._get_data_day(code, table, st_start, st_end)
else:
data = self._get_data_min(code, table, st_start, st_end, type)
# if isinstance(code, list):
# dtd=self.db[table].find({'code':{'$in':code},'date':{'$gte':self.dateStr2stamp(st_start), "$lte":self.dateStr2stamp(st_end)}, 'type':type},{"_id": 0})
# else:
# dtd=self.db[table].find({'code':code,'date':{'$gte':self.dateStr2stamp(st_start), "$lte":self.dateStr2stamp(st_end)}, 'type':type},{"_id": 0})
# ptd=pd.DataFrame(list(dtd))
if data is None:
return pd.DataFrame()
if len(data) > 0:
# del ptd['_id']
# del ptd['date_stamp']
if type == 'D':
data.date = pd.to_datetime(data.date)
data = data.set_index(["date","code"])
else:
# data.date = pd.to_datetime(data.date)
data.datetime= pd.to_datetime(data.datetime)
data = data.set_index(["datetime","code"])
# ptd.rename(columns={"vol":"volume"}, inplace=True)
return data
def get_stock_day(self, code, st_start=None, st_end=None):
if st_start is None:
st_start = self.st_start
if isinstance(code, str):
code = [code]
return self._get_data(code, 'stock_day', st_start, st_end)
def get_stock_min(self, code, st_start=None, st_end=None, freq=5):
if st_start is None:
st_start = self.st_start_15min
if isinstance(code, str):
code = [code]
return self._get_data(code, 'stock_min', st_start, st_end, "%dmin"%freq)
def get_stock_min_realtime(self, code, st_start=None, st_end=None, freq=5):
if st_start is None:
st_start = self.st_start_5min
if st_end is None:
st_end = "2030-12-31 23:59:59"
data_min = self.get_stock_min(code=code, freq=freq)
if len(data_min) > 0:
if freq < (time.time() - data_min.index[-1][0].timestamp()) / 60:
start = data_min.index[-1][0].strftime('%Y-%m-%d %H:%M:01') ## %S=>01
add_df = tdx.QA_fetch_get_stock_min(code, start=start, end=st_end, frequence='%dmin' % freq)
if len(add_df) > 0:
add_df.drop(['date_stamp', 'datetime'], axis=1, inplace=True)
data_min = data_min.append(add_df, sort=True)
## save to db
else:
data_min = tdx.QA_fetch_get_stock_min(code, start=st_start, end=st_end, frequence='%dmin' % freq)
if len(data_min) > 0:
data_min.drop(['date_stamp', 'datetime'], axis=1, inplace=True)
return data_min
def get_index_day(self, code, st_start=None, st_end=None):
if st_start is None:
st_start = self.st_start
if isinstance(code, str):
code = [code]
return self._get_data(code, 'index_day', st_start, st_end)
def get_index_min(self, code, st_start=None, st_end=None, freq=5):
if st_start is None:
st_start = self.st_start_15min
if isinstance(code, str):
code = [code]
return self._get_data(code, 'index_min', st_start, st_end, "%dmin"%freq)
def get_index_min_realtime(self, code, st_start=None, st_end=None, freq=5):
if st_start is None:
st_start = self.st_start_5min
if st_end is None:
st_end = "2030-12-31 23:59:59"
data_min = self.get_index_min(code=code, freq=freq)
if len(data_min) > 0:
if freq < (time.time() - data_min.index[-1][0].timestamp()) / 60:
start=data_min.index[-1][0].strftime('%Y-%m-%d %H:%M:01') ## %S=>01
add_df=tdx.QA_fetch_get_index_min(code,start=start,end=st_end, frequence='%dmin' % freq)
if len(add_df) > 0:
add_df.drop(['date_stamp','datetime'],axis=1,inplace=True)
data_min=data_min.append(add_df, sort=True)
else:
data_min=tdx.QA_fetch_get_index_min(code,start=st_start,end=st_end, frequence='%dmin' % freq)
if len(data_min) > 0:
data_min.drop(['date_stamp','datetime'],axis=1,inplace=True)
return data_min
def file2dict(self, path):
#读取配置文件
with open(path) as f:
return json.load(f)
    def save(self, table, data):
        """Insert the single document `data` into collection `table`.

        NOTE(review): a single document is wrapped in insert_many();
        insert_one(data) would be the idiomatic call — left unchanged because
        the two raise different exception types on write failure and callers
        may depend on that.  The insert result is discarded.
        """
        self.db[table].insert_many(
            [data]
        )
    def save_data_min(self, data, idx=0):
        """Placeholder for persisting minute bars; currently a no-op stub.

        Both branches intentionally do nothing — presumably awaiting
        implementation (confirm before relying on it).
        """
        if idx == 0:
            pass
        else:
            pass
def save_realtime(self, data):
table = 'realtime_{}'.format(date.today())
# self.db[table].insert_many(
# [data]
# )
self.db[table].replace_one({'_id':data['_id']}, data, True)
    def upd_data_min(self, df_data_min, json_data, minute):
        """Fold one realtime quote into the in-memory minute-bar frame.

        `json_data` carries the latest tick ('now' price plus cumulative
        'volume'/'amount' for the day); the bar at the quote's `minute`-
        aligned timestamp is updated in place (or created with the tick's
        price as OHLC).  Returns the (mutated) df_data_min.

        NOTE(review): per-bar vol/amount are derived as cumulative totals
        minus the sums of bars after begin_time — this assumes the stored
        'vol'/'amount' of earlier bars in that window are already per-bar
        deltas; confirm against the feed's semantics.
        """
        et = EasyTime()
        # Timestamp of the bar this tick belongs to, and the session start
        # from which per-bar volumes are summed.
        index_time = pd.to_datetime(et.get_minute_date_str(minute=minute, str_date=json_data['datetime']))
        begin_time = pd.to_datetime(et.get_begin_trade_date(minute=minute, str_date=json_data['datetime']))
        if len(df_data_min) > 0:
            # Sum of already-recorded bars since the session start; subtracted
            # from the day-cumulative totals to get this bar's share.
            sum_df=df_data_min.loc[df_data_min.index > begin_time]
            old_vol = sum_df['vol'].sum()
            old_amount = sum_df['amount'].sum()
            now_price = json_data['now']
            if index_time in df_data_min.index:
                # Update the existing bar: extend high/low, refresh close.
                if now_price > df_data_min.loc[index_time, 'high']:
                    df_data_min.loc[index_time, 'high'] = now_price
                if now_price < df_data_min.loc[index_time, 'low']:
                    df_data_min.loc[index_time, 'low'] = now_price
                df_data_min.loc[index_time, 'close'] = now_price
                df_data_min.loc[index_time, 'vol'] = json_data['volume'] - old_vol
                df_data_min.loc[index_time, 'amount'] = json_data['amount'] - old_amount
            else:
                # New bar: seed every OHLC field with the current price.
                df_data_min.loc[index_time] = [0 for x in range(len(df_data_min.columns))]
                df_data_min.loc[index_time, 'code'] = json_data['code']
                df_data_min.loc[index_time, 'open'] = now_price
                df_data_min.loc[index_time, 'high'] = now_price
                df_data_min.loc[index_time, 'low'] = now_price
                df_data_min.loc[index_time, 'close'] = now_price
                df_data_min.loc[index_time, 'vol'] = json_data['volume'] - old_vol
                df_data_min.loc[index_time, 'amount'] = json_data['amount'] - old_amount
        else:  # empty frame (first bar of the day?) — deliberately left untouched
            pass
        return df_data_min
def get_positions(self, idx=0):
table = 'positions'
# self.db[table].insert_many(
# [data]
# )
dtd=self.db[table].find({'amount':{'$gte':0},"idx":idx,'status':'1'})
ptd=pd.DataFrame(list(dtd))
if len(ptd) > 0:
del ptd['_id']
ptd = ptd.set_index(["code"])
return ptd
def get_stock_info(self, code = None):
table = 'stock_info'
if code == None:
dtd = self.db[table].find()
elif isinstance(code, list):
# dtd = None
dtd = self.db[table].find({'code': {'$in':code}})
else:
dtd = self.db[table].find({'code': code})
return pd.DataFrame(list(dtd))
def get_stock_list(self, code=None, notST = True, market = None):
table = 'stock_list'
query = {}
if market != None:
query['sse'] = '%s' % market
if notST:
# data=m.db['stock_list'].find({"code":{"$in":codelist}, "name":{'$not': re.compile(r"ST")}})
query['name'] = {'$not': re.compile(r"ST")}
if code == None:
# print("get-stock-list", query)
dtd = self.db[table].find(query)
elif isinstance(code, list):
query['code'] = {'$in': code}
# dtd = self.db[table].find({'code': {'$in':code}})
dtd = self.db[table].find(query)
else:
dtd = self.db[table].find({'code': code})
pdf = pd.DataFrame(list(dtd))
pdf = pdf.set_index('code')
if len(pdf) == 0:
return pdf
del pdf['_id']
del pdf['volunit']
del pdf['sec']
del pdf['sse']
del pdf['decimal_point']
return pdf
def get_realtime(self, code = None, dateStr = None, time='09:30:00', beg_time = None):
if dateStr == None:
# dateStr = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())
dateStr = strftime('%Y-%m-%d',localtime())
table = 'realtime_%s' % dateStr
if code == None:
if beg_time == None:
dtd = self.db[table].find({'time':{'$lt':time}})
else:
dtd = self.db[table].find({'time':{'$lt':time, '$gt':beg_time}})
elif isinstance(code, list):
dtd = self.db[table].find({'code':{'$in':code}, 'time':{'$lt':time}})
else:
dtd = self.db[table].find({'code':code, 'time':{'$lt':time}})
df = pd.DataFrame(list(dtd))
if | |
# Source: sjalloq/uvm-python — src/uvm/base/uvm_objection.py
#
#//
#//----------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010-2011 Synopsys, Inc.
#// Copyright 2013 NVIDIA Corporation
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
import cocotb
from cocotb.triggers import Event, Timer
from .uvm_report_object import UVMReportObject
from .uvm_debug import uvm_debug
from .uvm_globals import *
from .uvm_object_globals import (UVM_RAISED, UVM_DROPPED, UVM_ALL_DROPPED)
from .sv import sv
from ..macros import uvm_error
from typing import List, Optional, Dict, Any
UVM_USE_PROCESS_CONTAINER = 1
def classmethod_named(func):
    """Wrap `func` as a classmethod, best-effort preserving its __name__.

    classmethod objects reject attribute assignment on CPython (they have no
    instance dict), so the original unconditional setattr raised
    AttributeError whenever this helper ran.  The assignment is now guarded;
    the name remains reachable via `__func__.__name__`, and Python 3.10+
    mirrors __name__ from the wrapped function automatically.
    """
    new_func = classmethod(func)
    try:
        setattr(new_func, '__name__', func.__name__)
    except (AttributeError, TypeError):
        pass  # see docstring: name still available through __func__
    return new_func
class UVMObjectionEvents():
    """Per-object bundle of objection notification events plus a waiter count."""

    def __init__(self):
        # Number of consumers currently waiting on any of these events.
        self.waiters = 0
        # One cocotb Event per objection transition kind.
        for event_name in ('raised', 'dropped', 'all_dropped'):
            setattr(self, event_name, Event(event_name))
# Aliases for the drain-context bookkeeping containers.  The target type is a
# forward reference (string) because UVMObjectionContextObject is defined
# later in this module.
ObjContextDict = Dict[Any, 'UVMObjectionContextObject']
ObjContextList = List['UVMObjectionContextObject']
#//------------------------------------------------------------------------------
#// Title: Objection Mechanism
#//------------------------------------------------------------------------------
#// The following classes define the objection mechanism and end-of-test
#// functionality, which is based on <uvm_objection>.
#//------------------------------------------------------------------------------
#//------------------------------------------------------------------------------
#//
#// Class: uvm_objection
#//
#//------------------------------------------------------------------------------
#// Objections provide a facility for coordinating status information between
#// two or more participating components, objects, and even module-based IP.
#//
#// Tracing of objection activity can be turned on to follow the activity of
#// the objection mechanism. It may be turned on for a specific objection
#// instance with <uvm_objection::trace_mode>, or it can be set for all
#// objections from the command line using the option +UVM_OBJECTION_TRACE.
#//------------------------------------------------------------------------------
class UVMObjection(UVMReportObject):
m_objections = [] # static uvm_objection[$]
# `uvm_register_cb(uvm_objection, uvm_objection_callback)
#
# protected bit self.m_trace_mode
# protected int self.m_source_count[uvm_object]
# protected int self.m_total_count [uvm_object]
# protected time self.m_drain_time [uvm_object]
# protected uvm_objection_events self.m_events [uvm_object]
# /*protected*/ bit self.m_top_all_dropped
#
# protected uvm_root self.m_top
#
#
# //// Drain Logic
#
# // The context pool holds used context objects, so that
# // they're not constantly being recreated. The maximum
# // number of contexts in the pool is equal to the maximum
# // number of simultaneous drains you could have occuring,
# // both pre and post forks.
# //
# // There's the potential for a programmability within the
# // library to dictate the largest this pool should be allowed
# // to grow, but that seems like overkill for the time being.
# local static uvm_objection_context_object UVMObjection.m_context_pool[$]
m_context_pool = []
# Notified when m_scheduled_list is not empty
m_scheduled_list_not_empty_event = Event('m_scheduled_list_not_empty_event')
# // These are the active drain processes, which have been
# // forked off by the background process. A raise can
# // use this array to kill a drain.
#`ifndef UVM_USE_PROCESS_CONTAINER
# local process self.m_drain_proc[uvm_object]
#`else
# local process_container_c self.m_drain_proc[uvm_object]
#`endif
# // These are the contexts which have been scheduled for
# // retrieval by the background process, but which the
# // background process hasn't seen yet.
# local static uvm_objection_context_object m_scheduled_list[$]
m_scheduled_list: ObjContextList = []
# // Once a context is seen by the background process, it is
# // removed from the scheduled list, and placed in the forked
# // list. At the same time, it is placed in the scheduled
# // contexts array. A re-raise can use the scheduled contexts
# // array to detect (and cancel) the drain.
# local uvm_objection_context_object self.m_scheduled_contexts[uvm_object]
# // Once the forked drain has actually started (this occurs
# // ~1 delta AFTER the background process schedules it), the
# // context is removed from the above array and list, and placed
# // in the forked_contexts list.
# local uvm_objection_context_object self.m_forked_contexts[uvm_object]
#
# protected bit self.m_prop_mode = 1
# // Function: new
# //
# // Creates a new objection instance. Accesses the command line
# // argument +UVM_OBJECTION_TRACE to turn tracing on for
# // all objection objects.
#
def __init__(self, name=""):
#uvm_cmdline_processor clp
#uvm_coreservice_t cs_
trace_args = [] # string [$]
UVMReportObject.__init__(self, name)
from .uvm_coreservice import UVMCoreService
cs_ = UVMCoreService.get()
#cs_ = uvm_coreservice_t::get()
self.m_top = cs_.get_root()
self.m_cleared = 0 # protected bit /* for checking obj count<0 */
self.m_forked_list: ObjContextList = [] # uvm_objection_context_object[$]
self.m_scheduled_contexts: ObjContextDict = {} # uvm_objection_context_object[uvm_object]
self.m_forked_contexts: ObjContextDict = {} # uvm_objection_context_object[uvm_object]
self.m_source_count = {} # int[uvm_object]
self.m_total_count = {} # int[uvm_object]
self.m_drain_time = {} # time [uvm_object]
self.m_events: Dict[Any, UVMObjectionEvents] = {} # uvm_objection_events[uvm_object]
self.m_top_all_dropped = 0
self.m_drain_proc = {} # process_container_c [uvm_object]
self.set_report_verbosity_level(self.m_top.get_report_verbosity_level())
# Get the command line trace mode setting
#clp = uvm_cmdline_processor::get_inst()
from .uvm_cmdline_processor import UVMCmdlineProcessor
clp = UVMCmdlineProcessor.get_inst()
self.m_prop_mode = 1
self.m_trace_mode = 0
if clp.get_arg_matches("+UVM_OBJECTION_TRACE", trace_args):
self.m_trace_mode = 1
UVMObjection.m_objections.append(self)
# // Function: trace_mode
# //
# // Set or get the trace mode for the objection object. If no
# // argument is specified (or an argument other than 0 or 1)
# // the current trace mode is unaffected. A trace_mode of
# // 0 turns tracing off. A trace mode of 1 turns tracing on.
# // The return value is the mode prior to being reset.
#
# function bit trace_mode (int mode=-1)
# trace_mode = self.m_trace_mode
# if(mode == 0) self.m_trace_mode = 0
# else if(mode == 1) self.m_trace_mode = 1
# endfunction
#
# // Function- m_report
# //
# // Internal method for reporting count updates
#
#def m_report(uvm_object obj, uvm_object source_obj, string description, int count, string action)
def m_report(self, obj, source_obj, description: str, count: int, action: str) -> None:
_count = 0
if obj in self.m_source_count:
_count = self.m_source_count[obj]
_total = 0
if obj in self.m_total_count:
_total = self.m_total_count[obj]
if (not uvm_report_enabled(UVM_NONE, UVM_INFO,"OBJTN_TRC") or (not self.m_trace_mode)):
return
descr = ""
if description != "":
descr = " (" + description + ")"
if source_obj is obj:
name = obj.get_full_name()
if name == "":
name = "uvm_top"
uvm_report_info("OBJTN_TRC",
sv.sformatf("Object %0s %0s %0d objection(s)%s: count=%0d total=%0d",
name, action, count, descr, _count, _total), UVM_NONE)
else:
cpath = 0
last_dot = 0
sname = source_obj.get_full_name()
nm = obj.get_full_name()
_max = sname.len()
if sname.len() > nm.len():
_max = nm.len()
# For readability, only print the part of the source obj hierarchy underneath
# the current object.
while ((sname[cpath] == nm[cpath]) and (cpath < _max)):
if (sname[cpath] == "."):
last_dot = cpath
cpath += 1
if last_dot:
sname = sname.substr(last_dot+1, sname.len())
name = obj.get_full_name()
if name == "":
name = "uvm_top"
act_type = "subtracted"
if action=="raised":
act_type = "added"
act_dir = "from"
if action=="raised":
act_dir = "to"
uvm_report_info("OBJTN_TRC",
sv.sformatf("Object %0s %0s %0d objection(s) %0s its total (%s from"
" source object %s%s): count=%0d total=%0d", name, act_type, count,
act_dir, action, sname, descr, _count, _total), UVM_NONE)
# // Function- m_get_parent
# //
# // Internal method for getting the parent of the given ~object~.
# // The ultimate parent is uvm_top, UVM's implicit top-level component.
#
def m_get_parent(self, obj):
comp = None
seq = None
if hasattr(obj, 'get_parent'):
comp = obj
obj = comp.get_parent()
elif hasattr(obj, 'get_sequencer'):
seq = obj
obj = seq.get_sequencer()
else:
obj = self.m_top
if obj is None:
obj = self.m_top
return obj
# endfunction
# // Function- m_propagate
# //
# // Propagate the objection to the objects parent. If the object is a
# // component, the parent is just the hierarchical parent. If the object is
# // a sequence, the parent is the parent sequence if one exists, or
# // it is the attached sequencer if there is no parent sequence.
# //
# // obj : the uvm_object on which the objection is being raised or lowered
# // source_obj : the root object on which the end user raised/lowered the
# // objection (as opposed to an anscestor of the end user object)a
# // count : the number of objections associated with the action.
# // raise : indicator of whether the objection is being raised or lowered. A
| |
from django.test import TestCase, Client
from django.test import RequestFactory
from django.conf import settings
from ..annotations import Annotations, AnnotationCrud
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.template import Context, Template
from django.core.serializers import serialize
from apps.iiif.annotations.models import Annotation
from apps.iiif.manifests.models import Manifest
from ..models import UserAnnotation
from apps.readux.views import VolumesList, VolumeDetail, CollectionDetail, Collection, ExportOptions, AnnotationsCount
from urllib.parse import urlencode
from cssutils import parseString
import warnings
import json
import re
import uuid
User = get_user_model()
class AnnotationTests(TestCase):
fixtures = ['users.json', 'kollections.json', 'manifests.json', 'canvases.json', 'annotations.json']
valid_mirador_annotations = {
'svg': { 'oa_annotation': '''{
"on": [{
"full": "https://readux-dev.org:3000/iiif/readux:st7r6/canvas/fedora:emory:5622",
"@type": "oa:SpecificResource",
"selector": {
"@type": "oa:Choice",
"item": {
"@type": "oa:SvgSelector",
"value": "<svg></svg>"
},
"default": {
"@type": "oa:FragmentSelector",
"value": "xywh=535,454,681,425"
}
},
"within": {
"@type": "sc:Manifest",
"@id": "https://ecds.emory.edu/iiif/MSS_Vat.lat.3225/manifest.json"
}
}],
"@type": "oa:Annotation",
"@context": "http://iiif.io/api/presentation/2/context.json",
"@id": "13d3b867-d668-4294-b56a-df3e8860016c",
"annotatedBy": {
"name": "Me"
},
"resource": [{
"format": "text/html",
"@type": "dctypes:Text",
"chars": "<p>wfv3v3v3</p>"
}],
"motivation": ["oa:commenting"]
}'''},
'text': {
'oa_annotation': '''{
"@type": "oa:Annotation",
"motivation": ["oa:commenting"],
"annotatedBy": {
"name": "Me"
},
"@context": "http://iiif.io/api/presentation/2/context.json",
"resource": [{
"chars": "<p>mcoewmewom</p>",
"format": "text/html",
"@type": "dctypes:Text"
}],
"stylesheet": {
"value": ".anno-049e4a47-1d9e-4d52-8d30-fb9047d34481 { background: rgba(0, 128, 0, 0.5); }",
"type": "CssStylesheet"
},
"on": [{
"full": "https://readux-dev.org:3000/iiif/readux:st7r6/canvas/fedora:emory:5622",
"@type": "oa:SpecificResource",
"selector": {
"item": {
"@type": "RangeSelector",
"endSelector": {
"@type": "XPathSelector",
"value": "//*[@id='f842fe71-e1da-49c3-865e-f3e62a5179ff']",
"refinedBy": {
"@type": "TextPositionSelector",
"end": 2
}
},
"value": "xywh=2971,453,28,39",
"startSelector": {
"@type": "XPathSelector",
"value": "//*[@id='f846587c-1e1c-44d3-b1ce-20c0f7104dc5']",
"refinedBy": {
"@type": "TextPositionSelector",
"start": 0
}
}
},
"@type": "oa:FragmentSelector"
},
"within": {
"@type": "sc:Manifest",
"@id": "https://readux-dev.org:3000/iiif/v2/readux:st7r6/manifest"
}
}]
}'''
},
'tag':{
'oa_annotation': '''{
"@type": "oa:Annotation",
"motivation": ["oa:commenting"],
"annotatedBy": {
"name": "Me"
},
"@context": "http://iiif.io/api/presentation/2/context.json",
"resource": [
{
"chars": "<p>mcoewmewom</p>",
"format": "text/html",
"@type": "dctypes:Text"
},
{
"@type": "oa:Tag",
"chars": "tag"
},
{
"@type": "oa:Tag",
"chars": "other tag"
}
],
"stylesheet": {
"value": ".anno-049e4a47-1d9e-4d52-8d30-fb9047d34481 { background: rgba(0, 128, 0, 0.5); }",
"type": "CssStylesheet"
},
"on": [{
"full": "https://readux-dev.org:3000/iiif/readux:st7r6/canvas/fedora:emory:5622",
"@type": "oa:SpecificResource",
"selector": {
"item": {
"@type": "RangeSelector",
"endSelector": {
"@type": "XPathSelector",
"value": "//*[@id='f842fe71-e1da-49c3-865e-f3e62a5179ff']",
"refinedBy": {
"@type": "TextPositionSelector",
"end": 2
}
},
"value": "xywh=2971,453,28,39",
"startSelector": {
"@type": "XPathSelector",
"value": "//*[@id='f846587c-1e1c-44d3-b1ce-20c0f7104dc5']",
"refinedBy": {
"@type": "TextPositionSelector",
"start": 0
}
}
},
"@type": "oa:FragmentSelector"
},
"within": {
"@type": "sc:Manifest",
"@id": "https://readux-dev.org:3000/iiif/v2/readux:st7r6/manifest"
}
}]
}'''
}
}
    def setUp(self):
        """Load fixture users, manifest, canvas and collection, and build the
        request/view helpers shared by every test."""
        # fixtures = ['kollections.json', 'manifests.json', 'canvases.json', 'annotations.json']
        self.user_a = get_user_model().objects.get(pk=111)
        self.user_b = get_user_model().objects.get(pk=222)
        self.factory = RequestFactory()
        self.client = Client()
        self.view = Annotations.as_view()
        # self.volume_list_view = VolumeList.as_view()
        self.crud_view = AnnotationCrud.as_view()
        self.manifest = Manifest.objects.get(pk='464d82f6-6ae5-4503-9afc-8e3cdd92a3f1')
        self.canvas = self.manifest.canvas_set.all().first()
        self.collection = self.manifest.collections.first()
def create_user_annotations(self, count, user):
for anno in range(count):
text_anno = UserAnnotation(
oa_annotation=json.loads(self.valid_mirador_annotations['text']['oa_annotation']),
owner=user
)
text_anno.save()
def load_anno(self, response):
annotation_list = json.loads(response.content.decode('UTF-8-sig'))
if 'resources' in annotation_list:
return annotation_list['resources']
else:
return annotation_list
def rando_anno(self):
return UserAnnotation.objects.order_by("?").first()
    def test_get_user_annotations_unauthenticated(self):
        """Anonymous requests for a user's annotations are rejected."""
        self.create_user_annotations(5, self.user_a)
        # A username with no matching user yields a 404.
        kwargs = {'username': 'readux', 'volume': self.manifest.pid, 'canvas': self.canvas.pid}
        url = reverse('user_annotations', kwargs=kwargs)
        response = self.client.get(url)
        assert response.status_code == 404
        # A real user's annotations require authentication: expect 401.
        kwargs = {'username': self.user_a.username, 'volume': 'readux:st7r6', 'canvas': 'fedora:emory:5622'}
        url = reverse('user_annotations', kwargs=kwargs)
        response = self.client.get(url)
        annotation = self.load_anno(response)
        assert response.status_code == 401
        # assert len(annotation) == 0
def test_mirador_svg_annotation_creation(self):
request = self.factory.post('/annotations-crud/', data=json.dumps(self.valid_mirador_annotations['svg']), content_type="application/json")
request.user = self.user_a
response = self.crud_view(request)
annotation = self.load_anno(response)
assert annotation['annotatedBy']['name'] == '<NAME>'
assert annotation['on']['selector']['value'] == 'xywh=535,454,681,425'
assert response.status_code == 201
annotation_object = UserAnnotation.objects.get(pk=annotation['@id'])
assert annotation_object.x == 535
assert annotation_object.y == 454
assert annotation_object.w == 681
assert annotation_object.h == 425
def test_mirador_text_annotation_creation(self):
request = self.factory.post('/annotations-crud/', data=json.dumps(self.valid_mirador_annotations['text']), content_type="application/json")
request.user = self.user_a
response = self.crud_view(request)
annotation = self.load_anno(response)
assert annotation['annotatedBy']['name'] == '<NAME>'
assert annotation['on']['selector']['value'] == 'xywh=468,2844,479,83'
assert re.match(r"http.*iiif/v2/readux:st7r6/canvas/fedora:emory:5622", annotation['on']['full'])
assert response.status_code == 201
def test_creating_annotation_from_string(self):
request = self.factory.post('/annotations-crud/', data=self.valid_mirador_annotations['text'], content_type="application/json")
request.user = self.user_a
response = self.crud_view(request)
annotation = self.load_anno(response)
assert annotation['annotatedBy']['name'] == '<NAME>'
assert annotation['on']['selector']['value'] == 'xywh=468,2844,479,83'
assert re.match(r"http.*iiif/v2/readux:st7r6/canvas/fedora:emory:5622", annotation['on']['full'])
assert response.status_code == 201
    def test_get_user_annotations(self):
        """An authenticated owner can list all of their annotations for a canvas."""
        self.create_user_annotations(4, self.user_a)
        kwargs = {'username': self.user_a.username, 'volume': self.manifest.pid, 'canvas': self.canvas.pid}
        url = reverse('user_annotations', kwargs=kwargs)
        request = self.factory.get(url)
        request.user = self.user_a
        response = self.view(request, username=self.user_a.username, volume=self.manifest.pid, canvas=self.canvas.pid)
        annotation = self.load_anno(response)
        assert len(annotation) == 4
        assert response.status_code == 200
    def test_get_only_users_user_annotations(self):
        """Listing returns only the requesting user's annotations, and
        anonymous access to another user's list is rejected."""
        self.create_user_annotations(5, self.user_b)
        self.create_user_annotations(4, self.user_a)
        kwargs = {'username': 'marvin', 'volume': self.manifest.pid, 'canvas': self.canvas.pid}
        url = reverse('user_annotations', kwargs=kwargs)
        request = self.factory.get(url)
        request.user = self.user_b
        # user_b sees only their own 5 annotations, not user_a's 4.
        response = self.view(request, username=self.user_b.username, volume=self.manifest.pid, canvas=self.canvas.pid)
        annotation = self.load_anno(response)
        assert len(annotation) == 5
        assert response.status_code == 200
        assert len(UserAnnotation.objects.all()) == 9
        # Unauthenticated request for user_a's annotations -> 401.
        kwargs = {'username': self.user_a.username, 'volume': 'readux:st7r6', 'canvas': 'fedora:emory:5622'}
        url = reverse('user_annotations', kwargs=kwargs)
        response = self.client.get(url)
        annotation = self.load_anno(response)
        assert response.status_code == 401
        # assert len(annotation) == 0
    def test_update_user_annotation(self):
        """An owner can PUT updated content for their own annotation."""
        self.create_user_annotations(1, self.user_a)
        existing_anno = UserAnnotation.objects.all()[0]
        data = json.loads(self.valid_mirador_annotations['svg']['oa_annotation'])
        data['@id'] = str(existing_anno.id)
        data = { 'oa_annotation': data }
        # Replace the resource list with a single updated resource dict.
        resource = data['oa_annotation']['resource'][0]
        resource['chars'] = 'updated annotation'
        data['oa_annotation']['resource'] = resource
        data['id'] = str(existing_anno.id)
        request = self.factory.put('/annotations-crud/', data=json.dumps(data), content_type="application/json")
        request.user = self.user_a
        response = self.crud_view(request)
        annotation = self.load_anno(response)
        assert response.status_code == 200
        assert annotation['resource']['chars'] == 'updated annotation'
    def test_update_non_existing_user_annotation(self):
        """PUT with an unknown annotation id returns 404."""
        self.create_user_annotations(1, self.user_a)
        data = json.loads(self.valid_mirador_annotations['svg']['oa_annotation'])
        # A freshly generated UUID cannot match any stored annotation.
        new_id = str(uuid.uuid4())
        data['@id'] = new_id
        data['id'] = new_id
        request = self.factory.put('/annotations-crud/', data=json.dumps(data), content_type="application/json")
        request.user = self.user_a
        response = self.crud_view(request)
        annotation = self.load_anno(response)
        assert response.status_code == 404
    def test_update_someone_elses_annotation(self):
        """PUT on an annotation owned by another user returns 401."""
        self.create_user_annotations(4, self.user_a)
        rando_anno = self.rando_anno()
        data = {'id': str(rando_anno.pk)}
        request = self.factory.put('/annotations-crud/', data=json.dumps(data), content_type="application/json")
        request.user = self.user_b
        response = self.crud_view(request)
        annotation = self.load_anno(response)
        assert response.status_code == 401
    def test_updating_annotation_unauthenticated(self):
        """PUT without an authenticated user returns 401 with an ownership message."""
        self.create_user_annotations(1, self.user_a)
        existing_anno = UserAnnotation.objects.all()[0]
        data = json.loads(self.valid_mirador_annotations['svg']['oa_annotation'])
        data['@id'] = str(existing_anno.id)
        data = {'oa_annotation': data}
        resource = data['oa_annotation']['resource'][0]
        data['oa_annotation']['resource'] = resource
        data['id'] = str(existing_anno.id)
        # Note: request.user is deliberately never set here.
        request = self.factory.put('/annotations-crud/', data=json.dumps(data), content_type="application/json")
        response = self.crud_view(request)
        message = self.load_anno(response)
        assert response.status_code == 401
        assert message['message'] == 'You are not the owner of this annotation.'
    def test_delete_user_annotation_as_owner(self):
        """DELETE with an unknown id returns 404.

        NOTE(review): despite the name, this deletes a random *non-existent*
        id — the name appears swapped with the next test's; confirm and rename.
        """
        self.create_user_annotations(1, self.user_a)
        data = {'id': str(uuid.uuid4())}
        request = self.factory.delete('/annotations-crud/', data=json.dumps(data), content_type="application/json")
        request.user = self.user_a
        response = self.crud_view(request)
        assert response.status_code == 404
    def test_delete_non_existant_user_annotation(self):
        """Owner deleting their own annotation returns 204 and removes it.

        NOTE(review): despite the name, this deletes an *existing* annotation —
        the name appears swapped with the previous test's; confirm and rename.
        """
        self.create_user_annotations(1, self.user_a)
        existing_anno = UserAnnotation.objects.all()[0]
        data = {'id': str(existing_anno.pk)}
        request = self.factory.delete('/annotations-crud/', data=json.dumps(data), content_type="application/json")
        request.user = self.user_a
        response = self.crud_view(request)
        message = self.load_anno(response)
        assert response.status_code == 204
        assert len(UserAnnotation.objects.all()) == 0
    def test_delete_someone_elses_annotation(self):
        """DELETE on an annotation owned by another user returns 401."""
        self.create_user_annotations(1, self.user_a)
        rando_anno = self.rando_anno()
        data = {'id': str(rando_anno.pk)}
        request = self.factory.delete('/annotations-crud/', data=json.dumps(data), content_type="application/json")
        request.user = self.user_b
        response = self.crud_view(request)
        message = self.load_anno(response)
        assert response.status_code == 401
        assert message['message'] == 'You are not the owner of this annotation.'
    def test_delete_annotation_unauthenticated(self):
        """DELETE without an authenticated user returns 401 with an ownership message."""
        self.create_user_annotations(1, self.user_a)
        rando_anno = self.rando_anno()
        data = {'id': str(rando_anno.pk)}
        # Note: request.user is deliberately never set here.
        request = self.factory.delete('/annotations-crud/', data=json.dumps(data), content_type="application/json")
        response = self.crud_view(request)
        message = self.load_anno(response)
        assert response.status_code == 401
        assert message['message'] == 'You are not the owner of this annotation.'
    def test_user_annotations_on_canvas(self):
        """Canvas serialization keeps a single otherContent entry before and
        after users add annotations to the canvas."""
        # fetch a manifest with no user annotations
        kwargs = {'manifest': self.manifest.pid, 'pid': self.canvas.pid}
        url = reverse('RenderCanvasDetail', kwargs=kwargs)
        response = self.client.get(url, data=kwargs)
        serialized_canvas = json.loads(response.content.decode('UTF-8-sig'))
        assert len(serialized_canvas['otherContent']) == 1
        # add annotations to the manifest
        self.create_user_annotations(1, self.user_a)
        self.create_user_annotations(2, self.user_b)
        # Both users' annotations should land on the same canvas.
        existing_anno_a = UserAnnotation.objects.all()[0]
        assert self.canvas.identifier == existing_anno_a.canvas.identifier
        existing_anno_b = UserAnnotation.objects.all()[2]
        assert self.canvas.identifier == existing_anno_b.canvas.identifier
        # fetch a manifest with annotations by two users
        response = self.client.get(url)
        serialized_canvas = json.loads(response.content.decode('UTF-8-sig'))
        assert response.status_code == 200
        assert serialized_canvas['@id'] == self.canvas.identifier
        assert serialized_canvas['label'] == str(self.canvas.position)
        assert len(serialized_canvas['otherContent']) == 1
    def test_volume_list_view_no_kwargs(self):
        """Volume list defaults to sort=title, order=asc and lists every manifest."""
        response = self.client.get(reverse('volumes list'))
        context = response.context_data
        assert context['order_url_params'] == urlencode({'sort': 'title', 'order': 'asc'})
        assert context['object_list'].count() == Manifest.objects.all().count()
    def test_volume_list_invalid_kwargs(self):
        """Unrecognized query parameters fall back to the default sort/order."""
        kwargs = {'blueberry': 'pizza', 'jay': 'awesome'}
        response = self.client.get(reverse('volumes list'), data=kwargs)
        context = response.context_data
        assert context['order_url_params'] == urlencode({'sort': 'title', 'order': 'asc'})
        assert context['object_list'].count() == Manifest.objects.all().count()
    def test_volumes_list_view_sort_and_order(self):
        """Every sort/order combination is reflected in the context and the
        queryset stays ordered."""
        view = VolumesList()
        for sort in view.SORT_OPTIONS:
            for order in view.ORDER_OPTIONS:
                kwargs = {'sort': sort, 'order': order}
                url = reverse('volumes list')
                response = self.client.get(url, data=kwargs)
                context = response.context_data
                assert context['order_url_params'] == urlencode({'sort': sort, 'order': order})
                assert context['object_list'].count() == Manifest.objects.all().count()
                # NOTE(review): view.get_queryset() here runs on a fresh view
                # instance, not on the request above — confirm intent.
                assert view.get_queryset().ordered
    def test_collection_detail_view_no_kwargs(self):
        """Defaults for collection detail.

        NOTE(review): this hits the 'volumes list' URL, duplicating
        test_volume_list_view_no_kwargs — looks like a copy-paste; it probably
        should reverse the 'collection' route. Confirm.
        """
        response = self.client.get(reverse('volumes list'))
        context = response.context_data
        assert context['order_url_params'] == urlencode({'sort': 'title', 'order': 'asc'})
        assert context['object_list'].count() == Manifest.objects.all().count()
    def test_collection_detail_invalid_kwargs(self):
        """Invalid params for collection detail fall back to defaults.

        NOTE(review): this also hits the 'volumes list' URL rather than the
        'collection' route — likely a copy-paste from the volume tests. Confirm.
        """
        kwargs = {'blueberry': 'pizza', 'jay': 'awesome'}
        response = self.client.get(reverse('volumes list'), data=kwargs)
        context = response.context_data
        assert context['order_url_params'] == urlencode({'sort': 'title', 'order': 'asc'})
        assert context['object_list'].count() == Manifest.objects.all().count()
# TODO are the volumes actually sorted?
    def test_collection_detail_view_sort_and_order(self):
        """Every sort/order combination is echoed in the collection context and
        the manifest queryset stays ordered."""
        view = CollectionDetail()
        for sort in view.SORT_OPTIONS:
            for order in view.ORDER_OPTIONS:
                kwargs = {'sort': sort, 'order': order }
                url = reverse('collection', kwargs={ 'collection': self.collection.pid })
                response = self.client.get(url, data=kwargs)
                context = response.context_data
                assert context['sort'] == sort
                assert context['order'] == order
                assert context['order_url_params'] == urlencode({'sort': sort, 'order': order})
                assert context['manifest_query_set'].ordered
    def test_collection_detail_view_with_no_sort_or_order_specified(self):
        """Collection detail defaults to sort=title, order=asc."""
        url = reverse('collection', kwargs={ 'collection': self.collection.pid })
        response = self.client.get(url)
        context = response.context_data
        assert context['sort'] == 'title'
        assert context['order'] == 'asc'
| |
valid classification technique number for ct. It can only be from 1-3 as in Table 4 in the paper")
return
# The classifcation model which is a Pipeline of vectorizers and transformers for the
# features selection technique
classification_model = Pipeline([
('vectorizer', CountVectorizer(analyzer="word",
ngram_range=(minn, maxn),
tokenizer=word_tokenize,
max_features=mf,
binary=bi)),
('tfidf', tfidf_tranformer),
('classifier', classifier)
])
# Print the selected properties for the classification model
print("-------------------------------------------------------------------------")
print("Number of labebled reviews:", len(reviews))
print("Number of features:", mf)
print("Labels:", ", ".join(labels_names))
print("Classes:", ", ".join(classes_names[:3]))
if multi_label: print("\t", ", ".join(classes_names[3:]))
print("Classification type:", classification_type)
if multi_label:
print("Classification technique:", technique_name)
print("Classification method:", method_name)
print("Classifier:", classifier_name)
# Creat empty arrays for the metrics (scores) with length of the folds to avaraged later
# *** This needs to optimized (Future Work)
scores, macro_scores = ([None] * fo for _ in range(2))
recall, precision, f1_score, support = ([0.0] * fo for _ in range(4))
accuracy, hamming_loss, total_time = ([0.0] * fo for _ in range(3))
# Arrays for individual scores for the classes
p1, p2, p3, p4, p5, p6 = ([0.0] * fo for _ in range(6))
r1, r2, r3, r4, r5, r6 = ([0.0] * fo for _ in range(6))
f1, f2, f3, f4, f5, f6 = ([0.0] * fo for _ in range(6))
s1, s2, s3, s4, s5, s6 = ([0.0] * fo for _ in range(6))
# Temporary variables to check the best predictions in binary classification
# This is used for evaluating the labeled dataset as in the paper
best_test_y = None
best_y_pred = None
best_acc = 0
# Split the dataset into training and testing using random sampling.
# The functions takes the number of fold and generates an arrays of indicies for all folds.
ss = ShuffleSplit(n_splits=fo, test_size=ts, random_state=28)
# Save the cross validation itteration number for the loop (for printing and reporting purpose)
cv = 0
# Starts the cross validations loop
# The loop will iterate on the array of indicies given by the sampling method above.
# The array indicies has a total number of elements equal to the folds,
# so loop will iterate by the number of folds.
for train_index, test_index in ss.split(reviews, labels):
print("---------------------------Cross validation #"+str(cv+1)+"---------------------------")
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
# Splits the reviews text and labels array into training and testing
train_X, test_X = reviews[train_index], reviews[test_index]
train_y, test_y = labels[train_index], labels[test_index]
# Training and testing the classification model
# The result is an array of predicted labels for the test dataset stored in y_pred
print("Training, testing and evaluating the classification model...")
start = time.time()
y_pred = classification_model.fit(train_X, train_y).predict(test_X)
end = time.time()
# Calculate the total time for training and testing
total_time[cv] = end-start
# Get the macro for precision, recall and f1-score metrics
macro_scores = precision_recall_fscore_support(test_y, y_pred, average="macro")
precision[cv] = macro_scores[0]
recall[cv] = macro_scores[1]
f1_score[cv] = macro_scores[2]
# Get the precision, recall, f1-score and support metrics per class per label
# by setting average = None in the scoreing function
if multi_label:
# *** This needs to optimized (Future Work)
scores = precision_recall_fscore_support(test_y, y_pred, average=None)
p1[cv],p2[cv],p3[cv],p4[cv] = scores[0][0],scores[0][1],scores[0][2],scores[0][3]
r1[cv],r2[cv],r3[cv],r4[cv] = scores[1][0],scores[1][1],scores[1][2],scores[1][3]
f1[cv],f2[cv],f3[cv],f4[cv] = scores[2][0],scores[2][1],scores[2][2],scores[2][3]
s1[cv],s2[cv],s3[cv],s4[cv] = scores[3][0],scores[3][1],scores[3][2],scores[3][3]
if labels_count > 2:
p5[cv],p6[cv] = scores[0][4],scores[0][5]
r5[cv],r6[cv] = scores[1][4],scores[1][5]
f5[cv],f6[cv] = scores[2][4],scores[2][5]
s5[cv],s6[cv] = scores[3][4],scores[3][5]
support[cv] = s1[cv]+s2[cv]+s3[cv]+s4[cv]+s5[cv]+s6[cv]
# Get the accuracy metric
accuracy[cv] = accuracy_score(test_y, y_pred)
# Get the hamming loss metric
hamming_loss[cv] = hamming_loss_score(test_y, y_pred)
# Printing metrics for each cross validation
# if dt = Tru a full report is printed in each cross validation iteration
if dt:
print(classification_report(test_y, y_pred, target_names=classes_names, digits=3))
else:
print("Macro Recall: %0.3f" % recall[cv])
print("Macro Precision: %0.3f" % precision[cv])
print("Macro F1-Score: %0.3f" % f1_score[cv])
print("Hamming loss: %0.3f" % hamming_loss[cv])
print("Accuracy: %0.3f" % accuracy[cv])
print("Total Time: %0.3f" % total_time[cv])
# Get teh best accuracy score for evaluation purposes using binary classification
if accuracy[cv] > best_acc:
best_acc = accuracy[cv]
best_test_y = test_y
best_y_pred = y_pred
# Increase the cross validation for printing
cv+=1
# Print the average of metrics over all the folds of cross validations
print("-------------------Total of "+str(fo)+"-folds cross validations--------------------")
if multi_label:
precision = np.asarray(precision).mean()
recall = np.asarray(recall).mean()
f1_score = np.asarray(f1_score).mean()
support = np.asarray(support).mean()
print("Label\tClass\tPrecision\tRecall\tF1-score\tSupport")
print("L1\tC1\t%0.3f\t\t%0.3f\t%0.3f\t\t%0.0f" % (np.asarray(p1).mean(),np.asarray(r1).mean(),np.asarray(f1).mean(),np.asarray(s1).mean()))
print(" \tC2\t%0.3f\t\t%0.3f\t%0.3f\t\t%0.0f" % (np.asarray(p2).mean(),np.asarray(r2).mean(),np.asarray(f2).mean(),np.asarray(s2).mean()))
print("L2\tC3\t%0.3f\t\t%0.3f\t%0.3f\t\t%0.0f" % (np.asarray(p3).mean(),np.asarray(r3).mean(),np.asarray(f3).mean(),np.asarray(s3).mean()))
print(" \tC4\t%0.3f\t\t%0.3f\t%0.3f\t\t%0.0f" % (np.asarray(p4).mean(),np.asarray(r4).mean(),np.asarray(f4).mean(),np.asarray(s4).mean()))
if labels_count > 2:
print("L3\tC5\t%0.3f\t\t%0.3f\t%0.3f\t\t%0.0f" % (np.asarray(p5).mean(),np.asarray(r5).mean(),np.asarray(f5).mean(),np.asarray(s5).mean()))
print(" \tC6\t%0.3f\t\t%0.3f\t%0.3f\t\t%0.0f" % (np.asarray(p6).mean(),np.asarray(r6).mean(),np.asarray(f6).mean(),np.asarray(s6).mean()))
print("Macro\t\t%0.3f\t\t%0.3f\t%0.3f\t\t%0.0f" % (precision,recall,f1_score,support))
else:
print(classification_report(best_test_y, best_y_pred, target_names = classes_names, digits=3))
print("-------------------------------------------------------------------------")
print("Macro F1-Score: %0.3f (+/- %0.3f)" % (np.asarray(f1_score).mean(), np.asarray(f1_score).std() * 2))
print("Hamming loss: %0.3f (+/- %0.3f)" % (np.asarray(hamming_loss).mean(), np.asarray(hamming_loss).std() * 2))
print("Accuracy: %0.3f (+/- %0.3f)" % (np.asarray(accuracy).mean(), np.asarray(accuracy).std() * 2))
print("Total time: %0.3f (+/- %0.3f)" % (np.asarray(total_time).mean(), np.asarray(total_time).std() * 2))
print("-------------------------------------------------------------------------")
# If not multi-label problem prins the confusion matrix
if not multi_label and cf:
cm = confusion_matrix(best_test_y, best_y_pred)
plt.figure()
plot_confusion_matrix(cm, classes_names)
plt.show()
print("-------------------------------------------------------------------------")
else:
print("At least one label need to be classified.")
print("Either se, co or ab is set to True in arguments)")
# Get sets of labels for training, prediction and testing
# For multi label they are transformed for the problem using MultiLabel Binarizer function
def get_labels(dataset_name = "preprocessed_labeled", sentiment = True,
               comparative = False, aspects_based = False):
    """The function returns the labels array as a single-label or transformed
    into a multi-label format.

    Args:
        dataset_name (str): The name of the dataset to be classified.
        sentiment (bool): Get the sentiment label or not.
        comparative (bool): Get the comparative label or not.
        aspects_based (bool): Get the aspect-based label or not.

    Returns:
        array: either an array of single label for binary, or an array of
            binarized multi-labels; None if no label flag is set.
    """
    # Initialize a list of arrays based on the labels count
    labels = []
    # Get labels count and if it is a multi-label
    multi_label = False
    labels_count = 0
    if sentiment: labels_count +=1
    if comparative: labels_count +=1
    if aspects_based: labels_count +=1
    if labels_count > 1:
        multi_label = True
    # Check that at least one label is requested; otherwise fail the function.
    if labels_count > 0:
        data = load_dataset_from_csv(dataset_name)
        dataset_size = len(data.index)
        # If multi-label get a transformed labels
        if multi_label:
            # Loop over the dataset rows
            for j in range(dataset_size):
                # The labels are stored as a set of labels such as (C1, C4, C6)
                labels_set = set()
                if sentiment:
                    labels_set.add("C1" if data["sentiment_polarity_label"][j] == "Positive" else "C2")
                if comparative:
                    labels_set.add("C3" if data["is_comparative_label"][j] == "Yes" else "C4")
                if aspects_based:
                    labels_set.add("C5" if data["is_aspects_based_label"][j] == "Yes" else "C6")
                labels.append(labels_set)
            # Label transformation into binary indicator values.
            # Discussed in section 3.1.4 in the paper.
            mlb = MultiLabelBinarizer()
            labels = mlb.fit_transform(np.asarray(labels))
        # If not multi-label a basic numpy array is returned with one label as one column
        else:
            for i in range(dataset_size):
                label = ""
                if sentiment:
                    label += "C1" if data["sentiment_polarity_label"][i] == "Positive" else "C2"
                if comparative:
                    label += "C3" if data["is_comparative_label"][i] == "Yes" else "C4"
                if aspects_based:
                    label += "C5" if data["is_aspects_based_label"][i] == "Yes" else "C6"
                labels.append(label)
            labels = np.asarray(labels)
    else:
        print("At least one label need to be set to True in arguments")
        return None
    return labels
# Funtion to get reviews text array for the classification model
def get_reviews(dataset_name = "preprocessed_labeled", pos_tagged = False):
    """The function returns the reviews text array from the dataset.

    Args:
        dataset_name (str): The name of the dataset to be classified.
        pos_tagged (bool): Get a pos_tagged text of the reviews.

    Returns:
        array: the reviews text
    """
    data = load_dataset_from_csv(dataset_name)
    # Only if pos tagged, replace the reviews text with pos tags (used for subjectivity classification)
    if pos_tagged:
        review_text = ""
        data_length = len(data.index)
        for i in range(data_length):
            tagged_review = to_pos_tags(str(data["review_text"][i]))
            review_text = tagged_review.strip()
            # Overwrite the review text in place with its POS-tag representation
            data.loc[i,"review_text"] = review_text
            loading(i, data_length, "Getting a total of "+str(data_length)+" reviews")
    # Get the (possibly tagged) reviews text as a numpy array
    reviews = np.asarray(data["review_text"])
    return reviews
# The function to preprocess the labeled dataset:
# All the args that are set to False are fund to be not helpful in the classfication
# performance, so they are not used by default, but can be tested if needed
def preprocess(dataset_name="labeled", lemmatize=False, stem=False,
               subjective=False, negation=False, remove_digits=False):
    """The function processes the labeled dataset and exports a preprocessed
    CSV file of the labeled dataset.

    Args:
        dataset_name (str): The name of the dataset to be classified.
        lemmatize (bool): Lemmatize the reviews text.
        stem (bool): Stem the reviews text.
        subjective (bool): Remove objective sentences and keep the
            subjective ones from the reviews text.
        negation (bool): Apply negation handling to the reviews text.
        remove_digits (bool): Remove the digits from the reviews text.

    Returns:
        None
    """
    data = load_dataset_from_csv(dataset_name)
    # In addition to reviews text, the comparative and aspects-based sentences
    # identified are also preprocessed.
    keys = ["review_text", "comparative_sentences", "aspects_based_sentences"]
    dataset_size = len(data.index)
    # Loop over the dataset size
    for i in range(dataset_size):
        for k in keys:
            text = str(data[k][i])
            # Pandas renders missing cells as the string "nan" after str()
            if text and text != "nan":
                processed_text = process_text(text, lemmatize=lemmatize, stem=stem,
                                              subjective=subjective, negation=negation,
                                              remove_digits=remove_digits)
                data.loc[i,k] = processed_text
        # BUG FIX: the original referenced an undefined name `data_length`
        # (NameError at runtime); the intended variable is `dataset_size`.
        loading(i+1, dataset_size, "Processing data in a total of "+str(dataset_size))
    # Exports the preprocessed dataset to the datasets folder with preprocessed_ tag
    data[["review_id", "review_key", "review_type", "review_text", "review_rating",
          "review_helpful", "sentiment_polarity_label", "is_comparative_label",
          "is_aspects_based_label", "comparative_sentences", "aspects_based_sentences",
          "review_status"]].to_csv("./datasets/preprocessed_"+dataset_name+".csv",
                                   sep="\t", quoting=3, index=False)
# a sub function to process one review in the function above
def process_text(text, lemmatize=False, stem=False, subjective=False, negation=False, remove_digits=False):
processed_text = ""
if text and text != "nan":
processed_text = text.lower()
processed_text = remove_white_spaces(processed_text)
processed_text = process_symbols(processed_text)
processed_text = remove_accented_chars(processed_text)
processed_text = remove_special_characters(processed_text)
if remove_digits:
processed_text = remove_digits(processed_text)
if subjective:
processed_text | |
'mtn', 'p4']
if self.get('vc') and self.get('vc') not in vcs:
raise usage.UsageError("vc must be one of %s" % (', '.join(vcs)))
validateMasterOption(self.get('master'))
class TryOptions(base.SubcommandOptions):
    """Options for the 'buildbot try' command.

    Supports two transports to the buildmaster: 'ssh' (using --host,
    --username, --jobdir and optionally --ssh) and 'pb' (using --master,
    --username and --passwd).
    """
    subcommandFunction = "buildbot.scripts.trycmd.trycmd"
    optParameters = [
        ["connect", "c", None,
         "How to reach the buildmaster, either 'ssh' or 'pb'"],
        # for ssh, use --host, --username, --jobdir and optionally
        # --ssh
        ["host", None, None,
         "Hostname (used by ssh) for the buildmaster"],
        ["port", None, None,
         "Port (used by ssh) for the buildmaster"],
        ["jobdir", None, None,
         "Directory (on the buildmaster host) where try jobs are deposited"],
        ["ssh", None, None,
         "Command to use instead of the default \"ssh\""],
        ["username", "u", None,
         "Username performing the try build"],
        # for PB, use --master, --username, and --passwd
        ["master", "m", None,
         "Location of the buildmaster's Try server (host:port)"],
        ["passwd", None, None,
         "Password for PB authentication"],
        ["who", "w", None,
         "Who is responsible for the try build"],
        ["comment", "C", None,
         "A comment which can be used in notifications for this build"],
        # for ssh to accommodate running in a virtualenv on the buildmaster
        ["buildbotbin", None, "buildbot",
         "buildbot binary to use on the buildmaster host"],
        ["diff", None, None,
         "Filename of a patch to use instead of scanning a local tree. "
         "Use '-' for stdin."],
        ["patchlevel", "p", 0,
         "Number of slashes to remove from patch pathnames, "
         "like the -p option to 'patch'"],
        ["baserev", None, None,
         "Base revision to use instead of scanning a local tree."],
        ["vc", None, None,
         "The VC system in use, one of: bzr, cvs, darcs, git, hg, "
         "mtn, p4, svn"],
        ["branch", None, None,
         "The branch in use, for VC systems that can't figure it out "
         "themselves"],
        ["repository", None, None,
         "Repository to use, instead of path to working directory."],
        ["builder", "b", None,
         "Run the trial build on this Builder. Can be used multiple times."],
        ["properties", None, None,
         "A set of properties made available in the build environment, "
         "format is --properties=prop1=value1,prop2=value2,.. "
         "option can be specified multiple times."],
        ["property", None, None,
         "A property made available in the build environment, "
         "format:prop=value. Can be used multiple times."],
        ["topfile", None, None,
         "Name of a file at the top of the tree, used to find the top. "
         "Only needed for SVN and CVS."],
        ["topdir", None, None,
         "Path to the top of the working copy. Only needed for SVN and CVS."],
    ]
    optFlags = [
        ["wait", None,
         "wait until the builds have finished"],
        ["dryrun", 'n',
         "Gather info, but don't actually submit."],
        ["get-builder-names", None,
         "Get the names of available builders. Doesn't submit anything. "
         "Only supported for 'pb' connections."],
        ["quiet", "q",
         "Don't print status of current builds while waiting."],
    ]
    # Mapping of .buildbot/options names to command-line options
    buildbotOptions = [
        ['try_connect', 'connect'],
        # [ 'try_builders', 'builders' ], <-- handled in postOptions
        ['try_vc', 'vc'],
        ['try_branch', 'branch'],
        ['try_repository', 'repository'],
        ['try_topdir', 'topdir'],
        ['try_topfile', 'topfile'],
        ['try_host', 'host'],
        ['try_username', 'username'],
        ['try_jobdir', 'jobdir'],
        ['try_ssh', 'ssh'],
        ['try_buildbotbin', 'buildbotbin'],
        # BUG FIX: these two mappings contained a corrupted '<PASSWORD>'
        # placeholder; they must map onto the 'passwd' command-line option.
        ['try_passwd', 'passwd'],
        ['try_master', 'master'],
        ['try_who', 'who'],
        ['try_comment', 'comment'],
        # [ 'try_wait', 'wait' ], <-- handled in postOptions
        # [ 'try_quiet', 'quiet' ], <-- handled in postOptions
        # Deprecated command mappings from the quirky old days:
        ['try_masterstatus', 'master'],
        ['try_dir', 'jobdir'],
        ['try_password', 'passwd'],
    ]

    def __init__(self):
        base.SubcommandOptions.__init__(self)
        # Multi-valued options accumulate here via opt_builder/opt_properties.
        self['builders'] = []
        self['properties'] = {}

    def opt_builder(self, option):
        # --builder may be given multiple times; collect all of them.
        self['builders'].append(option)

    def opt_properties(self, option):
        # We need to split the value of this option
        # into a dictionary of properties
        propertylist = option.split(",")
        for prop in propertylist:
            splitproperty = prop.split("=", 1)
            self['properties'][splitproperty[0]] = splitproperty[1]

    def opt_property(self, option):
        # Single property of the form prop=value.
        name, _, value = option.partition("=")
        self['properties'][name] = value

    def opt_patchlevel(self, option):
        self['patchlevel'] = int(option)

    def getSynopsis(self):
        return "Usage: buildbot try [options]"

    def postOptions(self):
        base.SubcommandOptions.postOptions(self)
        opts = self.optionsFile
        # Fall back to .buildbot/options values for list/flag options that
        # cannot be expressed through the plain buildbotOptions mapping.
        if not self['builders']:
            self['builders'] = opts.get('try_builders', [])
        if opts.get('try_wait', False):
            self['wait'] = True
        if opts.get('try_quiet', False):
            self['quiet'] = True
        # get the global 'masterstatus' option if it's set and no master
        # was specified otherwise
        if not self['master']:
            self['master'] = opts.get('masterstatus', None)
        if self['connect'] == 'pb':
            if not self['master']:
                # BUG FIX: the two string fragments concatenated without a
                # space, producing "specifiedfor".
                raise usage.UsageError("master location must be specified "
                                       "for 'pb' connections")
            validateMasterOption(self['master'])
class TryServerOptions(base.SubcommandOptions):
    """Options for the 'buildbot tryserver' command."""
    subcommandFunction = "buildbot.scripts.tryserver.tryserver"
    requiredOptions = ['jobdir']
    optParameters = [
        ["jobdir", None, None, "the jobdir (maildir) for submitting jobs"],
    ]

    def getSynopsis(self):
        return "Usage: buildbot tryserver [options]"

    def postOptions(self):
        # A jobdir (maildir) is mandatory for accepting submitted jobs.
        if self['jobdir']:
            return
        raise usage.UsageError('jobdir is required')
class CheckConfigOptions(base.SubcommandOptions):
    """Options for the 'buildbot checkconfig' command."""
    subcommandFunction = "buildbot.scripts.checkconfig.checkconfig"
    optFlags = [
        ['quiet', 'q', "Don't display error messages or tracebacks"],
    ]
    # on tab completion, suggest files as first argument
    if hasattr(usage, 'Completions'):
        # only set completion suggestion if running with
        # twisted version (>=11.1.0) that supports it
        compData = usage.Completions(extraActions=[usage.CompleteFiles()])

    def getSynopsis(self):
        return ("Usage:\t\tbuildbot checkconfig [configFile]\n"
                "\t\tIf not specified, the config file specified in "
                "'buildbot.tac' from the current directory will be used")

    def parseArgs(self, *args):
        # The optional positional argument is the config file to check.
        if args:
            self['configFile'] = args[0]
class UserOptions(base.SubcommandOptions):
    """Options for the 'buildbot user' command (user management over PB)."""
    subcommandFunction = "buildbot.scripts.user.user"
    optParameters = [
        ["master", "m", None,
         "Location of the buildmaster's user service (host:port)"],
        ["username", "u", None,
         "Username for PB authentication"],
        ["passwd", "p", None,
         "Password for PB authentication"],
        ["op", None, None,
         "User management operation: add, remove, update, get"],
        # Typo fix in the two help strings below: "availabe" -> "available".
        ["bb_username", None, None,
         "Username to set for a given user. Only available on 'update', "
         "and bb_password must be given as well."],
        ["bb_password", None, None,
         "Password to set for a given user. Only available on 'update', "
         "and bb_username must be given as well."],
        ["ids", None, None,
         "User's identifiers, used to find users in 'remove' and 'get' "
         "Can be specified multiple times (--ids=id1,id2,id3)"],
        ["info", None, None,
         "User information in the form: --info=type=value,type=value,.. "
         "Used in 'add' and 'update', can be specified multiple times. "
         "Note that 'update' requires --info=id:type=value..."]
    ]
    buildbotOptions = [
        ['master', 'master'],
        ['user_master', 'master'],
        ['user_username', 'username'],
        # BUG FIX: this mapping contained a corrupted '<PASSWORD>'
        # placeholder; it must map onto the 'passwd' command-line option.
        ['user_passwd', 'passwd'],
    ]
    requiredOptions = ['master']

    longdesc = textwrap.dedent("""
    Currently implemented types for --info= are:\n
    git, svn, hg, cvs, darcs, bzr, email
    """)

    def __init__(self):
        base.SubcommandOptions.__init__(self)
        # Multi-valued options accumulate here via opt_ids/opt_info.
        self['ids'] = []
        self['info'] = []

    def opt_ids(self, option):
        # --ids accepts a comma-separated list and may be repeated.
        id_list = option.split(",")
        self['ids'].extend(id_list)

    def opt_info(self, option):
        # splits info into type/value dictionary, appends to info
        info_list = option.split(",")
        info_elem = {}
        if len(info_list) == 1 and '=' not in info_list[0]:
            # A bare token is treated as an identifier on its own.
            info_elem["identifier"] = info_list[0]
            self['info'].append(info_elem)
        else:
            for i in range(0, len(info_list)):
                split_info = info_list[i].split("=", 1)
                # pull identifier from update --info (id:type=value form)
                if ":" in split_info[0]:
                    split_id = split_info[0].split(":")
                    info_elem["identifier"] = split_id[0]
                    split_info[0] = split_id[1]
                info_elem[split_info[0]] = split_info[1]
            self['info'].append(info_elem)

    def getSynopsis(self):
        return "Usage: buildbot user [options]"

    def _checkValidTypes(self, info):
        # Valid attribute types are the known VC source types plus the
        # built-in 'identifier' and 'email'.
        from buildbot.process.users import users
        valid = set(['identifier', 'email'] + users.srcs)
        for user in info:
            for attr_type in user:
                if attr_type not in valid:
                    raise usage.UsageError(
                        "Type not a valid attr_type, must be in: %s"
                        % ', '.join(valid))

    def postOptions(self):
        base.SubcommandOptions.postOptions(self)

        validateMasterOption(self.get('master'))

        op = self.get('op')
        if not op:
            raise usage.UsageError("you must specify an operation: add, "
                                   "remove, update, get")
        if op not in ['add', 'remove', 'update', 'get']:
            raise usage.UsageError("bad op %r, use 'add', 'remove', 'update', "
                                   "or 'get'" % op)

        if not self.get('username') or not self.get('passwd'):
            raise usage.UsageError("A username and password must be given")

        bb_username = self.get('bb_username')
        bb_password = self.get('bb_password')
        if bb_username or bb_password:
            if op != 'update':
                raise usage.UsageError("bb_username and bb_password only work "
                                       "with update")
            if not bb_username or not bb_password:
                raise usage.UsageError("Must specify both bb_username and "
                                       "bb_password or neither.")

        info = self.get('info')
        ids = self.get('ids')

        # check for erroneous args
        if not info and not ids:
            raise usage.UsageError("must specify either --ids or --info")

        if op == 'add' or op == 'update':
            if ids:
                raise usage.UsageError("cannot use --ids with 'add' or "
                                       "'update'")
            self._checkValidTypes(info)
            if op == 'update':
                for user in info:
                    if 'identifier' not in user:
                        raise usage.UsageError("no ids found in update info; "
                                               "use: --info=id:type=value,type=value,..")
            if op == 'add':
                for user in info:
                    if 'identifier' in user:
                        raise usage.UsageError("identifier found in add info, "
                                               "use: --info=type=value,type=value,..")
        if op == 'remove' or op == 'get':
            if info:
                raise usage.UsageError("cannot use --info with 'remove' "
                                       "or 'get'")
class DataSpecOption(base.BasedirMixin, base.SubcommandOptions):
subcommandFunction = "buildbot.scripts.dataspec.dataspec"
optParameters = [
['out', 'o', "dataspec.json", "output to specified path"],
['global', 'g', None,
"output a js script, that sets a global, for inclusion | |
"""
"""
# Created on 2014.05.31
#
# Author: <NAME>
#
# Copyright 2014 - 2020 <NAME>
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
import socket
from threading import Lock
from datetime import datetime, MINYEAR
from .. import DSA, SCHEMA, ALL, BASE, get_config_parameter, OFFLINE_EDIR_8_8_8, OFFLINE_EDIR_9_1_4, OFFLINE_AD_2012_R2, OFFLINE_SLAPD_2_4, OFFLINE_DS389_1_3_3, SEQUENCE_TYPES, IP_SYSTEM_DEFAULT, IP_V4_ONLY, IP_V6_ONLY, IP_V4_PREFERRED, IP_V6_PREFERRED, STRING_TYPES
from .exceptions import LDAPInvalidServerError, LDAPDefinitionError, LDAPInvalidPortError, LDAPInvalidTlsSpecificationError, LDAPSocketOpenError, LDAPInfoError
from ..protocol.formatters.standard import format_attribute_values
from ..protocol.rfc4511 import LDAP_MAX_INT
from ..protocol.rfc4512 import SchemaInfo, DsaInfo
from .tls import Tls
from ..utils.log import log, log_enabled, ERROR, BASIC, PROTOCOL, NETWORK
from ..utils.conv import to_unicode
from ..utils.port_validators import check_port, check_port_and_port_list
try:
from urllib.parse import unquote # Python 3
except ImportError:
from urllib import unquote # Python 2
try: # try to discover if unix sockets are available for LDAP over IPC (ldapi:// scheme)
# noinspection PyUnresolvedReferences
from socket import AF_UNIX
unix_socket_available = True
except ImportError:
unix_socket_available = False
class Server(object):
"""
LDAP Server definition class
Allowed_referral_hosts can be None (default), or a list of tuples of
allowed servers ip address or names to contact while redirecting
search to referrals.
The second element of the tuple is a boolean to indicate if
authentication to that server is allowed; if False only anonymous
bind will be used.
Per RFC 4516. Use [('*', False)] to allow any host with anonymous
bind, use [('*', True)] to allow any host with same authentication of
Server.
"""
_message_counter = 0
_message_id_lock = Lock() # global lock for message_id shared by all Server objects
def __init__(self,
host,
port=None,
use_ssl=False,
allowed_referral_hosts=None,
get_info=SCHEMA,
tls=None,
formatter=None,
connect_timeout=None,
mode=IP_V6_PREFERRED,
validator=None):
self.ipc = False
url_given = False
host = host.strip()
if host.lower().startswith('ldap://'):
self.host = host[7:]
use_ssl = False
url_given = True
elif host.lower().startswith('ldaps://'):
self.host = host[8:]
use_ssl = True
url_given = True
elif host.lower().startswith('ldapi://') and unix_socket_available:
self.ipc = True
use_ssl = False
url_given = True
elif host.lower().startswith('ldapi://') and not unix_socket_available:
raise LDAPSocketOpenError('LDAP over IPC not available - UNIX sockets non present')
else:
self.host = host
if self.ipc:
if str is bytes: # Python 2
self.host = unquote(host[7:]).decode('utf-8')
else: # Python 3
self.host = unquote(host[7:]) # encoding defaults to utf-8 in python3
self.port = None
elif ':' in self.host and self.host.count(':') == 1:
hostname, _, hostport = self.host.partition(':')
try:
port = int(hostport) or port
except ValueError:
if log_enabled(ERROR):
log(ERROR, 'port <%s> must be an integer', port)
raise LDAPInvalidPortError('port must be an integer')
self.host = hostname
elif url_given and self.host.startswith('['):
hostname, sep, hostport = self.host[1:].partition(']')
if sep != ']' or not self._is_ipv6(hostname):
if log_enabled(ERROR):
log(ERROR, 'invalid IPv6 server address for <%s>', self.host)
raise LDAPInvalidServerError()
if len(hostport):
if not hostport.startswith(':'):
if log_enabled(ERROR):
log(ERROR, 'invalid URL in server name for <%s>', self.host)
raise LDAPInvalidServerError('invalid URL in server name')
if not hostport[1:].isdecimal():
if log_enabled(ERROR):
log(ERROR, 'port must be an integer for <%s>', self.host)
raise LDAPInvalidPortError('port must be an integer')
port = int(hostport[1:])
self.host = hostname
elif not url_given and self._is_ipv6(self.host):
pass
elif self.host.count(':') > 1:
if log_enabled(ERROR):
log(ERROR, 'invalid server address for <%s>', self.host)
raise LDAPInvalidServerError()
if not self.ipc:
self.host.rstrip('/')
if not use_ssl and not port:
port = 389
elif use_ssl and not port:
port = 636
port_err = check_port(port)
if port_err:
if log_enabled(ERROR):
log(ERROR, port_err)
raise LDAPInvalidPortError(port_err)
self.port = port
if allowed_referral_hosts is None: # defaults to any server with authentication
allowed_referral_hosts = [('*', True)]
if isinstance(allowed_referral_hosts, SEQUENCE_TYPES):
self.allowed_referral_hosts = []
for referral_host in allowed_referral_hosts:
if isinstance(referral_host, tuple):
if isinstance(referral_host[1], bool):
self.allowed_referral_hosts.append(referral_host)
elif isinstance(allowed_referral_hosts, tuple):
if isinstance(allowed_referral_hosts[1], bool):
self.allowed_referral_hosts = [allowed_referral_hosts]
else:
self.allowed_referral_hosts = []
self.ssl = True if use_ssl else False
if tls and not isinstance(tls, Tls):
if log_enabled(ERROR):
log(ERROR, 'invalid tls specification: <%s>', tls)
raise LDAPInvalidTlsSpecificationError('invalid Tls object')
self.tls = Tls() if self.ssl and not tls else tls
if not self.ipc:
if self._is_ipv6(self.host):
self.name = ('ldaps' if self.ssl else 'ldap') + '://[' + self.host + ']:' + str(self.port)
else:
self.name = ('ldaps' if self.ssl else 'ldap') + '://' + self.host + ':' + str(self.port)
else:
self.name = host
self.get_info = get_info
self._dsa_info = None
self._schema_info = None
self.dit_lock = Lock()
self.custom_formatter = formatter
self.custom_validator = validator
self._address_info = [] # property self.address_info resolved at open time (or when check_availability is called)
self._address_info_resolved_time = datetime(MINYEAR, 1, 1) # smallest date ever
self.current_address = None
self.connect_timeout = connect_timeout
self.mode = mode
self.get_info_from_server(None) # load offline schema if needed
if log_enabled(BASIC):
log(BASIC, 'instantiated Server: <%r>', self)
@staticmethod
def _is_ipv6(host):
try:
socket.inet_pton(socket.AF_INET6, host)
except (socket.error, AttributeError, ValueError):
return False
return True
def __str__(self):
if self.host:
s = self.name + (' - ssl' if self.ssl else ' - cleartext') + (' - unix socket' if self.ipc else '')
else:
s = object.__str__(self)
return s
def __repr__(self):
r = 'Server(host={0.host!r}, port={0.port!r}, use_ssl={0.ssl!r}'.format(self)
r += '' if not self.allowed_referral_hosts else ', allowed_referral_hosts={0.allowed_referral_hosts!r}'.format(self)
r += '' if self.tls is None else ', tls={0.tls!r}'.format(self)
r += '' if not self.get_info else ', get_info={0.get_info!r}'.format(self)
r += '' if not self.connect_timeout else ', connect_timeout={0.connect_timeout!r}'.format(self)
r += '' if not self.mode else ', mode={0.mode!r}'.format(self)
r += ')'
return r
    @property
    def address_info(self):
        """Candidate socket addresses for this server.

        Resolves via getaddrinfo (or returns a single AF_UNIX pseudo-address
        for ldapi) when the cached list is empty or older than the configured
        refresh interval. Each entry is the getaddrinfo tuple converted to a
        list plus two extra slots: [5] availability (None = not checked,
        True/False = result of last check) and [6] the time of that check.
        """
        conf_refresh_interval = get_config_parameter('ADDRESS_INFO_REFRESH_TIME')
        if not self._address_info or (datetime.now() - self._address_info_resolved_time).seconds > conf_refresh_interval:
            # converts addresses tuple to list and adds a 6th parameter for availability (None = not checked, True = available, False=not available) and a 7th parameter for the checking time
            addresses = None
            try:
                if self.ipc:
                    # Unix socket: single pseudo-address, no DNS lookup needed.
                    addresses = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, None, self.host, None)]
                else:
                    # First attempt uses AI_ADDRCONFIG | AI_V4MAPPED flags.
                    if self.mode == IP_V4_ONLY:
                        addresses = socket.getaddrinfo(self.host, self.port, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG | socket.AI_V4MAPPED)
                    elif self.mode == IP_V6_ONLY:
                        addresses = socket.getaddrinfo(self.host, self.port, socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG | socket.AI_V4MAPPED)
                    else:
                        addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_ADDRCONFIG | socket.AI_V4MAPPED)
            except (socket.gaierror, AttributeError):
                pass

            if not addresses:  # if addresses not found or raised an exception (for example for bad flags) tries again without flags
                try:
                    if self.mode == IP_V4_ONLY:
                        addresses = socket.getaddrinfo(self.host, self.port, socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
                    elif self.mode == IP_V6_ONLY:
                        addresses = socket.getaddrinfo(self.host, self.port, socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP)
                    else:
                        addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.IPPROTO_TCP)
                except socket.gaierror:
                    pass

            if addresses:
                self._address_info = [list(address) + [None, None] for address in addresses]
                self._address_info_resolved_time = datetime.now()
            else:
                # Resolution failed: clear the cache and reset the timestamp
                # so the next access retries immediately.
                self._address_info = []
                self._address_info_resolved_time = datetime(MINYEAR, 1, 1)  # smallest date

            if log_enabled(BASIC):
                for address in self._address_info:
                    log(BASIC, 'address for <%s> resolved as <%r>', self, address[:-2])
        return self._address_info
def update_availability(self, address, available):
cont = 0
while cont < len(self._address_info):
if self.address_info[cont] == address:
self._address_info[cont][5] = True if available else False
self._address_info[cont][6] = datetime.now()
break
cont += 1
def reset_availability(self):
for address in self._address_info:
address[5] = None
address[6] = None
def check_availability(self, source_address=None, source_port=None, source_port_list=None):
"""
Tries to open, connect and close a socket to specified address and port to check availability.
Timeout in seconds is specified in CHECK_AVAILABITY_TIMEOUT if not specified in
the Server object.
If specified, use a specific address, port, or list of possible ports, when attempting to check availability.
NOTE: This will only consider multiple ports from the source port list if the first ones we try to bind to are
already in use. This will not attempt using different ports in the list if the server is unavailable,
as that could result in the runtime of check_availability significantly exceeding the connection timeout.
"""
source_port_err = check_port_and_port_list(source_port, source_port_list)
if source_port_err:
if log_enabled(ERROR):
log(ERROR, source_port_err)
raise LDAPInvalidPortError(source_port_err)
# using an empty string to bind a socket means "use the default as if this wasn't provided" because socket
# binding requires that | |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module define a WulffShape class to generate the Wulff shape from
a lattice, a list of indices and their corresponding surface energies,
and the total area and volume of the wulff shape,the weighted surface energy,
the anisotropy and shape_factor can also be calculated.
In support of plotting from a given view in terms of miller index.
The lattice is from the conventional unit cell, and (hkil) for hexagonal
lattices.
If you use this code extensively, consider citing the following:
<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.
(2016). Surface energies of elemental crystals. Scientific Data.
"""
from pymatgen.core.structure import Structure
from pymatgen.util.coord import get_angle
import numpy as np
import scipy as sp
from scipy.spatial import ConvexHull
import logging
import warnings
__author__ = '<NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = 'May 5 2016'
logger = logging.getLogger(__name__)
def hkl_tuple_to_str(hkl):
    """
    Prepare a Miller index for display on plots: "(hkl)" for surfaces.
    Negative indices are rendered with a LaTeX overline.

    Args:
        hkl: in the form of [h, k, l] or (h, k, l)
    """
    rendered = ''.join('\\overline{' + str(-index) + '}' if index < 0 else str(index)
                       for index in hkl)
    return '($' + rendered + '$)'
def get_tri_area(pts):
    """
    Given a list of coords for 3 points,
    compute the area of this triangle.

    Args:
        pts: [a, b, c] three points

    Returns:
        float: area of the triangle abc (half the norm of the cross product).
    """
    a, b, c = pts[0], pts[1], pts[2]
    v1 = np.array(b) - np.array(a)
    v2 = np.array(c) - np.array(a)
    # BUG FIX: sp.cross / sp.linalg.norm relied on scipy's deprecated numpy
    # aliases, which have been removed from modern scipy; use numpy directly.
    area_tri = abs(np.linalg.norm(np.cross(v1, v2)) / 2)
    return area_tri
class WulffFacet:
    """
    Helper container for each Wulff plane.

    Holds the plane data computed during Wulff construction, plus the
    facet's triangle points and outer lines which are filled in later
    while walking the convex-hull simplices.
    """

    def __init__(self, normal, e_surf, normal_pt, dual_pt, index, m_ind_orig,
                 miller):
        """
        :param normal: surface normal of the plane.
        :param e_surf: surface energy.
        :param normal_pt: point at normal * e_surf.
        :param dual_pt: point at normal / e_surf.
        :param index: color index for the facet.
        :param m_ind_orig: position of the originating input miller entry.
        :param miller: the miller index itself.
        """
        self.normal = normal
        self.e_surf = e_surf
        self.normal_pt = normal_pt
        self.dual_pt = dual_pt
        self.index = index
        self.m_ind_orig = m_ind_orig
        self.miller = miller
        # Populated later during the Wulff-shape construction.
        self.points = []
        self.outer_lines = []
class WulffShape:
"""
Generate Wulff Shape from list of miller index and surface energies,
with given conventional unit cell.
surface energy (Jm^2) is the length of normal.
Wulff shape is the convex hull.
Based on:
http://scipy.github.io/devdocs/generated/scipy.spatial.ConvexHull.html
Process:
1. get wulff simplices
2. label with color
3. get wulff_area and other properties
.. attribute:: debug (bool)
.. attribute:: alpha
transparency
.. attribute:: color_set
.. attribute:: grid_off (bool)
.. attribute:: axis_off (bool)
.. attribute:: show_area
.. attribute:: off_color
color of facets off wulff
.. attribute:: structure
Structure object, input conventional unit cell (with H ) from lattice
.. attribute:: miller_list
list of input miller index, for hcp in the form of hkil
.. attribute:: hkl_list
modify hkill to hkl, in the same order with input_miller
.. attribute:: e_surf_list
list of input surface energies, in the same order with input_miller
.. attribute:: lattice
Lattice object, the input lattice for the conventional unit cell
.. attribute:: facets
[WulffFacet] for all facets considering symm
.. attribute:: dual_cv_simp
simplices from the dual convex hull (dual_pt)
.. attribute:: wulff_pt_list
.. attribute:: wulff_cv_simp
simplices from the convex hull of wulff_pt_list
.. attribute:: on_wulff
list for all input_miller, True is on wulff.
.. attribute:: color_area
list for all input_miller, total area on wulff, off_wulff = 0.
.. attribute:: miller_area
($hkl$): area for all input_miller
"""
    def __init__(self, lattice, miller_list, e_surf_list, symprec=1e-5):
        """
        Args:
            lattice: Lattice object of the conventional unit cell
            miller_list ([(hkl), ...]: list of hkl or hkil for hcp
            e_surf_list ([float]): list of corresponding surface energies
            symprec (float): for recp_operation, default is 1e-5.
        """
        if any([se < 0 for se in e_surf_list]):
            warnings.warn("Unphysical (negative) surface energy detected.")
        self.color_ind = list(range(len(miller_list)))

        self.input_miller_fig = [hkl_tuple_to_str(x) for x in miller_list]
        # store input data
        # A dummy structure with a single H atom carries the lattice so that
        # reciprocal-lattice operations can be derived from it.
        self.structure = Structure(lattice, ["H"], [[0, 0, 0]])
        self.miller_list = tuple([tuple(x) for x in miller_list])
        # hkil (hcp) indices are reduced to hkl by dropping the redundant i.
        self.hkl_list = tuple([(x[0], x[1], x[-1]) for x in miller_list])
        self.e_surf_list = tuple(e_surf_list)
        self.lattice = lattice
        self.symprec = symprec

        # 2. get all the data for wulff construction
        # get all the surface normal from get_all_miller_e()
        self.facets = self._get_all_miller_e()
        logger.debug(len(self.facets))

        # 3. consider the dual condition
        dual_pts = [x.dual_pt for x in self.facets]
        dual_convex = ConvexHull(dual_pts)
        dual_cv_simp = dual_convex.simplices
        # simplices	(ndarray of ints, shape (nfacet, ndim))
        # list of [i, j, k] , ndim = 3
        # i, j, k: ind for normal_e_m
        # recalculate the dual of dual, get the wulff shape.
        # conner <-> surface
        # get cross point from the simplices of the dual convex hull
        wulff_pt_list = [self._get_cross_pt_dual_simp(dual_simp)
                         for dual_simp in dual_cv_simp]

        wulff_convex = ConvexHull(wulff_pt_list)
        wulff_cv_simp = wulff_convex.simplices
        logger.debug(", ".join([str(len(x)) for x in wulff_cv_simp]))

        # store simplices and convex
        self.dual_cv_simp = dual_cv_simp
        self.wulff_pt_list = wulff_pt_list
        self.wulff_cv_simp = wulff_cv_simp
        self.wulff_convex = wulff_convex

        self.on_wulff, self.color_area = self._get_simpx_plane()

        # Human-readable "(hkl) : area" strings, one per input miller index.
        miller_area = []
        for m, in_mill_fig in enumerate(self.input_miller_fig):
            miller_area.append(
                in_mill_fig + ' : ' + str(round(self.color_area[m], 4)))
        self.miller_area = miller_area
    def _get_all_miller_e(self):
        """
        from self:
            get miller_list(unique_miller), e_surf_list and symmetry
            operations(symmops) according to lattice
        apply symmops to get all the miller index, then get normal,
        get all the facets functions for wulff shape calculation:
            |normal| = 1, e_surf is plane's distance to (0, 0, 0),
            normal[0]x + normal[1]y + normal[2]z = e_surf

        return:
            [WulffFacet]
        """
        all_hkl = []
        color_ind = self.color_ind
        planes = []
        recp = self.structure.lattice.reciprocal_lattice_crystallographic
        recp_symmops = self.lattice.get_recp_symmetry_operation(self.symprec)

        for i, (hkl, energy) in enumerate(zip(self.hkl_list,
                                              self.e_surf_list)):
            for op in recp_symmops:
                # Expand each input (hkl, energy) over the full set of
                # reciprocal symmetry operations, skipping duplicates.
                miller = tuple([int(x) for x in op.operate(hkl)])
                if miller not in all_hkl:
                    all_hkl.append(miller)
                    normal = recp.get_cartesian_coords(miller)
                    # Normalize so e_surf is the plane's distance from origin.
                    normal /= sp.linalg.norm(normal)
                    normal_pt = [x * energy for x in normal]
                    dual_pt = [x / energy for x in normal]
                    # Symmetry-equivalent facets share the color of input i.
                    color_plane = color_ind[divmod(i, len(color_ind))[1]]
                    planes.append(WulffFacet(normal, energy, normal_pt,
                                             dual_pt, color_plane, i, hkl))

        # sort by e_surf
        planes.sort(key=lambda x: x.e_surf)
        return planes
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt
    def _get_simpx_plane(self):
        """
        Locate the plane for simpx of on wulff_cv, by comparing the center of
        the simpx triangle with the plane functions.
        """
        on_wulff = [False] * len(self.miller_list)
        surface_area = [0.0] * len(self.miller_list)
        for simpx in self.wulff_cv_simp:
            pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
            center = np.sum(pts, 0) / 3.0
            # check whether the center of the simplices is on one plane
            for plane in self.facets:
                abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
                if abs_diff < 1e-5:
                    # The triangle lies on this facet: accumulate its area
                    # and record its edges for later outline drawing.
                    on_wulff[plane.index] = True
                    surface_area[plane.index] += get_tri_area(pts)

                    plane.points.append(pts)
                    plane.outer_lines.append([simpx[0], simpx[1]])
                    plane.outer_lines.append([simpx[1], simpx[2]])
                    plane.outer_lines.append([simpx[0], simpx[2]])
                    # already find the plane, move to the next simplices
                    break
        for plane in self.facets:
            plane.outer_lines.sort()
            # An edge shared by two triangles of the same facet is interior;
            # keep only the facet's boundary edges (those not counted twice).
            plane.outer_lines = [line for line in plane.outer_lines
                                 if plane.outer_lines.count(line) != 2]
        return on_wulff, surface_area
def _get_colors(self, color_set, alpha, off_color, custom_colors={}):
"""
assign colors according to the surface energies of on_wulff facets.
return:
(color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff,
e_surf_on_wulff_list)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
color_list = [off_color] * len(self.hkl_list)
color_proxy_on_wulff = []
miller_on_wulff = []
e_surf_on_wulff = [(i, e_surf)
for i, e_surf in enumerate(self.e_surf_list)
if self.on_wulff[i]]
c_map = plt.get_cmap(color_set)
e_surf_on_wulff.sort(key=lambda x: x[1], reverse=False)
e_surf_on_wulff_list = [x[1] for x in e_surf_on_wulff]
if len(e_surf_on_wulff) > 1:
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list),
vmax=max(e_surf_on_wulff_list))
else:
# if there is only one hkl on wulff, choose the color of the median
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list) - 0.1,
vmax=max(e_surf_on_wulff_list) + 0.1)
scalar_map = mpl.cm.ScalarMappable(norm=cnorm, cmap=c_map)
for i, e_surf in e_surf_on_wulff:
color_list[i] = scalar_map.to_rgba(e_surf, alpha=alpha)
if tuple(self.miller_list[i]) in custom_colors.keys():
color_list[i] = custom_colors[tuple(self.miller_list[i])]
color_proxy_on_wulff.append(
plt.Rectangle((2, 2), 1, 1, fc=color_list[i], alpha=alpha))
miller_on_wulff.append(self.input_miller_fig[i])
scalar_map.set_array([x[1] for x in e_surf_on_wulff])
color_proxy = [plt.Rectangle((2, 2), 1, 1, fc=x, alpha=alpha)
for x in color_list]
return color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, e_surf_on_wulff_list
def show(self, *args, **kwargs):
r"""
Show the Wulff plot.
Args:
*args: Passed to get_plot.
**kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def get_line_in_facet(self, facet):
"""
Returns the sorted pts in a facet used to | |
<filename>source_code/patent_tracker.py
import asyncio
import time
import os
from datetime import datetime, date, timedelta
import aiohttp
from bs4 import BeautifulSoup
from typing import Union, List, Tuple, Iterator, Iterable, Dict
import openpyxl
from default_style import info_style, field_style, record_style, sheet_style
import string
api_key_path = "../input_setting/api_key.txt"
input_path = "../input_setting/input.xlsx"
output_directory = "../output"
## FYI ##
#current version: v1.0
#Comments & Docstring: English (for developers)
#print message: Korean (for consumers)
#TODO before 2.0:
# 1. Crawling by Selenium -> (despite its heaviness) works without api_key.
# 2. More output formats: to_img(), to_csv()
class PatentTracker():
"""Creates PatentTracker instance.
Consists of 4 kinds of methods
I. GETTERS AND SETTERS: api_key, since, before, targets, results
II. SETTINGS AND INPUT: read_and_check_api_key, read_input
III. TRACKING: track_patents
IV. OUTPUT: to_excel and private methods
-- Quick Example --
tracker = PatentTracker()
time1 = time.time()
loop = asyncio.get_event_loop()
loop.run_until_complete(tracker.track_patents())
loop.close
time2 = time.time()
print(f"total time taken: {time2-time1}")
tracker.to_excel()
"""
def __init__(self):
self.__api_key: str = ""
self.__targets: Iterator = (i for i in [])
self.__since:date = date(1970,1,1)
self.__before:date = datetime.now().date()
self.__additional_info_dict: Dict = {}
self.__results: List = []
if os.path.exists(input_path): self.read_input(input_path)
else: raise FileNotFoundError(f"{input_path} 를 찾을 수 없습니다.")
if os.path.exists(api_key_path): self.read_and_check_api_key(api_key_path)
else: raise FileNotFoundError(f"{api_key_path}를 찾을 수 없습니다.")
############### I. GETTERS AND SETTERS ###############
    @property
    def api_key(self):
        """KIPRIS REST API access key currently in use."""
        return self.__api_key
    @api_key.setter
    def api_key(self, api_key:str):
        # Strip whitespace/newlines that come from reading the key file.
        self.__api_key = api_key.strip()
        print(f"tracker.api_key is now {self.__api_key}")
@property
def since(self):
return self.__since
@since.setter
def since(self, new_since):
if isinstance(new_since, datetime):
new_since = datetime.strftime(new_since, "%Y/%m/%d")
new_since = "/".join(new_since.split(" "))
new_since = "/".join(new_since.split("."))
new_since = "/".join(new_since.split("-"))
self.__since = datetime.strptime(new_since, "%Y/%m/%d").date()
print(f"tracker.since is set as {self.__since}")
@property
def before(self):
return self.__before
@before.setter
def before(self, new_before):
if isinstance(new_before, datetime):
new_before = datetime.strftime(new_before, "%Y/%m/%d")
new_before = "/".join(new_before.split(" "))
new_before = "/".join(new_before.split("."))
new_before = "/".join(new_before.split("-"))
self.__before = datetime.strptime(new_before, "%Y/%m/%d").date()
print(f"tracker.before is set as {self.__before}")
@property
def targets(self):
return self.__targets
@targets.setter
def targets(self, targets: List[str]):
weird_input = [target for target in targets if len(target) != 13]
if weird_input:
raise ValueError(
# "Some input elements does not satisfy condition." + "\n" +
# "Please check following elements and make sure they are 13 digit" +"\n" +
# f"{weird_input}"
"일부 타겟 정보가 올바르지 않습니다.\n"+
"아래 출원번호값들을 확인 해 주세요.\n"+
f"{weird_input}\n"+
"출원번호는 13자리 숫자 값이어야 합니다."
)
targets_gen = (target if len(target)==13 else "".join(target.split("-")) for target in targets)
self.__targets = targets_gen
print("타겟 입력 성공.")
# print("successfully fetched targets. ")
    @property
    def additional_info_dict(self):
        """Read-only mapping: application number -> extra columns from input.xlsx."""
        return self.__additional_info_dict
    @property
    def results(self):
        """Read-only tracking results: [(application_number, result_2D_table), ...]."""
        return self.__results
############### II. LOAD SETTINGS, INPUT ###############
def read_and_check_api_key(self, path:str, verbose=False):
with open(path, "r") as text:
api_key = text.readline()
if verbose: print("API key를 api_key.txt 로부터가져왔습니다.")
#print(f"Read api_key from api_key.txt as : {api_key}")
if self.check_api_key(api_key):
self.api_key = api_key
else:
print("읽어온 API key가 유효하지 않습니다. 기존 값으로 유지됩니다.")
print(f"읽어온 key 값: {api_key}")
print(f"현재 API key: {self.__api_key}")
    #TODO: implement a real validation request against the KIPRIS API.
    def check_api_key(self, api_key):
        """Validate *api_key*. Currently a stub that always returns True.

        The commented-out sketch below shows the intended implementation:
        issue a cheap request with the key and inspect the response.
        """
        # print(f"checking api_key vailidy")
        # import urllib
        # url = 'http://plus.kipris.or.kr/openapi/rest/RelatedDocsonfilePatService/relatedDocsonfileInfo'
        # query = f'?applicationNumber=1019940701319&accessKey={api_key}'
        # urllib.request(url+query)
        return True
def read_input(self, input_path:str=input_path, verbose=False):
"""Reads file that contains input info and assigns properties using setters.
self.__targets, self.since, self.before will be assigned.
Argument(s)
- input_path:str = input_path
"""
targets:List[str] = []
additional_info_dict:Dict = {}
if os.path.exists(input_path):
wb = openpyxl.load_workbook(input_path) #Read xl file
if verbose: print(f"{input_path}로 부터 인풋 값을 불러옵니다.")
# Fetching application numbers from sheet 'targets'
try:target_sheet = wb['targets']
except KeyError:
print(f"{input_path} 에 'targets' 시트가 존재하지 않습니다.")
print("'Sheet2' 시트로 부터 타겟 정보를 불러옵니다.")
target_sheet = wb['Sheet1']
for row in target_sheet.iter_rows(min_row=3, max_row=5000, max_col=6, values_only=True):
if row[0] == None: continue
targets.append(str(row[0])) # application number.
additional_info_dict[str(row[0])] = row[1:] # additional info of target patent.
# print(f"출원번호 {row[0]} 및 추가정보를 읽어왔습니다.")
self.targets = targets #saved as generator
if verbose:
print(f"targets[:3]: {targets[:3]}")
print(f"self.targets: {self.targets}")
self.__additional_info_dict = additional_info_dict
if verbose:
print("타겟 정보를 성공적으로 불러왔습니다.")
print(f"타겟 수: {len(targets)}")
print(f"첫번째 타겟 출원번호: {list(self.additional_info_dict.keys())[0]}")
print(f"첫번째 타겟 부가정보: {list(self.additional_info_dict.values())[0]}")
# Reading date info from sheet 'dates'
try:dates_sheet = wb['dates']
except KeyError:
print(f"{input_path} 에 'dates' 시트가 존재하지 않습니다.")
# print("Current excel file doesn't have a sheet named 'dates'")
print(f"'Sheet2' 시트로 부터 날짜구간 정보를 불러옵니다.")
# print("worksheet 'Sheet2' will be open instead of worksheet 'dates'")
dates_sheet = wb['Sheet2']
last_n_day = abs(dates_sheet['C2'].value)
if last_n_day:
self.__before = datetime.now().date()
self.__since = self.__before - timedelta(days=last_n_day)
else:
self.since = dates_sheet['C3'].value
self.before = dates_sheet['C4'].value
else:
print(f"{input_path} 파일이 존재하지 않습니다.")
# print(f"file does not exist in the path: {input_path}")
############### III. TRACKING ###############
async def track_patents(self, verbose=False):
"""Asynchronously tracks patents in self.targets
Simply operates by repeating self.track_patent()
Saves a list containing tuples at self.__results.
[(application_number, result_2D_table), (...), ...]
"""
# returned values of each task will be appended into an empty list and then returned.
futures = [asyncio.ensure_future(self.track_patent(patent, verbose=verbose)) for patent in self.targets]
results = await asyncio.gather(*futures)
## this code will work synchronously -> compare with async
# results = []
# for patent in self.targets:
# results.append(await self.track_patent(patent))
# print(results)
self.__results = results
if verbose:
print(f"특허 트래킹 완료.")
print(f"첫 특허의 출원번호: {results[0][0]}")
print(f"첫 특허의 결과 테이블 일부: {results[0][1][:3]}")
async def track_patent(self, patent, verbose=False):
""" Requests information of a patent and filter_out unneccesary information.
Returns a 2-dimensional list.
"""
records = await self.request_and_parse_kipris_API(application_number = patent, api_key = self.api_key, verbose=verbose)
#print(f"records: {records}")
result_table = await self.filter_records(records, verbose=verbose)
# self.__result_dict[patent] = result_table
return (patent, result_table)
async def request_and_parse_kipris_API(self, application_number, api_key, verbose=False):
"""Request kipris REST API (asynchronously) and parse data using Beautifulsoup.
soup.findall("relateddocsonfileInfo") will be returned.
"""
url = 'http://plus.kipris.or.kr/openapi/rest/RelatedDocsonfilePatService/relatedDocsonfileInfo'
query = f'?applicationNumber={application_number}&accessKey={api_key}'
time1 = time.time()
if verbose: print(f"request for patent:{application_number} started.")
## request by requests and loop.run_in_executor
# import requests
# loop_ = asyncio.get_event_loop()
# response = await loop_.run_in_executor(None, requests.get, url+query)
# text= response.text
# request by aiohttp
async with aiohttp.ClientSession() as session:
async with session.get(url+query) as response:
text = await response.text()
time2 = time.time()
if verbose: print(f"request for patent:{application_number} finished. time:{time2-time1}")
# parse
soup = BeautifulSoup(text, "xml")
records = soup.find_all('relateddocsonfileInfo')
if records == []:
print("No records detected. Please check result message ")
print(f"result_message: {soup.find('resultMsg').text}")
return records
async def filter_records(self, records, verbose=False):
""" Filters out unnecessary records and fields.
Returns a 2-dimensional list.
"""
filtered_records = []
time1 = time.time()
for i, record in enumerate(records):
ymd = record.documentDate.text
record_date = date(int(ymd[:4]), int(ymd[4:6]), int(ymd[6:8]))
if record_date < self.since or record_date > self.before:
continue
else:
filtered_records.append([
i+1, #n-th record
record.documentTitle.text, #서류명
record.documentDate.text, #접수/발송 일자
record.status.text, #처리상태
record.step.text, #단계 (출원/등록 등.)
record.trialNumber.text, #심판 번호
record.registrationNumber.text #등록 번호
])
time2 = time.time()
if verbose: print(f"filtering records from a patent finished. time:{time2-time1}")
return filtered_records
############### OUTPUT ###############
def to_excel(self, verbose=False):
"""Saves result as an excel file(.xlsx)
"""
print(self.results)
if self.results == []:
print("결과 값이 없습니다. 엑셀파일을 생성하지 않고 종료합니다.")
#print("No results exists. Execute self.track_patents() to get results ")
return
# Create excel file
if verbose: print("엑셀 파일 작성을 시작합니다.")
result_wb = openpyxl.Workbook()
result_ws = result_wb.active
result_ws.title = 'result'
# Apply sheet_style
for letter in string.ascii_uppercase[:6]:
result_ws.column_dimensions[letter] = sheet_style[f"col_{letter}_width"]
current_row = 1
# Write data
for result in self.results:
application_number = result[0]
result_table = result[1]
self._write_title(result_ws, current_row, title="출원번호: "+ application_number)
current_row += 1
self._write_info(result_ws, current_row, additional_info=self.additional_info_dict[application_number])
current_row += 1
self._write_fields(result_ws, current_row)
current_row += 1
self._write_records(result_ws, current_row, records=result_table)
current_row += len(result_table)+2
# print(f"출원번호 {application_number} 의 결과테이블 작성 완료.")
#Save
timestamp = time.strftime("%y%m%d_%H%M%S")
output_name = output_directory + f"/output_{timestamp}.xlsx"
result_wb.save(output_name)
if verbose: print(f'엑셀 파일 {output_name} 저장을 완료했습니다.')
def _write_title(self, result_ws, current_row, title):
# default_title: application number.
result_ws.merge_cells(f'A{current_row}:F{current_row}')
result_ws[f'A{current_row}'].value = title
result_ws[f'A{current_row}'].style = info_style
def _write_info(self, result_ws, current_row, additional_info):
#result_ws[f'A{current_row}'].value = info_0
for i,j in enumerate('BCDEF'):
result_ws[f'{j}{current_row}'].value = additional_info[i] #info: from input.xlsx
for row in result_ws[f"A{current_row}":f"F{current_row}"]:
for cell in row:
cell.style = info_style
def _write_fields(self, result_ws, current_row):
fields = ["번호", "서류명", "접수/발송일자", "처리단계", "단계", "심판/등록 번호"]
for i,j in zip(fields, 'ABCDEF'):
result_ws[f'{j}{current_row}'].value = i
for row in result_ws[f"A{current_row}":f"F{current_row}"]:
for cell in row:
cell.style=field_style
# records = 2D array (list) n*5
def _write_records(self, result_ws, current_row, records):
for row in records:
number, document_title, document_date, status, step, trial_number, registration_number = row
for i,j in zip(row[:5],'ABCDE'): #번호, 서류명, 접수/발송일자, 처리상태, 단계
result_ws[f'{j}{current_row}'].value = i
if trial_number !=' ':
result_ws[f'F{current_row}'].value = trial_number #심판번호
elif registration_number !=' ':
result_ws[f'F{current_row}'].value = registration_number | |
redundancy e.g.
# Sunday...Increasing clouds.
# Sunday Night...Increasing clouds.
#
#If the previous period had increasing or decreasing wording, return 0
# Otherwise, return 1
# Check to see if previous period had increasing or decreasing wording
component = node.getComponent()
prevComp = component.getPrev()
if prevComp is not None:
# Look at the sky_phrase
skyWords = self.findWords(
tree, prevComp, "Sky", node.getAreaLabel(),
phraseList=[node.getAncestor('name')], phraseLevel=1)
if skyWords is not None:
if skyWords.find("increasing") >= 0 or \
skyWords.find("decreasing") >= 0:
return 0
return 1
return 1
"""
### For headlinesTiming
headlinesTiming = """
def allowedHeadlines(self):
allActions = []
return [
('LocalHazard1', allActions, 'Tropical'),
]
def _preProcessArea(self, fcst, editArea, areaLabel, argDict):
# This is the header for an edit area combination
areaHeader = self.makeAreaHeader(
argDict, areaLabel, self._issueTime, self._expireTime,
self._areaDictionary, self._defaultEditAreas)
fcst = fcst + areaHeader
# get the hazards text
self._hazards = argDict['hazards']
self._combinations = argDict["combinations"]
headlines = self.generateProduct("Hazards", argDict, area = editArea,
areaLabel=areaLabel,
timeRange = self._timeRange)
headlines_local = self.generateProduct("Headlines", argDict, area = editArea,
areaLabel=areaLabel,
timeRange = self._timeRange)
fcst = fcst + headlines + headlines_local
return fcst
"""
headlinesTiming1 = """
def headlinesTiming(self, tree, node, key, timeRange, areaLabel, issuanceTime):
return "DAY_NIGHT_ONLY", "DAY_NIGHT_ONLY", 0
"""
headlinesTiming2 = """
def headlinesTiming(self, tree, node, key, timeRange, areaLabel, issuanceTime):
return "FUZZY", "FUZZY", 3
"""
headlinesTiming3 = """
def headlinesTiming(self, tree, node, key, timeRange, areaLabel, issuanceTime):
return "EXPLICIT", "FUZZY", 3
"""
headlinesTiming4 = """
def headlinesTiming(self, tree, node, key, timeRange, areaLabel, issuanceTime):
return "EXPLICIT", "EXPLICIT", 3
"""
localHeadlines1 = """
def allowedHazards(self):
allActions = ["NEW", "EXA", "EXB", "EXT", "UPG", "CAN", "CON", "EXP"]
return [
('WS.W', allActions, 'WinterWx'), # WINTER STORM WARNING
('LocalHazard1', allActions, 'Misc'),
]
"""
hoursSChc1 = """#Definition["hoursSChcEnds"] = 24"""
hoursSChc2 = """Definition["hoursSChcEnds"] = 84"""
null_distinct = """
def null_alwaysDistinct_flag_dict(self, tree, node):
# If 1, null values will always be considered distinct from non-null values
# when combining subphrases. Thus, with a null value of 5,
# you may end up with phrases such as:
# Winds less than 5 mph becoming east 5 mph.
# If set to 0, the determination will be made based on the scalar or
# vector difference as with non-null values.
# (See scalar_difference_nlValue_dict and vector_mag_difference_nlValue_dict)
return {
"otherwise": 1,
"Wind": 0,
}
"""
tempCov = """
def temporalCoverage_dict(self, parmHisto, timeRange, componentName):
# This is temporalCoverage percentage by weather element
# Used by temporalCoverage_flag
return {
"LAL": 0,
"MinRH": 0,
"MaxRH": 0,
"MinT": 1,
"MaxT": 1,
"Haines": 0,
"PoP" : 20,
"Hazards" : 0,
}
"""
CWFPeriod = """
def _issuance_list(self, argDict):
# This method sets up configurable issuance times with associated
# narrative definitions. See the Text Product User Guide for documentation.
if self._definition["includeEveningPeriod"] == 1:
narrativeDefAM = [
("CWFPeriod", "period1"),
## ("", 12),# ("", 12), ("", 12), ("", 12),
## ("", 12),
## ("CWFExtended", 24), ("CWFExtended", 24)
]
narrativeDefPM = [
("", "period1"),
("", 12), ("", 12), ("", 12), ("", 12),
("", 12), ("", 12),
("CWFExtended", 24), ("CWFExtended", 24)
]
else:
narrativeDefAM = [
("", "period1"),
("", 12), ("", 12), ("", 12), ("", 24),
("CWFExtended", 24), ("CWFExtended", 24)
]
narrativeDefPM = [
("", "period1"),
("", 12), ("", 12), ("", 12), ("", 12),
("CWFExtended", 24),
("CWFExtended", 24), ("CWFExtended", 24)
]
return [
("Morning", self.DAY(), self.NIGHT(), "issuanceHour + 13",
".TODAY...", "early", "late", 1, narrativeDefAM),
("Morning with Pre-1st Period", "issuanceHour", self.NIGHT(),
"issuanceHour + 13", ".TODAY...", "early", "late", 1,
narrativeDefAM),
("Morning Update", "issuanceHour", self.NIGHT(),
"issuanceHour + 13", ".REST OF TODAY...", "early in the morning",
"late in the afternoon", 1, narrativeDefAM),
("Afternoon Update", "issuanceHour", self.NIGHT(), "issuanceHour + 13",
".REST OF TODAY...", "early in the morning", "late in the afternoon",
1, narrativeDefAM),
# End times are tomorrow:
("Afternoon", self.NIGHT(), 24 + self.DAY(), "issuanceHour + 13",
".TONIGHT...", "late in the night", "early in the evening", 1, narrativeDefPM),
("Afternoon with Pre-1st Period", "issuanceHour", 24 + self.DAY(),
"issuanceHour + 13", ".TONIGHT...", "late in the night", "early in the evening", 1,
narrativeDefPM),
("Evening Update", "issuanceHour", 24 + self.DAY(), "issuanceHour + 13",
".REST OF TONIGHT...", "early in the morning", "early in the evening", 1,
narrativeDefPM),
# For the early morning update, this produces:
# Rest of Tonight:
# Monday
# Monday Night
("Early Morning Update", "issuanceHour", self.DAY(), "issuanceHour + 13",
".REST OF TONIGHT...", "early in the morning", "late in the afternoon",
0, narrativeDefPM),
# Alternative
# For the early morning update, this produces:
# Early this morning:
# Today
# Tonight
#("Evening Update", "issuanceHour", 24 + self.DAY(), "issuanceHour + 13",
# ".REST OF TONIGHT...", "late in the night", "early in the evening",
# 1, narrativeDefPM),
#("Early Morning Update", "issuanceHour", self.DAY(), "issuanceHour + 13",
# ".EARLY THIS MORNING...", "early in the morning", "late in the afternoon",
# 1, narrativeDefPM),
]
def CWFPeriod(self):
return {
"type": "component",
"methodList": [
self.assemblePhrases,
self.wordWrap,
],
"analysisList": [
# NOTE: Choose from the following analysis options.
# Do not remove the "vectorMinMax" analysis for
# "Wind". This is necessary to get an absolute max if
# the useWindsForGusts flag is on.
# Use the following if you want moderated ranges
# (e.g. N WIND 10 to 20 KT)
# Set the moderating percentage in the "moderated_dict"
# dictionary module.
# Set the maximum range values in the "maximum_range_nlValue_dict"
# dictionary module.
("Wind", self.vectorModeratedMinMax, [3]),
("Wind", self.vectorMinMax, [12]),
("WindGust", self.moderatedMax, [3]),
("WaveHeight", self.moderatedMinMax, [6]),
("WindWaveHgt", self.moderatedMinMax, [6]),
("Swell", self.vectorModeratedMinMax, [6]),
("Swell2", self.vectorModeratedMinMax, [6]),
("Period", self.moderatedMinMax, [6]),
("Period2", self.moderatedMinMax, [6]),
("Wx", self.rankedWx, [6]),
("T", self.minMax),
("PoP", self._PoP_analysisMethod("CWFPeriod"), [6]),
("PoP", self.binnedPercent, [6]),
],
"phraseList":[
# WINDS
self.marine_wind_withGusts_phrase,
# Alternative:
#self.marine_wind_phrase,
#self.gust_phrase,
# WAVES
self.wave_withPeriods_phrase,
# Alternative:
#self.wave_phrase,
# Optional:
self.chop_phrase,
# SWELLS AND PERIODS
self.swell_phrase,
# Alternative:
#self.swell_phrase,
#self.period_phrase,
# WEATHER
self.weather_phrase,
],
}
def seasFlag(self, tree, node):
return 0
"""
nullSwell = """
def first_null_phrase_dict(self, tree, node):
dict = TextRules.TextRules.first_null_phrase_dict(self, tree, node)
dict["Swell"] = "light swells"
return dict
def null_phrase_dict(self, tree, node):
dict = TextRules.TextRules.null_phrase_dict(self, tree, node)
dict["Swell"] = "light"
return dict
"""
marine_wx = """
def pop_wx_lower_threshold(self, tree, node):
# Always report weather
return 20
"""
alternateTempTrends = """
def temp_trends_words(self, tree, node):
"Look for sharp temperature increases or decreases"
# Here is an alternative temp_trends method provided by <NAME>.
# If a 12-hour period, it looks at the 12, 3, and 5 o'clock grids
# (both am/pm depending on time of day) and verifies the trend (either
# going down or up) and then looks at the difference between the
# 5 o'clock grid and the MaxT/MinT grid. It only needs to look at the
# 5 o'clock grid since that is the last one in the 12-hour period,
# and if it is going to trip the threshold anywhere, it will be on that
# hour since if you have an unusual temperature trend, it will peak at
# that grid. If less than a 12-hour period, then the 3 times that it
# checks will be adjusted accordingly inside the smaller time range.
statDict = node.getStatDict()
timeRange = node.getTimeRange()
tStats = tree.stats.get("T", timeRange, node.getAreaLabel(),
mergeMethod="List")
if tStats is None:
return self.setWords(node, "")
tStats, subRange = tStats[0]
if tStats is None:
return self.setWords(node, "")
dayNight = self.getPeriod(timeRange,1)
trend_nlValue = self.temp_trend_nlValue(tree, node)
if dayNight == self.DAYTIME():
maxT = self.getStats(statDict, "MaxT")
if maxT is None:
return self.setWords(node, "")
maxT = self.getValue(maxT)
threshold = self.nlValue(trend_nlValue, maxT)
else:
minT = self.getStats(statDict, "MinT")
if minT is None:
return self.setWords(node, "")
minT = self.getValue(minT)
threshold = self.nlValue(trend_nlValue, minT)
if len(tStats) >= 6:
halfWay = len(tStats) - 6
quarterWay = len(tStats) - 3
endPoint = len(tStats) - 1
elif len(tStats) >= 4:
halfWay = 0
quarterWay = len(tStats) - 3
endPoint = len(tStats) - 1
elif len(tStats) == 1:
halfWay = 0
quarterWay = 0
endPoint = 0
else:
halfWay = 0
quarterWay = 1
endPoint = len(tStats) - 1
tempValue_halfWay, curHour1 = tStats[halfWay]
tempValue_quarterWay, curHour2 = tStats[quarterWay]
tempValue_endPoint, curHour3 = tStats[endPoint]
if tempValue_halfWay is None:
return self.setWords(node, "")
if tempValue_quarterWay is None:
return self.setWords(node, "")
| |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
#from fused_adam_local import FusedAdam
from apex.optimizers import FusedAdam
from apex.multi_tensor_apply import multi_tensor_applier
import amp_C
# Short aliases for the fused CUDA multi-tensor kernels exposed by apex's
# amp_C extension; used by BertLAMB below.
multi_tensor_l2norm = amp_C.multi_tensor_l2norm
lamb_compute_update = amp_C.multi_tensor_lamb_stage1_cuda
lamb_apply_update = amp_C.multi_tensor_lamb_stage2_cuda
scale = amp_C.multi_tensor_scale
def warmup_cosine(x, warmup=0.002):
    """Linear warmup, then a cosine curve over the remaining progress.

    Args:
        x: training progress as a fraction of t_total, normally in [0, 1].
        warmup: fraction of training used for linear warmup.
    """
    if x < warmup:
        return x/warmup
    # math.cos, not torch.cos: x is a plain Python float here and
    # torch.cos requires a Tensor argument.
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
    """Linear warmup, constant 1.0 afterwards.

    Args:
        x: training progress as a fraction of t_total.
        warmup: fraction of training used for linear warmup.
    """
    return x / warmup if x < warmup else 1.0
def warmup_linear(x, warmup=0.002):
    """Linear warmup, then linear decay reaching 0.0 at x = 1.

    Args:
        x: training progress as a fraction of t_total.
        warmup: fraction of training used for linear warmup.
    """
    if x < warmup:
        return x / warmup
    decayed = (x - 1.) / (warmup - 1.)
    return decayed if decayed > 0. else 0.
def warmup_poly(x, warmup=0.002, degree=0.5):
    """Linear warmup, then polynomial decay (1 - x) ** degree.

    The decay base is clamped at 0 so that x > 1 (possible if training
    runs past t_total) yields 0.0 instead of a complex number from a
    fractional power of a negative base.

    Args:
        x: training progress as a fraction of t_total.
        warmup: fraction of training used for linear warmup.
        degree: exponent of the polynomial decay.
    """
    if x < warmup:
        return x / warmup
    return max(1.0 - x, 0.0) ** degree
# Mapping from schedule name to warmup/decay function; looked up via the
# `schedule` hyper-parameter of BertAdam and BertLAMB.
SCHEDULES = {
    'warmup_cosine':warmup_cosine,
    'warmup_constant':warmup_constant,
    'warmup_linear':warmup_linear,
    'warmup_poly':warmup_poly,
}
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
return loss
class BertLAMB(Optimizer):
"""Implements BERT version of LAMB algorithm.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: LAMBs b1. Default: 0.9
b2: LAMBs b2. Default: 0.999
e: LAMBs epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum global norm for the gradients. Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_poly',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertLAMB, self).__init__(params, defaults)
self.step_count = 0
self.b1 = b1
self.b2 = b2
self.epsilon = e
self.max_global_grad_norm = max_grad_norm
self.learning_rate = lr
self.schedule = schedule
self.warmup = warmup
self.max_steps = t_total
self.updates_created = False
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step'] / group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
    def apply_gradients(self, dummy_overflow_buf, lr_scheduled, per_param_decay, grad_list, param_list, momentum,
                        velocity, update):
        """Run the fused two-stage LAMB update on one family of tensors.

        Called once per dtype family (fp32 / fp16) from step(). All list
        arguments are parallel: entry i of grad_list/param_list/momentum/
        velocity/update refers to the same parameter.

        NOTE(review): the positional argument order of the amp_C kernels
        below is exact and cannot be verified from this file alone —
        confirm against the apex amp_C bindings before changing anything.
        """
        # Compute global gradient norm
        global_grad_norm = multi_tensor_applier(
            multi_tensor_l2norm,
            dummy_overflow_buf,
            [grad_list],
            False)[0].item()
        # Compute per parameter norm (per_tensor=True returns per-tensor norms)
        param_norms = multi_tensor_applier(
            multi_tensor_l2norm,
            dummy_overflow_buf,
            [param_list],
            True)[1]
        # Compute LAMB update (stage 1): writes momentum/velocity/update in place
        multi_tensor_applier(
            lamb_compute_update,
            dummy_overflow_buf,
            [grad_list, param_list, momentum, velocity, update],
            torch.cuda.FloatTensor(per_param_decay),
            self.step_count,
            self.b1,
            self.b2,
            self.epsilon,
            global_grad_norm,
            self.max_global_grad_norm,
        )
        # Compute per-parameter update norm
        update_norms = multi_tensor_applier(
            multi_tensor_l2norm,
            dummy_overflow_buf,
            [update],
            True)[1]
        # Apply LAMB update on parameters (stage 2: trust-ratio scaling)
        multi_tensor_applier(
            lamb_apply_update,
            dummy_overflow_buf,
            [param_list, update],
            param_norms,
            update_norms,
            lr_scheduled,
        )
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
check = 1 # torch.norm(all_grads, 2)
grad_list = []
param_list = []
per_param_decay = []
momentum = []
velocity = []
fp16_grad_list = []
fp16_from_fp32_param_list = []
fp32_param_list = []
fp16_per_param_decay = []
fp16_momentum = []
fp16_velocity = []
if not self.updates_created:
self.update = []
self.fp16_update = []
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
# Keep step here for compatibility with earlier resume from checkpoint
state['step'] = 0
# Exponential moving average of gradient values
state['momentum'] = torch.zeros_like(p.data, dtype=torch.float32)
# Exponential moving average of squared gradient values
state['velocity'] = torch.zeros_like(p.data, dtype=torch.float32)
# fp32 master weights
if 'master_param' not in state.keys() and p.type() == 'torch.cuda.HalfTensor':
state['master_param'] = p.detach().clone().float()
# ensure these 3 are float tensors
if state['momentum'].type() | |
<filename>model.py
import os
import sys
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from modules import ISAB, PMA, SAB
class STN3d(nn.Module):
    """Spatial transformer that regresses a 3x3 input-alignment matrix.

    Input: point cloud of shape (B, 3, N).
    Output: per-sample (B, 3, 3) transform, biased towards the identity by
    adding eye(3) to the raw network output.
    """

    def __init__(self):
        super(STN3d, self).__init__()
        # Shared per-point MLP implemented as 1x1 convolutions.
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        # Regression head from the pooled global feature to the 9 matrix entries.
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn2 = nn.BatchNorm1d(128, eps=1e-03)
        self.bn3 = nn.BatchNorm1d(1024, eps=1e-03)
        self.bn4 = nn.BatchNorm1d(512, eps=1e-03)
        self.bn5 = nn.BatchNorm1d(256, eps=1e-03)

    def forward(self, x):
        """Return a (B, 3, 3) alignment matrix for x of shape (B, 3, N)."""
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        # Symmetric max pooling over the point axis -> global feature.
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)
        # Bias the prediction towards the identity transform. Building the
        # identity directly on x's device/dtype replaces the deprecated
        # Variable wrapper and the bare .cuda() transfer, which placed the
        # tensor on the default GPU even when x lived on another device.
        iden = torch.eye(3, dtype=x.dtype, device=x.device).view(1, 9).repeat(batchsize, 1)
        x = x + iden
        x = x.view(-1, 3, 3)
        return x
class STNkd(nn.Module):
    """Spatial transformer that regresses a k x k feature-alignment matrix.

    Input: features of shape (B, k, N).
    Output: per-sample (B, k, k) transform, biased towards the identity by
    adding eye(k) to the raw network output.
    """

    def __init__(self, k=64):
        super(STNkd, self).__init__()
        # Shared per-point MLP implemented as 1x1 convolutions.
        self.conv1 = torch.nn.Conv1d(k, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        # Regression head from the pooled global feature to k*k matrix entries.
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, k * k)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn2 = nn.BatchNorm1d(128, eps=1e-03)
        self.bn3 = nn.BatchNorm1d(1024, eps=1e-03)
        self.bn4 = nn.BatchNorm1d(512, eps=1e-03)
        self.bn5 = nn.BatchNorm1d(256, eps=1e-03)
        self.k = k

    def forward(self, x):
        """Return a (B, k, k) alignment matrix for x of shape (B, k, N)."""
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        # Symmetric max pooling over the point axis -> global feature.
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)
        # Identity bias built on x's device/dtype; replaces the deprecated
        # Variable wrapper and the bare .cuda() transfer (wrong device when
        # x is not on the default GPU).
        iden = torch.eye(self.k, dtype=x.dtype, device=x.device).view(1, self.k * self.k).repeat(batchsize, 1)
        x = x + iden
        x = x.view(-1, self.k, self.k)
        return x
def knn(x, k):
    """Return indices of the k nearest neighbours of every point.

    x has shape (B, C, N); the result has shape (B, N, k). Neighbours are
    ranked by (negated) squared Euclidean distance, so each point's own
    index appears first.
    """
    # -||xi - xj||^2 expanded as 2*xi.xj - ||xi||^2 - ||xj||^2.
    sq_norm = torch.sum(x ** 2, dim=1, keepdim=True)
    dot = torch.matmul(x.transpose(2, 1), x)
    neg_sq_dist = 2 * dot - sq_norm - sq_norm.transpose(2, 1)
    # Largest values of the negated distance == closest points.
    return neg_sq_dist.topk(k=k, dim=-1)[1]
def get_graph_feature(x, k=20, idx=None):
    """Build DGCNN-style edge features for each point.

    Parameters
    ----------
    x : tensor of shape (B, C, N)
        Per-point input features.
    k : int
        Number of neighbours per point.
    idx : tensor of shape (B, N, k), optional
        Precomputed neighbour indices; computed with knn(x, k) when None.

    Returns
    -------
    Tensor of shape (B, 2*C, N, k) holding (neighbour - centre, centre)
    feature pairs for every edge.
    """
    batch_size = x.size(0)
    num_points = x.size(2)
    x = x.view(batch_size, -1, num_points)
    if idx is None:
        idx = knn(x, k=k)  # (batch_size, num_points, k)
    # Bug fix: follow the input tensor's device instead of the hard-coded
    # torch.device('cuda'), which crashed on CPU-only runs and indexed with
    # a tensor on the wrong device when x lived on e.g. cuda:1.
    device = x.device
    # Offset each sample's indices so they address the flattened batch.
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
    idx = idx + idx_base
    idx = idx.view(-1)
    _, num_dims, _ = x.size()
    x = x.transpose(2, 1).contiguous()  # (B, N, C) so points are rows
    feature = x.view(batch_size * num_points, -1)[idx, :]
    feature = feature.view(batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
    # Channel layout after permute: first C channels are relative offsets,
    # last C channels are the (repeated) centre features.
    feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 1, 2).contiguous()
    return feature
class PointNetfeat(nn.Module):
    """PointNet feature extractor with an input STN and an optional
    64-d feature STN.

    forward returns (features, trans, trans_feat): a (B, 1024) global
    descriptor when global_feat is True, otherwise per-point features of
    shape (B, 1088, N) (global feature concatenated onto each point).
    trans_feat is None unless feature_transform is enabled.
    """

    def __init__(self, global_feat=True, feature_transform=False):
        super(PointNetfeat, self).__init__()
        self.stn = STN3d()
        self.conv1 = torch.nn.Conv1d(3, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.global_feat = global_feat
        self.feature_transform = feature_transform
        if self.feature_transform:
            self.fstn = STNkd(k=64)

    def forward(self, x):
        num_points = x.size()[2]
        # Align the raw points with the predicted 3x3 transform.
        trans = self.stn(x)
        x = torch.bmm(x.transpose(2, 1), trans).transpose(2, 1)
        x = F.relu(self.bn1(self.conv1(x)))
        if self.feature_transform:
            # Optionally align the 64-d point features as well.
            trans_feat = self.fstn(x)
            x = torch.bmm(x.transpose(2, 1), trans_feat).transpose(2, 1)
        else:
            trans_feat = None
        pointfeat = x
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        # Global descriptor via max pooling over the point axis.
        x = torch.max(x, 2, keepdim=True)[0].view(-1, 1024)
        if self.global_feat:
            return x, trans, trans_feat
        # Per-point output: broadcast the global feature back to each point.
        expanded = x.view(-1, 1024, 1).repeat(1, 1, num_points)
        return torch.cat([expanded, pointfeat], 1), trans, trans_feat
class MLPPool(nn.Module):
    """Sort-based pooling: sorts one axis, then shrinks the last axis with
    a shared MLP and flattens the result to (B, -1).

    NOTE(review): the Linear layers consume the *last* axis while the
    BatchNorm1d(num_points) layers normalise axis 1, so this module only
    works when the input's last axis equals num_in_features and axis 1
    equals num_points — confirm against the caller.
    """

    def __init__(self, num_in_features, num_out_features, num_points):
        super().__init__()
        self.num_points = num_points
        self.bn1 = nn.BatchNorm1d(num_points)
        self.bn2 = nn.BatchNorm1d(num_points)
        self.bn3 = nn.BatchNorm1d(num_points)
        # Each stage halves-ish the feature width: D -> D/2 -> D/4 -> D/8.
        self.linear1 = nn.Sequential(
            nn.Linear(num_in_features, num_in_features // 2),
            self.bn1,
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.linear2 = nn.Sequential(
            nn.Linear(num_in_features // 2, num_in_features // 4),
            self.bn2,
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.linear3 = nn.Sequential(
            nn.Linear(num_in_features // 4, num_in_features // 8),
            self.bn3,
            nn.LeakyReLU(negative_slope=0.2),
        )
        self.linear4 = nn.Linear(num_in_features // 8, num_out_features)

    def forward(self, x, sort_dim=2):
        batch = x.shape[0]
        # Sorting makes the result invariant to permutations along sort_dim.
        out = torch.sort(x, sort_dim)[0]
        for stage in (self.linear1, self.linear2, self.linear3, self.linear4):
            out = stage(out)
        return out.view(batch, -1)
# Reduces the last dimension: [B, D, N, K] -> [B, D, N].
class FSPOOL(nn.Module):
    """Featurewise sort pooling: sort the last axis per feature, then take
    a learned weighted sum over it.

    Parameters
    ----------
    d : size of the third-to-last input dimension (D).
    n : size of the last input dimension (K), the one being reduced.
    """

    def __init__(self, d, n):
        super().__init__()
        # One learned weight per (feature, sorted-rank) pair.
        self.weight = nn.Parameter(torch.zeros(d, n))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.weight)

    def forward(self, x):
        # Move D next to K so the (D, K) weight broadcasts, sort K ascending.
        ordered = torch.sort(x.permute(0, 2, 1, 3), dim=-1)[0]
        # Weighted sum over the sorted axis -> [B, N, D].
        pooled = torch.sum(self.weight * ordered, dim=-1)
        return pooled.permute(0, 2, 1)
class MAXPOOL(nn.Module):
    """Max pooling over the last dimension: [..., K] -> [...]."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        values, _ = torch.max(x, dim=-1)
        return values
class PointNet(nn.Module):
    """PointNet classifier with input/feature STNs and an optional separate
    rotation-prediction head (modules created only when args.rotation).

    forward(x, rotation=False) returns (logits, trans, trans_feat) where
    trans and trans_feat are the 3x3 and 64x64 alignment matrices.
    """

    def __init__(self, args, output_channels=40):
        super(PointNet, self).__init__()
        self.args = args
        self.stn = STN3d()
        self.fstn = STNkd(k=64)
        # Shared per-point MLP (1x1 convolutions).
        self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.conv3 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.conv4 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
        self.conv5 = nn.Conv1d(128, args.emb_dims, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn2 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn3 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn4 = nn.BatchNorm1d(128, eps=1e-03)
        self.bn5 = nn.BatchNorm1d(args.emb_dims)
        # Classification head.
        self.linear1 = nn.Linear(args.emb_dims, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512, eps=1e-03)
        self.dp1 = nn.Dropout(p=0.3)
        self.linear2 = nn.Linear(512, 256, bias=False)
        self.bn7 = nn.BatchNorm1d(256, eps=1e-03)
        self.dp2 = nn.Dropout(p=0.3)
        self.linear3 = nn.Linear(256, output_channels)
        if args.rotation:
            # Dedicated head used when predicting rotation bins.
            self.linear4 = nn.Linear(args.emb_dims, 512, bias=False)
            self.bn8 = nn.BatchNorm1d(512, eps=1e-03)
            self.dp3 = nn.Dropout(p=0.3)
            self.linear5 = nn.Linear(512, 256, bias=False)
            self.bn9 = nn.BatchNorm1d(256, eps=1e-03)
            self.dp4 = nn.Dropout(p=0.3)
            self.linear6 = nn.Linear(256, args.angles)

    def forward(self, x, rotation=False):
        # Align raw input points with the predicted 3x3 transform.
        trans = self.stn(x)
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans)
        x = x.transpose(2, 1)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        # Align 64-d features with the predicted 64x64 transform.
        trans_feat = self.fstn(x)
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans_feat)
        x = x.transpose(2, 1)
        x = F.relu(self.bn3(self.conv3(x)))
        x = F.relu(self.bn4(self.conv4(x)))
        x = F.relu(self.bn5(self.conv5(x)))
        # Bug fix: squeeze only the pooled length-1 axis. The previous bare
        # .squeeze() also dropped the batch axis when batch size was 1,
        # feeding a 1-D tensor into the Linear/BatchNorm head.
        x = F.adaptive_max_pool1d(x, 1).squeeze(-1)
        if not rotation:
            x = F.relu(self.bn6(self.linear1(x)))
            x = self.dp1(x)
            x = F.relu(self.bn7(self.linear2(x)))
            x = self.dp2(x)
            x = self.linear3(x)
        else:
            x = F.relu(self.bn8(self.linear4(x)))
            x = self.dp3(x)
            x = F.relu(self.bn9(self.linear5(x)))
            x = self.dp4(x)
            x = self.linear6(x)
        return x, trans, trans_feat
class PointNet_3(nn.Module):
    """PointNet variant that pools with MLPPool instead of max pooling and
    shares the MLP trunk between the classification head (linear3) and the
    rotation head (linear4, created only when args.rotation).

    forward(x, rotation=False) returns (logits, trans, trans_feat).
    """

    def __init__(self, args, output_channels=40):
        super(PointNet_3, self).__init__()
        self.args = args
        self.stn = STN3d()
        self.fstn = STNkd(k=64)
        # Shared per-point MLP (1x1 convolutions).
        self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.conv3 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.conv4 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
        self.conv5 = nn.Conv1d(128, args.emb_dims, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn2 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn3 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn4 = nn.BatchNorm1d(128, eps=1e-03)
        self.bn5 = nn.BatchNorm1d(args.emb_dims)
        # Shared MLP trunk.
        self.linear1 = nn.Linear(args.emb_dims, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512, eps=1e-03)
        self.dp1 = nn.Dropout(p=0.3)
        self.linear2 = nn.Linear(512, 256, bias=False)
        self.bn7 = nn.BatchNorm1d(256, eps=1e-03)
        self.dp2 = nn.Dropout(p=0.3)
        self.linear3 = nn.Linear(256, output_channels)
        self.pool = MLPPool(args.emb_dims, 1, args.num_points)
        if args.rotation:
            self.linear4 = nn.Linear(256, args.angles)

    def forward(self, x, rotation=False):
        batch_size = x.size(0)
        # Align raw points, then 64-d features, with the predicted transforms.
        trans = self.stn(x)
        x = torch.bmm(x.transpose(2, 1), trans).transpose(2, 1)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        trans_feat = self.fstn(x)
        x = torch.bmm(x.transpose(2, 1), trans_feat).transpose(2, 1)
        x = F.relu(self.bn3(self.conv3(x)))
        x = F.relu(self.bn4(self.conv4(x)))
        x = F.relu(self.bn5(self.conv5(x)))
        # Learned sort-pooling replaces the usual global max pooling.
        x = self.pool(x).view(batch_size, -1)
        x = self.dp1(F.relu(self.bn6(self.linear1(x))))
        x = self.dp2(F.relu(self.bn7(self.linear2(x))))
        head = self.linear4 if rotation else self.linear3
        return head(x), trans, trans_feat
class PointNet_2(nn.Module):
    """PointNet classifier sharing one MLP trunk between the classification
    head (linear3) and the rotation head (linear4, created only when
    args.rotation).

    forward(x, rotation=False) returns (logits, trans, trans_feat) where
    trans and trans_feat are the 3x3 and 64x64 alignment matrices.
    """

    def __init__(self, args, output_channels=40):
        super(PointNet_2, self).__init__()
        self.args = args
        self.stn = STN3d()
        self.fstn = STNkd(k=64)
        # Shared per-point MLP (1x1 convolutions).
        self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
        self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.conv3 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
        self.conv4 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
        self.conv5 = nn.Conv1d(128, args.emb_dims, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn2 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn3 = nn.BatchNorm1d(64, eps=1e-03)
        self.bn4 = nn.BatchNorm1d(128, eps=1e-03)
        self.bn5 = nn.BatchNorm1d(args.emb_dims)
        # Shared MLP trunk.
        self.linear1 = nn.Linear(args.emb_dims, 512, bias=False)
        self.bn6 = nn.BatchNorm1d(512, eps=1e-03)
        self.dp1 = nn.Dropout(p=0.3)
        self.linear2 = nn.Linear(512, 256, bias=False)
        self.bn7 = nn.BatchNorm1d(256, eps=1e-03)
        self.dp2 = nn.Dropout(p=0.3)
        self.linear3 = nn.Linear(256, output_channels)
        if args.rotation:
            self.linear4 = nn.Linear(256, args.angles)

    def forward(self, x, rotation=False):
        # Align raw input points with the predicted 3x3 transform.
        trans = self.stn(x)
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans)
        x = x.transpose(2, 1)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        # Align 64-d features with the predicted 64x64 transform.
        trans_feat = self.fstn(x)
        x = x.transpose(2, 1)
        x = torch.bmm(x, trans_feat)
        x = x.transpose(2, 1)
        x = F.relu(self.bn3(self.conv3(x)))
        x = F.relu(self.bn4(self.conv4(x)))
        x = F.relu(self.bn5(self.conv5(x)))
        # Bug fix: squeeze only the pooled length-1 axis. The previous bare
        # .squeeze() also dropped the batch axis when batch size was 1,
        # feeding a 1-D tensor into the Linear/BatchNorm trunk.
        x = F.adaptive_max_pool1d(x, 1).squeeze(-1)
        x = F.relu(self.bn6(self.linear1(x)))
        x = self.dp1(x)
        x = F.relu(self.bn7(self.linear2(x)))
        x = self.dp2(x)
        if not rotation:
            x = self.linear3(x)
        else:
            x = self.linear4(x)
        return x, trans, trans_feat
class PointNet_Jigsaw(nn.Module):
def __init__(self, args, output_channels=40):
super(PointNet_Jigsaw, self).__init__()
self.args = args
self.k = (args.k1)**3
self.stn = STN3d()
self.fstn = STNkd(k=64)
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.conv3 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.conv4 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
self.conv5 = nn.Conv1d(128, args.emb_dims, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64,eps=1e-03)
self.bn2 = nn.BatchNorm1d(64,eps=1e-03)
self.bn3 = nn.BatchNorm1d(64,eps=1e-03)
self.bn4 = nn.BatchNorm1d(128,eps=1e-03)
self.bn5 = nn.BatchNorm1d(args.emb_dims)
self.linear1 = nn.Linear(args.emb_dims, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512,eps=1e-03)
self.dp1 = nn.Dropout(p=0.3)
self.linear2 = nn.Linear(512, 256, bias=False)
self.bn7 = nn.BatchNorm1d(256,eps=1e-03)
self.dp2 = nn.Dropout(p=0.3)
self.linear3 = nn.Linear(256,output_channels)
self.conv6 = nn.Conv1d(64 + args.emb_dims, 512, 1, bias=False)
self.conv7 = nn.Conv1d(512, 256, 1, bias=False)
self.conv8 = nn.Conv1d(256, 128, 1, bias=False)
self.conv9 = | |
# <gh_stars>0
import numpy
import operator
import pickle
from ..General.units import conversion_facs_position
from .Classes.general import Coordinate,Energy
from .Classes.atomic_orbital import AO,Atom,l_orient
from .Classes.molecular_orbital import MO
from .Classes.excitation import Excitation
from ..General.UnitsManager import energy_units,position_units
from .Classes.mdinfo import MDinfo
# TODO: All coordinates should be position units managed
def read_gaussian_fchk(filename, **kwargs):
''' Reads usefull information from gaussian 09 fchk file - so far only for
restricted calculations
Parameters
----------
filename : str
Specifies the filename for the input file (including the path if needed)
**kwargs : dictionary
Specifies which density should be read from checkpoint file. If not present
ground state density is read and stored in variable mo.densmat_grnd and if
also excited state density is present it will be written in exct_dens
If non optimized excited state density is needed \n
**kwargs has to be specified as follows:
**{'CI_rho_density': True}
Returns
-------
struc : Structure class
Contains the information about molecule structure: coordinates,
atom types, charges... For more information look at the class documentation
ao : AO class
Contains the information about atomic orbitals: expansion coefficients
into gaussian orbitals, exponents of the functions in expansion, orbital
types... For more information look at the class documentation
mo : MO class
Contains the information about molecular orbitals: expansion coefficients
into atomic orbitals, energies of the orbitals, ground state density
matrix... For more information look at the class documentation
TotalEnergy : Energy class
Total ground state energy of the molecule
exct_dens : numpy array of float (dimension Nao_orient x Nao_orient)
Excited state density matrix. If not present in the fchk file it will
have zero length
hess_fchk : numpy array of float (dimension 3Natom x 3Natom)
Hessian matrix for the molecule in *Hartree/(Bohr^2)*. Second derivatives
of total energy in respect to coordinate displacements of individual
atoms. If not present in the fchk file it will have zero length
'''
from .Classes.structure import Structure,get_atom_symbol
fid = open(filename,'r') # Open the file
flines = fid.readlines() # Read the WHOLE file into RAM
fid.close() # Close the file
use_CI_rho_density=False
for key in list(kwargs.keys()):
if key=='CI_rho_density':
use_CI_rho_density=kwargs['CI_rho_density']
# Is this an unrestricted calculation?
has_beta = False
is_6D = False
is_10F = False
for line in flines:
if 'beta mo coefficients' in line.lower():
has_beta = True
if 'Pure/Cartesian d shells' in line:
is_6D = int(line.split()[-1]) == 1
if 'Pure/Cartesian f shells' in line:
is_10F = int(line.split()[-1]) == 1
cartesian_basis = (is_6D and is_10F)
# if spin is not None:
# if spin != 'alpha' and spin != 'beta':
# raise IOError('`spin=%s` is not a valid option' % spin)
# elif has_beta:
# print('Reading only molecular orbitals of spin %s.' % spin)
# else:
# raise IOError('The keyword `spin` is only supported for unrestricted calculations.')
restricted = (not has_beta)
# inicializace promennych
sec_flag = None
el_num = [0,0]
mo_i0 = {'alpha': 0, 'beta': 0}
what = 'alpha'
index = 0
at_num = 0
ao_num = 0
ao=AO()
struc=Structure()
mo=MO()
mo_spec=[]
gr_dens=[]
exct_dens=[]
hess_fchk=[]
# Set a counter for the AOs
basis_count = 0
num_cons=None
with energy_units('Ha'):
with position_units('Bohr'):
# Go through the file line by line
for il in range(len(flines)):
line = flines[il] # The current line as string
thisline = line.split() # The current line split into segments
# Check the file for keywords
if 'Number of alpha electrons' in line:
el_num[0] = int(thisline[5]) # pocet alfa el.
elif 'Number of beta electrons' in line:
el_num[1] = int(thisline[5]) # pocet beta el.
elif 'Number of basis functions' in line:
basis_number = int(thisline[5]) # pocet bazovych fci
elif 'Atomic numbers' in line:
sec_flag = 'geo_info'
index = 0
at_num = int(thisline[-1]) #pocet atomu
count = 0
ncharge=[]
elif 'Nuclear charges' in line:
sec_flag = 'geo_info'
index = 2
at_num = int(thisline[-1])
count = 0
elif 'Total Energy' in line:
TotalEnergy=Energy(float(thisline[-1]))
elif 'Current cartesian coordinates' in line:
at_num = int(thisline[-1])/3
sec_flag = 'geo_pos'
coor=Coordinate(None)
count = 0
xyz = []
elif 'Real atomic weights' in line:
sec_flag = 'at_wights'
at_num = int(thisline[-1])
count=0
mass=numpy.zeros(at_num,dtype='f8')
elif ('Shell types' in line) and (not ('Density' in line)):
sec_flag = 'ao_info'
index = 'type'
ao_num = int(thisline[-1])
count = 0
ao_types=[]
elif ('Number of primitives per shell' in line) and (not ('Density' in line)):
sec_flag = 'ao_info'
index = 'pnum'
ao_num = int(thisline[-1])
count = 0
ao_prim_number=[]
elif ('Shell to atom map' in line) and (not ('Density' in line)):
sec_flag = 'ao_info'
index = 'atom'
ao_num = int(thisline[-1])
count = 0
elif ('Primitive exponents' in line) and (not ('Density' in line)):
sec_flag = 'ao_exps'
ao_num = int(thisline[-1])
count = 0
index = 0
exps=[]
elif ('Contraction coefficients' in line) and (not ('P(S=P) Contraction coefficients' in line) and (not ('Density' in line))): #((not is_6D) and is_10F)
sec_flag = 'ao_coeffs'
ao_num = int(thisline[-1])
count = 0
index = 0
coeffs=[]
elif ('P(S=P) Contraction coefficients' in line) and (not ('Density' in line)):
sec_flag = 'ao_coeffs_p'
ao_num = int(thisline[-1])
count = 0
index = 0
index_all = 0
elif 'Density Number of contracted shells' in line:
sec_flag=None
elif 'Coordinates of each shell' in line:
sec_flag=None
elif 'Cartesian Force Constants' in line:
sec_flag='hessian'
num_cons=int(thisline[-1])
MatHess=numpy.zeros(num_cons)
count=0
elif 'Orbital Energies' in line:
sec_flag = 'mo_eorb'
mo_num = int(thisline[-1])
mo_i0[thisline[0].lower()] = len(mo_spec)
if restricted:
if el_num[0] == el_num[1]:
i = el_num[0]
occ = 2
else:
i = el_num[0 if 'Alpha' in line else 1]
occ = 1
else:
i = el_num[0 if 'Alpha' in line else 1]
occ = 1
for ii in range(mo_num):
mo_spec.append({'coeffs': numpy.zeros(basis_number,dtype='f8'), #basis_count),
'energy': 0.0,
'occ_num': float(occ if ii < i else 0),
'sym': '%i.1' % (ii+1),
'spin':thisline[0].lower()
})
elif 'MO coefficients' in line:
sec_flag = 'mo_coeffs'
count = 0
index = 0
mo_num = int(thisline[-1])
what = thisline[0].lower()
elif 'Total SCF Density' in line:
sec_flag = 'tot_scf_dens'
dens_num = int(thisline[-1])
if (basis_number*(basis_number+1)/2)!=dens_num:
raise IOError('basis_number does not represent number of basis functions')
gr_dens=numpy.zeros((basis_number,basis_number),dtype='f8')
denscount_line=0
denscount_row=0
elif 'Total CI Density' in line:
sec_flag = 'tot_ci_dens'
dens_num = int(thisline[-1])
if (basis_number*(basis_number+1)/2)!=dens_num:
raise IOError('basis_number does not represent number of basis functions')
exct_dens=numpy.zeros((basis_number,basis_number),dtype='f8')
denscount_line=0
denscount_row=0
elif ("Total CI Rho(1) Density" in line) and use_CI_rho_density:
# This density is more precise than
sec_flag = 'tot_scf_dens'
dens_num = int(thisline[-1])
if (basis_number*(basis_number+1)/2)!=dens_num:
raise IOError('basis_number does not represent number of basis functions')
gr_dens=numpy.zeros((basis_number,basis_number),dtype='f8')
denscount_line=0
denscount_row=0
elif sec_flag=='tot_scf_dens' and 'Charges' in line:
sec_flag = None
else:
if sec_flag == 'geo_info':
if index==2:
for ii in thisline:
ncharge.append(int(float(ii)))
count += 1
if count == at_num:
sec_flag = None
elif sec_flag == 'geo_pos':
for ii in thisline:
xyz.append(float(ii))
if len(xyz) == 3:
coor.add_coor(numpy.array(xyz,dtype='f8'))
xyz = []
count += 1
if count == at_num:
sec_flag = None
elif sec_flag == 'at_wights':
for ii in thisline:
mass[count]=float(ii)
count+=1
if count == at_num:
sec_flag = None
elif sec_flag == 'ao_info':
for ii in thisline:
# TODO: Correct changing ii - correct but not nice
ii = int(ii)
if index is 'type':
ao_types.append(ii)
if ii == -1:
ii='sp'
ao.type.append('s')
ao.type.append('p')
l = lquant['p']
basis_count += l_deg(l,cartesian_basis=True) # za p orbital z sp hybridizace
basis_count += 1 # za s orbital z sp hybridizace
elif ii == -2:
ii='5d'
ao.type.append('5d')
basis_count += l_deg(ii,cartesian_basis=True)
elif ii == -3:
ii='7f'
ao.type.append('7f')
basis_count += l_deg(ii,cartesian_basis=True)
else:
ao.type.append(orbit[abs(ii)])
ii = orbit[abs(ii)]
l = lquant[ii]
basis_count += l_deg(l,cartesian_basis=True)
#print(ii,l_deg(l,cartesian_basis=True),basis_count)
# if not cartesian_basis:
# for m in (range(0,l+1) if l != 1 else [1,0]):
# qc.ao_spherical.append([count,(l,m)])
# if m != 0:
# qc.ao_spherical.append([count,(l,-m)])
elif index is 'atom':
if ao_types[count]==-1:
ao.atom.append(Atom(get_atom_symbol(ncharge[ii-1]),ii-1)) # for sp orbital we have to add two atoms (one for s-orbital and one for p-orbital)
ao.atom.append(Atom(get_atom_symbol(ncharge[ii-1]),ii-1))
else:
ao.atom.append(Atom(get_atom_symbol(ncharge[ii-1]),ii-1))
elif index is 'pnum':
ao_prim_number.append(ii)
# qc.ao_spec[count][index] = ii
count += 1
if count == ao_num:
sec_flag = None
elif sec_flag == 'ao_exps':
for ii in thisline:
exps.append(float(ii))
count += 1
if count==ao_prim_number[index]:
if ao_types[index]==-1: # for sp orbital we have the same exponents
ao.exp.append(numpy.array(exps,dtype='f8'))
ao.exp.append(numpy.array(exps,dtype='f8'))
else:
ao.exp.append(numpy.array(exps,dtype='f8'))
index += 1
count | |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import isns_get_device_brief
import isns_get_discovery_domain
import isns_get_discovery_domain_set
import isns_get_vrf_forwarding
import isns_get_last_device_timestamp
import isns_get_server_role
class brocade_isns_ext(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-isns-ext - based on the path /brocade_isns_ext_rpc. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This management module is an extension to iSNS model for
- Defining RPCs to retrieve operational information of
the iSNS.
Glossary of the terms used:
---------------------------
ACC frame - Accept frame
iSNS - Internet Storage Name Service (iSNS) is a
protocol that provides internet name service
to the iSCSI and FC devices.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__isns_get_device_brief','__isns_get_discovery_domain','__isns_get_discovery_domain_set','__isns_get_vrf_forwarding','__isns_get_last_device_timestamp','__isns_get_server_role',)
_yang_name = 'brocade-isns-ext'
_rest_name = ''
_pybind_generated_by = 'container'
    def __init__(self, *args, **kwargs):
        """Initialise the generated container, resolving the path helper and
        extension methods from kwargs or the parent, then creating the six
        RPC member elements. An optional single positional argument supplies
        an object whose changed elements are copied into this instance."""
        # Resolve the XPath helper: explicit kwarg wins, then the parent's,
        # then disabled.
        path_helper_ = kwargs.pop("path_helper", None)
        if path_helper_ is False:
            self._path_helper = False
        elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
            self._path_helper = path_helper_
        elif hasattr(self, "_parent"):
            path_helper_ = getattr(self._parent, "_path_helper", False)
            self._path_helper = path_helper_
        else:
            self._path_helper = False
        # Resolve extension methods the same way: kwarg, parent, disabled.
        extmethods = kwargs.pop("extmethods", None)
        if extmethods is False:
            self._extmethods = False
        elif extmethods is not None and isinstance(extmethods, dict):
            self._extmethods = extmethods
        elif hasattr(self, "_parent"):
            extmethods = getattr(self._parent, "_extmethods", None)
            self._extmethods = extmethods
        else:
            self._extmethods = False
        # Instantiate each RPC leaf with its generated YANG metadata.
        self.__isns_get_device_brief = YANGDynClass(base=isns_get_device_brief.isns_get_device_brief, is_leaf=True, yang_name="isns-get-device-brief", rest_name="isns-get-device-brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
        self.__isns_get_discovery_domain = YANGDynClass(base=isns_get_discovery_domain.isns_get_discovery_domain, is_leaf=True, yang_name="isns-get-discovery-domain", rest_name="isns-get-discovery-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
        self.__isns_get_vrf_forwarding = YANGDynClass(base=isns_get_vrf_forwarding.isns_get_vrf_forwarding, is_leaf=True, yang_name="isns-get-vrf-forwarding", rest_name="isns-get-vrf-forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
        self.__isns_get_server_role = YANGDynClass(base=isns_get_server_role.isns_get_server_role, is_leaf=True, yang_name="isns-get-server-role", rest_name="isns-get-server-role", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
        self.__isns_get_last_device_timestamp = YANGDynClass(base=isns_get_last_device_timestamp.isns_get_last_device_timestamp, is_leaf=True, yang_name="isns-get-last-device-timestamp", rest_name="isns-get-last-device-timestamp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
        self.__isns_get_discovery_domain_set = YANGDynClass(base=isns_get_discovery_domain_set.isns_get_discovery_domain_set, is_leaf=True, yang_name="isns-get-discovery-domain-set", rest_name="isns-get-discovery-domain-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
        load = kwargs.pop("load", None)
        if args:
            # Copy-construct from a single source object.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Only elements that actually changed on the source are copied.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_isns_ext_rpc']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return []
    def _get_isns_get_device_brief(self):
        """
        Getter method for isns_get_device_brief, mapped from YANG variable /brocade_isns_ext_rpc/isns_get_device_brief (rpc)

        YANG Description: This RPC provides the iSNS device information for
        devices that have logged in.
        """
        # Name-mangled backing attribute populated in __init__ / the setter.
        return self.__isns_get_device_brief
    def _set_isns_get_device_brief(self, v, load=False):
        """
        Setter method for isns_get_device_brief, mapped from YANG variable /brocade_isns_ext_rpc/isns_get_device_brief (rpc)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_isns_get_device_brief is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_isns_get_device_brief() directly.

        YANG Description: This function provides the iSNS device information
        that have been logged in.
        """
        # Unwrap values that carry their underlying YANG type so they can be
        # re-coerced below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce the supplied value into the generated RPC type.
            t = YANGDynClass(v,base=isns_get_device_brief.isns_get_device_brief, is_leaf=True, yang_name="isns-get-device-brief", rest_name="isns-get-device-brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
        except (TypeError, ValueError):
            # Report a structured error describing the expected YANG type.
            raise ValueError({
                'error-string': """isns_get_device_brief must be of a type compatible with rpc""",
                'defined-type': "rpc",
                'generated-type': """YANGDynClass(base=isns_get_device_brief.isns_get_device_brief, is_leaf=True, yang_name="isns-get-device-brief", rest_name="isns-get-device-brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)""",
            })
        self.__isns_get_device_brief = t
        # Notify the parent hierarchy, if it tracks changes.
        if hasattr(self, '_set'):
            self._set()
    def _unset_isns_get_device_brief(self):
        # Reset the member back to a fresh, unset instance of its YANG type.
        self.__isns_get_device_brief = YANGDynClass(base=isns_get_device_brief.isns_get_device_brief, is_leaf=True, yang_name="isns-get-device-brief", rest_name="isns-get-device-brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
    def _get_isns_get_discovery_domain(self):
        """
        Getter method for isns_get_discovery_domain, mapped from YANG variable /brocade_isns_ext_rpc/isns_get_discovery_domain (rpc)

        YANG Description: This RPC provides iSNS discovery-domain
        information (the original generated docstring repeated the
        device-brief wording).
        """
        # Name-mangled backing attribute populated in __init__ / the setter.
        return self.__isns_get_discovery_domain
    def _set_isns_get_discovery_domain(self, v, load=False):
        """
        Setter method for isns_get_discovery_domain, mapped from YANG variable /brocade_isns_ext_rpc/isns_get_discovery_domain (rpc)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_isns_get_discovery_domain is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_isns_get_discovery_domain() directly.

        YANG Description: This function provides the iSNS device information
        that have been logged in.
        """
        # Unwrap values that carry their underlying YANG type so they can be
        # re-coerced below.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce the supplied value into the generated RPC type.
            t = YANGDynClass(v,base=isns_get_discovery_domain.isns_get_discovery_domain, is_leaf=True, yang_name="isns-get-discovery-domain", rest_name="isns-get-discovery-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)
        except (TypeError, ValueError):
            # Report a structured error describing the expected YANG type.
            raise ValueError({
                'error-string': """isns_get_discovery_domain must be of a type compatible with rpc""",
                'defined-type': "rpc",
                'generated-type': """YANGDynClass(base=isns_get_discovery_domain.isns_get_discovery_domain, is_leaf=True, yang_name="isns-get-discovery-domain", rest_name="isns-get-discovery-domain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)""",
            })
        self.__isns_get_discovery_domain = t
        # Notify the parent hierarchy, if it tracks changes.
        if hasattr(self, '_set'):
            self._set()
def _unset_isns_get_discovery_domain(self):
    """Reset isns_get_discovery_domain to a freshly constructed default node."""
    self.__isns_get_discovery_domain = YANGDynClass(
        base=isns_get_discovery_domain.isns_get_discovery_domain,
        is_leaf=True,
        yang_name="isns-get-discovery-domain",
        rest_name="isns-get-discovery-domain",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=False,
        extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}},
        namespace='urn:brocade.com:mgmt:brocade-isns-ext',
        defining_module='brocade-isns-ext',
        yang_type='rpc',
        is_config=True,
    )
def _get_isns_get_discovery_domain_set(self):
    """Return the isns_get_discovery_domain_set node, mapped from the YANG
    variable /brocade_isns_ext_rpc/isns_get_discovery_domain_set (rpc).

    YANG Description: This function provides the iSNS discovery domain set
    information that have been created in.
    """
    return self.__isns_get_discovery_domain_set
def _set_isns_get_discovery_domain_set(self, v, load=False):
    """Set isns_get_discovery_domain_set, mapped from the YANG variable
    /brocade_isns_ext_rpc/isns_get_discovery_domain_set (rpc).

    If this variable is read-only (config: false) in the source YANG file,
    then this setter is considered private; backends looking to populate the
    variable should call thisObj._set_isns_get_discovery_domain_set() directly.

    YANG Description: This function provides the iSNS discovery domain set
    information that have been created in.
    """
    # Values that carry a unified-type constructor are normalized first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        wrapped = YANGDynClass(
            v,
            base=isns_get_discovery_domain_set.isns_get_discovery_domain_set,
            is_leaf=True,
            yang_name="isns-get-discovery-domain-set",
            rest_name="isns-get-discovery-domain-set",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=False,
            extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}},
            namespace='urn:brocade.com:mgmt:brocade-isns-ext',
            defining_module='brocade-isns-ext',
            yang_type='rpc',
            is_config=True,
        )
    except (TypeError, ValueError):
        # Surface the generated-type signature so callers can see what was expected.
        raise ValueError({
            'error-string': """isns_get_discovery_domain_set must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=isns_get_discovery_domain_set.isns_get_discovery_domain_set, is_leaf=True, yang_name="isns-get-discovery-domain-set", rest_name="isns-get-discovery-domain-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)""",
        })
    self.__isns_get_discovery_domain_set = wrapped
    # Notify the containing object, when it supports change callbacks.
    if hasattr(self, '_set'):
        self._set()
def _unset_isns_get_discovery_domain_set(self):
    """Reset isns_get_discovery_domain_set to a freshly constructed default node."""
    self.__isns_get_discovery_domain_set = YANGDynClass(
        base=isns_get_discovery_domain_set.isns_get_discovery_domain_set,
        is_leaf=True,
        yang_name="isns-get-discovery-domain-set",
        rest_name="isns-get-discovery-domain-set",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=False,
        extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}},
        namespace='urn:brocade.com:mgmt:brocade-isns-ext',
        defining_module='brocade-isns-ext',
        yang_type='rpc',
        is_config=True,
    )
def _get_isns_get_vrf_forwarding(self):
    """Return the isns_get_vrf_forwarding node, mapped from the YANG
    variable /brocade_isns_ext_rpc/isns_get_vrf_forwarding (rpc).

    YANG Description: This function provides the iSNS esi timeout and
    loopback ip address.
    """
    return self.__isns_get_vrf_forwarding
def _set_isns_get_vrf_forwarding(self, v, load=False):
    """Set isns_get_vrf_forwarding, mapped from the YANG variable
    /brocade_isns_ext_rpc/isns_get_vrf_forwarding (rpc).

    If this variable is read-only (config: false) in the source YANG file,
    then this setter is considered private; backends looking to populate the
    variable should call thisObj._set_isns_get_vrf_forwarding() directly.

    YANG Description: This function provides the iSNS esi timeout and
    loopback ip address.
    """
    # Values that carry a unified-type constructor are normalized first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        wrapped = YANGDynClass(
            v,
            base=isns_get_vrf_forwarding.isns_get_vrf_forwarding,
            is_leaf=True,
            yang_name="isns-get-vrf-forwarding",
            rest_name="isns-get-vrf-forwarding",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=False,
            extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}},
            namespace='urn:brocade.com:mgmt:brocade-isns-ext',
            defining_module='brocade-isns-ext',
            yang_type='rpc',
            is_config=True,
        )
    except (TypeError, ValueError):
        # Surface the generated-type signature so callers can see what was expected.
        raise ValueError({
            'error-string': """isns_get_vrf_forwarding must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=isns_get_vrf_forwarding.isns_get_vrf_forwarding, is_leaf=True, yang_name="isns-get-vrf-forwarding", rest_name="isns-get-vrf-forwarding", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}}, namespace='urn:brocade.com:mgmt:brocade-isns-ext', defining_module='brocade-isns-ext', yang_type='rpc', is_config=True)""",
        })
    self.__isns_get_vrf_forwarding = wrapped
    # Notify the containing object, when it supports change callbacks.
    if hasattr(self, '_set'):
        self._set()
def _unset_isns_get_vrf_forwarding(self):
    """Reset isns_get_vrf_forwarding to a freshly constructed default node."""
    self.__isns_get_vrf_forwarding = YANGDynClass(
        base=isns_get_vrf_forwarding.isns_get_vrf_forwarding,
        is_leaf=True,
        yang_name="isns-get-vrf-forwarding",
        rest_name="isns-get-vrf-forwarding",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=False,
        extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'isns-show-action-point'}},
        namespace='urn:brocade.com:mgmt:brocade-isns-ext',
        defining_module='brocade-isns-ext',
        yang_type='rpc',
        is_config=True,
    )
def _get_isns_get_last_device_timestamp(self):
    """Return the isns_get_last_device_timestamp node, mapped from the YANG
    variable /brocade_isns_ext_rpc/isns_get_last_device_timestamp (rpc).

    YANG Description: This function provides login or logout time of last
    isns device registered or deregistered respectively.
    """
    return self.__isns_get_last_device_timestamp
def _set_isns_get_last_device_timestamp(self, v, load=False):
"""
Setter method for isns_get_last_device_timestamp, mapped from YANG variable /brocade_isns_ext_rpc/isns_get_last_device_timestamp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_isns_get_last_device_timestamp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_isns_get_last_device_timestamp() directly.
YANG Description: This function provides login or logout time of last isns device registered or deregistered respectively
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=isns_get_last_device_timestamp.isns_get_last_device_timestamp, is_leaf=True, yang_name="isns-get-last-device-timestamp", rest_name="isns-get-last-device-timestamp", | |
4987 3163413 4987
3196064 3373 3197400 3373
3206308 3373 3207112 3373
3217753 13193 3218864 13193
3230901 13193 3231986 13193
3237244 167 3238306 167
3253064 167 3254003 167
3253064 167 3254147 167
3266552 167 3267780 167
3276436 9498 3277574 9498
3278061 9498 3279393 9498
3288266 9498 3289224 9498
3300144 9498 3301551 9498
3302088 9498 3303206 9498
3305475 4410 3306613 4410
3310538 4410 3311461 4410
3356456 12645 3357367 12645
3357952 12645 3359049 12645
3361120 12645 3361996 12645
3388953 236 3389951 236
3402926 236 3403845 236
3409552 2547 3410849 2547
3427256 2547 3428383 2547
3430831 2547 3431781 2547
3437543 2547 3438481 2547
3445630 2547 3446937 2547
3447548 16950 3448560 16950
3456010 16950 3456962 16950
3456010 16950 3457014 16950
3463494 16950 3464768 16950
3475440 19391 3476516 19391
3482080 19391 3483340 19391
3539080 340 3540286 340
3544358 340 3545584 340
3564901 7644 3566221 7644
3575690 7644 3576913 7644
3579286 7644 3580693 7644
3594195 7644 3595312 7644
3637407 5038 3638222 5038
3643977 5038 3645296 5038
3665930 16988 3667050 16988
3679513 16988 3680666 16988
3689350 16988 3690418 16988
3689350 16988 3690588 16988
3699669 49 3700680 49
3715351 49 3716597 49
3734906 4457 3735912 4457
3754067 2478 3755198 2478
3754067 2478 3755254 2478
3767437 2478 3768299 2478
3780485 6014 3781464 6014
3792826 6014 3793915 6014
3809249 6014 3810156 6014
3818979 2029 3820084 2029
3848277 2029 3849315 2029
3861297 7556 3862269 7556
3864259 7556 3865205 7556
3872248 7556 3873529 7556
3884900 9436 3885999 9436
3903356 9436 3904181 9436
3911498 9436 3912552 9436
3914828 9436 3916008 9436
3935761 7764 3936909 7764
3944512 7764 3945651 7764
3954088 4499 3955147 4499
3966465 4499 3967899 4499
3986583 6686 3987888 6686
4012563 6686 4013464 6686
4012563 6686 4013618 6686
4017126 6171 4018190 6171
4018789 6171 4019838 6171
4028739 6171 4029902 6171
4043843 6171 4044957 6171
4050571 6171 4051705 6171
4074003 367 4075014 367
4093867 10013 4095116 10013
4103452 10013 4104256 10013
4114729 10013 4115678 10013
4139622 37 4140486 37
4171103 226 4171987 226
4196020 8346 4197216 8346
4223914 5117 4224944 5117
4248789 17687 4249886 17687
4248789 17687 4250074 17687
4255681 17687 4256849 17687
4258959 17687 4260192 17687
4260939 17687 4262136 17687
4293610 2248 4294425 2248
4308288 11925 4309481 11925
4323405 11925 4324428 11925
4339379 3392 4340292 3392
4367646 3392 4368912 3392
4425649 9339 4426524 9339
4432219 9339 4433469 9339
4434158 9339 4435102 9339
4438392 9339 4439718 9339
4453937 78 4454916 78
4461946 78 4462942 78
4483130 16119 4484191 16119
4494812 16119 4495885 16119
4500855 8411 4502178 8411
4500855 8411 4502246 8411
4507302 8411 4508619 8411
4517447 8411 4518368 8411
4532447 8411 4533452 8411
4535381 8411 4536365 8411
4549404 9597 4550558 9597
4565412 9597 4566499 9597
4591921 2158 4593329 2158
4632327 2213 4633493 2213
4654758 4467 4655632 4467
4670319 5987 4671447 5987
4719917 58 4721156 58
4740183 2166 4741016 2166
4747898 2166 4749094 2166
4747898 2166 4749130 2166
4774801 2003 4775949 2003
4804770 10032 4805901 10032
4808265 10032 4809077 10032
4833606 15153 4834702 15153
4841706 15153 4842959 15153
4845117 15153 4846185 15153
4862844 11313 4863935 11313
4891746 390 4892964 390
4891746 390 4892978 390
4895201 390 4896263 390
4910448 390 4911918 390
4922897 5098 4923825 5098
4963178 119 4964536 119
4980071 7785 4981098 7785
4986318 7785 4987710 7785
5002741 7785 5003678 7785
5010519 7785 5011643 7785
5013807 7785 5014780 7785
5039031 5129 5039918 5129
5057161 3400 5058187 3400
5086009 157 5087035 157
5092288 157 5093356 157
5105306 157 5106427 157
5146026 6140 5146891 6140
5185225 274 5186603 274
5193443 274 5194501 274
5199734 274 5200862 274
5229448 16162 5230519 16162
5232378 16162 5233596 16162
5237073 16162 5238399 16162
5248071 16162 5249141 16162
5268411 4351 5269772 4351
5271841 4351 5272802 4351
5276683 4351 5277946 4351
5295633 16345 5296951 16345
5307269 16345 5308587 16345
5322118 16345 5323134 16345
5325180 16345 5326259 16345
5326732 2436 5327890 2436
5360696 5030 5361929 5030
5360696 5030 5362011 5030
5367270 5030 5367989 5030
5373652 5030 5374492 5030
5399842 14342 5401129 14342
5406256 14342 5407028 14342
5407578 14342 5408494 14342
5414107 14342 5415287 14342
5462575 4306 5463747 4306
5472967 2025 5473870 2025
5487452 2025 5488903 2025
5496923 18323 5497828 18323
5516054 18323 5517247 18323
5532555 6713 5533651 6713
5542756 6713 5543829 6713
5566894 13377 5567780 13377
5568357 13377 5569516 13377
5570087 13377 5571337 13377
5601430 298 5602862 298
5611813 298 5612886 298
5617928 298 5619296 298
5624610 12699 5625742 12699
5661574 11908 5662544 11908
5669510 11908 5670668 11908
5690835 2299 5691836 2299
5699282 2299 5700276 2299
5720659 9323 5721718 9323
5725577 9323 5726748 9323
5730518 9323 5731514 9323
5762425 17009 5763743 17009
5820556 22818 5821492 22818
5842783 22818 5843741 22818
5855770 2041 5856574 2041
5858677 2041 5859609 2041
5861804 2041 5862776 2041
5865213 2041 5866435 2041
5868735 2041 5869721 2041
5884474 3399 5885320 3399
5890247 3399 5891545 3399
5895564 3399 5896888 3399
5898860 3399 5900094 3399
5912248 3358 5913294 3358
5916619 3358 5917661 3358
5918293 3358 5919201 3358
5918293 3358 5919347 3358
5919789 3358 5920791 3358
5937241 3358 5938355 3358
5941990 12775 5943086 12775
5999741 5076 6000711 5076
6004543 5076 6005875 5076
6012328 3422 6013585 3422
6014239 3422 6015463 3422
6019502 3422 6020491 3422
6021095 3422 6022353 3422
6090366 8288 6091736 8288
6124253 2252 6125409 2252
6151625 5963 6152786 5963
6182460 5963 6183508 5963
6197740 299 6198748 299
6216361 299 6217649 299
6226368 13346 6227351 13346
6228029 13346 6229193 13346
6231424 13346 6232652 13346
6243578 13346 6244894 13346
6269220 7677 6270614 7677
6271221 7677 6272569 7677
6274572 7677 6275745 7677
6281439 7677 6282630 7677
6323824 3229 6324905 3229
6333605 3229 6334750 3229
6349311 3278 6350386 3278
6370764 3278 6372124 3278
6396343 3236 6397354 3236
6396343 3236 6397424 3236
6399763 3236 6400866 3236
6412814 3236 6413767 3236
6424125 21085 6425141 21085
6433768 21085 6434939 21085
6435388 21085 6436564 21085
6437277 21085 6438363 21085
6440395 21085 6441567 21085
6447135 11203 6448294 11203
6471307 11203 6472523 11203
6479410 21908 6480622 21908
6479410 21908 6480794 21908
6496103 21908 6497377 21908
6496103 21908 6497429 21908
6508044 21908 6509163 21908
6521606 15076 6522620 15076
6523233 15076 6524294 15076
6526288 15076 6527265 15076
6531031 15076 6531869 15076
6559535 3269 6560552 3269
6572555 3269 6573655 3269
6626616 8273 6627779 8273
6634650 8273 6635930 8273
6644202 8273 6645262 8273
6645871 20054 6646917 20054
6664019 20054 6665008 20054
6671959 9594 6672879 9594
6673528 9594 6674594 9594
6689788 9594 6690725 9594
6700759 3407 6702198 3407
6705628 3407 6706530 3407
6731253 3407 6732096 3407
6749738 2327 6750942 2327
6758029 2327 6759258 2327
6783974 17899 6785420 17899
6788821 17899 6790000 17899
6798861 17899 6799699 17899
6804015 17899 6804843 17899
6808549 17899 6809305 17899
6828282 18297 6829369 18297
6838611 9405 6839689 9405
6843405 9405 6844337 9405
6854812 9405 6855999 9405
6854812 9405 6856155 9405
6858357 9405 6859359 9405
6870207 9405 6871195 9405
6876718 15212 6877852 15212
6879719 15212 6880734 15212
6881146 15212 6882284 15212
6906554 10157 6907741 10157
6919438 10157 6920629 10157
6926151 10157 6927656 10157
6929922 2037 6930996 2037
6947421 2037 6948233 2037
6979017 6146 6979845 6146
6981986 6146 6983057 6146
6985238 6146 6986117 6146
7000941 10014 7002136 10014
7005890 10014 7006850 10014
7012050 10014 7013307 10014
7021624 10014 7022586 10014
7021624 10014 7022656 10014
7024858 10014 7026242 10014
7046013 16142 7047334 16142
7051251 16142 7052347 16142
7081825 14388 7082880 14388
7085009 14388 7085760 14388
7085009 14388 7085814 14388
7091094 14388 7092272 14388
7100658 15206 7101575 15206
7109968 15206 7111625 15206
7124748 15206 7125775 15206
7127866 16155 7128956 16155
7150066 16155 7151029 16155
7189433 16977 7190417 16977
7199200 8468 7200265 8468
7207908 8468 7208787 8468
7217446 8468 7218617 8468
7217446 8468 7218623 8468
7220646 8468 7221693 8468
7225595 8468 7226577 8468
7256884 6794 7258192 6794
7267566 6794 7268899 6794
7269356 6794 7270554 6794
7302335 96 7303302 96
7310537 96 7311926 96
7345519 141 7346523 141
7361574 7592 7362854 7592
7368176 7592 7369290 7592
7379686 7592 7380740 7592
7379686 7592 7380788 7592
7382790 7592 7383859 7592
7389524 | |
flag (f), or unflag (u) a location?`")
try:
msg = await self.client.wait_for(
"message", check=lambda m: m.channel == ctx.channel and m.author == ctx.author, timeout=120
)
except TimeoutError:
return await self.gameIdle(ctx, game, True)
decision = msg.content
if decision.casefold() == "quit" or decision.casefold() == "exit" or decision.casefold() == "stop":
return game.revealDots()
if decision.casefold() == "time":
m, s = game.getTime()
return await funcs.sendTime(ctx, m, s)
yy = await self.rowOrCol(ctx, game, True, True)
if yy == "quit" or yy is None:
return
else:
yy = int(yy)
xx = await self.rowOrCol(ctx, game, False, True)
if xx == "quit" or xx is None:
return
else:
xx = int(xx)
if decision.casefold() == "f" or decision.casefold() == "flag":
if game.getDispboard()[yy][xx] != ".":
if game.getDispboard()[yy][xx] == "F":
game.getDispboard()[yy][xx] = "."
else:
await ctx.send(embed=funcs.errorEmbed(None, "This location has already been revealed before."))
else:
game.getDispboard()[yy][xx] = "F"
elif decision.casefold() == "u" or decision.casefold() == "unflag":
if game.getDispboard()[yy][xx] != "F":
await ctx.send(embed=funcs.errorEmbed(None, "This location is not flagged."))
else:
game.getDispboard()[yy][xx] = "."
elif decision.casefold() == "r" or decision.casefold() == "reveal":
if game.getDispboard()[yy][xx] != ".":
if game.getDispboard()[yy][xx] == "F":
await ctx.send("`Watch out, you have previously flagged this location before!`")
else:
await ctx.send(embed=funcs.errorEmbed(None, "This location has already been revealed before."))
else:
game.uncoverDots(xx, yy)
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(name="minesweeper", description="Play Minesweeper.", aliases=["ms", "mines"])
async def minesweeper(self, ctx):
    """Drive a full Minesweeper session in the invoking channel.

    Locks the channel via self.gameChannels for the duration of the game,
    then reports win/loss, attempt count and elapsed time.
    """
    if await self.checkGameInChannel(ctx):
        return
    await ctx.send("**Welcome to Minesweeper. Input `time` to see total elapsed time, or `quit` to quit the game.**")
    self.gameChannels.append(ctx.channel.id)
    game = games.Minesweeper()
    won = False
    # Re-render the board and process one action per iteration until the game ends.
    while not game.getGameEnd():
        attempt = game.getAttempts() + 1
        await ctx.send(f"```Attempt {attempt:,} for {ctx.author.name}. {game.displayBoard()}```")
        await self.gameOptions(ctx, game)
        won = game.winLose()
    await ctx.send(f"```{game.displayBoard()}```")
    minutes, seconds = game.getTime()
    outcome = "won" if won else "lost"
    await ctx.send(
        f"```You have {outcome} Minesweeper!\n\nTotal attempts: {game.getAttempts():,}"
        f"\n\nThanks for playing, {ctx.author.name}!```"
    )
    await funcs.sendTime(ctx, minutes, seconds)
    self.gameChannels.remove(ctx.channel.id)
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(name="battleship", description="Play Battleship.", aliases=["bs", "battleshit"])
async def battleship(self, ctx):
    """Drive a full Battleship session in the invoking channel.

    Prompts for a row and a column each turn until every ship is found or
    the player quits, then reports the result and elapsed time.
    """
    if await self.checkGameInChannel(ctx):
        return
    await ctx.send("**Welcome to Battleship. Input `time` to see total elapsed time, or `quit` to quit the game.**")
    self.gameChannels.append(ctx.channel.id)
    game = games.Battleship()
    while game.getShipcount():
        attempt = game.getAttempts() + 1
        await ctx.send(f"```Attempt {attempt:,} for {ctx.author.name}. {game.displayBoard()}```")
        ships = game.getShipcount()
        await ctx.send(f"`{ctx.author.name} has {ships} ship{'' if ships == 1 else 's'} left to find.`")
        # Row selection: "quit" reveals the board and ends; None re-prompts.
        row = await self.rowOrCol(ctx, game, True, False)
        if row == "quit":
            await ctx.send(f"```{game.displayBoard(True)}```")
            break
        if row is None:
            continue
        row = int(row)
        # Column selection follows the same protocol.
        col = await self.rowOrCol(ctx, game, False, False)
        if col == "quit":
            await ctx.send(f"```{game.displayBoard(True)}```")
            break
        if col is None:
            continue
        col = int(col)
        await ctx.send(f"`{ctx.author.name} has {game.takeTurn(row, col)}.`")
    minutes, seconds = game.getTime()
    result = "won" if game.getWonBool() else "lost"
    await ctx.send(f"```You have {result} Battleship!\n\nTotal attempts: {game.getAttempts():,}"
                   f"\n\nThanks for playing, {ctx.author.name}!```")
    await funcs.sendTime(ctx, minutes, seconds)
    self.gameChannels.remove(ctx.channel.id)
def playerOneAndTwo(self, ctx, user):
    """Resolve the two participants for a two-player game.

    The command invoker is randomly assigned to the player-one or
    player-two seat; the opposing seat goes to ``user`` when it is the
    invoker or a human member of the current guild, otherwise to the bot.

    :param ctx: command invocation context; ctx.author takes one seat
    :param user: mentioned opponent, the bot's own user, or None
    :return: tuple (player1, player2, computer1, computer2) — the player
        slots hold users (None for the bot's seat) and the computer flags
        mark which seat the bot occupies
    :raises ValueError: if ``user`` is a foreign bot or not in this guild
    """
    computer1, computer2 = False, False
    player1, player2 = None, None
    # Coin flip: which seat does the invoker take?
    isplayerone = funcs.oneIn(2)
    if isplayerone:
        player1 = ctx.author
    else:
        player2 = ctx.author
    if user == self.client.user or not user:
        # No (human) opponent mentioned — the bot fills the open seat.
        if isplayerone:
            computer2 = True
        else:
            computer1 = True
    elif user == ctx.author or ctx.guild and not user.bot and user in ctx.guild.members:
        # Self-play, or a human member of this guild.
        if isplayerone:
            player2 = user
        else:
            player1 = user
    else:
        # ValueError instead of bare Exception (still caught by callers'
        # `except Exception`); same user-facing message.
        raise ValueError("Invalid user.")
    return player1, player2, computer1, computer2
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(name="tictactoe", description="Play Tic-Tac-Toe. Mention someone to play with them.",
                  aliases=["ttt", "tictac", "noughtsandcrosses", "nc"], usage="[@mention]")
async def tictactoe(self, ctx, *, user: User=None):
    """Drive a Tic-Tac-Toe session against another member or the bot."""
    if await self.checkGameInChannel(ctx):
        return
    try:
        player1, player2, computer1, computer2 = self.playerOneAndTwo(ctx, user)
    except Exception as ex:
        return await ctx.send(embed=funcs.errorEmbed(None, str(ex)))
    self.gameChannels.append(ctx.channel.id)
    game = games.TicTacToe(player1=player1, player2=player2)
    if player1 == player2:
        intro = f"Both **Player 1 ({game.CROSS})** and **Player 2 ({game.NOUGHT})** are {player1.mention}."
    else:
        first = 'me' if computer1 else player1.mention
        second = 'me' if computer2 else player2.mention
        intro = f"**Player 1 ({game.CROSS})** is {first}.\n**Player 2 ({game.NOUGHT})** is {second}."
    await ctx.send(f"**Welcome to Tic-Tac-Toe. Input `quit` to quit the game.**\n\n{intro}")
    await ctx.send(funcs.formatting(game.displayBoard(numbers=True)))
    if computer1:
        # The bot opens with a random slot when it holds the first seat.
        game.move(randint(1, 9))
        await ctx.send(funcs.formatting(game.displayBoard()))
    while game.getEmptySlots():
        mover = game.getCurrentPlayer()
        await ctx.send(f"**[{mover.getPlayerType()}]** `It is {mover.getPlayer().name}'s turn! "
                       "Please select a slot number between 1-9.`")
        try:
            reply = await self.client.wait_for(
                "message", check=lambda m: m.channel == ctx.channel and m.author == mover.getPlayer(), timeout=120
            )
        except TimeoutError:
            await ctx.send(f"`{mover.getPlayer().name} has left Tic-Tac-Toe for idling for too long. Game over!`")
            break
        if reply.content.casefold() == "quit":
            await ctx.send(f"`{mover.getPlayer().name} has left Tic-Tac-Toe. Game over!`")
            break
        try:
            game.move(reply.content)
        except Exception as ex:
            await ctx.send(embed=funcs.errorEmbed(None, str(ex)))
            continue
        await ctx.send(funcs.formatting(game.displayBoard()))
        if game.getWinner():
            try:
                winner = game.getWinner().getPlayer().name + " wins"
            except:
                # The bot's player slot has no user behind it.
                winner = "I win"
            await ctx.send(f"**[{game.getWinner().getPlayerType()}]** `{winner}! Game over!`")
            break
    if not game.getEmptySlots() and not game.getWinner():
        await ctx.send("`Draw! Game over!`")
    minutes, seconds = game.getTime()
    await funcs.sendTime(ctx, minutes, seconds)
    self.gameChannels.remove(ctx.channel.id)
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(name="connectfour", description="Play Connect Four. Mention someone to play with them.",
                  aliases=["c4", "connect4", "4inarow", "fourinarow", "4", "bitconnect", "four"], usage="[@mention]")
async def connectfour(self, ctx, *, user: User=None):
    """Drive a Connect Four session against another member or the bot."""
    if await self.checkGameInChannel(ctx):
        return
    try:
        player1, player2, computer1, computer2 = self.playerOneAndTwo(ctx, user)
    except Exception as ex:
        return await ctx.send(embed=funcs.errorEmbed(None, str(ex)))
    self.gameChannels.append(ctx.channel.id)
    game = games.ConnectFour(player1=player1, player2=player2)
    if player1 == player2:
        intro = f"Both **Player 1 ({game.RED})** and **Player 2 ({game.YELLOW})** are {player1.mention}."
    else:
        first = 'me' if computer1 else player1.mention
        second = 'me' if computer2 else player2.mention
        intro = f"**Player 1 ({game.RED})** is {first}.\n**Player 2 ({game.YELLOW})** is {second}."
    await ctx.send(f"**Welcome to Connect Four. Input `quit` to quit the game.**\n\n{intro}")
    if computer1:
        # The bot opens with a random column when it holds the first seat.
        game.insert(randint(1, 7))
    await ctx.send(embed=Embed(title="Connect Four", description=game.displayBoard()))
    while game.getEmptySlots():
        mover = game.getCurrentPlayer()
        await ctx.send(f"**[{mover.getPlayerType()}]** `It is {mover.getPlayer().name}'s turn! "
                       "Please select a column number between 1-7.`")
        try:
            reply = await self.client.wait_for(
                "message", check=lambda m: m.channel == ctx.channel and m.author == mover.getPlayer(), timeout=120
            )
        except TimeoutError:
            await ctx.send(f"`{mover.getPlayer().name} has left Connect Four for idling for too long. Game over!`")
            break
        if reply.content.casefold() == "quit":
            await ctx.send(f"`{mover.getPlayer().name} has left Connect Four. Game over!`")
            break
        try:
            game.insert(reply.content)
        except Exception as ex:
            await ctx.send(embed=funcs.errorEmbed(None, str(ex)))
            continue
        await ctx.send(embed=Embed(title="Connect Four", description=game.displayBoard()))
        if game.getWinner():
            try:
                winner = game.getWinner().getPlayer().name + " wins"
            except:
                # The bot's player slot has no user behind it.
                winner = "I win"
            await ctx.send(f"**[{game.getWinner().getPlayerType()}]** `{winner}! Game over!`")
            break
    if not game.getEmptySlots() and not game.getWinner():
        await ctx.send("`Draw! Game over!`")
    minutes, seconds = game.getTime()
    await funcs.sendTime(ctx, minutes, seconds)
    self.gameChannels.remove(ctx.channel.id)
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(name="rps", description="Play Rock Paper Scissors.", hidden=True,
                  aliases=["rockpaperscissors", "rsp", "psr", "prs", "srp", "spr"])
async def rps(self, ctx):
    """Play Rock Paper Scissors against the bot until a win or loss.

    Ties re-prompt the player; idling for 60 seconds ends the session.
    The channel is locked via self.gameChannels for the duration.
    """
    if await self.checkGameInChannel(ctx):
        return
    self.gameChannels.append(ctx.channel.id)
    while True:
        aic = choice(["Rock", "Paper", "Scissors"])
        # Each list is ordered so the choice losing to the bot's pick sits
        # below it and the winning choice sits above it.
        aiclist = ["Scissors", "Rock", "Paper"] if aic == "Rock" \
            else ["Rock", "Paper", "Scissors"] if aic == "Paper" \
            else ["Paper", "Scissors", "Rock"]
        listindex = aiclist.index(aic)
        await ctx.send("`Rock, paper, or scissors?`")
        try:
            msg = await self.client.wait_for(
                "message", check=lambda m: m.channel == ctx.channel and m.author == ctx.author, timeout=60
            )
        except TimeoutError:
            self.gameChannels.remove(ctx.channel.id)
            # BUGFIX: user-facing typo "feft" -> "left".
            return await ctx.send(f"`{ctx.author.name} has left Rock Paper Scissors for idling for too long.`")
        if msg.content.casefold().startswith(("r", "p", "s")):
            answer = "Rock" if msg.content.casefold().startswith("r") else "Paper" \
                if msg.content.casefold().startswith("p") else "Scissors"
        else:
            await ctx.send(embed=funcs.errorEmbed(None, "Invalid input."))
            continue
        await ctx.send(f"`{self.client.user.name} chose: {aic}`")
        if answer == aic:
            await ctx.send(f"`It's a tie! {ctx.author.name} gets to play again.`")
            continue
        else:
            # Index below the bot's pick loses; above it wins.
            getindex = aiclist.index(answer)
            await ctx.send(f"`{ctx.author.name} has {'lost' if getindex < listindex else 'won'} Rock Paper Scissors!`")
            self.gameChannels.remove(ctx.channel.id)
            return
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.command(name="hangman", description="Play Hangman.", aliases=["hm", "hangyourself", "hang"])
async def hangman(self, ctx):
if await self.checkGameInChannel(ctx):
return
await ctx.send(
"**Welcome to Hangman. You have 10 lives. Guess one letter at a time. " +
"Input `lives` to see how many lives you have left, `time` to see total elapsed time, or `quit` to quit the game.**"
)
self.gameChannels.append(ctx.channel.id)
game = games.Hangman(self.client)
await sleep(1)
while game.getLives() and not game.getDashes() == game.getWord():
await ctx.send(f"```{ctx.author.name}'s word:\n\n{game.getDashes()}```")
await ctx.send("`Please guess a letter.`")
try:
guess = await self.client.wait_for(
"message", check=lambda m: m.channel == ctx.channel and m.author == ctx.author, timeout=120
)
except TimeoutError:
await ctx.send(f"`{ctx.author.name} has left Hangman for idling for too long.`")
self.gameChannels.remove(ctx.channel.id)
return await ctx.send(f"`{ctx.author.name}'s word was {game.getWord()}.`")
content = guess.content
if len(content) != 1:
if content.casefold() == "quit" or content.casefold() == "exit" or content.casefold() == "stop":
await ctx.send(f"`{ctx.author.name} has left Hangman.`")
self.gameChannels.remove(ctx.channel.id)
return await ctx.send(f"`{ctx.author.name}'s word was {game.getWord()}.`")
elif content.casefold().startswith("live"):
lives = game.getLives()
await ctx.send(f"`{ctx.author.name} has {game.getLives()} live{'s' if lives!=1 else ''} left.`")
elif content.casefold().startswith("time"):
m, s = game.getTime()
await funcs.sendTime(ctx, m, s)
| |
shared_lib_paths is not None else []) + (import_lib_paths if import_lib_paths is not None else [])
if len(combined_paths) > 0:
self.apply_lib_based_uselib(pdb_filenames, combined_paths, "PDB", uselib_env_name, lib_source_path, False, True)
copy_extras = self.get_most_specific_entry("copy_extra", uselib_name, lib_configuration)
if copy_extras is not None:
copy_extras_key = '.'.join(copy_extras)
if copy_extras_key in copy_extra_cache:
copy_extra_base = copy_extra_cache[copy_extras_key]
uselib_env_key = 'COPY_EXTRA_{}'.format(uselib_env_name)
self.apply_env_func(uselib_env_key, '@{}'.format(copy_extra_base))
else:
copy_extra_cache[copy_extras_key] = uselib_env_name
self.apply_copy_extra_values(lib_source_path, copy_extras, uselib_env_name)
def get_most_specific_entry(self,
                            key_base_name,
                            uselib_name,
                            lib_configuration,
                            fail_if_missing=False):
    """
    Attempt to get the most specific entry list based on platform and/or configuration for a library.

    Candidate keys are derived from the base name, the uselib name, the
    configuration and any platform version options, and searched from most
    specific to least specific — first at the root of the library config,
    then (overriding) under the current platform node. Any %ALIAS% style
    names in the found values are expanded via self.name_alias_map.

    :param key_base_name: Base name of the entry to lookup
    :param uselib_name: The name of the uselib this entry that is being looked up
    :param lib_configuration: Library configuration
    :param fail_if_missing: Raise a config error if the value is missing, otherwise its an optional existence check
    :return: The requested entry value list (alias-expanded), or None if not found
    """
    entry = None
    platform_node = None
    # Platform specific nodes are optional provided as long as we can eventually determine the value
    if "platform" in self.lib_root:
        platform_root_node = self.lib_root["platform"]
        if self.processed_platform_key in platform_root_node:
            platform_node = platform_root_node[self.processed_platform_key]
    # Candidate keys, least specific first.
    entry_keys = [
        key_base_name,
        "{}/{}".format(uselib_name, key_base_name),
        "{}_{}".format(key_base_name, lib_configuration),
        "{}/{}_{}".format(uselib_name, key_base_name, lib_configuration),
    ]
    version_options = self.get_platform_version_options()
    if version_options:
        # Each key also gets a per-version variant, kept adjacent to its base key.
        expanded_keys = []
        for key in entry_keys:
            expanded_keys.append(key)
            for version in version_options:
                expanded_keys.append("{}/{}".format(key, version))
        entry_keys = expanded_keys
    # reverse the key order so the most specific is found first
    entry_keys.reverse()
    # Check against the value at the root level
    for key in entry_keys:
        if key in self.lib_root:
            entry = self.lib_root[key]
            break
    # Check against the value at the platform level (overrides any root-level hit)
    if platform_node is not None:
        for key in entry_keys:
            if key in platform_node:
                entry = platform_node[key]
                break
    # Fix up the entry if its not in a list
    if entry is not None and not isinstance(entry, list):
        entry = [entry]
    # Validate if the entry was found or not
    if entry is None:
        if fail_if_missing:
            self.raise_config_error("Cannot determine {} for 3rd Party library {} and platform {}".format(key_base_name, self.lib_key, self.platform_key))
        return entry
    # Apply any alias if the entry implies an alias
    vetted_list = []
    for single_entry in entry:
        if isinstance(single_entry, str):
            alias_match = ALIAS_SEARCH_PATTERN.search(single_entry)
            if alias_match is not None:
                # This contains an aliased value
                alias_key = alias_match.group(2)
                if alias_key not in self.name_alias_map:
                    # BUGFIX: the original format string had 3 placeholders for
                    # 4 arguments, dropping the platform and reporting the
                    # entry base name as the library name.
                    raise RuntimeError("Invalid alias key {} for entry {} in 3rd Party library {} and platform {}".format(alias_key, key_base_name, self.lib_key, self.platform_key))
                aliased_names = self.name_alias_map[alias_key] if isinstance(self.name_alias_map[alias_key], list) else [self.name_alias_map[alias_key]]
                for aliased_name in aliased_names:
                    vetted_list.append(ALIAS_SEARCH_PATTERN.sub(aliased_name, single_entry))
            else:
                # This is a normal value
                vetted_list.append(single_entry)
        else:
            vetted_list.append(single_entry)
    return vetted_list
def apply_general_uselib_values(self, uselib_env_name, values, uselib_var, is_file_path, path_prefix, configuration, platform_config_lib_map):
    """
    Apply a list of values to a library uselib variable, either as validated
    file paths or as plain values.
    :param uselib_env_name: The uselib ENV environment key header (i.e. FOO for FOO_DEFINES, FOO_LIBPATH)
    :param values: List of values to apply (None is a no-op)
    :param uselib_var: The uselib name (DEFINES, LIBPATH, etc)
    :param is_file_path: Treat each value as a filepath and validate its existence
    :param path_prefix: Path prefix used to resolve filepath values
    :param configuration: The configuration for the value being applied
    :param platform_config_lib_map: Optional platform configuration alias map
    """
    if values is None:
        return
    # The env key does not depend on the individual value; compute it once.
    uselib_env_key = '{}_{}'.format(uselib_var, uselib_env_name)
    for raw_value in values:
        if is_file_path and path_prefix is not None:
            # A lone '.' means the path prefix itself; anything else may carry an alias.
            if raw_value == '.':
                resolved = path_prefix
            else:
                resolved = self.apply_optional_path_alias(raw_value, path_prefix)
        elif configuration is not None and platform_config_lib_map is not None and '%LIBPATH' in raw_value:
            # Expand the %LIBPATH substitution against the configuration's library map.
            resolved = self.process_value_libpath(configuration, raw_value, platform_config_lib_map)
        else:
            resolved = raw_value
        if is_file_path and not self.ctx.cached_does_path_exist(resolved):
            self.raise_config_error("Invalid/missing {} value '{}'".format(uselib_var, resolved))
        # Warn (verbosely if requested) when overwriting a previously-set key.
        if self.warn_on_collision and self.get_env_func(uselib_env_key):
            if Logs.verbose > 1:
                Logs.warn('[WARN] 3rd party uselib collision on key {}. '
                          'The previous value ({}) will be overridden with ({})'.format(uselib_env_key, self.get_env_func(uselib_env_key), resolved))
            else:
                Logs.warn('[WARN] 3rd party uselib collision on key {}. The previous value will be overridden'.format(uselib_env_key))
        self.apply_env_func(uselib_env_key, resolved)
def apply_uselib_values_file_path(self, uselib_env_name, values, uselib_var, path_prefix=None, configuration=None, platform_config_lib_map=None):
    """
    Apply filepath-flavoured values to a library uselib variable.
    Thin wrapper over apply_general_uselib_values with is_file_path fixed to True,
    so each value is resolved against *path_prefix* and validated to exist.
    :param uselib_env_name: The uselib ENV environment key header (i.e. FOO for FOO_DEFINES, FOO_LIBPATH)
    :param values: List of filepath values to apply
    :param uselib_var: The uselib name (LIBPATH, etc)
    :param path_prefix: Path prefix used to resolve filepath values
    :param configuration: The configuration for the value being applied
    :param platform_config_lib_map: Optional platform configuration alias map
    """
    self.apply_general_uselib_values(uselib_env_name,
                                     values,
                                     uselib_var,
                                     True,
                                     path_prefix,
                                     configuration,
                                     platform_config_lib_map)
def apply_uselib_values_general(self, uselib_env_name, values, uselib_var, configuration=None, platform_config_lib_map=None):
    """
    Apply plain (non-filepath) values to a library uselib variable.
    Thin wrapper over apply_general_uselib_values with is_file_path fixed to
    False and no path prefix, so values are applied without existence checks.
    :param uselib_env_name: The uselib ENV environment key header (i.e. FOO for FOO_DEFINES, FOO_LIBPATH)
    :param values: List of values to apply
    :param uselib_var: The uselib name (DEFINES, LIB, etc)
    :param configuration: The configuration for the value being applied
    :param platform_config_lib_map: Optional platform configuration alias map
    """
    self.apply_general_uselib_values(uselib_env_name,
                                     values,
                                     uselib_var,
                                     False,
                                     None,
                                     configuration,
                                     platform_config_lib_map)
def apply_lib_based_uselib(self, lib_filenames, lib_paths, uselib_var, uselib_env_name, lib_source_path, is_required, fullpath, configuration=None, platform_config_lib_map=None):
    """
    Given a list of lib filenames, validate and apply the uselib value LIB_ against the library and platform
    :param lib_filenames: The list of file names from the library configuration (None is a no-op)
    :param lib_paths: The list of library paths from the library configuration
    :param uselib_var: The uselib variable to apply to
    :param uselib_env_name: The uselib environment to apply the uselib variable to
    :param lib_source_path: The root path to the library in case any of the lib paths do not use any aliases
    :param is_required: Flag to validate if the key is required or not
    :param fullpath: Is the path a full path (vs relative)
    :param configuration: Configuration to use to build the valid library full path dictionary (platform_config_lib_map)
    :param platform_config_lib_map: Map to collect the library full paths based on a compound key of ${platform}_${configuration}
    :return: None
    """
    if lib_filenames is None:
        return
    for lib_filename in lib_filenames:
        uselib_key = "{}_{}".format(uselib_var, uselib_env_name)
        # Validate that the file exists in one of the candidate paths.
        lib_found_fullpath = None
        for lib_path in lib_paths:
            check_base_path = self.apply_optional_path_alias(lib_path, lib_source_path)
            lib_file_path = os.path.normpath(os.path.join(check_base_path, lib_filename))
            if self.ctx.cached_does_path_exist(lib_file_path):
                lib_found_fullpath = lib_file_path
                break
            if lib_filename.endswith('.so'):
                # Special case: If we can't find an .so, then check for any versioned .so. There should be post-processing
                # during configure to establish a symlink-connection
                # NOTE(review): os.listdir will raise if check_base_path does not
                # exist — presumably paths are pre-validated upstream; confirm.
                for check in os.listdir(check_base_path):
                    if fnmatch.fnmatch(check, '{}.*'.format(lib_filename)):
                        # We found a potential match, but make sure its not a '.a' file since clang will link to that if its present
                        if os.path.splitext(check)[1] != '.a':
                            # Deliberately records the *unversioned* path (not the
                            # matched versioned file) — relies on the symlink created
                            # during configure, per the comment above.
                            lib_found_fullpath = lib_file_path
                            self.ctx.warn_once("[WARN] shared library {} not found, but found '{}' instead. Assuming that this is the correct version.".format(lib_filename, check))
                            break
            if lib_found_fullpath:
                break
        if lib_found_fullpath is None:
            if is_required:
                self.raise_config_error("Invalid/missing library file '{}'".format(lib_filename))
            else:
                # Optional library: silently skip this filename.
                continue
        # Record the resolved full path under the platform+configuration key.
        if configuration is not None and platform_config_lib_map is not None:
            library_plat_conf_key = '{}_{}'.format(self.processed_platform_key, configuration)
            if library_plat_conf_key not in platform_config_lib_map:
                platform_config_lib_map[library_plat_conf_key] = [lib_found_fullpath]
            else:
                platform_config_lib_map[library_plat_conf_key].append(lib_found_fullpath)
        # the windows linker is different in the sense it only takes in .libs regardless of the library type
        if uselib_var == 'LIB':
            if self.platform_key.startswith('xenia'):
                # Skip libs whose .lib import library is missing.
                # (lib_file_path here is the last path probed above, which equals
                # the found path when the existence check broke out of the loop.)
                import_lib_path = "{}.lib".format(os.path.splitext(lib_file_path)[0])
                if not self.ctx.cached_does_path_exist(import_lib_path):
                    continue
            if self.platform_key.startswith('win_x64'):
                import_lib_path = "{}.lib".format(os.path.splitext(lib_file_path)[0])
                if not self.ctx.cached_does_path_exist(import_lib_path):
                    continue
        if fullpath:
            self.apply_env_func(uselib_key, lib_found_fullpath)
        else:
            # Apply the bare library name (platform prefix trimmed, extension dropped).
            lib_parts = os.path.splitext(lib_filename)
            lib_name = trim_lib_name(self.processed_platform_key, lib_parts[0])
            self.apply_env_func(uselib_key, lib_name)
def apply_framework(self, uselib_env_name, frameworks):
    """
    Append each framework name to the FRAMEWORK_<env> uselib variable (darwin).
    :param uselib_env_name: The uselib env name
    :param frameworks: The framework list; None is a no-op
    """
    if frameworks is None:
        return
    env_key = 'FRAMEWORK_{}'.format(uselib_env_name)
    for fw in frameworks:
        self.apply_env_func(env_key, fw)
def apply_copy_extra_values(self, src_prefix, values, uselib_env_name):
"""
Apply a custom copy_extra value to the env
:param src_prefix: Prefix (path) to the source file(s) to copy
:param values: The list of filenames to copy
:param uselib_env_name: The uselib env | |
(eV)')
#ax.set_title(r'$T_2 = $' + str(t2))
# plt.colorbar(im)
# fig.subplots_adjust(hspace=0.0,wspace=0.0,bottom=0.14,left=0.0,top=0.95,right=0.98)
return S
def DQC_R1(evals, dip, omega1=None, omega2=[], omega3=None, tau1=None, tau3=None,\
        g_idx=[0], e_idx=None, f_idx=None, gamma=None):
    '''
    Double quantum coherence, diagram 1:
    gg -> eg -> fg -> fe' -> e'e' in the impulsive limit.
    The signal wave vector is ks = k1 + k2 - k3

    Two scanning modes are supported (mirroring DQC_R2):
    - (omega1, omega2) scan at fixed delay tau3: pass omega3=None, tau3=float
    - (omega2, omega3) scan at fixed delay tau1: pass omega1=None, tau1=float

    Parameters
    ----------
    evals : 1d array
        eigenenergies of the states.
    dip : 2d array
        transition dipole matrix elements.
    omega1, omega2, omega3 : 1d arrays or None
        frequency grids for the corresponding coherence intervals.
    tau1, tau3 : float or None
        fixed delay for the un-scanned interval.
    g_idx : list of integers
        indexes for ground manifold.
        NOTE(review): currently unused — the ground state is hard-coded to 0.
    e_idx : list of integers
        indexes for the singly-excited manifold.
    f_idx : list of integers
        indexes for the doubly-excited manifold.
    gamma : 1d array
        dephasing rates per state.

    Returns
    -------
    signal : 2d complex array
        shape (len(omega1), len(omega2)) or (len(omega2), len(omega3)).

    Raises
    ------
    Exception
        if neither (tau3 with omega3=None) nor (tau1 with omega1=None) is given.
    '''
    a = 0  # ground state index (g_idx is accepted but not used — see docstring)
    if omega3 is None and tau3 is not None:
        signal = np.zeros((len(omega1), len(omega2)), dtype=complex)
        for i in range(len(omega1)):
            pump = omega1[i]
            for j in range(len(omega2)):
                probe = omega2[j]
                # sum-over-states
                for b in e_idx:
                    # BUGFIX: the first (eg) coherence evolves during t1 and must
                    # resonate with the pump frequency (omega1), matching DQC_R2's
                    # parallel branch. Previously `probe` was used here, which left
                    # `pump` dead and made the signal constant along the omega1 axis.
                    G_ba = 1./(pump - (evals[b]-evals[a]) + 1j * (gamma[b] + gamma[a])/2.0)
                    for c in f_idx:
                        G_ca = 1./(probe - (evals[c]-evals[a]) + 1j * (gamma[c] + gamma[a])/2.0)
                        for d in e_idx:
                            # free evolution of the fe' coherence during the fixed delay tau3
                            U_cd = -1j * np.exp(-1j * (evals[c] - evals[d]) * tau3 - (gamma[c] + gamma[d])/2. * tau3)
                            signal[i,j] += dip[b, a] * dip[c,b] * dip[d,a]* dip[d,c] * \
                                G_ba * G_ca * U_cd
    elif omega1 is None and tau1 is not None:
        signal = np.zeros((len(omega2), len(omega3)), dtype=complex)
        for i in range(len(omega2)):
            pump = omega2[i]
            for j in range(len(omega3)):
                probe = omega3[j]
                # sum-over-states
                for b in e_idx:
                    # free evolution of the eg coherence during the fixed delay tau1
                    U_ba = -1j * np.exp(-1j * (evals[b] - evals[a]) * tau1 - (gamma[b] + gamma[a])/2. * tau1)
                    for c in f_idx:
                        G_ca = 1./(pump - (evals[c]-evals[a]) + 1j * (gamma[c] + gamma[a])/2.0)
                        for d in e_idx:
                            G_cd = 1./(probe - (evals[c]-evals[d]) + 1j * (gamma[c] + gamma[d])/2.0)
                            signal[i,j] += dip[b, a] * dip[c,b] * dip[d,a]* dip[d,c] * \
                                U_ba * G_ca * G_cd
    else:
        # BUGFIX: previously this fell through with `signal` unbound, raising an
        # opaque UnboundLocalError; raise the same diagnostic DQC_R2 uses.
        raise Exception('Input Error! Please specify either omega1, tau3 or omega3, tau1.')
    # one interaction in the bra side -> overall minus sign
    sign = -1
    return sign * signal
def DQC_R2(evals, dip, omega1=None, omega2=[], omega3=None, tau1=None, tau3=None,\
        g_idx=[0], e_idx=None, f_idx=None, gamma=None):
    '''
    Double quantum coherence, diagram 2:
    gg -> eg -> fg -> eg -> gg in the impulsive limit.
    The signal wave vector is ks = k1 + k2 - k3

    Two scanning modes:
    - (omega1, omega2) scan at fixed delay tau3: pass omega3=None, tau3=float
    - (omega2, omega3) scan at fixed delay tau1: pass omega1=None, tau1=float

    Parameters
    ----------
    evals : 1d array
        eigenenergies.
    dip : 2d array
        transition dipole matrix elements.
    omega1, omega2, omega3 : 1d arrays or None
        frequency grids for the scanned coherence intervals.
    tau1, tau3 : float or None
        fixed delay for the un-scanned interval.
    g_idx : list of integers
        ground manifold indexes (unused: ground state hard-coded to 0).
    e_idx, f_idx : lists of integers
        singly / doubly excited manifold indexes.
    gamma : 1d array
        dephasing rates per state.

    Returns
    -------
    signal : 2d complex array

    Raises
    ------
    Exception
        when neither valid input combination is supplied.
    '''
    a = 0  # ground state index

    def _G(omega, upper, lower):
        # Frequency-domain propagator 1/(omega - omega_ul + i(gamma_u + gamma_l)/2).
        return 1. / (omega - (evals[upper] - evals[lower])
                     + 1j * (gamma[upper] + gamma[lower]) / 2.0)

    if omega3 is None and tau3 is not None:
        signal = np.zeros((len(omega1), len(omega2)), dtype=complex)
        for i, pump in enumerate(omega1):
            for j, probe in enumerate(omega2):
                # sum-over-states
                for b in e_idx:
                    G_ba = _G(pump, b, a)
                    for c in f_idx:
                        G_ca = _G(probe, c, a)
                        for d in e_idx:
                            # free evolution of the final eg coherence during tau3
                            U_da = -1j * np.exp(-1j * (evals[d] - evals[a]) * tau3
                                                - (gamma[d] + gamma[a]) / 2. * tau3)
                            signal[i, j] += dip[b, a] * dip[c, b] * dip[d, c] * dip[a, d] * \
                                G_ba * G_ca * U_da
    elif omega1 is None and tau1 is not None:
        signal = np.zeros((len(omega2), len(omega3)), dtype=complex)
        for i, pump in enumerate(omega2):
            for j, probe in enumerate(omega3):
                # sum-over-states
                for b in e_idx:
                    # NOTE(review): unlike DQC_R1 this propagator carries no -1j
                    # prefactor — confirm whether that asymmetry is intended.
                    U_ba = np.exp(-1j * (evals[b] - evals[a]) * tau1
                                  - (gamma[b] + gamma[a]) / 2. * tau1)
                    for c in f_idx:
                        G_ca = _G(pump, c, a)
                        for d in e_idx:
                            G_da = _G(probe, d, a)
                            signal[i, j] += dip[b, a] * dip[c, b] * dip[d, c] * dip[a, d] * \
                                U_ba * G_ca * G_da
    else:
        raise Exception('Input Error! Please specify either omega1, tau3 or omega3, tau1.')
    # positive sign due to 0 interactions at the bra side
    sign = 1
    return sign * signal
# def spontaneous_photon_echo(E, dip, pump, probe, tau2=0.0, normalize=True):
# """
# Compute the spontaneous photon echo signal.
# Parameters
# ----------
# E : TYPE
# DESCRIPTION.
# dip : TYPE
# DESCRIPTION.
# pump: 1d array
# pump frequency of the first pulse
# probe: 1d array
# probe frequency of the third pulse
# tau2: float
# time-delay between the second and third pulses. The default is 0.0.
# Returns
# -------
# None.
# """
# signal = np.zeros((len(pump), len(probe)))
# for i in range(len(pump)):
# for j in range(len(probe)):
# signal[i,j] = response2_freq(E, dip, probe[j], tau2, pump[i]) + \
# response3_freq(E, dip, probe[j], tau2, pump[i])
# if normalize:
# signal /= abs(signal).max() # normalize
# return signal
def etpa(omegaps, mol, epp, g_idx, e_idx, f_idx):
    """
    ETPA signal with temporal modes (TMs).

    Gathers the molecular eigenenergies/dipoles and the biphoton joint
    temporal amplitude, then delegates the double time integral to _etpa.

    Parameters
    ----------
    omegaps : 1d array
        pump center frequencies.
    mol : object
        molecule providing eigenenergies() and an edip matrix.
    epp : object
        entangled-photon pair providing get_jta() -> (t1, t2, jta).
    g_idx : ground-state index
    e_idx : intermediate-state indexes
    f_idx : final-state indexes

    Returns
    -------
    signal : 1d complex array, one entry per pump frequency.
    """
    grid1, grid2, amplitude = epp.get_jta()
    return _etpa(omegaps, mol.eigenenergies(), mol.edip, amplitude,
                 grid1, grid2, g_idx, e_idx, f_idx)
# @jit
def _etpa(omegaps, Es, edip, jta, t1, t2, g_idx, e_idx, f_idx):
"""
internal function to compute the ETPA signal.
The double time integrals are computed numerically.
Parameters
----------
omegaps: pump center frequencies
Es: eigenenergies
edip: electric dipole operator
jta: 2d array
joint temporal amplitude
t1: 1d array
t2: 1d array
g_idx: ground-state manifold
e_idx: intermediate states
f_idx: final states
Returns
-------
signal: 1d array
"""
# setup the temporal grid
T1, T2 = np.meshgrid(t1, t2)
# discrete heaviside function
theta = heaviside(T2 - T1, 0.5)
signal = np.zeros(len(omegaps), dtype=complex)
g = g_idx
for j, omegap in enumerate(omegaps): # loop over pump frequencies
omega1 = omega2 = omegap/2.
for f in f_idx: # loop over final states
for e in e_idx:
detuning2 = Es[f] - Es[e] - omega2
detuning1 = Es[e] - Es[g] - omega1
D = edip[e, g] * edip[f, e]
signal[j] += D * np.sum(theta * np.exp(1j * detuning2 * T2 +
1j * detuning1 * T1) * jta)
detuning2 = Es[f] - Es[e] - omega1
detuning1 = Es[e] - Es[g] - omega2
signal[j] += D * np.sum(theta * np.exp(1j * detuning2 * T2 +
1j * detuning1 * T1) * jta.T)
return signal
def test_etpa():
    """Smoke test / demo for the ETPA signal: build a biphoton state, scan
    the pump frequency, and plot |signal|^2.

    Relies on module-level names: `mol` (the molecule), `Biphoton`, the
    `au2ev`/`au2fs` unit-conversion constants, and matplotlib's
    `subplots`/`plt`. Returns None.
    """
    # NOTE(review): Biphoton(0, 0.04/au2ev, Te=10./au2fs) — first two args are
    # presumably center detuning and bandwidth in a.u.; confirm against Biphoton.
    epp = Biphoton(0, 0.04/au2ev, Te=10./au2fs)
    # Frequency grids for the joint spectral amplitude, +-4 eV in a.u.
    p = np.linspace(-4, 4, 256)/au2ev
    q = p
    epp.set_grid(p, q)
    epp.get_jsa()
    # epp.plt_jsa()
    # Pump scan from 0.5 to 1.5 eV (converted to a.u.).
    pump = np.linspace(0.5, 1.5, 100)/au2ev
    signal = etpa(pump, mol, epp, [0], [1, 2, 3], [2, 3])
    fig, ax = subplots()
    ax.plot(pump*au2ev, np.abs(signal)**2)
    plt.show()
    return
def cars(E, edip, shift, omega1, t2=0, gamma=10/au2mev):
    '''
    Coherent anti-Stokes Raman signal: two pump pulses followed by a
    stimulated Raman probe. The first, second, and fourth pulses are
    assumed impulsive; the third pulse is cw.

    S = sum_{b,a in excited} mu_{bg} * mu_{ag} * alpha_{ba}
        * L(shift - omega_{ba}) / (omega1 - omega_{ag} + i*gamma)

    Parameters
    ----------
    E : 1d array
        eigenenergies.
    edip : 2d array
        transition dipole matrix elements.
    shift : 1d array
        Raman shift axis.
    omega1 : 1d array
        pump frequency axis.
    t2 : float, optional
        time delay between pulse 2 and 4 (stokes beam). The default is 0.
        NOTE(review): t2 is currently unused in the computation — confirm.
    gamma : float, optional
        linewidth (default 10 meV in a.u.).

    Returns
    -------
    S : 2d array over (shift, omega1).
    '''
    nstates = len(E)
    g = 0
    # Placeholder polarizability: unit off-diagonal elements, zero diagonal
    # (the a == b terms therefore drop out of the double sum).
    alpha = np.ones((nstates, nstates))
    np.fill_diagonal(alpha, 0)
    S = 0
    for a in range(1, nstates):
        for b in range(1, nstates):
            raman_line = lorentzian(shift - (E[b] - E[a]), gamma)
            pump_resonance = 1./(omega1 - (E[a] - E[g]) + 1j * gamma)
            S += edip[b, g] * edip[a, g] * alpha[b, a] * np.outer(raman_line, pump_resonance)
    return S
def mcd(mol, omegas):
'''
magentic circular dichroism signal with SOS
The electronic structure data should contain the B field ready,
not the bare quantities.
B = (0, 0, Bz)
Reference:
<NAME>, <NAME>, and <NAME>, JCTC, 2019, 15, 3162-3169
Parameters
----------
mol : TYPE
DESCRIPTION.
omegas : TYPE
DESCRIPTION.
Returns
-------
signal : TYPE
| |
<< 8)
self.log_load(address, data, 'half')
self.check_address(address)
return data
def load_byte(self, address):
    """Load one byte, serving locally-owned addresses from the local image.

    local_addresses acts as a per-byte ownership map (a '\\xff' marker byte
    presumably means 'locally owned' — Python 2 byte-string semantics);
    owned bytes are read straight from local_data. Otherwise pending stores
    are flushed and the byte is peeked from the device, logged, and
    address-checked.
    """
    self.flash_prefetch_hint(address)
    self.local_addresses.seek(address)
    if self.local_addresses.read(1) == '\xff':
        self.local_data.seek(address)
        return ord(self.local_data.read(1))
    self.flush()
    data = self.device.peek_byte(address)
    self.log_load(address, data, 'byte')
    self.check_address(address)
    return data
def store(self, address, data):
    """Store a 32-bit word (little-endian) to *address*.

    Locally-owned words (all four ownership bytes '\\xff') are written
    directly into the local image. Addresses listed in skip_stores are
    logged and dropped. Everything else is queued through the RLE write
    combiner toward the device.
    """
    self.local_addresses.seek(address)
    if self.local_addresses.read(4) == '\xff\xff\xff\xff':
        self.local_data.seek(address)
        self.local_data.write(struct.pack('<I', data))
        return
    if address in self.skip_stores:
        self.log_store(address, data,
            message='(skipped: %s)'% self.skip_stores[address])
        return
    self.post_rle_store(*self.rle.write(address, data, 4))
def store_half(self, address, data):
    """Store a 16-bit halfword (little-endian); same routing as store():
    local image if owned, skip-list logging, else the RLE write queue."""
    self.local_addresses.seek(address)
    if self.local_addresses.read(2) == '\xff\xff':
        self.local_data.seek(address)
        self.local_data.write(struct.pack('<H', data))
        return
    if address in self.skip_stores:
        self.log_store(address, data,
            message='(skipped: %s)'% self.skip_stores[address])
        return
    self.post_rle_store(*self.rle.write(address, data, 2))
def store_byte(self, address, data):
    """Store a single byte; same routing as store(): local image if owned,
    skip-list logging, else the RLE write queue. (chr(data) — Python 2
    byte-string write.)"""
    self.local_addresses.seek(address)
    if self.local_addresses.read(1) == '\xff':
        self.local_data.seek(address)
        self.local_data.write(chr(data))
        return
    if address in self.skip_stores:
        self.log_store(address, data,
            message='(skipped: %s)'% self.skip_stores[address])
        return
    self.post_rle_store(*self.rle.write(address, data, 1))
def fetch(self, address, thumb):
    """Return the decoded instruction at *address*.

    The cache key folds the thumb flag into bit 0 of the (bit-0-cleared)
    address. On a miss the address is validated and a fresh block is
    disassembled into the cache before the lookup is retried.
    """
    key = thumb | (address & ~1)
    if key not in self.instructions:
        self.check_address(address)
        self._load_instruction(address, thumb)
    return self.instructions[key]
def _load_instruction(self, address, thumb):
    """Disassemble a block of local memory around *address* into the cache.

    Flushes pending writes first so local_data is current, reads the
    prefetch-hinted block size (asserted >= 8 bytes, presumably so
    _load_assembly always has its one-instruction padding — confirm), and
    hands the disassembly lines to _load_assembly.
    """
    self.flush()
    block_size = self.flash_prefetch_hint(address)
    assert block_size >= 8
    self.local_data.seek(address)
    data = self.local_data.read(block_size)
    lines = disassembly_lines(disassemble_string(data, address, thumb=thumb))
    self._load_assembly(address, lines, thumb=thumb)
def _load_assembly(self, address, lines, thumb):
# NOTE: Requires an extra instruction of padding at the end
for i in range(len(lines) - 1):
instr = lines[i]
instr.next_address = lines[i+1].address
addr = thumb | (lines[i].address & ~1)
instr.hle = self.patch_hle.get(addr)
if addr not in self.instructions:
self.instructions[addr] = instr
def hle_init(self, code_address = pad):
    """Install a C++ library to handle high-level emulation operations

    Compiles self.hle_handlers onto the device at code_address (the default
    `pad` is a module-level address constant — defined elsewhere in this
    file; confirm) and records the resulting symbol table for hle_invoke().
    """
    self.hle_symbols = compile_library(self.device, code_address, self.hle_handlers)
    print "* Installed High Level Emulation handlers at %08x" % code_address
def hle_invoke(self, instruction, r0):
    """Invoke the high-level emulation operation for an instruction
    Captures console output to the log.

    Calls the installed handler named by instruction.hle via a device blx
    with r0 as the argument register, returning the handler's r0. Any
    console output produced is prefixed 'HLE: ', echoed to stdout, and
    appended to the logfile when one is open.
    """
    cb = ConsoleBuffer(self.device)
    # Drop anything already queued so we only capture this handler's output.
    cb.discard()
    r0, _ = self.device.blx(self.hle_symbols[instruction.hle], r0)
    cb_data = cb.read(max_round_trips = None)
    logdata = cb_data
    # Prefix log lines, normalize trailing newline
    logdata = '\n'.join([ 'HLE: ' + l for l in logdata.rstrip().split('\n') ]) + '\n'
    sys.stdout.write(logdata)
    if self.logfile:
        self.logfile.write(logdata)
    return r0
class SimARM(object):
"""Main simulator class for the ARM subset we support in %sim
Registers are available at regs[], call step() to single-step. Uses the
provided memory manager object to handle load(), store(), and fetch().
The lightweight CPU state is available as a dictionary property 'state'.
Full local state including local memory can be stored with save_state().
"""
def __init__(self, memory):
    """Build the simulator around a memory manager and reset to vector 0.

    Also precomputes the register name tables and dynamically generates
    the ldm/stm addressing-mode variants and condition-code-suffixed
    op_* handlers on this instance.
    """
    self.memory = memory
    self.reset(0)
    # Register lookup: canonical names plus APCS aliases map to indexes 0-15.
    self.reg_names = ('r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7',
                      'r8', 'r9', 'r10', 'r11', 'r12', 'sp', 'lr', 'pc')
    self.alt_names = ('a1', 'a2', 'a3', 'a4', 'v1', 'v2', 'v3', 'v4',
                      'v5', 'sb', 'sl', 'fp', 'ip', 'r13', 'r14', 'r15')
    self.reg_numbers = {}
    for i, name in enumerate(self.reg_names): self.reg_numbers[name] = i
    for i, name in enumerate(self.alt_names): self.reg_numbers[name] = i
    # Initialize ldm/stm variants (all addressing modes, both directions).
    for memop in ('ld', 'st'):
        for mode in ('', 'ia', 'ib', 'da', 'db', 'fd', 'fa', 'ed', 'ea'):
            self._generate_ldstm(memop, mode)
    # Initialize condition codes: every class-level op_* handler gets
    # cc-suffixed instance-level wrappers (op_addeq, op_addne, ...).
    for name in self.__class__.__dict__.keys():
        if name.startswith('op_'):
            self._generate_condition_codes(getattr(self, name), name + '%s')
    self.memory.hle_init()
def reset(self, vector):
    """Simulate a CPU reset into *vector*; bit 0 selects Thumb state."""
    self.thumb = vector & 1
    self.regs = [0] * 16
    self.regs[15] = vector & 0xfffffffe  # PC: vector with the mode bit cleared
    self.regs[14] = 0xffffffff           # LR sentinel value — presumably "no caller"
    # All condition flags start clear.
    self.cpsrN = self.cpsrZ = self.cpsrC = self.cpsrV = False
    self.step_count = 0
# Scalar CPU fields captured by the 'state' property (registers handled separately).
_state_fields = ('thumb', 'cpsrV', 'cpsrC', 'cpsrZ', 'cpsrN', 'step_count')
@property
def state(self):
    """Lightweight CPU state as a plain dict: flags, step count, and a
    copy of the 16 registers (safe to serialize as JSON)."""
    d = {}
    for name in self._state_fields:
        d[name] = getattr(self, name)
    d['regs'] = self.regs[:]
    return d
@state.setter
def state(self, value):
    # Restore scalar fields, then overwrite register contents in place.
    for name in self._state_fields:
        setattr(self, name, value[name])
    self.regs[:] = value['regs']
def save_state(self, filebase):
    """Save state to disk, using files beginning with 'filebase'"""
    # Memory image is saved by the memory manager; CPU scalars and
    # registers go to '<filebase>.core' as JSON.
    self.memory.save_state(filebase)
    with open(filebase + '.core', 'w') as f:
        json.dump(self.state, f)
def load_state(self, filebase):
    """Load state from save_state()"""
    # Restore the memory image first, then CPU state via the property setter.
    self.memory.load_state(filebase)
    with open(filebase + '.core', 'r') as f:
        self.state = json.load(f)
def step(self, repeat = 1, breakpoint = None):
    """Step the simulated ARM by one or more instructions
    Stops when the repeat count is exhausted or we hit a breakpoint.
    """
    regs = self.regs
    while repeat > 0:
        repeat -= 1
        self.step_count += 1
        # Hook lookup happens at the *pre-execution* PC.
        hook = self.memory.hooks.get(regs[15], None)
        instr = self.memory.fetch(regs[15], self.thumb)
        self._branch = None
        # Emulate the architectural read-ahead PC visible to the instruction:
        # word-aligned next address in Thumb, current + 8 in ARM state.
        if self.thumb:
            regs[15] = (instr.next_address + 3) & ~3
        else:
            regs[15] += 8
        try:
            # The op_ function does some precalculation and returns a function that
            # actually runs the operation. We cache the latter function.
            try:
                opfunc = instr.opfunc
            except AttributeError:
                opfunc = instr.opfunc = getattr(self, 'op_' + instr.op.split('.', 1)[0])(instr)
            opfunc()
            # NOTE(review): a branch target of 0 is falsy and would be treated
            # as "no branch" here — presumably unreachable in practice; confirm.
            regs[15] = self._branch or instr.next_address
            if regs[15] == breakpoint:
                return
        except:
            # If we don't finish, point the PC at that instruction
            # (bare except is deliberate: the exception is re-raised).
            regs[15] = instr.address
            raise
        if instr.hle:
            regs[0] = self.memory.hle_invoke(instr, regs[0])
        if hook:
            # Hooks can do anything including reentrantly step()'ing
            hook(self)
def get_next_instruction(self):
    """Decode (via the memory cache) the instruction the PC points at."""
    pc = self.regs[15]
    return self.memory.fetch(pc, self.thumb)
def flags_string(self):
    """Render the NZCVT flags as a five-character string, '-' marking clear."""
    rendered = []
    for letter, bit in (('N', self.cpsrN),
                        ('Z', self.cpsrZ),
                        ('C', self.cpsrC),
                        ('V', self.cpsrV),
                        ('T', self.thumb)):
        rendered.append(letter if bit else '-')
    return ''.join(rendered)
def summary_line(self):
    """One-line execution trace: dotted step count, flag string, next PC
    (marked '>'), the memory note for that address, opcode, and args."""
    up_next = self.get_next_instruction()
    return "%s %s >%08x %5s %-8s %s" % (
        str(self.step_count).rjust(12, '.'),
        self.flags_string(),
        up_next.address,
        self.memory.note(up_next.address),
        up_next.op,
        up_next.args)
def register_trace(self):
    """Four-line register dump laid out column-major: row r holds
    registers r, r+4, r+8, r+12 (so the first line is r0 r4 r8 r12)."""
    out = []
    for row in range(4):
        cells = ['%4s=%08x' % (self.reg_names[row + col * 4], self.regs[row + col * 4])
                 for col in range(4)]
        out.append(''.join(cells) + '\n')
    return ''.join(out)
def register_trace_line(self, count=15):
    """Single-line dump of the first *count* registers (default skips PC)."""
    rendered = ['%s=%08x' % (self.reg_names[i], self.regs[i]) for i in range(count)]
    return ' '.join(rendered)
def copy_registers_from(self, ns):
    """Load every register from mapping *ns*; names absent there become 0."""
    self.regs = [ns.get(name, 0) for name in self.reg_names]
def copy_registers_to(self, ns):
    """Write every register value into mapping *ns* keyed by canonical name."""
    for name, value in zip(self.reg_names, self.regs):
        ns[name] = value
def _generate_ldstm(self, memop, mode):
    """Generate one ldm/stm handler ('ld' or 'st' x addressing mode) and
    install it, plus condition-code-suffixed variants, on this instance.

    The stack mnemonics (fd/fa/ed/ea) are first translated to the
    increment/decrement forms (ia/ib/da/db), which map differently for
    loads and stores. `pre`/`post` are the per-register address deltas
    applied before/after each transfer.
    """
    impl = mode or 'ia'
    # Stack mnemonics
    if memop == 'st' and mode == 'fd': impl = 'db'
    if memop == 'st' and mode == 'fa': impl = 'ib'
    if memop == 'st' and mode == 'ed': impl = 'da'
    if memop == 'st' and mode == 'ea': impl = 'ia'
    if memop == 'ld' and mode == 'fd': impl = 'ia'
    if memop == 'ld' and mode == 'fa': impl = 'da'
    if memop == 'ld' and mode == 'ed': impl = 'ib'
    if memop == 'ld' and mode == 'ea': impl = 'db'
    if impl == 'ib': pre = +4
    elif impl == 'db': pre = -4
    else: pre = 0
    if impl == 'ia': post = +4
    elif impl == 'da': post = -4
    else: post = 0
    def op_fn(i):
        # Parse 'rN!, {ra, rb, ...}' once; the returned closure does the work
        # on each execution (the simulator caches it as instr.opfunc).
        left, right = i.args.split(', ', 1)
        assert right[0] == '{'
        writeback = left.endswith('!')
        left = self.reg_numbers[left.strip('!')]
        regs = right.strip('{}').split(', ')
        if memop == 'st':
            src_rn = [self.reg_numbers[n] for n in regs]
            def fn():
                addr = self.regs[left]
                for rn in src_rn:
                    addr += pre
                    self.memory.store(addr, self.regs[rn])
                    addr += post
                if writeback:
                    self.regs[left] = addr
            return fn
        else:
            # Loads go through _dstpc so a load into pc becomes a branch.
            dst_funcs = [self._dstpc(n) for n in regs]
            def fn():
                addr = self.regs[left]
                for dF in dst_funcs:
                    addr += pre
                    dF(self.memory.load(addr))
                    addr += post
                if writeback:
                    self.regs[left] = addr
            return fn
    setattr(self, 'op_' + memop + 'm' + mode, op_fn)
    # Register cc-suffixed variants in both spellings (e.g. ldmeqia / ldmiaeq).
    self._generate_condition_codes(op_fn, 'op_' + memop + 'm%s' + mode)
    self._generate_condition_codes(op_fn, 'op_' + memop + 'm' + mode + '%s')
def _generate_condition_codes(self, fn, name):
setattr(self, name % 'eq', lambda i: lambda fn=fn(i): (self.cpsrZ ) and fn())
setattr(self, name % 'ne', lambda i: lambda fn=fn(i): (not self.cpsrZ ) and fn())
setattr(self, name % 'cs', lambda i: lambda fn=fn(i): (self.cpsrC ) and fn())
setattr(self, name % 'hs', lambda i: lambda fn=fn(i): (self.cpsrC ) and fn())
setattr(self, name % 'cc', lambda i: lambda fn=fn(i): (not self.cpsrC ) and fn())
setattr(self, name % 'lo', lambda i: lambda fn=fn(i): (not self.cpsrC ) and fn())
setattr(self, name % 'mi', lambda i: lambda fn=fn(i): (self.cpsrN ) and fn())
setattr(self, name % 'pl', lambda i: lambda fn=fn(i): (not self.cpsrN ) and fn())
setattr(self, name % 'vs', lambda i: lambda fn=fn(i): (self.cpsrV ) and fn())
setattr(self, name % 'vc', lambda i: lambda fn=fn(i): (not self.cpsrV ) and fn())
setattr(self, name % 'hi', lambda i: lambda fn=fn(i): (self.cpsrC and not self.cpsrZ ) and fn())
setattr(self, name % 'ls', lambda i: lambda fn=fn(i): (self.cpsrZ or not self.cpsrC ) and fn())
setattr(self, name % 'ge', lambda i: lambda fn=fn(i): (((not self.cpsrN) == (not self.cpsrV)) ) and fn())
setattr(self, name % 'lt', lambda i: lambda fn=fn(i): (((not self.cpsrN) != (not self.cpsrV)) ) and fn())
setattr(self, name % 'gt', lambda i: lambda fn=fn(i): (((not self.cpsrN) == (not self.cpsrV)) and not self.cpsrZ ) and fn())
setattr(self, name % 'le', lambda i: lambda fn=fn(i): (((not self.cpsrN) != (not self.cpsrV)) or | |
unescape:
string = unescape_name_str(raw_string)
if is_container_id(string):
return ([string] if multi else string)
if string in cached_project_names:
return ([cached_project_names[string]] if multi else cached_project_names[string])
try:
results = list(dxpy.find_projects(name=string, describe=True, level='VIEW'))
except Exception as details:
raise ResolutionError(str(details))
if len(results) == 1:
cached_project_names[string] = results[0]['id']
return ([results[0]['id']] if multi else results[0]['id'])
elif len(results) == 0:
if is_error:
raise ResolutionError('Could not find a project named "' + string + '"')
return ([] if multi else None)
elif not multi:
if INTERACTIVE_CLI:
print('Found multiple projects with name "' + string + '"')
choice = pick(['{id} ({level})'.format(id=result['id'], level=result['level'])
for result in results])
return results[choice]['id']
else:
raise ResolutionError('Found multiple projects with name "' + string + '"; please use a project ID to specify the desired project')
else:
# len(results) > 1 and multi
return [result['id'] for result in results]
def resolve_path(path, expected=None, expected_classes=None, multi_projects=False, allow_empty_string=True):
    '''
    :param path: A path to a data object to attempt to resolve
    :type path: string
    :param expected: one of the following: "folder", "entity", or None to indicate whether the expected path is a folder, a data object, or either
    :type expected: string or None
    :param expected_classes: a list of DNAnexus data object classes (if any) by which the search can be filtered
    :type expected_classes: list of strings or None
    :returns: A tuple of 3 values: container_ID, folderpath, entity_name
    :rtype: string, string, string
    :raises: exc:`ResolutionError` if 1) a colon is provided but no project can be resolved, or 2) *expected* was set to "folder" but no project can be resolved from which to establish context
    :param allow_empty_string: If false, a ResolutionError will be raised if *path* is an empty string. Use this when resolving the empty string could result in unexpected behavior.
    :type allow_empty_string: boolean
    Attempts to resolve *path* to a project or container ID, a folder
    path, and a data object or folder name. This method will NOT
    raise an exception if the specified folder or object does not
    exist. This method is primarily for parsing purposes.
    '''
    # Under dxfuse, translate a mounted filesystem path to project:id
    # via extended attributes.
    if '_DX_FUSE' in os.environ:
        from xattr import xattr
        path = xattr(path)['project'] + ":" + xattr(path)['id']
    if path == '' and not allow_empty_string:
        raise ResolutionError('Cannot parse ""; expected the path to be a non-empty string')
    # If the path is a JSON $dnanexus_link (plain or project-qualified),
    # unwrap it to "id" or "project:id" form. Parse errors fall through
    # and the path is treated as a literal string.
    try:
        possible_hash = json.loads(path)
        if isinstance(possible_hash, dict) and '$dnanexus_link' in possible_hash:
            if isinstance(possible_hash['$dnanexus_link'], basestring):
                path = possible_hash['$dnanexus_link']
            elif isinstance(possible_hash['$dnanexus_link'], dict) and isinstance(possible_hash['$dnanexus_link'].get('project', None), basestring) and isinstance(possible_hash['$dnanexus_link'].get('id', None), basestring):
                path = possible_hash['$dnanexus_link']['project'] + ':' + possible_hash['$dnanexus_link']['id']
    except:
        pass
    # Easy case: ":"
    if path == ':':
        if dxpy.WORKSPACE_ID is None:
            raise ResolutionError('Cannot parse ":"; expected a project name or ID to the left of a colon or for a current project to be set')
        return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), '/', None
    # Second easy case: empty string
    if path == '':
        if dxpy.WORKSPACE_ID is None:
            raise ResolutionError('Expected a project name or ID to the left of a colon or for a current project to be set')
        return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), os.environ.get('DX_CLI_WD', '/'), None
    # Third easy case: hash ID
    if is_container_id(path):
        return ([path] if multi_projects else path), '/', None
    elif is_hashid(path):
        return ([dxpy.WORKSPACE_ID] if multi_projects else dxpy.WORKSPACE_ID), None, path
    # using a numerical sentinel value to indicate that it hasn't been
    # set in case dxpy.WORKSPACE_ID is actually None
    project = 0
    folderpath = None
    entity_name = None
    wd = None
    # Test for multiple colons
    last_colon = get_last_pos_of_char(':', path)
    if last_colon >= 0:
        last_last_colon = get_last_pos_of_char(':', path[:last_colon])
        if last_last_colon >= 0:
            raise ResolutionError('Cannot parse "' + path + '" as a path; at most one unescaped colon can be present')
    substrings = split_unescaped(':', path)
    if len(substrings) == 2:
        # One of the following:
        # 1) job-id:fieldname
        # 2) project-name-or-id:folderpath/to/possible/entity
        if is_job_id(substrings[0]):
            return ([substrings[0]] if multi_projects else substrings[0]), None, substrings[1]
        if multi_projects:
            project_ids = resolve_container_id_or_name(substrings[0], is_error=True, multi=True)
        else:
            project = resolve_container_id_or_name(substrings[0], is_error=True)
        wd = '/'
    elif get_last_pos_of_char(':', path) >= 0:
        # :folderpath/to/possible/entity OR project-name-or-id:
        # Colon is either at the beginning or at the end
        wd = '/'
        if path.startswith(':'):
            if dxpy.WORKSPACE_ID is None:
                raise ResolutionError('Cannot parse "' + path + '" as a path; expected a project name or ID to the left of a colon or for a current project to be set')
            project = dxpy.WORKSPACE_ID
        else:
            # One nonempty string to the left of a colon
            project = resolve_container_id_or_name(substrings[0], is_error=True)
            folderpath = '/'
    else:
        # One nonempty string, no colon present, do NOT interpret as
        # project
        project = dxpy.WORKSPACE_ID
        if expected == 'folder' and project is None:
            raise ResolutionError('a project context was expected for a path, but a current project is not set, nor was one provided in the path (preceding a colon) in "' + path + '"')
        wd = get_env_var('DX_CLI_WD', u'/')
    # Determine folderpath and entity_name if necessary
    if folderpath is None:
        folderpath = substrings[-1]
        # Relative paths are resolved against the working directory.
        folderpath, entity_name = clean_folder_path(('' if folderpath.startswith('/') else wd + '/') + folderpath, expected)
    if multi_projects:
        # project still equal to the sentinel 0 means the multi-project
        # branch above populated project_ids instead.
        return (project_ids if project == 0 else [project]), folderpath, entity_name
    else:
        return project, folderpath, entity_name
def resolve_job_ref(job_id, name, describe=None):
    """Resolve a <job id>:<output field> reference to data object result(s).

    Args:
        job_id: ID of the (finished) job.
        name: output field name; "field.N" indexes element N of an array field.
        describe: input hash forwarded to the /describe API call for each
            resolved object. Defaults to a fresh dict per call; the previous
            ``describe={}`` default was mutated below (the 'project' key), so
            state leaked between calls that relied on the default.

    Returns:
        list of {"id": ..., "describe": ...} dicts.

    Raises:
        ResolutionError: if the job cannot be described, is not in the done
            state, or the field is missing / not a data object / badly indexed.
    """
    if describe is None:
        describe = {}
    try:
        job_desc = dxpy.api.job_describe(job_id)
    except Exception as details:
        raise ResolutionError(str(details))
    project = job_desc['project']
    # Note: intentionally mutates the caller-supplied describe hash.
    describe['project'] = project
    if job_desc['state'] != 'done':
        raise ResolutionError('the job ' + job_id + ' is ' + job_desc['state'] + ', and it must be in the done state for its outputs to be accessed')
    # Optional ".N" suffix selects one element of an array output field.
    index = None
    if '.' in name:
        try:
            actual_name, str_index = name.rsplit('.', 1)
            index = int(str_index)
            name = actual_name
        except ValueError:
            pass
    output_field = job_desc['output'].get(name, None)
    if index is not None:
        if not isinstance(output_field, list):
            raise ResolutionError('Found "' + name + '" as an output field name of ' + job_id +
                                  ', but it is not an array and cannot be indexed')
        if index < 0 or index >= len(output_field):
            raise ResolutionError('Found "' + name + '" as an output field name of ' + job_id +
                                  ', but the specified index ' + str_index + ' is out of range')
        output_field = output_field[index]
    results = []
    if output_field is not None:
        if isinstance(output_field, list):
            if len(output_field) > 0:
                if not isinstance(output_field[0], dict) or '$dnanexus_link' not in output_field[0]:
                    raise ResolutionError('Found "' + name + '" as an output field name of ' + job_id + ', but it is an array of non-data objects')
                ids = [link['$dnanexus_link'] for link in output_field]
                try:
                    results = [{"id": out_id,
                                "describe": dxpy.DXHTTPRequest('/' + out_id + '/describe', describe)} for out_id in ids]
                except Exception as details:
                    raise ResolutionError(str(details))
            else:
                raise ResolutionError('Found "' + name + '" as an output field name of ' + job_id + ', but it is an empty array')
        elif isinstance(output_field, dict) and '$dnanexus_link' in output_field:
            obj_id = output_field['$dnanexus_link']
            try:
                results = [{"id": obj_id, "describe": dxpy.DXHTTPRequest('/' + obj_id + '/describe', describe)}]
            except Exception as details:
                raise ResolutionError(str(details))
        else:
            raise ResolutionError('Found "' + name + '" as an output field name of ' + job_id + ', but it is not of a data object class')
    else:
        raise ResolutionError('Could not find "' + name + '" as an output field name of ' + job_id + '; available fields are: ' + ', '.join(job_desc['output'].keys()))
    return results
def resolve_existing_path(path, expected=None, ask_to_resolve=True, expected_classes=None, allow_mult=False, describe={}, all_mult=False, allow_empty_string=True,
visibility="either"):
'''
:param ask_to_resolve: Whether picking may be necessary (if true, a list is returned; if false, only one result is returned)
:type ask_to_resolve: boolean
:param allow_mult: Whether to allow the user to select multiple results from the same path
:type allow_mult: boolean
:param describe: Input hash to describe call for the results
:type describe: dict
:param all_mult: Whether to return all matching results without prompting (only applicable if allow_mult == True)
:type all_mult: boolean
:returns: A LIST of results when ask_to_resolve is False or allow_mult is True
:raises: :exc:`ResolutionError` if the request path was invalid, or a single result was requested and input is not a TTY
:param allow_empty_string: If false, a | |
import tensorflow as tf
import numpy as np
import os
from voicepuppet.builder import ModelBuilder
from config.configure import YParams
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
class PixFlowNet(ModelBuilder):
def __init__(self, config_path):
    """Load PixFlowNet hyper-parameters from a YAML config file.

    Args:
        config_path: path to a YParams-compatible configuration file.

    Raises:
        FileNotFoundError: if config_path does not exist. (The original code
            referenced an undefined ``logger`` — a NameError at runtime — and
            then called exit(0), which would have reported success on failure.)
    """
    if not os.path.exists(config_path):
        raise FileNotFoundError('config_path not exists: {}'.format(config_path))
    self.__params = PixFlowNet.default_hparams(config_path)
@staticmethod
def default_hparams(config_path, name='default'):
    """Load the YAML config and attach PixFlowNet-specific default values."""
    params = YParams(config_path, name)
    defaults = {
        'separable_conv': False,  # use separable convs in generator/decoder
        'ngf': 64,                # generator base filter count
        'ndf': 48,                # discriminator base filter count
        'l1_weight': 500.0,       # weight of the L1 reconstruction term
        'gan_weight': 1.0,        # weight of the adversarial term
    }
    for key, value in defaults.items():
        params.add_hparam(key, value)
    params.training['learning_rate'] = 0.0003
    params.training['beta1'] = 0.5
    params.training['decay_rate'] = 0.999
    return params
@property
def params(self):
    # Read-only access to the hyper-parameter set loaded in __init__.
    return self.__params
def set_params(self, params):
    """Cache training and architecture hyper-parameters as instance attributes."""
    training = params.training
    self.learning_rate = training['learning_rate']
    self.beta1 = training['beta1']
    self.decay_rate = training['decay_rate']
    self.decay_steps = training['decay_steps']
    # Architecture / loss weights copied straight off the params object.
    for attr in ('batch_size', 'separable_conv', 'ngf', 'ndf',
                 'l1_weight', 'gan_weight'):
        setattr(self, attr, getattr(params, attr))
def build_network(self, inputs, fg_inputs, trainable=True):
    """Build the PixFlowNet TF-1.x graph.

    A generator encodes the foreground frame and the feature-space difference
    of the two input frames, refines both with residual stacks, and decodes an
    RGBA output. When `trainable`, dropout is enabled inside the residual
    blocks and paired real/fake discriminators (shared variables) are built.

    Args:
        inputs: image tensor; channels :3 and 3: are used as two RGB frames.
        fg_inputs: foreground tensor; channels :3 feed the encoder, channels
            3: are the discriminator's "real" target.
        trainable: enables dropout and the discriminator branches.

    Returns:
        dict with 'Outputs' and 'Alphas', plus 'Predict_real'/'Predict_fake'
        when trainable.
    """
    def discrim_conv(batch_input, out_channels, stride):
        # 4x4 "valid" conv on a 1-pixel zero-padded input.
        padded_input = tf.pad(batch_input, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="CONSTANT")
        return tf.layers.conv2d(padded_input, out_channels, kernel_size=4, strides=(stride, stride), padding="valid",
                                kernel_initializer=tf.random_normal_initializer(0, 0.02))

    def gen_conv(batch_input, out_channels, kernel_size=4):
        # [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]
        initializer = tf.random_normal_initializer(0, 0.02)
        if self.separable_conv:
            return tf.layers.separable_conv2d(batch_input, out_channels, kernel_size=kernel_size, strides=(2, 2), padding="same",
                                              depthwise_initializer=initializer, pointwise_initializer=initializer)
        else:
            return tf.layers.conv2d(batch_input, out_channels, kernel_size=kernel_size, strides=(2, 2), padding="same",
                                    kernel_initializer=initializer)

    def gen_deconv(batch_input, out_channels, kernel_size=4):
        # [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]
        initializer = tf.random_normal_initializer(0, 0.02)
        if self.separable_conv:
            # Nearest-neighbor upsample followed by a stride-1 separable conv.
            _b, h, w, _c = batch_input.shape
            resized_input = tf.image.resize_images(batch_input, [h * 2, w * 2], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            return tf.layers.separable_conv2d(resized_input, out_channels, kernel_size=kernel_size, strides=(1, 1), padding="same",
                                              depthwise_initializer=initializer, pointwise_initializer=initializer)
        else:
            return tf.layers.conv2d_transpose(batch_input, out_channels, kernel_size=kernel_size, strides=(2, 2), padding="same",
                                              kernel_initializer=initializer)

    def lrelu(x, a):
        with tf.name_scope("lrelu"):
            # adding these together creates the leak part and linear part
            # then cancels them out by subtracting/adding an absolute value term
            # leak: a*x/2 - a*abs(x)/2
            # linear: x/2 + abs(x)/2
            # this block looks like it has 2 inputs on the graph unless we do this
            x = tf.identity(x)
            return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)

    def batchnorm(inputs):
        return tf.layers.batch_normalization(inputs, axis=3, epsilon=1e-5, momentum=0.1, training=True,
                                             gamma_initializer=tf.random_normal_initializer(1.0, 0.02))

    def resnet(batch_input, out_channels):
        # Residual block: conv-BN-lrelu(-dropout)-conv-BN plus identity skip.
        # NOTE: the skip requires out_channels == input channel count.
        initializer = tf.random_normal_initializer(0, 0.02)
        convolved = tf.layers.conv2d(batch_input, out_channels, kernel_size=3, strides=(1, 1), padding="same",
                                     kernel_initializer=initializer)
        normalized = batchnorm(convolved)
        rectified = lrelu(normalized, 0.2)
        if(trainable):
            # Dropout only at training time.
            rectified = tf.nn.dropout(rectified, keep_prob=0.5)
        convolved = tf.layers.conv2d(rectified, out_channels, kernel_size=3, strides=(1, 1), padding="same",
                                     kernel_initializer=initializer)
        normalized = batchnorm(convolved)
        return batch_input + normalized

    def encoder_net(batch_input):
        # Four stride-2 conv stages; returns the deepest feature map.
        layers = []
        # encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]
        with tf.variable_scope("encoder_conv"):
            output = gen_conv(batch_input, self.ngf, kernel_size=7)
            layers.append(output)
        layer_specs = [
            self.ngf * 2, # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
            self.ngf * 4, # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
            self.ngf * 8, # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]
        ]
        for out_channels in layer_specs:
            with tf.variable_scope("encoder_%d" % (len(layers))):
                rectified = lrelu(layers[-1], 0.2)
                # [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]
                convolved = gen_conv(rectified, out_channels)
                output = batchnorm(convolved)
                layers.append(output)
        return layers[-1]

    def diffnet(batch_input):
        # Same topology as encoder_net, under its own variable scopes; used
        # (with reuse) on both input frames to get comparable features.
        layers = []
        with tf.variable_scope("diff_conv"):
            output = gen_conv(batch_input, self.ngf, kernel_size=7)
            layers.append(output)
        layer_specs = [
            self.ngf * 2, # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
            self.ngf * 4, # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
            self.ngf * 8, # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]
        ]
        for out_channels in layer_specs:
            with tf.variable_scope("diff_%d" % (len(layers))):
                rectified = lrelu(layers[-1], 0.2)
                # [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]
                convolved = gen_conv(rectified, out_channels)
                output = batchnorm(convolved)
                layers.append(output)
        return layers[-1]

    def decoder_net(batch_input, generator_outputs_channels):
        # Two residual blocks, three stride-2 deconvs, then a final tanh
        # deconv back to image space.
        layers = [batch_input]
        layer_specs = [
            self.ngf * 8, # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]
            self.ngf * 8, # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]
        ]
        for out_channels in layer_specs:
            with tf.variable_scope("post_resnet_%d" % (len(layers))):
                output = resnet(layers[-1], out_channels)
                layers.append(output)
        layer_specs = [
            (self.ngf * 8), # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]
            (self.ngf * 4), # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]
            (self.ngf * 2), # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]
        ]
        for decoder_layer, out_channels in enumerate(layer_specs):
            with tf.variable_scope("decoder_%d" % (decoder_layer)):
                rectified = tf.nn.relu(layers[-1])
                # [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]
                output = gen_deconv(rectified, out_channels)
                output = batchnorm(output)
                layers.append(output)
        # decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]
        with tf.variable_scope("final"):
            rectified = tf.nn.relu(layers[-1])
            output = gen_deconv(rectified, generator_outputs_channels, kernel_size=7)
            output = tf.tanh(output)
        return output

    def create_discriminator(discrim_inputs, discrim_targets):
        # PatchGAN-style discriminator over the channel-concatenated pair.
        n_layers = 3
        layers = []
        # 2x [batch, height, width, in_channels] => [batch, height, width, in_channels * 2]
        input = tf.concat([discrim_inputs, discrim_targets], axis=3)
        # layer_1: [batch, 256, 256, in_channels * 2] => [batch, 128, 128, ndf]
        with tf.variable_scope("layer_1"):
            convolved = discrim_conv(input, self.ndf, stride=2)
            rectified = lrelu(convolved, 0.2)
            layers.append(rectified)
        # layer_2: [batch, 128, 128, ndf] => [batch, 64, 64, ndf * 2]
        # layer_3: [batch, 64, 64, ndf * 2] => [batch, 32, 32, ndf * 4]
        # layer_4: [batch, 32, 32, ndf * 4] => [batch, 31, 31, ndf * 8]
        for i in range(n_layers):
            with tf.variable_scope("layer_%d" % (len(layers) + 1)):
                out_channels = self.ndf * min(2 ** (i + 1), 8)
                stride = 1 if i == n_layers - 1 else 2 # last layer here has stride 1
                convolved = discrim_conv(layers[-1], out_channels, stride=stride)
                normalized = batchnorm(convolved)
                rectified = lrelu(normalized, 0.2)
                layers.append(rectified)
        # layer_5: [batch, 31, 31, ndf * 8] => [batch, 30, 30, 1]
        with tf.variable_scope("layer_%d" % (len(layers) + 1)):
            convolved = discrim_conv(rectified, out_channels=1, stride=1)
            output = tf.sigmoid(convolved)
            layers.append(output)
        return layers[-1]

    def create_generator(generator_inputs, generator_fg_inputs, generator_outputs_channels):
        # Encode the foreground frame; compute the feature-space difference of
        # the two input frames with a shared-weight diffnet.
        with tf.variable_scope("encoder_net"):
            encode_feat = encoder_net(generator_fg_inputs[..., :3])
        with tf.variable_scope("diffnet"):
            feat0 = diffnet(generator_inputs[..., :3])
        with tf.variable_scope("diffnet", reuse=True):
            feat1 = diffnet(generator_inputs[..., 3:])
        diff_feat = feat1 - feat0
        pre_resnet_layers = [encode_feat]
        layer_specs = [
            self.ngf * 8, # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
            self.ngf * 8, # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
        ]
        for out_channels in layer_specs:
            with tf.variable_scope("pre_resnet_%d" % (len(pre_resnet_layers))):
                output = resnet(pre_resnet_layers[-1], out_channels)
                pre_resnet_layers.append(output)
        diff_resnet_layers = [diff_feat]
        layer_specs = [
            self.ngf * 8, # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
            self.ngf * 8, # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
        ]
        for out_channels in layer_specs:
            with tf.variable_scope("diff_resnet_%d" % (len(diff_resnet_layers))):
                output = resnet(diff_resnet_layers[-1], out_channels)
                diff_resnet_layers.append(output)
        # Sum the two refined feature stacks and decode to an image.
        with tf.variable_scope("decoder_net"):
            added_layers = pre_resnet_layers[-1] + diff_resnet_layers[-1]
            output = decoder_net(added_layers, generator_outputs_channels)
        return output

    nodes = {}
    with tf.variable_scope("generator"):
        output = create_generator(inputs, fg_inputs, generator_outputs_channels=4)
        rgb = output[..., :3]
        # Map the alpha channel from tanh range [-1, 1] to [0, 1].
        alpha = (output[..., 3:]+1)/2
        alpha = tf.tile(alpha, [1,1,1,3])
        # output = rgb * alpha + targets[..., 3:] * (1 - alpha)
        # Composite onto a constant -1 background: alpha*rgb + (1-alpha)*(-1).
        output = rgb * alpha + alpha - 1
        nodes.update({'Outputs': output})
        nodes.update({'Alphas': alpha})

    if(trainable):
        # create two copies of discriminator, one for real pairs and one for fake pairs
        # they share the same underlying variables
        with tf.name_scope("real_discriminator"):
            with tf.variable_scope("discriminator"):
                # inputs = tf.reshape(inputs, [self.batch_size, inputs.shape[1], inputs.shape[2], 2, 3])
                # inputs = tf.transpose(inputs, [3, 0, 1, 2, 4])
                # inputs = tf.reshape(inputs, [-1, inputs.shape[2], inputs.shape[3], 3])
                # foregrounds = tf.reshape(fg_inputs, [self.batch_size, fg_inputs.shape[1], fg_inputs.shape[2], 2, 3])
                # foregrounds = tf.transpose(foregrounds, [3, 0, 1, 2, 4])
                # foregrounds = tf.reshape(foregrounds, [-1, foregrounds.shape[2], foregrounds.shape[3], 3])
                predict_real = create_discriminator(inputs[..., 3:], fg_inputs[..., 3:])
                nodes.update({'Predict_real': predict_real})
        with tf.name_scope("fake_discriminator"):
            with tf.variable_scope("discriminator", reuse=True):
                # predict_fake = create_discriminator(inputs, tf.concat([fg_inputs[..., 3:], output], axis=0))
                predict_fake = create_discriminator(inputs[..., 3:], output)
                nodes.update({'Predict_fake': predict_fake})
    return nodes
def add_cost_function(self, predict_real, predict_fake, fg_inputs, outputs, alphas, masks):
nodes = {}
with tf.name_scope("discriminator_loss"):
# minimizing -tf.log will try to get inputs to 1
# predict_real => 1
# predict_fake => 0
discrim_loss = tf.reduce_mean(-(tf.log(predict_real + 1e-12) + tf.log(1 - predict_fake + 1e-12)))
nodes.update({'Discrim_loss': discrim_loss})
with tf.name_scope("generator_loss"):
# predict_fake => 1
# abs(targets - | |
"""
Classes to simplify and standardize the process of drawing samples from the posterior distribution in
Bayesian inference problems.
"""
import numpy as np
import scipy as sp
class Quad_Sampler(object):
    """
    Class for drawing samples from an arbitrary one-dimensional probability
    distribution using numerical integration and interpolation. In general this
    will be superior to more sophisticated sampling methods for 1D problems.

    Assumes that priors are uniform.

    Args:
        ln_likelihood: Function which takes the independent variable x as its
            first argument and returns the log of the likelihood function,
            p(d|x,I), up to a constant. May take other *args or **kwargs.
        priors: List-type of the form [a,b], where a and b define the lower and
            upper bounds of the uniform prior p(x|I).

    Optional:
        vect: (bool) Set to True if the log-likelihood accepts a vectorized input.
    """

    def __init__(self, ln_likelihood, priors, vect=False):
        self._ln_likelihood = ln_likelihood
        self._a, self._b = priors
        self._vect = vect

        # Defaults; populated by fit().
        self.ln_Z = np.nan
        self.mean = np.nan
        self.std = np.nan

    def fit(self, n_pts=200, args=(), **kwargs):
        """
        Perform the fit.

        Optional:
            n_pts: (int) Number of evenly-spaced points over which to compute the probability.
            args: (tuple) Additional positional arguments passed on to the likelihood function.
            **kwargs: All other keywords are passed on to the likelihood function.
        """
        # Explicit submodule import: `import scipy` alone does not guarantee
        # scipy.integrate is loaded, and cumtrapz was renamed to
        # cumulative_trapezoid (cumtrapz removed in SciPy >= 1.14).
        from scipy import integrate
        cumtrapz = getattr(integrate, 'cumulative_trapezoid', None)
        if cumtrapz is None:
            cumtrapz = integrate.cumtrapz

        # Evaluate the log-pdf on a uniform grid.
        self.xs = np.linspace(self._a, self._b, num=n_pts)
        if self._vect:
            self.ln_pdf = self._ln_likelihood(self.xs, *args, **kwargs)
        else:
            self.ln_pdf = np.array([self._ln_likelihood(x, *args, **kwargs) for x in self.xs])

        # Rescale by the maximum to avoid overflow when exponentiating.
        ln_C = np.amax(self.ln_pdf)
        pdf_scaled = np.exp(self.ln_pdf - ln_C)

        # Compute the evidence, normalize the pdf, and tabulate the cdf.
        Z_scaled = np.trapz(pdf_scaled, x=self.xs)
        self.ln_Z = np.log(Z_scaled) + ln_C
        self.pdf = pdf_scaled / Z_scaled
        self.cdf = cumtrapz(self.pdf, x=self.xs, initial=0)

        # Summary statistics - assuming an approximately normal distribution.
        samples = self.get_samples(1000)
        self.mean = np.mean(samples)
        self.std = np.std(samples)

    def get_samples(self, n_samples):
        """
        Draw n_samples by inverse-transform sampling of the tabulated CDF.
        """
        u_samp = np.random.rand(n_samples)
        return np.interp(u_samp, self.cdf, self.xs)
class Quad_Sampler_ND(object):
    """
    Class for drawing samples from N one-dimensional probability distributions
    using numerical integration and interpolation, treating each dimension
    independently (the joint likelihood is assumed separable: the likelihood is
    evaluated one scalar coordinate at a time). Useful for problems with a low
    number of dimensions (~3) for which the likelihood can be computed quickly
    (<< 1 second).

    Assumes that priors are uniform. Does not support vectorized likelihoods.

    Args:
        ln_likelihood: Function which takes a scalar coordinate as its first
            argument and returns the log of the likelihood up to a constant.
            May take other *args or **kwargs.
        ndim: (int) Number of dimensions.
        priors: List of tuples of the form [(a1,b1), ..., (aN,bN)] where a and b
            define the lower and upper bounds of the uniform prior per dimension.
    """

    def __init__(self, ln_likelihood, ndim, priors):
        self._ln_likelihood = ln_likelihood
        self.ndim = ndim
        self._a = np.zeros(self.ndim)
        self._b = np.zeros(self.ndim)
        for n in range(self.ndim):
            self._a[n], self._b[n] = priors[n]

        # Defaults; populated by fit().
        self.ln_Z = np.nan
        self.mean = np.nan
        self.std = np.nan

    def fit(self, n_pts=200, args=(), **kwargs):
        """
        Perform the fit, independently in each dimension.

        Optional:
            n_pts: (int) Number of evenly-spaced points per dimension.
            args: (tuple) Additional positional arguments passed on to the likelihood.
            **kwargs: All other keywords are passed on to the likelihood.
        """
        # See Quad_Sampler.fit for why the explicit submodule import/fallback.
        from scipy import integrate
        cumtrapz = getattr(integrate, 'cumulative_trapezoid', None)
        if cumtrapz is None:
            cumtrapz = integrate.cumtrapz

        # Construct the evaluation grid, one row per dimension.
        # (The original looped over `slef.ndim` — a NameError.)
        self.xs = np.zeros([self.ndim, n_pts])
        for n in range(self.ndim):
            self.xs[n, :] = np.linspace(self._a[n], self._b[n], num=n_pts)

        # Evaluate the log-pdf along each dimension.
        self.ln_pdf = np.zeros([self.ndim, n_pts])
        for n in range(self.ndim):
            self.ln_pdf[n] = np.array([self._ln_likelihood(x, *args, **kwargs) for x in self.xs[n]])

        # Rescale with the global maximum to avoid overflow.
        ln_C = np.amax(self.ln_pdf)
        pdf_scaled = np.exp(self.ln_pdf - ln_C)

        # Per-dimension evidence; normalize each row separately (the original
        # divided (ndim, n_pts) by (ndim,), which broadcasts incorrectly).
        Z_scaled = np.trapz(pdf_scaled, x=self.xs, axis=-1)
        self.ln_Z = np.log(Z_scaled) + ln_C
        self.pdf = pdf_scaled / Z_scaled[:, None]
        self.cdf = cumtrapz(self.pdf, x=self.xs, initial=0)

        # Per-dimension summary statistics, assuming ~normal posteriors.
        samples = self.get_samples(1000)
        self.mean = np.mean(samples, axis=0)
        self.std = np.std(samples, axis=0)

    def get_samples(self, n_samples):
        """
        Draw samples of shape (n_samples, ndim) by inverse-transform sampling
        of each dimension's tabulated CDF.
        """
        u_samp = np.random.rand(n_samples, self.ndim)
        out = np.empty((n_samples, self.ndim))
        for n in range(self.ndim):
            out[:, n] = np.interp(u_samp[:, n], self.cdf[n], self.xs[n])
        return out
# ------------------------------------------------ MESXR Sampler ------------------------------------------------
import time
import emcee
import mst_ida.models.mesxr3 as m3
import mst_ida.data.mesxr as mesxr
import mst_ida.analysis.ida as ida
import mst_ida.analysis.emissivity as em
import mst_ida.models.base.response as rsp
from mst_ida.utilities.functions import identify_outliers
from mst_ida.models.base.geometry import flux_coords, sunflower_points
# Default uniform prior bounds keyed by fitting method; consumed by
# MESXR_Emiss_Sampler.ln_prior_alpha / get_pos0. For 'alpha' the tuples are
# (Xem bounds, alpha bounds, beta bounds).
default_priors = {
    'alpha':((10, 14), (0.1,18), (0.1,18))
}
class MESXR_Emiss_Sampler(object):
"""
"""
def __init__(self, shot, frame, flux=None, Ec_ref=3.0, priors=None, indices=np.arange(5,55), Ew=300.,
             method='alpha', nwalkers=32, center=True, delta_a=0.06, delta_h=0.01, manual=None):
    """Set up the per-threshold emcee samplers for ME-SXR emissivity fitting.

    Data can come either from `manual` (dict with 'data', 'sigmas',
    'impact_p', 'thresholds') or be loaded via mesxr.get_8c_data for
    (shot, frame).

    NOTE(review): the `indices` argument is ignored — self.indices is
    hard-coded to np.arange(6, 55) below; confirm which is intended.
    NOTE(review): np.arange(5,55) as a default argument is a shared mutable
    ndarray; it is never mutated here, but treat it as read-only.
    """
    # Load the data
    self.shot = shot
    if manual is not None:
        self.mesxr_data = manual['data']
        self.mesxr_sigmas = manual['sigmas']
        self.signed_ps = manual['impact_p']
        self.thresholds = manual['thresholds']
    else:
        self.frame = frame
        self.mesxr_data, self.mesxr_sigmas, self.signed_ps, self.thresholds = mesxr.get_8c_data(self.shot, self.frame, center=center)
    # Model and geometry
    if flux is None:
        self.flux = flux_coords(delta_a=delta_a, delta_h=delta_h)
    else:
        self.flux = flux
    self.method = method
    self.p3det = m3.MESXR(shot=self.shot, center=center)
    # One geometry matrix / flux-coordinate set per threshold energy.
    self.gij_set = {}
    self.ss_set = {}
    for Ec in self.thresholds:
        self.gij_set[Ec], self.ss_set[Ec] = em.get_geometry_matrix(self.flux, self.p3det)
    # Include specified data points
    self.indices = np.arange(6,55)
    # Constant Poisson-likelihood term: -sum(ln Gamma(counts + 1)).
    z = {Ec:np.maximum(self.mesxr_data[Ec][self.indices]+1, 1) for Ec in self.thresholds}
    self.ln_data_fact = {Ec:-np.sum(sp.special.loggamma(z[Ec])) for Ec in self.thresholds}
    # Set up the priors
    if priors is None:
        self.priors = default_priors[self.method]
    else:
        self.priors = priors
    # Sampler parameters
    self.nwalkers = nwalkers
    self.pos0 = self.get_pos0()
    self.ndim = self.pos0.shape[1]
    # Set up the samplers
    moves = [(emcee.moves.DEMove(), 0.8), (emcee.moves.DESnookerMove(), 0.2),]
    self.samplers = {}
    for index, Ec in enumerate(self.thresholds):
        self.samplers[Ec] = emcee.EnsembleSampler(self.nwalkers, self.ndim, self.ln_prob, moves=moves, kwargs={'Ec':Ec})
    # Set up ratio curves
    self.Ew = Ew
    self.Ec_ref = Ec_ref
    # Tabulate the emissivity ratio vs Te for every non-reference threshold;
    # inverted later by Te_from_R via interpolation.
    self.temps = np.linspace(10, 3000, num=6000)
    self.ratios= {}
    for Ec in self.thresholds:
        if Ec != self.Ec_ref:
            self.ratios[Ec] = np.array([self.model_ratio(Te,Ec*1000.) for Te in self.temps])
def fit(self, nsteps=10000, remove_outliers=True, resume=False, burn_step=3000, n_samples=5000, progress=True):
    """Run (or resume) MCMC for every threshold and post-process the chains.

    Args:
        nsteps: MCMC steps to run; None skips sampling and only reprocesses
            the chains already held by the samplers.
        remove_outliers: prune samples from poorly-converged walkers using
            identify_outliers.
        resume: continue each sampler from its previous state (emcee resumes
            when the initial state passed to run_mcmc is None).
        burn_step: number of leading steps discarded as burn-in.
        n_samples: number of emissivity-profile samples to draw per threshold.
        progress: show emcee's progress bar.
    """
    # MCMC sampling
    if nsteps is not None:
        for Ec in self.thresholds:
            if not resume:
                #print('Beginning sampling for Ec = ' + str(Ec) + ' keV')
                # Brief pause, presumably so console output does not
                # interleave with the progress bar — cosmetic only.
                time.sleep(1)
                self.samplers[Ec].run_mcmc(self.pos0, nsteps, progress=progress)
            else:
                #print('Resuming sampling for Ec = ' + str(Ec) + ' keV')
                time.sleep(1)
                self.samplers[Ec].run_mcmc(None, nsteps, progress=progress)
    self.samples = {Ec:self.samplers[Ec].get_chain(discard=burn_step, flat=True) for Ec in self.thresholds}
    # Remove points from poorly-converged walkers
    if remove_outliers:
        for Ec in self.thresholds:
            self.samples[Ec] = identify_outliers(self.samples[Ec])
    # Save the average fit parameters
    self.theta_avg = {Ec:np.average(self.samples[Ec], axis=0) for Ec in self.thresholds}
    # Get the emissivity profile samples
    self.n_samples = n_samples
    self.emiss_samples = {Ec:self.get_emiss_samples(self.samples[Ec], Ec=Ec, n_samples=n_samples) for Ec in self.thresholds}
    self.emiss_CIs = {Ec:ida.profile_confidence(self.emiss_samples[Ec]) for Ec in self.thresholds}
def get_Te_samples(self, slim=0.7, include=(4.0, 5.0)):
    """Infer electron-temperature profile samples from emissivity ratios.

    Args:
        slim: outermost normalized flux coordinate s to include.
        include: iterable of threshold energies (keV) whose emissivity ratio
            to the reference threshold is converted to Te and averaged.
            (Default changed from a mutable list to an equivalent tuple.)

    Returns:
        (s_vals, Te_avg_prof_samples, Te_avg_CI): flux coordinates, per-sample
        averaged Te profiles, and their confidence intervals.
    """
    ss = self.ss_set[self.Ec_ref].ravel()
    sn = np.argmin(np.abs(ss - slim))
    s_vals = ss[:sn]
    n_include = len(include)
    Te_avg_prof_samples = np.zeros([self.n_samples, sn])
    for s_index in range(sn):
        # Ratio of each channel's emissivity to the reference channel,
        # inverted to a temperature via the tabulated ratio curves.
        ratios = {Ec:self.emiss_samples[Ec][:,s_index]/self.emiss_samples[self.Ec_ref][:,s_index] for Ec in include}
        Te_samples = {Ec:self.Te_from_R(ratios[Ec], Ec=Ec) for Ec in include}
        Te_avg_prof_samples[:,s_index] = sum([Te_samples[Ec] for Ec in include]) / n_include
    Te_avg_CI = ida.profile_confidence(Te_avg_prof_samples)
    return s_vals, Te_avg_prof_samples, Te_avg_CI
# ----------------------------------------------- Emissivity Model -----------------------------------------------
def emiss_model_alpha(self, ss, Xem, alpha, beta):
return (10.**Xem)*(1 - ss**alpha)**beta
def emiss_model(self, *args):
if self.method == 'alpha':
return self.emiss_model_alpha(*args)
else:
raise KeyError('Please select a valid fitting method.')
def get_model(self, theta, Ec=3.0):
gij = self.gij_set[Ec]
ss = self.ss_set[Ec]
emiss = self.emiss_model(ss, *theta)
bright = np.dot(gij, emiss).squeeze()
return self.p3det.etendue[Ec]*bright
# ----------------------------------------------- Bayesian Methods -----------------------------------------------
def ln_prob(self, theta, Ec=3.0):
lp = self.ln_prior(theta)
if np.isfinite(lp):
return lp + self.ln_likelihood(theta, Ec=Ec)
else:
return -np.inf
def ln_likelihood(self, theta, Ec=3.0):
data = self.mesxr_data[Ec][self.indices]
model = self.get_model(theta, Ec=Ec)[self.indices]
return -np.sum(model - data*np.log(model)) + self.ln_data_fact[Ec]
def ln_prior(self, theta):
if self.method == 'alpha':
return self.ln_prior_alpha(*theta)
else:
raise KeyError('Method not recognized.')
def ln_prior_alpha(self, Xem, alpha, beta):
X_min, X_max = self.priors[0]
al_min, al_max = self.priors[1]
bt_min, bt_max = self.priors[2]
if (X_min < Xem < X_max) and (al_min < alpha < al_max) and (bt_min < beta < bt_max):
return 0.0
else:
return - np.inf
def get_pos0(self):
if self.method == 'alpha':
X_min, X_max = self.priors[0]
al_min, al_max = self.priors[1]
bt_min, bt_max = self.priors[2]
pos0 = np.zeros([self.nwalkers, 3])
pos0[:,0] = (X_max - X_min)*np.random.random(size=self.nwalkers) + X_min
pos0[:,1] = (al_max - al_min)*np.random.random(size=self.nwalkers) + al_min
pos0[:,2] = (bt_max - bt_min)*np.random.random(size=self.nwalkers) + bt_min
return pos0
# ----------------------------------------------- Ratio Model -----------------------------------------------
def Te_from_R(self, rs, Ec=4.0):
if Ec > self.Ec_ref:
return np.interp(rs, self.ratios[Ec], self.temps)
else:
# Reverse to avoid interpolation error
return np.interp(rs, np.flip(self.ratios[Ec]), np.flip(self.temps))
def get_en_int(self, Te, Ec):
"""
Model the | |
# <gh_stars>0  (repository-scrape artifact; commented out — not valid Python)
"""
partitioner.py
"""
from copy import copy
import numpy as np
import psi4
from dataclasses import dataclass
from pydantic import validator, BaseModel
from dask.distributed import Client
from ..inverter import Inverter
from ..grid.grider import Grider
from ..fragment import Fragment
from .pdft_scf import pdft_scf
from .energy import energy
from .partition_energy import partition_energy
from .ep_kinetic import ep_kinetic
from opt_einsum import contract
# Partition Methods
from .partition_potential import partition_potential
from .vp_kinetic import vp_kinetic
from .vp_hxc import vp_hxc
# from .util import get_from_grid, basis_to_grid #eval_vh
class PartitionerOptions(BaseModel):
    """Validated option set for Partitioner.

    Keys are matched case-insensitively by Partitioner.__init__ against the
    fields declared here. The duplicate `hxc_type` declaration in the original
    (it appeared twice with the same default) has been removed.
    """
    vp_type : str = 'component'     # how the partition potential is assembled
    hxc_type : str = 'exact'        # Hartree-exchange-correlation treatment
    kinetic_type : str = 'inversion'
    inv_method : str = 'wuyang'     # density-to-potential inversion scheme
    opt_method : str = 'bfgs'
    k_family : str = 'gga'
    plotting_grid : str = 'fine'
    ke_func_id : int = 5
    ke_param : dict = {}            # pydantic copies mutable defaults per instance
    verbose : bool = True
    interacting : bool = True
@dataclass
class bucket:
    # Generic attribute bag; used by _scf to return per-fragment SCF results.
    pass
@dataclass
class V:
    # Container for potentials evaluated on the integration grid (e.g. .vnuc).
    pass
@dataclass
class Vnm:
    # Container for potentials in the AO-matrix (n,m) representation.
    pass
@dataclass
class Plotter:
    # Container for quantities evaluated on the plotting grid.
    pass
@dataclass
class E:
    # Container for energy components (kinetic, external, Hxc, total, ...).
    pass
def _scf(mol_string,
         method='svwn',
         basis='cc-pvdz',
         potential=None,
         restricted="UKS"):
    """Run a psi4 SCF calculation for a single fragment and pack the results.

    Args:
        mol_string: psi4 geometry string for the fragment.
        method: method/functional label for psi4's SCF wavefunction factory.
        basis: basis-set name.
        potential: optional pair of array-like embedding-potential matrices
            (alpha/beta), passed to the SCF iterations as vp_matrix.
            NOTE(review): stock psi4 wfn.iterations() takes no vp_matrix
            argument — this presumably relies on a patched psi4; confirm.
        restricted: reference type string (e.g. "UKS").

    Returns:
        bucket holding geometry, electron counts, density/orbital matrices,
        kinetic (T) and external-potential (V) AO matrices, eigenvalues, and
        an `energies` dict of SCF energy components.
    """
    frag_info = bucket()
    mol = psi4.geometry(mol_string)
    wfn_base = psi4.core.Wavefunction.build(mol, basis)
    wfn = psi4.proc.scf_wavefunction_factory(method, wfn_base, restricted)
    wfn.initialize()
    if potential is not None:
        # Wrap the raw arrays as psi4 matrices before handing them to the SCF.
        potential = [psi4.core.Matrix.from_array(i) for i in potential]
        wfn.iterations(vp_matrix=potential)
    else:
        wfn.iterations()
    wfn.finalize_energy()
    # One-electron integrals in the converged basis.
    basis_set = wfn.basisset()
    mints = psi4.core.MintsHelper(basis_set)
    T = mints.ao_kinetic()
    V = mints.ao_potential()  # local name shadows the module-level dataclass V
    # if potential is not None:
    #     exc = generate_exc( mol_string, basis, wfn.Da().np )
    #Paste results to pdf_fragment
    energies = {"enuc" : wfn.get_energies('Nuclear'),
                "e1" : wfn.get_energies('One-Electron'),
                "e2" : wfn.get_energies('Two-Electron'),
                "exc" : wfn.get_energies('XC'),
                "total": wfn.get_energies('Total Energy')
                }
    print("Initial Energy:", energies["total"])
    # if potential is not None:
    #     pass
    #     # energies["exc"] = exc
    #     # full_matrix = wfn.Da().np + wfn.Db().np
    #     # full_matrix = psi4.core.Matrix.from_array( full_matrix )
    #     # p = psi4.core.Matrix.from_array( (potential[0] + potential[1])/2.0 )
    #     # # print("How much am I removing from Core matrix", (p0.vector_dot(full_matrix) + p1.vector_dot( full_matrix ) ))
    #     # energies["e1"] -= (p.vector_dot(full_matrix) )
    frag_info.geometry = mol.geometry().np
    frag_info.natoms = mol.natom()
    frag_info.nalpha = wfn.nalpha()
    frag_info.nbeta = wfn.nbeta()
    frag_info.mol_str = mol_string
    frag_info.Da = wfn.Da().np
    frag_info.Db = wfn.Db().np
    frag_info.Ca = wfn.Ca().np
    frag_info.Cb = wfn.Cb().np
    frag_info.Va = wfn.Va().np
    frag_info.Vb = wfn.Vb().np
    frag_info.T = T.np
    frag_info.V = V.np
    frag_info.Ca_occ = wfn.Ca_subset("AO", "OCC").np
    frag_info.Cb_occ = wfn.Cb_subset("AO", "OCC").np
    frag_info.Ca_vir = wfn.Ca_subset("AO", "VIR").np
    frag_info.Cb_vir = wfn.Cb_subset("AO", "VIR").np
    frag_info.eig_a = wfn.epsilon_a().np
    frag_info.eig_b = wfn.epsilon_b().np
    frag_info.energies = energies
    frag_info.energy = wfn.get_energies('Total Energy')
    return frag_info
class Partitioner(Grider):
def __init__(self, basis, method_str, frags_str=[], mol_str=None, ref=1, plot_things=True, optPart={}):
    """Set up a PDFT partition calculation.

    Args:
        basis: basis-set name.
        method_str: psi4 method/functional label used for the full molecule
            (fragments receive it via generate_fragments).
        frags_str: list of psi4 geometry strings, one per fragment.
        mol_str: geometry string for the full molecule.
            NOTE(review): despite the None default, psi4.geometry(self.mol_str)
            below runs unconditionally, so a molecule string is effectively
            required — confirm intended usage.
        ref: 1 for a shared alpha/beta (restricted-style) density, 2 for
            separate alpha/beta densities.
        plot_things: propagated to fragments and the inverter.
        optPart: dict of PartitionerOptions overrides (keys case-insensitive).

    NOTE(review): frags_str=[] and optPart={} are mutable defaults; neither is
    mutated here (optPart is rebuilt, frags_str only stored), but None-defaults
    would be safer.
    """
    # Validate options
    optPart = {k.lower(): v for k, v in optPart.items()}
    for i in optPart.keys():
        if i not in PartitionerOptions().dict().keys():
            raise ValueError(f"{i} is not a valid option for Partitioner")
    optPart = PartitionerOptions(**optPart)
    self.optPart = optPart
    self.basis_str = basis
    self.mol_str = mol_str
    self.method_str = method_str
    self.frags_str = frags_str
    self.frags = None
    self.ref = ref
    self.nfrags = len( frags_str )
    self.ens = False  # ensemble-fragment handling; not implemented yet
    # Data buckets
    self.V = V()
    self.Vnm = Vnm()
    self.E = E()
    self.Plotter = Plotter()
    #Client for Paralellization on fragments
    # self.client = Client()
    # Full Molecule
    self.mol = psi4.geometry(self.mol_str)
    _, mol_wfn = psi4.energy(self.method_str+'/'+self.basis_str, molecule=self.mol, return_wfn=True)
    self.basis = mol_wfn.basisset()
    self.nbf = self.basis.nbf()
    self.mints = psi4.core.MintsHelper( self.basis )
    # Symmetric orthogonalizer A = S^(-1/2) from the AO overlap matrix.
    A = self.mints.ao_overlap()
    A.power( -0.5, 1e-16 )
    self.A = np.array(A)
    self.Tnm0 = self.mints.ao_kinetic()
    self.Vnm0 = self.mints.ao_potential()
    self.I = np.array( self.mints.ao_eri() )
    # self.basis = psi4.core.BasisSet.build( self.mol, key='BASIS', target=self.basis_str)
    # Full molecule
    self.molE = E()
    self.molE.Evxc = mol_wfn.get_energies("XC")
    self.molE.Evha = mol_wfn.get_energies("Two-Electron")
    self.molE.Etot = mol_wfn.get_energies("Total Energy")
    self.molE.Enuc = mol_wfn.get_energies("Nuclear")
    self.molE.Ekin = np.sum( self.Tnm0 * ( np.array(mol_wfn.Da()) + np.array(mol_wfn.Db()) ) )
    self.molE.Eext = np.sum( self.Vnm0 * ( np.array(mol_wfn.Da()) + np.array(mol_wfn.Db()) ) )
    # Sanity check: the stored components must add up to the SCF total energy.
    mole = self.molE
    assert np.isclose( mole.Evxc + mole.Evha + mole.Enuc + mole.Ekin + mole.Eext, self.molE.Etot)
    self.molSCF = V()
    self.molSCF.da = np.array(mol_wfn.Da())
    self.molSCF.db = np.array(mol_wfn.Db())
    self.molSCF.ca = np.array( mol_wfn.Ca_subset("AO", "ALL"))
    self.molSCF.cb = np.array( mol_wfn.Cb_subset("AO", "ALL"))
    # Grider & Plotting
    self.grid = Grider(self.mol_str, self.basis_str, self.ref, self.optPart.plotting_grid)
    self.plot_things = plot_things
    # Generate fragments
    self.generate_fragments(self.optPart.plotting_grid)
    self.calc_nuclear_potential()
    # Inverter
    if mol_str is not None:
        self.inverter = Inverter(self.mol, self.basis, self.ref, self.frags, self.grid)
        self.inverter.plot_things = self.plot_things
        self.inverter.I = self.I
    # Full Molecule
    # self.basis = psi4.core.BasisSet.build( self.mol, "ORBITAL", self.basis_str, quiet=True )
# ----> Methods
def generate_fragments(self, plotting_grid):
    """Instantiate and SCF-converge one Fragment per fragment geometry string,
    sharing the molecular orthogonalizer (A) and ERI tensor (I) with each."""
    self.frags = []
    for geometry in self.frags_str:
        fragment = Fragment(geometry, self.basis_str, self.method_str, self.ref, plotting_grid)
        fragment.scf()
        fragment.A = self.A
        fragment.I = self.I
        self.frags.append(fragment)
    # Propagate the plotting flag to every fragment.
    for fragment in self.frags:
        fragment.plot_things = bool(self.plot_things)
def calc_protomolecule(self):
    """Accumulate the scaled fragment densities into the protomolecular
    density (self.dfa / self.dfb / self.df)."""
    # Ensemble fragments are not implemented; only the plain fragment list
    # is handled (ifrag stays unbound when self.ens is True, as before).
    if not self.ens:
        ifrag = self.frags
    else:
        pass

    # Zero-initialize the fractional densities on every fragment.
    for frag in ifrag:
        frag.da_frac = np.zeros_like(frag.da)
        if self.ref == 2:
            frag.db_frac = np.zeros_like(frag.db)
        else:
            frag.db_frac = frag.da_frac.copy()

    # Scale each fragment's density by its ensemble weight.
    for frag in ifrag:
        frag.da_frac += frag.da * frag.scale
        if self.ref == 2:
            frag.db_frac += frag.db * frag.scale
        else:
            frag.db_frac = frag.da_frac.copy()

    # Sum the fractional densities over fragments.
    self.dfa = np.zeros_like(self.frags[0].da)
    if self.ref == 2:
        self.dfb = np.zeros_like(self.frags[0].da)
    else:
        self.dfb = self.dfa.copy()
    for frag in ifrag:
        self.dfa += frag.da_frac
        if self.ref == 2:
            self.dfb += frag.db_frac
        else:
            self.dfb = self.dfa.copy()
    self.df = self.dfa + self.dfb
def calc_nuclear_potential(self):
"""
Calculate external nuclear potential
"""
vnuc = np.zeros((self.grid.npoints))
plot_vnuc = np.zeros((self.grid.plot_npoints))
vnuc_nm = np.zeros((self.nbf, self.nbf))
for ifrag in self.frags:
vnuc += ifrag.V.vnuc.copy()
plot_vnuc += ifrag.Plotter.vnuc.copy()
vnuc_nm += ifrag.Vnm.V
# Plotting Grid
self.Vnm.V = vnuc_nm.copy()
self.V.vnuc = vnuc.copy()
self.Plotter.vnuc = plot_vnuc.copy()
    def calc_Q(self):
        """
        Calculates Q functions according to PDFT.

        For each fragment: Q = scale * (fragment density) / (sum of fragment
        densities), evaluated on both the integration grid (vpot) and the
        plotting grid. Results are stored on each fragment (ifrag.Q,
        ifrag.Plotter.Q, ifrag.Plotter.d) and the total density on
        self.Plotter.df.
        """
        # Fragment densities vanish far from the nuclei, so the Q ratio can
        # hit 0/0; silence numpy's divide/invalid warnings for this method.
        np.seterr(divide='ignore', invalid='ignore')
        # Fragment density on the grid
        if self.ref == 1:
            # Restricted reference: total density is twice the alpha density.
            df = self.grid.density(grid=None, Da=self.dfa, vpot=self.grid.vpot)
            df = 2 * df
            #Plotter
            df_plotter = self.grid.density(Da=self.dfa, grid=self.grid.plot_points)
            df_plotter = 2 * df_plotter
        else:
            # Unrestricted: density() returns alpha/beta columns; sum them.
            df = self.grid.density(grid=None, Da=self.dfa, Db=self.dfb, vpot=self.grid.vpot)
            df = df[:, 0] + df[:, 1]
            #Plotter
            df_plotter = self.grid.density(Da=self.dfa, Db=self.dfb, grid=self.grid.plot_points)
            df_plotter = df_plotter[:,0] + df_plotter[:,1]
        self.Plotter.df = df_plotter
        for ifrag in self.frags:
            # Same density evaluation, per fragment.
            if self.ref == 1:
                d = self.grid.density(grid=None, Da=ifrag.da, vpot=self.grid.vpot)
                d = 2 * d
                d_plotter = self.grid.density(Da=ifrag.da, grid=self.grid.plot_points)
                d_plotter = 2 * d_plotter
            else:
                d = self.grid.density(grid=None, Da=ifrag.da, Db=ifrag.db, vpot=self.grid.vpot)
                d = d[:,0] + d[:,1]
                d_plotter = self.grid.density(Da=ifrag.da, Db=ifrag.db, grid=self.grid.plot_points)
                d_plotter = d_plotter[:,0] + d_plotter[:,1]
            ifrag.Plotter.d = d_plotter
            # Weighing function; [None,:] adds a leading axis so Q behaves
            # like a (1, npoints) array downstream.
            ifrag.Q = (ifrag.scale * d / df)[None,:]
            ifrag.Plotter.Q = (ifrag.scale * d_plotter / df_plotter)[None,:]
            # ifrag.Q = 1
            # ifrag.Plotter.Q = 1
            # Need to verify that q functions are functional.
def diagonalize(self, matrix, ndocc):
A = self.A
Fp = A.dot(matrix).dot(A)
eigvecs, Cp = np.linalg.eigh(Fp)
C = A.dot(Cp)
Cocc = C[:, :ndocc]
D = contract('pi,qi->pq', Cocc, Cocc)
return C, Cocc, D, eigvecs
def scf(self, maxiter=1):
pdft_scf(self, maxiter)
# ----> Potential Methods
def partition_potential(self):
return partition_potential(self)
def vp_kinetic(self):
vp_kinetic(self)
def vp_hxc(self):
vp_hxc(self)
# ----> Energy Methods
def energy(self):
"""
Gathers energies from all fragments
"""
energy(self)
def energy(self):
"""
Calculates the partition energy of the system
"""
partition_energy(self)
    def ep_kinetic(self):
        """
        Calculates ep_kinetic per fragment.

        Delegates to the module-level `ep_kinetic` helper (no return value).
        """
        ep_kinetic(self)
# -----------------------------> OLD PARTITION
    def generate_mints_matrices(self):
        """
        Build integral matrices via Psi4's MintsHelper for the current basis.

        Sets:
            self.S  : AO overlap matrix as a numpy array
            self.A  : S^(-1/2), the symmetric (Lowdin) orthogonalizer
                      NOTE(review): stored as a psi4 Matrix object, not a
                      numpy array - confirm callers expect that.
            self.S3 : three-center overlap integrals (squeezed)
            self.jk : reset to None
        """
        mints = psi4.core.MintsHelper( self.basis )
        self.S = mints.ao_overlap().np
        A = mints.ao_overlap()
        # Form S^(-1/2) in place; 1.e-14 is the small-eigenvalue cutoff.
        A.power(-0.5, 1.e-14)
        self.A = A
        self.S3 = np.squeeze(mints.ao_3coverlap(self.basis,self.basis,self.basis))
        self.jk = None
def scf_mol(self):
method = self.method_str
psi4.set_options({"maxiter" : 100})
ret = self.client.map( _scf, [self.mol_str], [method], [self.basis_str] )
data = [i.result() for i in ret]
self.mol = data[0]
def scf_frags(self,
vext = None,
evaluate = False):
method = self.method_str
frags = self.frags_str
method_it = [method for i in range(self.nfrags)]
basis_it = [self.basis_str for i in range(self.nfrags)]
vext_it = [vext for i in range(self.nfrags)]
assert len( method_it ) == len( basis_it ) == len( frags )
if evaluate == False:
psi4.set_options({"maxiter" : 100})
else:
print("Just evaluating")
psi4.set_options({"maxiter" : 1})
#Check wether or not there is a vext to be added for scf cycle
if vext is None:
ret = self.client.map( _scf, frags, method_it, basis_it )
else:
ret = self.client.map( _scf, frags, method_it, basis_it, vext_it, )
frag_data = [ i.result() for i in ret ]
self.frags = frag_data
    def build_auxbasis(self, aux_basis_str):
        """Build and store the auxiliary basis set from its string specification."""
        self.aux_basis = self.build_basis( aux_basis_str )
| |
('nusseed', ''),
# ('nust2', ''),
# ('nustd', ''),
# ('nzp', ''),
# ('offset', ''),
# ('pacoil', ''),
# ('pc', ''),
# ('pexsel', ''),
# ('ph_mod', ''),
("phc0", "deg"),
("phc1", "deg"), # ('phlist', ''),
# ('pknl', ''),
# ('plstep', ''),
# ('plstrt', ''),
# ('plw', ''),
# ('plwmax', ''),
# ('pparmod', ''),
# ('ppdiag', ''),
# ('ppiptyp', ''),
# ('ppmpnum', ''),
# ('ppresol', ''),
# ('pqphase', ''),
# ('pqscale', ''),
# ('pscal', ''),
# ('psign', ''),
# ('pynm', ''),
# ('pynmp', ''),
# ('recpre', ''),
# ('recprfx', ''),
# ('recsel', ''),
("reverse", ""), # ('s_dev', ''),
# ('selrec', ''),
("sf", "MHz"), # ('si', ''),
# ('sigf1', ''),
# ('sigf2', ''),
# ('sino', ''),
# ('siold', ''),
# ('solvold', ''),
# ('spectyp', ''),
# ('spincnt', ''),
# ('spnam', ''),
# ('sppex', ''),
# ('spw', ''),
# ('sreglst', ''),
# ('ssb', ''),
# ('stsi', ''),
# ('stsr', ''),
# ('subnam', ''),
("sw_p", ""), # ('swfinal', ''),
# ('symm', ''),
# ('tdeff', ''),
# ('tdoff', ''),
# ('te1', ''),
# ('te4', ''),
# ('te_pidx', ''),
# ('te_stab', ''),
# ('ti', ''),
# ('tilt', ''),
# ('tm1', ''),
# ('tm2', ''),
# ('toplev', ''),
# ('userp1', ''),
# ('userp2', ''),
# ('userp3', ''),
# ('userp4', ''),
# ('userp5', ''),
# ('wdw', ''),
# ('xdim', ''),
# ('ymax_p', ''),
# ('ymin_p', ''),
]
# ======================================================================================================================
# Digital filter functions
# ======================================================================================================================
# Extracted from nmrglue.fileio.bruker.py (BSD License)
# Table of points to frequency shift Bruker data to remove digital filter
# (Phase is 360 degrees * num_pts)
# This table is an 'un-rounded' version based on the table in
# W. M. Westler and F. Abildgaard's offline processing note, online at:
# http://www.boc.chem.uu.nl/static/local/prospectnd/dmx_digital_filters.html
# and the updated table with additional entries at:
# http://sbtools.uchc.edu/help/nmr/nmr_toolkit/bruker_dsp_table.asp
# The rounding in the above tables appear to be based on k / (2*DECIM)
# for example 2: 44.75 = 44 + 3/4
# 4: 66.625 = 66 + 5/8
# 8: 68.563 ~= 68 + 9/16 = 68.5625
# Using this, the un-rounded table was created by checking possible unrounded
# fractions which would round to those in the original table.
# Outer keys: DSPFVS firmware version; inner keys: DECIM decimation factor;
# values: phase correction in number of points (phase = 360 degrees * value).
bruker_dsp_table: dict = {
    10: {
        2: 44.75,
        3: 33.5,
        4: 66.625,
        6: 59.083333333333333,
        8: 68.5625,
        12: 60.375,
        16: 69.53125,
        24: 61.020833333333333,
        32: 70.015625,
        48: 61.34375,
        64: 70.2578125,
        96: 61.505208333333333,
        128: 70.37890625,
        192: 61.5859375,
        256: 70.439453125,
        384: 61.626302083333333,
        512: 70.4697265625,
        768: 61.646484375,
        1024: 70.48486328125,
        1536: 61.656575520833333,
        2048: 70.492431640625,
    },
    11: {
        2: 46.0,
        3: 36.5,
        4: 48.0,
        6: 50.166666666666667,
        8: 53.25,
        12: 69.5,
        16: 72.25,
        24: 70.166666666666667,
        32: 72.75,
        48: 70.5,
        64: 73.0,
        96: 70.666666666666667,
        128: 72.5,
        192: 71.333333333333333,
        256: 72.25,
        384: 71.666666666666667,
        512: 72.125,
        768: 71.833333333333333,
        1024: 72.0625,
        1536: 71.916666666666667,
        2048: 72.03125,
    },
    12: {
        2: 46.0,
        3: 36.5,
        4: 48.0,
        6: 50.166666666666667,
        8: 53.25,
        12: 69.5,
        16: 71.625,
        24: 70.166666666666667,
        32: 72.125,
        48: 70.5,
        64: 72.375,
        96: 70.666666666666667,
        128: 72.5,
        192: 71.333333333333333,
        256: 72.25,
        384: 71.666666666666667,
        512: 72.125,
        768: 71.833333333333333,
        1024: 72.0625,
        1536: 71.916666666666667,
        2048: 72.03125,
    },
    13: {
        2: 2.75,
        3: 2.8333333333333333,
        4: 2.875,
        6: 2.9166666666666667,
        8: 2.9375,
        12: 2.9583333333333333,
        16: 2.96875,
        24: 2.9791666666666667,
        32: 2.984375,
        48: 2.9895833333333333,
        64: 2.9921875,
        96: 2.9947916666666667,
    },
}
def _remove_digital_filter(dic, data):
"""
Remove the digital filter from Bruker data.
nmrglue modified Digital Filter Processing
"""
if "acqus" not in dic:
raise KeyError("dictionary does not contain acqus parameters")
if "DECIM" not in dic["acqus"]:
raise KeyError("dictionary does not contain DECIM parameter")
decim = dic["acqus"]["DECIM"]
if "DSPFVS" not in dic["acqus"]:
raise KeyError("dictionary does not contain DSPFVS parameter")
dspfvs = dic["acqus"]["DSPFVS"]
if "GRPDLY" not in dic["acqus"]:
grpdly = 0
else:
grpdly = dic["acqus"]["GRPDLY"]
if grpdly > 0: # use group delay value if provided (not 0 or -1)
phase = grpdly
# Determine the phase correction
else:
if dspfvs >= 14: # DSPFVS greater than 14 give no phase correction.
phase = 0.0
else:
if dspfvs < 11:
dspfvs = 11 # default for DQD # loop up the phase in the table
if dspfvs not in bruker_dsp_table:
raise KeyError("dspfvs not in lookup table")
if decim not in bruker_dsp_table[dspfvs]:
raise KeyError("decim not in lookup table")
phase = bruker_dsp_table[dspfvs][decim]
# fft
si = data.shape[-1]
pdata = np.fft.fftshift(np.fft.fft(data, si, axis=-1), -1) / float(si / 2)
pdata = (pdata.T - pdata.T[0]).T # TODO: this allow generally to
# TODO: remove Bruker smiles, not so sure actually
# Phasing
si = float(pdata.shape[-1])
ph = 2.0j * np.pi * phase * np.arange(si) / si
pdata = pdata * np.exp(ph)
# ifft
data = np.fft.ifft(np.fft.ifftshift(pdata, -1), si, axis=-1) * float(si / 2)
# remove last points * 2
rp = 2 * (phase // 2)
td = dic["acqus"]["TD"] // 2
td = int(td) - int(rp)
dic["acqus"]["TD"] = td * 2
data = data[..., :td]
# debug_('Bruker digital filter : removed %s points' % rp)
return data
# def _scale(meta, dim=-1, reverse=None):
# """
# private function: Compute scale for a given axis.
# """
#
# # import parameter to convert units
# sw = float(meta.sw_h[dim])
# sfo1 = float(meta.sfo1[dim])
# bf1 = float(meta.bf1[dim])
# sf = float(meta.sf[dim])
# si = max(float(meta.si[dim])-1, 1)
# td = float(meta.td[dim])
#
# sr = (sf - bf1) * 1.0e6
# o1 = (sfo1 - bf1) * 1.0e6
#
# # set the spectral parameters
# # (si, sw_h, bf1, -sr + o1)
# # size, sw, obs, car) (correspondance with nmrglue udic)
#
# # derived units (these are in ppm)
#
# fact = 2.0 if meta.fnmode[dim] in [3, 4, 5, 6] or else 1.0
# if meta.isfreq[dim]:
# delta = -sw * fact / (si * bf1)
# first = (-sr + o1)/ bf1 - delta * si / 2.
#
# if reverse is None:
# reverse = meta.reverse
#
# if reverse:
# return scal()[::-1]
# else:
# return scal()
# ======================================================================================================================
# Bruker topspin import function
# ======================================================================================================================
def read_topspin(*paths, **kwargs):
"""
Open Bruker TOPSPIN (NMR) dataset.
Parameters
----------
*paths : str, optional
Paths of the Bruker directories to read.
**kwargs : dict
See other parameters.
Returns
--------
read_topspin
|NDDataset| or list of |NDDataset|.
Other Parameters
----------------
expno : int, optional
experiment number.
procno : int
processing number.
protocol : {'scp', 'omnic', 'opus', 'topspin', 'matlab', 'jcamp', 'csv', 'excel'}, optional
Protocol used for reading. If not provided, the correct protocol
        is inferred (whenever it is possible) from the file name extension.
directory : str, optional
From where to read the specified `filename`. If not specified, read in the default ``datadir`` specified in
SpectroChemPy Preferences.
merge : bool, optional
Default value is False. If True, and several filenames have been provided as arguments,
then a single dataset with merged (stacked along the first
dimension) is returned (default=False).
sortbydate : bool, optional
Sort multiple spectra by acquisition date (default=True).
description : str, optional
A Custom description.
origin : {'omnic', 'tga'}, optional
In order to properly interpret CSV file it can be necessary to set the origin of the spectra.
Up to now only 'omnic' and 'tga' have been implemented.
csv_delimiter : str, optional
Set the column delimiter in CSV file.
By default it is the one set in SpectroChemPy ``Preferences``.
content : bytes object, optional
Instead of passing a filename for further reading, a bytes content can be directly provided as bytes objects.
The most convenient way is to use a dictionary. This feature is particularly useful for a GUI Dash application
to handle drag and drop of files into a Browser.
        For examples on how to use this feature, one can look in the ``tests/tests_readers`` directory.
listdir : bool, optional
If True and filename is None, all files present in the provided `directory` are returned (and merged if `merge`
is True. It is assumed that all the files correspond to current reading protocol (default=True)
recursive : bool, optional
Read also in subfolders. (default=False)
See Also
--------
read_topspin : Read TopSpin Bruker NMR spectra.
read_omnic : Read Omnic spectra.
read_opus : Read OPUS spectra.
read_labspec : Read Raman LABSPEC spectra.
read_spg : Read Omnic *.spg grouped spectra.
read_spa : Read Omnic *.Spa single spectra.
read_srs : Read Omnic series.
read_csv : Read CSV files.
read_zip : Read Zip files.
read_matlab : Read Matlab files.
| |
# coding: utf-8
"""Utilities for installing Javascript extensions for JupyterLab"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import shutil
import sys
import tarfile
from os.path import join as pjoin, normpath
from jupyter_core.paths import (
jupyter_data_dir, jupyter_config_dir, jupyter_config_path,
SYSTEM_JUPYTER_PATH, ENV_JUPYTER_PATH, ENV_CONFIG_PATH, SYSTEM_CONFIG_PATH
)
from ipython_genutils.path import ensure_dir_exists
from ipython_genutils.py3compat import string_types, cast_unicode_py2
from . import __version__
from traitlets.config.manager import BaseJSONConfigManager
from traitlets.utils.importstring import import_item
from tornado.log import LogFormatter
from . import (
get_labextension_manifest_data_by_folder,
)
# Constants for the pretty-print extension listing function.
# Windows ('nt') doesn't support ANSI coloring in the command line,
# so plain-text fallbacks are used there.
GREEN_ENABLED = '\033[32m enabled \033[0m' if os.name != 'nt' else 'enabled '
RED_DISABLED = '\033[31mdisabled\033[0m' if os.name != 'nt' else 'disabled'
GREEN_OK = '\033[32mOK\033[0m' if os.name != 'nt' else 'ok'
RED_X = '\033[31m X\033[0m' if os.name != 'nt' else ' X'
#------------------------------------------------------------------------------
# Public API
#------------------------------------------------------------------------------
class ArgumentConflict(ValueError):
    """ValueError subclass used to signal conflicting argument combinations."""
    pass
def check_labextension(files, user=False, prefix=None, labextensions_dir=None, sys_prefix=False):
    """Check whether labextension files have been installed

    Returns True if all files are found, False if any are missing.

    Parameters
    ----------
    files : list(paths)
        a list of relative paths within labextensions.
    user : bool [default: False]
        Whether to check the user's .jupyter/labextensions directory.
        Otherwise check a system-wide install (e.g. /usr/local/share/jupyter/labextensions).
    prefix : str [optional]
        Specify install prefix, if it should differ from default (e.g. /usr/local).
        Will check prefix/share/jupyter/labextensions
    labextensions_dir : str [optional]
        Specify absolute path of labextensions directory explicitly.
    sys_prefix : bool [default: False]
        Install into the sys.prefix, i.e. environment
    """
    labext = _get_labextension_dir(user=user, sys_prefix=sys_prefix, prefix=prefix, labextensions_dir=labextensions_dir)
    # Nothing can be installed if the labextensions dir itself is missing.
    if not os.path.exists(labext):
        return False
    # Accept a single path by normalizing it to a one-element list.
    if isinstance(files, string_types):
        files = [files]
    for relative in files:
        if not os.path.exists(pjoin(labext, relative)):
            return False
    return True
def install_labextension(path, name, overwrite=False, symlink=False,
                         user=False, prefix=None, labextensions_dir=None,
                         logger=None, sys_prefix=False
                         ):
    """Install a Javascript extension for JupyterLab

    Stages files and/or directories into the labextensions directory.
    By default, this compares modification time, and only stages files that need updating.
    If `overwrite` is specified, matching files are purged before proceeding.

    Parameters
    ----------
    path : path to file, directory, zip or tarball archive, or URL to install
        Archives (zip or tarballs) will be extracted into the labextensions directory.
    name : str
        name the labextension is installed to.  For example, if name is 'foo', then
        the source file will be installed to 'labextensions/foo'.
    overwrite : bool [default: False]
        If True, always install the files, regardless of what may already be installed.
    symlink : bool [default: False]
        If True, create a symlink in labextensions, rather than copying files.
        Not allowed with URLs or archives. Windows support for symlinks requires
        Vista or above, Python 3, and a permission bit which only admin users
        have by default, so don't rely on it.
    user : bool [default: False]
        Whether to install to the user's labextensions directory.
        Otherwise do a system-wide install (e.g. /usr/local/share/jupyter/labextensions).
    prefix : str [optional]
        Specify install prefix, if it should differ from default (e.g. /usr/local).
        Will install to ``<prefix>/share/jupyter/labextensions``
    labextensions_dir : str [optional]
        Specify absolute path of labextensions directory explicitly.
    logger : Jupyter logger [optional]
        Logger instance to use
    sys_prefix : bool [default: False]
        Install into the sys.prefix, i.e. environment

    Returns
    -------
    str
        The full path the extension was installed to.
    """
    # the actual path to which we eventually installed
    full_dest = None
    labext = _get_labextension_dir(user=user, sys_prefix=sys_prefix, prefix=prefix, labextensions_dir=labextensions_dir)
    # make sure labextensions dir exists
    ensure_dir_exists(labext)
    # forcing symlink parameter to False if os.symlink does not exist (e.g., on Windows machines running python 2)
    if not hasattr(os, 'symlink'):
        symlink = False
    if isinstance(path, (list, tuple)):
        raise TypeError("path must be a string pointing to a single extension to install; call this function multiple times to install multiple extensions")
    path = cast_unicode_py2(path)
    if path.startswith(('https://', 'http://')):
        raise NotImplementedError('Urls are not yet supported for labextensions')
    elif path.endswith('.zip') or _safe_is_tarfile(path):
        raise NotImplementedError('Archive files are not yet supported for labextensions')
    else:
        destination = cast_unicode_py2(name)
        full_dest = normpath(pjoin(labext, destination))
        # With overwrite, purge whatever currently occupies the destination
        # (directory tree, file or symlink) before staging.
        if overwrite and os.path.lexists(full_dest):
            if logger:
                logger.info("Removing: %s" % full_dest)
            if os.path.isdir(full_dest) and not os.path.islink(full_dest):
                shutil.rmtree(full_dest)
            else:
                os.remove(full_dest)
        if symlink:
            # Symlink only if the destination is free; an existing dest is kept.
            path = os.path.abspath(path)
            if not os.path.exists(full_dest):
                if logger:
                    logger.info("Symlinking: %s -> %s" % (full_dest, path))
                os.symlink(path, full_dest)
        elif os.path.isdir(path):
            path = pjoin(os.path.abspath(path), '') # end in path separator
            # Mirror the source tree under full_dest; _maybe_copy decides
            # per-file whether an update is needed.
            for parent, dirs, files in os.walk(path):
                dest_dir = pjoin(full_dest, parent[len(path):])
                if not os.path.exists(dest_dir):
                    if logger:
                        logger.info("Making directory: %s" % dest_dir)
                    os.makedirs(dest_dir)
                for file in files:
                    src = pjoin(parent, file)
                    dest_file = pjoin(dest_dir, file)
                    _maybe_copy(src, dest_file, logger=logger)
        else:
            src = path
            _maybe_copy(src, full_dest, logger=logger)
    return full_dest
def install_labextension_python(module, overwrite=False, symlink=False,
                    user=False, sys_prefix=False, prefix=None, labextensions_dir=None, logger=None):
    """Install a labextension bundled in a Python package.

    Returns a list of installed/updated directories.

    See install_labextension for parameter information."""
    pkg, extensions = _get_labextension_metadata(module)
    package_dir = os.path.split(pkg.__file__)[0]
    installed = []
    for ext in extensions:
        source = os.path.join(package_dir, ext['src'])
        ext_name = ext['name']
        if logger:
            logger.info("Installing %s -> %s" % (source, ext_name))
        dest = install_labextension(
            source, name=ext_name, overwrite=overwrite, symlink=symlink,
            user=user, sys_prefix=sys_prefix, prefix=prefix,
            labextensions_dir=labextensions_dir, logger=logger
        )
        # Sanity-check the staged folder before reporting success.
        validate_labextension_folder(ext_name, dest, logger)
        installed.append(dest)
    return installed
def uninstall_labextension(name, user=False, sys_prefix=False, prefix=None,
                           labextensions_dir=None, logger=None):
    """Uninstall a Javascript extension of JupyterLab

    Removes staged files and/or directories in the labextensions directory and
    removes the extension from the frontend config.

    Parameters
    ----------
    name: str
        The name of the labextension.
    user : bool [default: False]
        Whether to uninstall from the user's labextensions directory.
        Otherwise do a system-wide uninstall (e.g. /usr/local/share/jupyter/labextensions).
    sys_prefix : bool [default: False]
        Uninstall from the sys.prefix, i.e. environment
    prefix : str [optional]
        Specify prefix, if it should differ from default (e.g. /usr/local).
        Will uninstall from ``<prefix>/share/jupyter/labextensions``
    labextensions_dir : str [optional]
        Specify absolute path of labextensions directory explicitly.
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    labext = _get_labextension_dir(user=user, sys_prefix=sys_prefix, prefix=prefix, labextensions_dir=labextensions_dir)
    target = pjoin(labext, cast_unicode_py2(name))
    if os.path.lexists(target):
        if logger:
            logger.info("Removing: %s" % target)
        # Directories are removed recursively; files and symlinks are unlinked.
        if os.path.isdir(target) and not os.path.islink(target):
            shutil.rmtree(target)
        else:
            os.remove(target)
    # Also drop the extension from the frontend config.
    disable_labextension(name, user=user, sys_prefix=sys_prefix,
                         logger=logger)
def uninstall_labextension_python(module,
                        user=False, sys_prefix=False, prefix=None, labextensions_dir=None,
                        logger=None):
    """Uninstall a labextension bundled in a Python package.

    See parameters of `install_labextension_python`
    """
    _, extensions = _get_labextension_metadata(module)
    for ext in extensions:
        ext_name = ext['name']
        if logger:
            logger.info("Uninstalling {}".format(ext_name))
        # Remove both the staged files and the frontend config entry.
        uninstall_labextension(ext_name, user=user, sys_prefix=sys_prefix,
                               prefix=prefix, labextensions_dir=labextensions_dir,
                               logger=logger)
def _set_labextension_state(name, state,
                            user=True, sys_prefix=False, logger=None):
    """Set whether the JupyterLab frontend should use the named labextension

    Returns True if the final state is the one requested.

    Parameters
    ----------
    name : string
        The name of the extension.
    state : bool
        The state in which to leave the extension; None toggles the
        current value.
    user : bool [default: True]
        Whether to update the user's .jupyter/labextensions directory
    sys_prefix : bool [default: False]
        Whether to update the sys.prefix, i.e. environment. Will override
        `user`.
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    # sys_prefix takes precedence over user.
    user = False if sys_prefix else user
    config_dir = os.path.join(
        _get_config_dir(user=user, sys_prefix=sys_prefix), 'labconfig')
    cm = BaseJSONConfigManager(config_dir=config_dir)
    if logger:
        logger.info("{} extension {}...".format(
            "Enabling" if state else "Disabling",
            name
        ))
    cfg = cm.get("jupyterlab_config")
    # Ensure the LabApp.labextensions section exists in the config.
    labextensions = (
        cfg.setdefault("LabApp", {})
        .setdefault("labextensions", {})
    )
    old_enabled = labextensions.get(name, None)
    # state=None means "toggle whatever the current value is".
    new_enabled = state if state is not None else not old_enabled
    if logger:
        if new_enabled:
            logger.info(u"Enabling: %s" % (name))
        else:
            logger.info(u"Disabling: %s" % (name))
    labextensions[name] = new_enabled
    if logger:
        logger.info(u"- Writing config: {}".format(config_dir))
    cm.update("jupyterlab_config", cfg)
    if new_enabled:
        # Warn early if the enabled extension looks broken.
        validate_labextension(name, logger=logger)
    return old_enabled == state
def _set_labextension_state_python(state, module, user, sys_prefix,
                                   logger=None):
    """Enable or disable some labextensions stored in a Python package

    Returns a list of whether the state was achieved (i.e. changed, or was
    already right)

    Parameters
    ----------
    state : Bool
        Whether the extensions should be enabled
    module : str
        Importable Python module exposing the
        magic-named `_jupyter_labextension_paths` function
    user : bool
        Whether to enable in the user's labextensions directory.
    sys_prefix : bool
        Enable/disable in the sys.prefix, i.e. environment
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    _, extensions = _get_labextension_metadata(module)
    results = []
    for ext in extensions:
        results.append(_set_labextension_state(name=ext["name"],
                                               state=state,
                                               user=user,
                                               sys_prefix=sys_prefix,
                                               logger=logger))
    return results
def enable_labextension(name, user=True, sys_prefix=False,
logger=None):
"""Enable a named labextension
Returns True if the final state is the one requested.
Parameters
----------
name : string
The name of the extension.
user : bool [default: True]
Whether to enable in the user's labextensions directory.
sys_prefix : bool [default: False]
Whether | |
in (diskFiles - dbFiles):
fullPath = os.path.join(self.dataDir, path)
st = os.stat(longPathEnc(fullPath))
wikiWord = self._findNewWordForFile(path)
if wikiWord is not None:
fileSig = self.wikiDocument.getFileSignatureBlock(fullPath)
self.connWrap.execSql("insert into wikiwords(word, created, "
"modified, filepath, filenamelowercase, "
"filesignature, metadataprocessed) "
"values (?, ?, ?, ?, ?, ?, 0)",
(wikiWord, ti, st.st_mtime, path, path.lower(),
sqlite.Binary(fileSig)))
page = self.wikiDocument.getWikiPage(wikiWord)
page.refreshSyncUpdateMatchTerms()
except (IOError, OSError, sqlite.Error), e:
traceback.print_exc()
raise DbWriteAccessError(e)
    def _getCachedWikiPageLinkTermDict(self):
        """
        Function works for read-only wiki.

        Return a lazily-filled, dict-like cache mapping link terms (page
        names and type-2 "as link" match terms) to wiki page names.
        Individual lookups are cached one key at a time; keys() forces a
        full fill from the database.
        """
        class CachedWikiPageLinkTermDict(object):
            def __init__(self, outer):
                self.outer = outer
                # key -> resolved page name
                self.cache = {}
                # keys known NOT to resolve (negative cache)
                self.cacheNonExistent = set()
                # True once keys() has loaded the complete mapping
                self.cacheComplete = False
            def get(self, key, default=None):
                if self.cacheComplete:
                    return self.cache.get(key, default)
                if self.cache.has_key(key):
                    return self.cache.get(key, default)
                if key in self.cacheNonExistent:
                    return default
                # Not cached either way yet: hit the database once and
                # remember the outcome (positive or negative).
                try:
                    value = self.lookup(key)
                    self.cache[key] = value
                    return value
                except KeyError:
                    self.cacheNonExistent.add(key)
                    return default
            def lookup(self, key):
                # A real page name resolves to itself.
                if self.outer.isDefinedWikiPageName(key):
                    return key
                try:
                    value = self.outer.connWrap.execSqlQuerySingleItem(
                            "select word from wikiwordmatchterms "
                            "where matchterm = ? and (type & 2) != 0 ", (key,))
                    # Consts.WIKIWORDMATCHTERMS_TYPE_ASLINK == 2
                except (IOError, OSError, sqlite.Error), e:
                    traceback.print_exc()
                    raise DbReadAccessError(e)
                if value is None:
                    raise KeyError(key)
                return value
            def keys(self):
                if not self.cacheComplete:
                    # Full fill: all real page names plus "as link" match
                    # terms that are not themselves page names.
                    self.cache = dict(self.outer.connWrap.execSqlQuery(
                            "select word, word from wikiwords union "
                            "select matchterm, word from wikiwordmatchterms "
                            "where (type & 2) != 0 and not matchterm in "
                            "(select word from wikiwords)"))
                    # Consts.WIKIWORDMATCHTERMS_TYPE_ASLINK == 2
                    self.cacheComplete = True
                    self.cacheNonExistent = set()
                return self.cache.keys()
        try:
            # Create the cache object on first use and reuse it afterwards.
            if self.cachedWikiPageLinkTermDict is None:
                self.cachedWikiPageLinkTermDict = CachedWikiPageLinkTermDict(self)
            return self.cachedWikiPageLinkTermDict
        except (IOError, OSError, sqlite.Error), e:
            traceback.print_exc()
            raise DbReadAccessError(e)
# def _getCachedWikiPageLinkTermDict(self):
# """
# Function works for read-only wiki.
# """
# try:
# if self.cachedWikiPageLinkTermDict is None:
# self.cachedWikiPageLinkTermDict = dict(self.connWrap.execSqlQuery(
# "select word, word from wikiwords union "
# "select matchterm, word from wikiwordmatchterms "
# "where (type & 2) != 0 and not matchterm in "
# "(select word from wikiwords)"))
# # Consts.WIKIWORDMATCHTERMS_TYPE_ASLINK == 2
#
# return self.cachedWikiPageLinkTermDict
# except (IOError, OSError, sqlite.Error), e:
# traceback.print_exc()
# raise DbReadAccessError(e)
    def _getAllWikiFileNamesFromDisk(self):   # Used for rebuilding wiki
        """Return basenames of all page files found in the data directory."""
        try:
            # Page files are identified purely by self.pagefileSuffix.
            files = glob.glob(join(self.dataDir, u'*' + self.pagefileSuffix))
            return [pathDec(basename(fn)) for fn in files]
#             result = []
#             for file in files:
#                 word = pathDec(basename(file))
#                 if word.endswith(self.pagefileSuffix):
#                     word = word[:-len(self.pagefileSuffix)]
# 
#                 result.append(word)
# 
#             return result
        except (IOError, OSError, sqlite.Error), e:
            traceback.print_exc()
            raise DbReadAccessError(e)
    def _getAllWikiFileNamesFromDb(self):   # Used for rebuilding wiki
        """Return all page file paths recorded in the wikiwords table."""
        try:
            return self.connWrap.execSqlQuerySingleColumn("select filepath "
                    "from wikiwords")
        except (IOError, OSError, sqlite.Error), e:
            traceback.print_exc()
            raise DbReadAccessError(e)
    def getWikiWordFileNameRaw(self, wikiWord):
        """
        Not part of public API!
        Function must work for read-only wiki.

        Return the file path stored in the database for wikiWord (relative
        to the data directory); raise WikiFileNotFoundException if no row
        exists for the word.
        """
        try:
            path = self.connWrap.execSqlQuerySingleItem("select filepath "
                    "from wikiwords where word = ?", (wikiWord,))
        except (IOError, OSError, sqlite.Error), e:
            traceback.print_exc()
            raise DbReadAccessError(e)
        if path is None:
            raise WikiFileNotFoundException(
                    _(u"Wiki page not found (no path information) for word: %s") %
                    wikiWord)
        return path
    def getWikiWordFileName(self, wikiWord, mustExist=True):
        """
        Not part of public API!
        Function must work for read-only wiki.

        Return the absolute page file path for wikiWord. If the DB path is
        stale and "graceful outside add/remove" handling is enabled, link
        terms are refreshed once and the lookup retried before giving up.
        """
        try:
            path = longPathEnc(join(self.dataDir,
                    self.getWikiWordFileNameRaw(wikiWord)))
            if mustExist and \
                    (not os.path.exists(path) or not os.path.isfile(path)):
                raise WikiFileNotFoundException(
                        _(u"Wiki page not found (bad path information) for word: %s") %
                        wikiWord)
        except WikiFileNotFoundException:
            if self.wikiDocument.getWikiConfig().getboolean("main",
                    "wikiPageFiles_gracefulOutsideAddAndRemove", True):
                # Refresh content names and try again
                self.refreshWikiPageLinkTerms(deleteFully=True)
                path = longPathEnc(join(self.dataDir,
                        self.getWikiWordFileNameRaw(wikiWord)))
                if mustExist and \
                        (not os.path.exists(path) or not os.path.isfile(path)):
                    raise WikiFileNotFoundException(
                            _(u"Wiki page not found (bad path information) for word: %s") %
                            wikiWord)
            else:
                # Graceful handling disabled: propagate the original failure.
                raise
        return path
    def createWikiWordFileName(self, wikiWord):
        """
        Create a filename for wikiWord which is not yet in the database or
        a file with that name in the data directory.

        Returns the free filename, or None if no unused candidate was found
        among the first 30 generated names.
        """
        asciiOnly = self.wikiDocument.getWikiConfig().getboolean("main",
                "wikiPageFiles_asciiOnly", False)
        maxFnLength = self.wikiDocument.getWikiConfig().getint("main",
                "wikiPageFiles_maxNameLength", 120)
        icf = iterCompatibleFilename(wikiWord, self.pagefileSuffix,
                asciiOnly=asciiOnly, maxLength=maxFnLength)
        for i in range(30):   # "while True" would be too dangerous
            fileName = icf.next()
            # Reject candidates already used by another page
            # (case-insensitive comparison via filenamelowercase)...
            existing = self.connWrap.execSqlQuerySingleColumn(
                    "select filenamelowercase from wikiwords "
                    "where filenamelowercase = ?", (fileName.lower(),))
            if len(existing) > 0:
                continue
            # ...or already present on disk.
            if exists(longPathEnc(join(self.dataDir, fileName))):
                continue
            return fileName
        # No free name found among the candidates.
        return None
    def _guessWikiWordFileName(self, wikiWord):
        """
        Try to find an existing file in self.dataDir which COULD BE the page
        file for wikiWord.
        Called when external adding of files should be handled gracefully.
        Returns either the filename relative to self.dataDir or None.
        """
        try:
            asciiOnly = self.wikiDocument.getWikiConfig().getboolean("main",
                    "wikiPageFiles_asciiOnly", False)
            maxFnLength = self.wikiDocument.getWikiConfig().getint("main",
                    "wikiPageFiles_maxNameLength", 120)
            # Try first with current ascii-only setting
            icf = iterCompatibleFilename(wikiWord, self.pagefileSuffix,
                    asciiOnly=asciiOnly, maxLength=maxFnLength)
            for i in range(2):
                fileName = icf.next()
                # A name already recorded in the DB belongs to another page.
                existing = self.connWrap.execSqlQuerySingleColumn(
                        "select filenamelowercase from wikiwords "
                        "where filenamelowercase = ?", (fileName.lower(),))
                if len(existing) > 0:
                    continue
                # Unlike createWikiWordFileName, the candidate MUST exist on
                # disk to be a plausible externally-added page file.
                if not os.path.exists(longPathEnc(join(self.dataDir, fileName))):
                    continue
                return fileName
            # Then the same with opposite ascii-only setting
            icf = iterCompatibleFilename(wikiWord, self.pagefileSuffix,
                    asciiOnly=not asciiOnly, maxLength=maxFnLength)
            for i in range(2):
                fileName = icf.next()
                existing = self.connWrap.execSqlQuerySingleColumn(
                        "select filenamelowercase from wikiwords "
                        "where filenamelowercase = ?", (fileName.lower(),))
                if len(existing) > 0:
                    continue
                if not os.path.exists(longPathEnc(join(self.dataDir, fileName))):
                    continue
                return fileName
            # No plausible file found under either setting.
            return None
        except (IOError, OSError, sqlite.Error), e:
            traceback.print_exc()
            raise DbReadAccessError(e)
def isDefinedWikiLinkTerm(self, word):
    """Check if a word is a valid wikiword (page name or alias)."""
    pageName = self.getWikiPageNameForLinkTerm(word)
    return bool(pageName)
# # TODO More reliably esp. for aliases
# def isDefinedWikiWord(self, word):
# "check if a word is a valid wikiword (page name or alias)"
# return self._getCachedWikiPageLinkTermDict().has_key(word)
def getAllProducedWikiLinks(self):
    """
    Return all links stored by production (in contrast to resolution).
    Function must work for read-only wiki.
    """
    linkTermDict = self._getCachedWikiPageLinkTermDict()
    return linkTermDict.keys()
def getWikiPageLinkTermsStartingWith(self, thisStr, caseNormed=False):
    """
    Get the list of wiki page link terms (page names or aliases)
    starting with thisStr. Used for autocompletion.

    thisStr -- prefix to match (escaped for SQLite glob before use)
    caseNormed -- if True, match against the case-normalized column

    Raises DbReadAccessError on database access failure.
    """
    if caseNormed:
        thisStr = sqlite.escapeForGlob(thisStr.lower()) # TODO More general normcase function
        try:
            # (type & 2) selects "as link" match terms only
            return self.connWrap.execSqlQuerySingleColumn(
                    "select matchterm from wikiwordmatchterms "
                    "where matchtermnormcase glob (? || '*') and "
                    "(type & 2) != 0",
                    (thisStr,))
            # Consts.WIKIWORDMATCHTERMS_TYPE_ASLINK == 2
        except (IOError, OSError, sqlite.Error), e:
            traceback.print_exc()
            raise DbReadAccessError(e)
    else:
        try:
            thisStr = sqlite.escapeForGlob(thisStr)

            return self.connWrap.execSqlQuerySingleColumn(
                    "select matchterm from wikiwordmatchterms "
                    "where matchterm glob (? || '*') and "
                    "(type & 2) != 0 union "
                    "select word from wikiwords where word glob (? || '*')",
                    (thisStr,thisStr))
            # Consts.WIKIWORDMATCHTERMS_TYPE_ASLINK == 2
            # To ensure that at least all real wikiwords are found,
            # the wikiwords table is also read
        except (IOError, OSError, sqlite.Error), e:
            traceback.print_exc()
            raise DbReadAccessError(e)
def getWikiPageNamesModifiedWithin(self, startTime, endTime):
    """
    Return names of pages modified in the interval [startTime, endTime).
    Function must work for read-only wiki.

    startTime and endTime are floating values as returned by time.time()
    startTime is inclusive, endTime is exclusive

    Raises DbReadAccessError on database access failure.
    """
    try:
        return self.connWrap.execSqlQuerySingleColumn(
                "select word from wikiwords where modified >= ? and "
                "modified < ?",
                (startTime, endTime))
    except (IOError, OSError, sqlite.Error), e:
        traceback.print_exc()
        raise DbReadAccessError(e)
# Maps the stampType codes used by the time-based query methods to the
# corresponding column of the "wikiwords" table.
# Code 2 (last visit) is deliberately absent: not supported yet.
_STAMP_TYPE_TO_FIELD = {
        0: "modified",
        1: "created"
    }
def getTimeMinMax(self, stampType):
    """
    Return the minimal and maximal timestamp values over all wiki words
    as tuple (minT, maxT) of float time values.
    A time value of 0.0 is not taken into account.
    If there are no wikiwords with time value != 0.0, (None, None) is
    returned.

    stampType -- 0: Modification time, 1: Creation, 2: Last visit

    Raises DbReadAccessError on database access failure.
    """
    field = self._STAMP_TYPE_TO_FIELD.get(stampType)
    if field is None:
        # Visited not supported yet
        return (None, None)

    try:
        # field comes from the fixed _STAMP_TYPE_TO_FIELD dict, so the
        # string interpolation cannot inject arbitrary SQL.
        result = self.connWrap.execSqlQuery(
                ("select min(%s), max(%s) from wikiwords where %s > 0") %
                (field, field, field))
    except (IOError, OSError, sqlite.Error), e:
        traceback.print_exc()
        raise DbReadAccessError(e)

    if len(result) == 0:
        # No matching wiki words found
        return (None, None)
    else:
        return tuple(result[0])
def getWikiPageNamesBefore(self, stampType, stamp, limit=None):
    """
    Get a list of tuples of wiki words and dates related to a particular
    time before stamp.

    stampType -- 0: Modification time, 1: Creation, 2: Last visit
    stamp -- float time value; only entries strictly before it are returned
    limit -- How much words to return or None for all

    Raises DbReadAccessError on database access failure.
    """
    field = self._STAMP_TYPE_TO_FIELD.get(stampType)
    if field is None:
        # Visited not supported yet
        return []

    if limit is None:
        limit = -1   # a negative LIMIT means "no limit" in SQLite

    try:
        # field comes from the fixed _STAMP_TYPE_TO_FIELD dict (no SQL
        # injection risk); entries with timestamp 0 are excluded.
        return self.connWrap.execSqlQuery(
                ("select word, %s from wikiwords where %s > 0 and %s < ? "
                "order by %s desc limit ?") %
                (field, field, field, field), (stamp, limit))
    except (IOError, OSError, sqlite.Error), e:
        traceback.print_exc()
        raise DbReadAccessError(e)
def getWikiPageNamesAfter(self, stampType, stamp, limit=None):
"""
Get a list of of tuples of wiki words and dates related to a particular
time after OR AT | |
for k, v in df.groupby("ID")}
groupList = list(groups.keys())
# groupList = list(groups.groups)
pts = ['bbr_x','bbr_y','fbr_x','fbr_y','fbl_x','fbl_y','bbl_x', 'bbl_y']
n_car = len(groupList)
CX = np.ones((n_car, n_car)) * 999 # cone of X
loss = torch.nn.GaussianNLLLoss()
empty_id = set()
for i,c1 in enumerate(groupList):
print("\rTracklet {}/{}".format(i,n_car),end = "\r",flush = True)
# get the fitted line for c1
# track1 = groups.get_group(c1)
track1 = groups[c1]
t1 = track1["Frame #"].values
ct1 = np.nanmean(t1)
track1 = np.array(track1[pts])
x1 = (track1[:,0] + track1[:,2])/2
y1 = (track1[:,1] + track1[:,7])/2
notnan = ~np.isnan(x1)
t1 = t1[notnan]
x1 = x1[notnan]
y1 = y1[notnan]
if len(t1)<1 or (c1 in empty_id):
empty_id.add(c1)
continue
elif len(t1)<2:
v = np.sign(track1[0,2]-track1[0,0]) # assume 1/-1 m/frame = 30m/s
b = x1-v*ct1 # recalculate y-intercept
fitx = np.array([v,b[0]])
fity = np.array([0,y1[0]])
else:
X = np.vstack([t1,np.ones(len(t1))]).T # N x 2
fitx = np.linalg.lstsq(X, x1, rcond=None)[0]
fity = np.linalg.lstsq(X, y1, rcond=None)[0]
for j in range(i+1,n_car):
# track2 = groups.get_group(groupList[j])
track2 = groups[groupList[j]]
t2 = track2["Frame #"].values
track2 = np.array(track2[pts])
x2 = (track2[:,0] + track2[:,2])/2
y2 = (track2[:,1] + track2[:,7])/2
notnan = ~np.isnan(x2)
if sum(notnan)==0 or (groupList[j] in empty_id): # if all data is nan (missing)
empty_id.add(groupList[j])
continue
t2 = t2[notnan]
x2 = x2[notnan]
y2 = y2[notnan]
ct2 = np.mean(t2)
if len(t2)<2:
v = np.sign(track2[0,2]-track2[0,0])
b = x2-v*ct2 # recalculate y-intercept
fit2x = np.array([v,b[0]])
fit2y = np.array([0,y2[0]])
else:
# OLS faster
X = np.vstack([t2,np.ones(len(t2))]).T
fit2x = np.linalg.lstsq(X, x2, rcond=None)[0]
fit2y = np.linalg.lstsq(X, y2, rcond=None)[0]
nll = 999
if all(t2-t1[-1]>=0): # t1 comes first
if t2[0] - t1[-1] > time_out:
# print("time out {} and {}".format(c1, groupList[j]))
continue
# 1. project t1 forward to t2's time
# targetx = np.polyval(fitx, t2)
# targety = np.polyval(fity, t2)
targetx = np.matmul(X, fitx)
targety = np.matmul(X, fity)
pt1 = t1[-1]
varx = (t2-pt1) * VARX
vary = (t2-pt1) * VARY
input = torch.transpose(torch.tensor([x2,y2]),0,1)
target = torch.transpose(torch.tensor([targetx, targety]),0,1)
var = torch.transpose(torch.tensor([varx,vary]),0,1)
nll = min(nll, loss(input,target,var))
# print("{} and {}: {}".format(c1, groupList[j],nll))
# 2. project t2 backward to t1's time
targetx = np.polyval(fit2x, t1)
targety = np.polyval(fit2y, t1)
pt2 = t2[0]
varx = (pt2-t1) * VARX
vary = (pt2-t1) * VARY
input = torch.transpose(torch.tensor([x1,y1]),0,1)
target = torch.transpose(torch.tensor([targetx, targety]),0,1)
var = torch.transpose(torch.tensor([varx,vary]),0,1)
nll = min(nll, loss(input,target,var))
# print("{} and {}: {}".format(c1, groupList[j],nll))
elif all(t1-t2[-1]>=0): # t2 comes first:
if t1[0] - t2[-1] > time_out:
continue
# 3. project t1 backward to t2's time
targetx = np.polyval(fitx, t2)
targety = np.polyval(fity, t2)
pt1 = t1[0]
varx = (pt1-t2) * VARX
vary = (pt1-t2) * VARY
input = torch.transpose(torch.tensor([x2,y2]),0,1)
target = torch.transpose(torch.tensor([targetx, targety]),0,1)
var = torch.transpose(torch.tensor([varx,vary]),0,1)
nll = min(nll, loss(input,target,var))
# print("{} and {}: {}".format(c1, groupList[j],nll))
# 4. project t2 forward to t1's time
targetx = np.polyval(fit2x, t1)
targety = np.polyval(fit2y, t1)
pt2 = t2[-1]
varx = (t1-pt2) * VARX
vary = (t1-pt2) * VARY
input = torch.transpose(torch.tensor([x1,y1]),0,1)
target = torch.transpose(torch.tensor([targetx, targety]),0,1)
var = torch.transpose(torch.tensor([varx,vary]),0,1)
nll = min(nll, loss(input,target,var))
# print("{} and {}: {}".format(c1, groupList[j],nll))
CX[i,j] = nll
# for debugging only
o.CX = CX
o.groupList = groupList
o.empty_id = empty_id
BX = CX < THRESHOLD_C
for i in range(len(CX)): # make CX CY symmetrical
BX[i,:] = np.logical_or(BX[i,:], BX[:,i])
# 4. start by sorting CX
a,b = np.unravel_index(np.argsort(CX, axis=None), CX.shape)
path = {idx: {idx} for idx in range(n_car)} # disjoint set
for i in range(len(a)):
if CX[a[i],b[i]] > THRESHOLD_C:
break
else:
path_a, path_b = list(path[a[i]]),list(path[b[i]])
if np.all(BX[np.ix_(path_a, path_b)]): # if no conflict with any path
path[a[i]] = path[a[i]].union(path[b[i]])
for aa in path[a[i]]:
path[aa] = path[a[i]].copy()
o.path = path
# delete IDs that are empty
df = df.groupby("ID").filter(lambda x: (x["ID"].iloc[0] not in empty_id))
path = path.copy()
# modify ID
while path:
key = list(path.keys())[0]
reid = {groupList[old]: groupList[key] for old in path[key]} # change id=groupList[v] to groupList[key]
df = df.replace({'ID': reid})
for v in list(path[key]) + [key]:
try:
path.pop(v)
except KeyError:
pass
df = df.sort_values(by=['Frame #','ID']).reset_index(drop=True)
o.df = df
print("\n")
print("Before DA: {} unique IDs".format(len(groupList)))
print("After DA: {} unique IDs".format(df.groupby("ID").ngroups))
print("True: {} unique IDs".format(len([id for id in groupList if id<1000])))
return o
def stitch_objects_tsmn_online(o, THRESHOLD_MIN, THRESHOLD_MAX=3, VARX=0.03, VARY=0.03, time_out = 500):
'''
pitfall: strictly lowest-cost match, pairs occur later in time cannot be matched because they're not ready. Pairs occurs first may not be matched beacuase lower in priority
THRESHOLD_MIN: below which pairs have to be matched
THRESHOLD_MAX: aobve which pairs should never be matched
online version of stitch_objects_tsmn_ll
track: dict with key: id, t, x, y
{"id": 20,
"t": [frame1, frame2,...],
"x":[x1,x2,...],
"y":[y1,y2...],
"fitx": [vx, bx], least square fit
"fity": [vy, by]}
tracks come incrementally as soon as they end
'''
# define cost
def _getCost(track1, track2):
    '''
    Cost of stitching the end of track1 to the start of track2
    (track1 always ends before track2 ends).

    Returns:
        999: mark as conflict (track2 starts before track1 ends)
        -1: invalid (same id, or time gap exceeds time_out)
        otherwise: Gaussian negative log-likelihood of track2's points
        under track1's linear (constant-velocity) fit.

    NOTE(review): relies on `time_out`, `VARX`, `VARY` and `loss`
    (torch.nn.GaussianNLLLoss) from the enclosing function's scope.
    '''
    if track1['id']==track2['id']:
        return -1
    # if (track1['id'] in empty_id) or (track1['id'] in empty_id):
    #     return -1
    if track2["t"][0] < track1['t'][-1]: # if track2 starts before track1 ends
        return 999
    if track2['t'][0] - track1['t'][-1] > time_out: # if track2 starts TIMEOUT after track1 ends
        return -1
    # design matrix [t, 1] over track2's timestamps
    xx = np.vstack([track2['t'],np.ones(len(track2['t']))]).T # N x 2
    # extrapolate track1's x/y fits onto track2's time support
    targetx = np.matmul(xx, track1['fitx'])
    targety = np.matmul(xx, track1['fity'])
    pt1 = track1['t'][-1]
    # variance grows linearly with the time gap from track1's last frame
    varx = (track2['t']-pt1) * VARX
    vary = (track2['t']-pt1) * VARY
    input = torch.transpose(torch.tensor([track2['x'],track2['y']]),0,1)
    target = torch.transpose(torch.tensor([targetx, targety]),0,1)
    var = torch.transpose(torch.tensor([varx,vary]),0,1)
    nll = loss(input,target,var)
    return nll.item()
def _addEdge(graph,u,v):
# add undirected edge
graph[u].add(v)
graph[v].add(u)
def _first(s):
'''Return the first element from an ordered collection
or an arbitrary element from an unordered collection.
Raise StopIteration if the collection is empty.
'''
return next(iter(s.values()))
df = o.df
# sort tracks by start/end time - not for real deployment
groups = {k: v for k, v in df.groupby("ID")}
ids = list(groups.keys())
ordered_tracks = deque() # list of dictionaries
all_tracks = {}
S = []
E = []
for id, car in groups.items():
t = car["Frame #"].values
x = (car.bbr_x.values + car.bbl_x.values)/2
y = (car.bbr_y.values + car.bbl_y.values)/2
notnan = ~np.isnan(x)
t,x,y = t[notnan], x[notnan],y[notnan]
if len(t)>1: # ignore empty or only has 1 frame
S.append([t[0], id])
E.append([t[-1], id])
track = {"id":id, "t": t, "x": x, "y": y}
# ordered_tracks.append(track)
all_tracks[id] = track
heapq.heapify(S) # min heap (frame, id)
heapq.heapify(E)
EE = E.copy()
while EE:
e, id = heapq.heappop(EE)
ordered_tracks.append(all_tracks[id])
# Initialize
X = defaultdict(set) # exclusion graph
curr_tracks = deque() # tracks in view. list of tracks. should be sorted by end_time
path = {} # oldid: newid. to store matching assignment
C = [] # min heap. {cost: (id1, id2)} cost to match start of id1 to end of id2
past_tracks = set() # set of ids indicate end of track ready to be matched
processed = set() # set of ids whose tails are matched
matched = 0 # count matched pairs
running_tracks = OrderedDict() # tracks that start but not end at e
for track in ordered_tracks:
print("\n")
curr_id = track['id'] # last_track = track['id']
path[curr_id] = curr_id
print('at end of: ',curr_id)
right = track['t'][-1] # right pointer: current time
# get tracks that started but not end - used to define the window left pointer
while S and S[0][0] < right: # append all the tracks that already starts
started_time, started_id = heapq.heappop(S)
running_tracks[started_id] = started_time
print('running tracks: ', running_tracks.keys())
# compute track statistics
t,x,y = track['t'],track['x'],track['y']
ct = np.nanmean(t)
# empty_id.add(track['id'])
# continue
if len(t)<2:
v = np.sign(x[-1]-x[0]) # assume 1/-1 m/frame = 30m/s
b = x-v*ct # recalculate y-intercept
fitx = np.array([v,b[0]])
fity = np.array([0,y[0]])
else:
xx = np.vstack([t,np.ones(len(t))]).T # N x 2
fitx = np.linalg.lstsq(xx,x, rcond=None)[0]
fity = np.linalg.lstsq(xx,y, rcond=None)[0]
track['t'] = t
track['x'] = x
track['y'] = y
track['fitx'] = fitx
track['fity'] = fity
try: left = max(0,_first(running_tracks) - time_out)
| |
| wget -i - -O /tmp/speedtestcheck.png"
testspeed =str(subprocess.call(testspeedcmd, shell = True,encoding='utf-8'))
bot.send_chat_action(config.tg, "upload_photo")
testspeedfile = open('/tmp/speedtestcheck.png', 'rb')
bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
except:
bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
else:
pass
# Network speed Hungary
# Network speed Italy
@bot.message_handler(func=lambda message: message.text == lt_italyflspdt)
def command_testspeed_italy(message):
    """Run a speed test against the Italian server (id 11842) and send the
    resulting share image to the owner chat."""
    # Only the configured owner may trigger the command.
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 11842 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        # Return code is not informative; a failure surfaces when open()
        # below cannot find the result file.
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        # Narrowed from bare "except:" so SystemExit/KeyboardInterrupt pass.
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Italy
# Network speed Liechtenstein
@bot.message_handler(func=lambda message: message.text == lt_liechtnspdt)
def command_testspeed_liechtenstein(message):
    """Run a speed test against the Liechtenstein server (id 20255) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 20255 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Liechtenstein
# Network speed Luxembourg
@bot.message_handler(func=lambda message: message.text == lt_luxmbrgspdt)
def command_testspeed_luxembourg(message):
    """Run a speed test against the Luxembourg server (id 4769) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 4769 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Luxembourg
# Network speed Netherlands
@bot.message_handler(func=lambda message: message.text == lt_nthlndsspdt)
def command_testspeed_netherlands(message):
    """Run a speed test against the Dutch server (id 20005) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 20005 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Netherlands
# Network speed Poland
@bot.message_handler(func=lambda message: message.text == lt_polandfspdt)
def command_testspeed_poland(message):
    """Run a speed test against the Polish server (id 5326) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 5326 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Poland
# Network speed Serbia
@bot.message_handler(func=lambda message: message.text == lt_serbiafspdt)
def command_testspeed_serbia(message):
    """Run a speed test against the Serbian server (id 3800) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 3800 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Serbia
# Network speed Slovakia
@bot.message_handler(func=lambda message: message.text == lt_slovakispdt)
def command_testspeed_slovakia(message):
    """Run a speed test against the Slovak server (id 7069) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 7069 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Slovakia
# Network speed Slovenia
@bot.message_handler(func=lambda message: message.text == lt_slovenispdt)
def command_testspeed_slovenia(message):
    """Run a speed test against the Slovenian server (id 3560) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 3560 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Slovenia
# Network speed Spain
@bot.message_handler(func=lambda message: message.text == lt_spainflspdt)
def command_testspeed_spain(message):
    """Run a speed test against the Spanish server (id 14979) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 14979 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Spain
# Network speed Switzerland
@bot.message_handler(func=lambda message: message.text == lt_swtzlndspdt)
def command_testspeed_switzerland(message):
    """Run a speed test against the Swiss server (id 24389) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 24389 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed Switzerland
# Network speed United Kingdom
@bot.message_handler(func=lambda message: message.text == lt_unitedkspdt)
def command_testspeed_uk(message):
    """Run a speed test against the UK server (id 11123) and send
    the resulting share image to the owner chat."""
    if message.from_user.id != config.tg:
        return
    try:
        bot.send_chat_action(config.tg, "typing")
        testspeedcmd = "python3 " + config.tontgpath + "/speedtest-cli --share --server 11123 | grep -i 'Share results' | awk '{print $3}' | wget -i - -O /tmp/speedtestcheck.png"
        subprocess.call(testspeedcmd, shell = True,encoding='utf-8')
        bot.send_chat_action(config.tg, "upload_photo")
        # Context manager closes the image file (original leaked the handle).
        with open('/tmp/speedtestcheck.png', 'rb') as testspeedfile:
            bot.send_photo(config.tg, testspeedfile, reply_markup=markupspeedtest)
    except Exception:
        bot.send_message(config.tg, text=_("Network speed test check failed"), reply_markup=markupspeedtest)
# Network speed United Kingdom
# Back to linux tools
@bot.message_handler(func=lambda message: message.text == lt_backlinux)
def command_backtolinux(message):
    """Switch the owner chat back to the Linux-tools keyboard."""
    # ignore anyone but the configured owner
    if message.from_user.id != config.tg:
        return
    bot.send_message(config.tg, text=_("Be careful. Some processes need time ") + " \U000023F3", reply_markup=markuplinux)
# /Back to linux tools
# Network speed tool
#######################################################
# Main menu
@bot.message_handler(func=lambda message: message.text == lt_mainmenu)
def command_srvstart(message):
    """Show the start (main-menu) keyboard to the owner chat."""
    # ignore anyone but the configured owner
    if message.from_user.id != config.tg:
        return
    bot.send_message(config.tg, text=_("Start menu"), reply_markup=markup)
# /Main menu
# Except proc kill
def kill(proc_pid):
    """Kill the process with pid proc_pid together with all of its children."""
    parent = psutil.Process(proc_pid)
    # terminate the whole subtree first, then the parent itself
    children = parent.children(recursive=True)
    for child in children:
        child.kill()
    parent.kill()
def StakeChange():
    """Background loop: every config.stakecheck seconds, recompute the
    validator stake from the wallet balance and rewrite the crontab entry
    for validator_msig.sh accordingly.

    Stake = floor(balance in tokens) - 15; the crontab is only updated when
    the resulting stake exceeds 100.
    """
    hch = 0
    while True:
        if hch == config.stakecheck:
            hch = 0
            try:
                # kept although unused below: a missing config.minstakes
                # attribute is meant to be caught by the handler
                minstake = config.minstakes
                hstnm = str(os.uname()[1].split('.')[0])
                # wallet address stored as first line of <tk><host>.addr
                wlt = "head -1 " + config.tk + hstnm + ".addr"
                wlt = str(subprocess.check_output(wlt, shell = True,encoding='utf-8').rstrip())
                acctoncli = "cd " + config.ud + " && " + config.ud + "/tonos-cli account " + wlt + " | grep -i 'balance' | awk '{print $2}'"
                acctoncli = str(subprocess.check_output(acctoncli, shell = True,encoding='utf-8'))
                # nanotokens -> tokens
                acctonclibal = str(int(acctoncli) / 1000000000)
                stake = int(float(acctonclibal)) - 15
                if stake > 100:
                    stakes = str(stake)
                    # rewrite the stake argument of validator_msig.sh in crontab
                    updatestakecmd = "crontab -l | sed 's/validator_msig.sh \([0-9]\+\)/validator_msig.sh " + stakes + "/' | crontab -"
                    subprocess.call(updatestakecmd, shell = True,encoding='utf-8')
            except Exception:
                # narrowed from bare "except:"; notification is best-effort
                try:
                    bot.send_message(config.tg,_("Update stake ERROR"))
                except Exception:
                    pass
        else:
            hch += 60
        # NOTE(review): sleep placement reconstructed at loop level
        # (flattened source was ambiguous) -- one tick per minute.
        time.sleep(60)
# Alerts Validator node
def AlertsNotifications():
td = 0
hch = 0
t,p,c = 5,2,15
#q = [t * p ** (i - 1) for i in range(1, c + 1)]
alrtprdvnr = 5
while True:
if td == 5:
td = 0
# Check validator node running
try:
valnodecheck = str(subprocess.check_output(["pidof", "validator-engine"], encoding='utf-8'))
alrtprdvnr =5
except subprocess.CalledProcessError as i:
if i.output != None:
if alrtprdvnr in config.repeattimealarmnode:
#try:
#bot.send_message(config.tg, text="\U0001F6A8 " + _("Validator node is not running!!! Tap restart validator, to run your node"), parse_mode="Markdown", reply_markup=markupValidator)
#except:
#pass
try:
bot.send_message(config.tg, text="\U0001F6A8 " + _("Validator node is not running!!! Restart node in process."), parse_mode="Markdown", reply_markup=markupValidator)
bot.send_chat_action(config.tg, "typing")
nodelogbr = str(subprocess.check_output(["du -msh " + config.tw + "/node.log | awk '{print $1}'"], shell = True,encoding='utf-8'))
nodelogbr = _("*Node.log size before restart :* _") + nodelogbr + "_"
bot.send_message(config.tg, text = nodelogbr, parse_mode="Markdown")
bot.send_chat_action(config.tg, "typing")
killvproc = "ps -eo pid,cmd | grep -i 'validator-engine' | grep -iv 'grep' | awk '{print $1}' | xargs kill -9 $1"
killvproc = str(subprocess.call(killvproc, shell = True,encoding='utf-8'))
bot.send_message(config.tg, text = _("Node stopped. RAM & node.log clean. Starting node"), reply_markup=markupValidator)
bot.send_chat_action(config.tg, "typing")
time.sleep(1)
if config.nodelogressave == 1:
tms = str(datetime.datetime.today().strftime("%b_%d_%Y-%H_%M_%S"))
nodelogsavelog = str(subprocess.call(["mv " + config.tw + "/node.log " + config.tw + "/node_before_" + tms + ".log"], shell = True,encoding='utf-8'))
else:
pass
time.sleep(2)
try:
master, slave = pty.openpty()
stdout = None
stderr = None
#runvproc = config.tontgpath + "/run.sh"
runvproc = "/bin/bash " + config.tf + "scripts/run.sh"
runvprocc = subprocess.Popen(runvproc, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, encoding='utf-8', close_fds=True)
stdout, stderr = runvprocc.communicate(timeout=5)
os.close(slave)
os.close(master)
bot.send_message(config.tg, text = stdout, reply_markup=markupValidator)
except Exception as i:
kill(runvprocc.pid)
os.close(slave)
os.close(master)
bot.send_message(config.tg, text = _("Start error. Try to start your node manually"), reply_markup=markupValidator)
except:
bot.send_message(config.tg, text = _("Restart error. Try to restart your node manually"), reply_markup=markupValidator)
alrtprdvnr +=5
else:
alrtprdvnr +=5
if hch == config.balchecks:
hch = 0
try:
minstake | |
# Repository: AllenInstitute/OpenScope_CA_Analysis
"""
corr_analys.py
This script contains functions for USI correlation analysis.
Authors: <NAME>
Date: January, 2021
Note: this code uses python 3.7.
"""
import copy
import logging
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
import scipy.ndimage as scind
from util import logger_util, gen_util, math_util, rand_util
from sess_util import sess_ntuple_util
from analysis import misc_analys, usi_analys
logger = logging.getLogger(__name__)
TAB = " "
#############################################
def get_corr_pairs(sess_df, consec_only=True):
    """
    get_corr_pairs(sess_df)

    Returns correlation pairs.

    Required args:
        - sess_df (pd.DataFrame):
            dataframe containing session information, including the following
            keys: "sess_ns", "lines", "planes"

    Optional args:
        - consec_only (bool):
            if True, only consecutive session numbers are correlated
            default: True

    Returns:
        - corr_ns (list):
            session number pairs, e.g. [[s1, s2], [s2, s3], ...]
    """
    # collect unique session pairs across line/plane groupings, in the
    # order they are first encountered
    corr_ns = []
    for _, lp_df in sess_df.groupby(["lines", "planes"]):
        sess_ns = np.sort(lp_df["sess_ns"].unique())
        n_sess = len(sess_ns)
        for i in range(n_sess):
            for j in range(i + 1, n_sess):
                sess1, sess2 = sess_ns[i], sess_ns[j]
                if consec_only and (sess2 - sess1 != 1):
                    continue
                pair = [sess1, sess2]
                if pair not in corr_ns:
                    corr_ns.append(pair)

    if not corr_ns:
        raise RuntimeError("No session pairs found.")

    return corr_ns
#############################################
def set_multcomp(permpar, sessions, analyspar, consec_only=True, factor=1):
    """
    set_multcomp(permpar, sessions, analyspar)

    Returns permpar updated with the number of comparisons computed from the
    sessions.

    Required args:
        - permpar (PermPar or dict):
            named tuple containing permutation parameters
        - sessions (list):
            Session objects
        - analyspar (AnalysPar):
            named tuple containing analysis parameters

    Optional args:
        - consec_only (bool):
            if True, only consecutive session numbers are correlated
            default: True
        - factor (int):
            multiplicative factor
            default: 1

    Returns:
        - permpar (PermPar):
            updated permutation parameter named tuple
    """
    sess_df = misc_analys.get_check_sess_df(sessions, analyspar=analyspar)

    # one comparison per session pair, accumulated across line/plane groups
    n_comps = sum(
        len(get_corr_pairs(lp_df, consec_only=consec_only))
        for _, lp_df in sess_df.groupby(["lines", "planes"])
    )
    n_comps *= factor

    return sess_ntuple_util.get_modif_ntuple(permpar, "multcomp", n_comps)
#############################################
def get_corr_info(permpar, corr_type="corr", permute="sess", norm=True):
    """
    get_corr_info(permpar)

    Returns updated correlation parameters.

    Required args:
        - permpar (PermPar):
            named tuple containing permutation parameters.

    Optional args:
        - corr_type (str):
            type of correlation to run, i.e. "corr" or "R_sqr"
            default: "corr"
        - permute (str):
            type of permutation to do ("tracking", "sess" or "all")
            default: "sess"
        - norm (bool):
            if True, normalized correlation data is returned, if corr_type is
            "diff_corr"
            default: True

    Returns:
        - corr_type (str):
            updated type of correlation to run
        - paired (bool or str):
            type of permutation pairing (True, "within" or False)
        - norm (bool):
            whether normalized correlation data applies
    """
    # map randomization scheme to the pairing mode used downstream
    pairing_by_permute = {"sess": True, "tracking": "within", "all": False}
    if permute not in pairing_by_permute:
        gen_util.accepted_values_error(
            "permute", permute, ["sess", "tracking", "all"]
        )
    paired = pairing_by_permute[permute]

    # session/full permutations operate on differences
    if permute in ["sess", "all"] and "diff_" not in corr_type:
        corr_type = f"diff_{corr_type}"

    if corr_type != "diff_corr":
        norm = False  # normalization only applies to "diff_corr"

    if "R_sqr" in corr_type and permpar.tails != "hi":
        raise NotImplementedError(
            "For R-squared analyses, permpar.tails should be set to 'hi'."
        )

    valid_corr_types = ["corr", "diff_corr", "R_sqr", "diff_R_sqr"]
    if corr_type not in valid_corr_types:
        gen_util.accepted_values_error("corr_type", corr_type, valid_corr_types)

    return corr_type, paired, norm
#############################################
def get_norm_corrs(corr_data, med=0, corr_type="diff_corr"):
    """
    get_norm_corrs(corr_data)

    Returns normalized correlation values.

    Required args:
        - corr_data (1D array):
            values to normalize

    Optional args:
        - med (float):
            null distribution median for normalization
            default: 0
        - corr_type (str):
            type of correlation run (for checking), must be "diff_corr"
            default: "diff_corr"

    Returns:
        - norm_corr_data (1D array): normalized correlations
    """
    if corr_type != "diff_corr":
        raise ValueError("Normalization should only be used with 'diff_corr'.")

    corr_data = np.asarray(corr_data)

    if np.absolute(med) > 1:
        raise RuntimeError(
            "Absolute correlations should not be greater than 1."
        )

    # distance from the median to each bound of [-1, 1]
    dist_to_low = np.absolute(med + 1)
    dist_to_high = np.absolute(1 - med)

    # rescale each datapoint by the distance to the bound on its own side
    scale = np.where(corr_data > med, dist_to_high, dist_to_low)
    norm_corr_data = (corr_data - med) / scale

    return norm_corr_data
#############################################
def corr_bootstrapped_std(data, n_samples=1000, randst=None, corr_type="corr",
                          return_rand=False, nanpol=None, med=0, norm=True):
    """
    corr_bootstrapped_std(data)

    Returns bootstrapped standard deviation for Pearson correlations.

    Required args:
        - data (2D array):
            values to correlate for each of 2 groups (2, n)

    Optional args:
        - n_samples (int):
            number of samplings to take for bootstrapping
            default: 1000
        - randst (int or np.random.RandomState):
            seed or random state to use when generating random values.
            default: None
        - corr_type (str):
            type of correlation to run, i.e. "corr" or "R_sqr"
            (norm=True is only valid with "diff_corr")
            default: "corr"
        - return_rand (bool): if True, random correlations are returned
            default: False
        - nanpol (str):
            policy for NaNs, "omit" or None
            default: None
        - med (float):
            null distribution median for normalization, if norm is True
            default: 0
        - norm (bool):
            if True, normalized correlation data is returned
            default: True

    Returns:
        - bootstrapped_std (float):
            bootstrapped standard deviation of correlations,
            normalized if norm is True
        if return_rand:
        - rand_corrs (1D array):
            randomly generated correlations, normalized if norm is True
    """
    randst = rand_util.get_np_rand_state(randst, set_none=True)

    n_samples = int(n_samples)

    data = np.asarray(data)
    if data.ndim != 2 or data.shape[0] != 2:
        raise ValueError(
            "data must have 2 dimensions, with the first having length 2."
            )
    n = data.shape[1]

    # resample datapoint indices with replacement (n x n_samples draws)
    choices = np.arange(n)

    # correlations for each bootstrap resample
    rand_corrs = math_util.calc_op(
        list(data[:, randst.choice(choices, (n, n_samples), replace=True)]),
        op=corr_type, nanpol=nanpol, axis=0,
        )

    if norm:
        rand_corrs = get_norm_corrs(rand_corrs, med=med, corr_type=corr_type)

    bootstrapped_std = math_util.error_stat(
        rand_corrs, stats="mean", error="std", nanpol=nanpol
        )

    if return_rand:
        return bootstrapped_std, rand_corrs
    else:
        return bootstrapped_std
#############################################
def get_corr_data(sess_pair, data_df, analyspar, permpar,
corr_type="corr", permute="sess", absolute=False, norm=True,
return_data=False, return_rand=False, n_rand_ex=1,
randst=None, raise_no_pair=True):
"""
get_corr_data(sess_pair, data_df, analyspar, permpar)
Returns correlation data for a session pair.
Required args:
- sess_pair (list):
sessions to correlate, e.g. [1, 2]
- data_df (pd.DataFrame):
dataframe with one row per line/plane/session, and the following
columns, in addition to the basic sess_df columns:
- roi_idxs (list): index for each ROI
- analyspar (AnalysPar):
named tuple containing analysis parameters
- permpar (PermPar):
named tuple containing permutation parameters.
Optional args:
- corr_type (str):
type of correlation to run, i.e. "corr" or "R_sqr"
default: "corr"
- permute (str):
            type of permutation to use ("tracking", "sess" or "all")
default: "sess"
- absolute (bool):
if True, absolute USIs are used for correlation calculation instead
of signed USIs
default: False
- norm (bool):
            if True, normalized correlation data is returned, if corr_type is
"diff_corr"
default: True
- return_data (bool):
if True, data to correlate is returned
default: False
- return_rand (bool):
if True, random normalized correlation values are returned, along
with random data to correlate for one example permutation
default: False
- n_rand_ex (int):
number of examples to return, if return_rand is True
default: 1
- randst (int or np.random.RandomState):
random state or seed value to use. (-1 treated as None)
default: None
- raise_no_pair (bool):
if True, if sess_pair session numbers are not found, an error is
raised. Otherwise, None is returned.
default: True
Returns:
- roi_corr (float):
(normalized) correlation between sessions
- roi_corr_std (float):
bootstrapped standard deviation for the (normalized) correlation
between sessions
- null_CI (1D array):
adjusted, null CI for the (normalized) correlation between sessions
- p_val (float):
uncorrected p-value for the correlation between sessions
if return_data:
- corr_data (2D array):
data to correlate (grps (2) x datapoints)
if return_rand:
- rand_corrs (1D array):
(normalized) random correlation between sessions
- rand_ex (3D array):
example randomized data pairs to correlate
(grps (2) x datapoints x n_rand_ex)
- rand_ex_corr (1D array):
correlation for example randomized data pairs
"""
nanpol = None if analyspar.rem_bad else "omit"
if analyspar.stats != "mean" or analyspar.error != "std":
raise NotImplementedError(
"analyspar.stats must be set to 'mean', and "
"analyspar.error must be set to 'std'."
)
roi_idxs = []
for sess_n in sess_pair:
row = data_df.loc[data_df["sess_ns"] == sess_n]
if len(row) < 1:
continue
elif len(row) > 1:
raise RuntimeError("Expected at most one row.")
data = np.asarray(row.loc[row.index[0], "roi_idxs"])
roi_idxs.append(data)
if len(roi_idxs) != 2:
if raise_no_pair:
raise RuntimeError("Session pairs not found.")
else:
return None
if roi_idxs[0].shape != roi_idxs[1].shape:
raise RuntimeError(
| |
for function pointers.
Returns
-------
pyx : str
Cython pyx header file as in-memory string.
"""
ts = ts or TypeSystem()
m = {'extra': mod.get('extra', ''),
'docstring': mod.get('docstring', "no docstring, please file a bug report!"),
"pyx_filename": mod.get("pyx_filename", "")}
attrs = []
import_tups = set()
cimport_tups = set()
classnames = _classnames_in_mod(mod, ts)
with ts.local_classes(classnames):
for name, desc in mod.items():
if isvardesc(desc):
i_tup, ci_tup, attr_str = varpyx(desc, ts=ts)
elif isfuncdesc(desc):
i_tup, ci_tup, attr_str = funcpyx(desc, ts=ts)
elif isclassdesc(desc):
i_tup, ci_tup, attr_str = classpyx(desc, classes=classes, ts=ts,
max_callbacks=max_callbacks)
else:
continue
import_tups |= i_tup
cimport_tups |= ci_tup
attrs.append(attr_str)
# Add dispatcher for template functions
template_funcs = _template_funcnames_in_mod(mod)
template_dispatcher = _gen_template_func_dispatcher(template_funcs, ts)
attrs.append(template_dispatcher)
# Add dispatcher for template classes
template_classes = _template_classnames_in_mod(mod)
template_dispatcher = _gen_template_class_dispatcher(template_classes, ts)
attrs.append(template_dispatcher)
import_tups.discard((mod["name"],))
#cimport_tups.discard((mod["name"],)) # remain commented for decls
if mod.get('language', None) == 'c':
import_tups.discard((ts.stlcontainers,))
cimport_tups.discard((ts.stlcontainers,))
m['imports'] = "\n".join(sorted(ts.cython_import_lines(import_tups)))
m['cimports'] = "\n".join(sorted(ts.cython_cimport_lines(cimport_tups)))
if 'numpy' in m['cimports']:
m['imports'] += "\n\nnp.import_array()"
m['attrs_block'] = "\n".join(attrs)
t = '\n\n'.join([AUTOGEN_WARNING, '{cimports}', '{attrs_block}', '{extra}'])
pyx = _pyx_mod_template.format(**m)
return pyx
def _gen_template_pyfill(arg, kind, ts):
    """Generates the fill values for an argument of a type into a template type t.

    Returns the Python-side source string for *arg*: a Cython pytype for
    type arguments, str() for literals and numbers, and the bare name for
    variables.  String arguments fall back to their own value when they
    cannot be resolved as a type.
    """
    if kind is Arg.TYPE:
        rtn = ts.cython_pytype(arg)
    elif kind is Arg.LIT:
        # BUG FIX: this previously assigned to a misspelled name ('rnt'),
        # leaving 'rtn' unbound and raising UnboundLocalError for literals.
        rtn = str(arg)
    elif kind is Arg.VAR:
        rtn = arg
    elif isinstance(arg, Number):
        rtn = str(arg)
    elif isinstance(arg, basestring):
        try:
            rtn = ts.cython_pytype(arg)
        except TypeError:
            rtn = arg
    # NOTE(review): an arg that matches none of the branches above still
    # raises UnboundLocalError here -- confirm callers never pass one.
    return rtn
def _gen_template_func_dispatcher(templates, ts):
    """Generate a dictionary-based dispatcher for template functions.

    Each template function name maps to a dict keyed by the template
    arguments -- once by their reprs and once by their Python-type fill
    values -- whose entries are the concrete Cython functions.
    """
    if not templates:
        return ""
    disp = ['', "#", "# Function Dispatchers", "#"]
    initialized = set()
    for tmpl in sorted(templates):
        basename = tmpl[0]
        initline = "{0} = {{}}".format(basename)
        if initline not in initialized:
            disp.extend(["", "# {0} dispatcher".format(basename), initline])
            initialized.add(initline)
        targs = tmpl[1:]
        funcname = ts.cython_funcname(tmpl)
        kinds = ts.argument_kinds.get(tmpl, ((Arg.NONE,)) * (len(tmpl) - 1))
        if not targs:
            raise ValueError("type {0!r} not a template".format(tmpl))
        if len(targs) == 1:
            disp.append("{0}[{1!r}] = {2}".format(basename, targs[0], funcname))
            disp.append("{0}[{1}] = {2}".format(
                basename, _gen_template_pyfill(targs[0], kinds[0], ts), funcname))
        else:
            repr_keys = ", ".join(repr(a) for a in targs)
            fill_keys = ", ".join(
                _gen_template_pyfill(a, k, ts) for a, k in zip(targs, kinds))
            disp.append("{0}[{1}] = {2}".format(basename, repr_keys, funcname))
            disp.append("{0}[{1}] = {2}".format(basename, fill_keys, funcname))
    return "\n".join(disp)
def _gen_template_class_dispatcher(templates, ts):
    """Generates a dictionary-based dispatcher for template classes.

    Each template class name maps to a dict keyed by the template
    arguments -- once by their reprs and once by their Python-type fill
    values -- whose entries are the concrete Cython classes.
    """
    if 0 == len(templates):
        return ""
    templates = sorted(templates)
    disp = ['', "#", "# Class Dispatchers", "#",]
    alreadyinitd = set()
    for t in templates:
        initline = "{0} = {{}}".format(t[0])
        if initline not in alreadyinitd:
            disp.append("")
            disp.append("# {0} Dispatcher".format(t[0]))
            disp.append(initline)
            alreadyinitd.add(initline)
        # for class types, the trailing element of t is a predicate, not a
        # template argument, so it is excluded from the dispatch keys
        args = t[1:-1]
        pytype = ts.cython_pytype(t)
        kinds = ts.argument_kinds.get(t, ((Arg.NONE,))*(len(t)-1))
        if 0 == len(args):
            raise ValueError("type {0!r} not a template".format(t))
        elif 1 == len(args):
            disp.append("{0}[{1!r}] = {2}".format(t[0], t[1], pytype))
            disp.append("{0}[{1}] = {2}".format(t[0],
                        _gen_template_pyfill(t[1], kinds[0], ts), pytype))
        else:
            rs = [repr(_) for _ in t[1:-1]]
            # BUG FIX: previously zipped over t[1:], which included the
            # trailing predicate element and emitted one more key component
            # than the repr-based keys built from t[1:-1].
            pyts = [_gen_template_pyfill(x, k, ts) for x, k in zip(t[1:-1], kinds)]
            disp.append("{0}[{1}] = {2}".format(t[0], ", ".join(rs), pytype))
            disp.append("{0}[{1}] = {2}".format(t[0], ", ".join(pyts), pytype))
    return "\n".join(disp)
def _gen_property_get(name, t, ts, cached_names=None, inst_name="self._inst",
                      classes=()):
    """Generate the ``__get__`` body of a Cython property for the variable
    *name* of type *t*.

    When the C-to-Python conversion caches its result, the cached proxy
    name is appended to *cached_names* (if given) so the caller can wire
    up cache invalidation in the setter.
    """
    src = ['def __get__(self):']
    decl, body, rtn, iscached = ts.cython_c2py(name, t, inst_name=inst_name)
    if decl is not None:
        # for class pointers, redo the declaration using the pointee type
        if _isclassptr(t, classes):
            decl, _, _, _ = ts.cython_c2py(name, t[0], inst_name=inst_name)
        src.extend(indent(decl, join=False))
    if body is not None:
        src.extend(indent(body, join=False))
    if iscached and cached_names is not None:
        cached_names.append(rtn)
    src.extend(indent("return {0}".format(rtn), join=False))
    return src
def _gen_property_set(name, t, ts, inst_name="self._inst", cached_name=None,
                      classes=()):
    """Generate the ``__set__`` body of a Cython property for the variable
    *name* of type *t*.

    When *cached_name* is given, the generated code resets that cache
    variable to None so a stale getter value is not reused.
    """
    src = ['def __set__(self, value):']
    decl, body, rtn = ts.cython_py2c('value', t)
    if decl is not None:
        src.extend(indent(decl, join=False))
    if body is not None:
        src.extend(indent(body, join=False))
    src.extend(indent("{0}.{1} = {2}".format(inst_name, name, rtn), join=False))
    if cached_name is not None:
        src.extend(indent("{0} = None".format(cached_name), join=False))
    return src
def _gen_property(name, t, ts, doc=None, cached_names=None, inst_name="self._inst",
                  classes=()):
    """Generate a full Cython ``property`` block (getter plus setter) for
    the variable *name* of type *t*.

    If the getter registered a cached proxy name, the setter is told to
    invalidate that cache entry on assignment.
    """
    src = ['property {0}:'.format(name)]
    if doc is not None:
        src.extend(indent('"""{0}"""'.format(doc), join=False))
    n_cached_before = 0 if cached_names is None else len(cached_names)
    getter = _gen_property_get(name, t, ts, cached_names=cached_names,
                               inst_name=inst_name, classes=classes)
    src.extend(indent(getter, join=False))
    src.append('')
    n_cached_after = 0 if cached_names is None else len(cached_names)
    # the getter appended exactly one name iff this property is cached
    cached_name = cached_names[-1] if n_cached_after == n_cached_before + 1 else None
    setter = _gen_property_set(name, t, ts, inst_name=inst_name,
                               cached_name=cached_name, classes=classes)
    src.extend(indent(setter, join=False))
    src.extend(['', ''])
    return src
def _gen_function_pointer_property(name, t, ts, doc=None, cached_names=None,
                                   inst_name="self._inst", classname='', max_callbacks=8):
    """This generates a Cython property for a function pointer variable.

    The getter converts the stored C function pointer back to Python.  The
    setter stores the Python callable in one of ``max_callbacks`` numbered
    module-level slots and assigns the matching C proxy to the underlying
    struct member.  A ``_deref_<name>_callback`` helper releases the slot.

    NOTE(review): the generated setter/deref code interpolates
    ``cached_name``, which is None when the getter did not register a
    cached proxy name -- confirm upstream always caches function-pointer
    getters.
    """
    lines = ['property {0}:'.format(name)]
    # get section
    lines += [] if doc is None else indent('\"\"\"{0}\"\"\"'.format(doc), join=False)
    oldcnlen = 0 if cached_names is None else len(cached_names)
    lines += indent(_gen_property_get(name, t, ts, cached_names=cached_names,
                                      inst_name=inst_name), join=False)
    # set section
    # zero-padding width for the numbered callback slot suffixes
    mczeropad = int(math.log10(max_callbacks)) + 1
    lines += [""]
    newcnlen = 0 if cached_names is None else len(cached_names)
    # the getter appended exactly one cached name iff the value is cached
    cached_name = cached_names[-1] if newcnlen == 1 + oldcnlen else None
    # generate the setter against a plain void*; only its 'def __set__'
    # header line is kept, the body is replaced by the slot bookkeeping
    setlines = indent(_gen_property_set(name, ('void', '*'), ts, inst_name=inst_name,
                                        cached_name=cached_name), join=False)
    lines += setlines[:1]
    lines += indent(indent(['if not callable(value):',
        (' raise ValueError("{0!r} is not callable but ' + classname +
         '.' + name + ' is a function pointer!".format(value))')],
        join=False), join=False)
    #lines += setlines[1:]
    pyname, cname = _mangle_function_pointer_name(name, classname)
    # one Python-side slot name and one C proxy name per callback index
    pynames = [pyname + "{0:0{1}}".format(i, mczeropad) for i in \
               range(max_callbacks)]
    cnames = [cname + "{0:0{1}}".format(i, mczeropad) for i in \
              range(max_callbacks)]
    if max_callbacks == 1:
        # single slot: assign unconditionally, no vtable index needed
        suffix = '0'
        extraset = ('global {pyname}\n'
                    '{cached_name} = value\n'
                    '{pyname} = value\n'
                    '{inst_name}.{name} = {cname}\n'
                    ).format(name=name, pyname=pyname + suffix, cname=cname + suffix,
                             cached_name=cached_name, inst_name=inst_name)
    elif max_callbacks > 1:
        extraset = ['cdef unsigned int vtab_i',
                    '{cached_name} = value'.format(cached_name=cached_name),
                    "global " + ', '.join(pynames) + \
                    ', _current_{0}_vtab_i'.format(pyname),]
        # pick the first free slot (elif-chain; the first 'elif' is turned
        # into an 'if' by stripping its leading two characters)
        selectlines = []
        for i, pyname_i in enumerate(pynames):
            selectlines.append("elif {0} is None:".format(pyname_i))
            selectlines.append(" vtab_i = {0}".format(i))
        selectlines[0] = selectlines[0][2:]
        extraset += selectlines
        # no free slot left: round-robin overwrite with a warning
        extraset += ['else:',
                     (' warnings.warn("Ran out of available callbacks for '
                      '{0}.{1}, overriding existing callback.", RuntimeWarning)'
                      ).format(classname, name),
                     ' vtab_i = _current_{0}_vtab_i'.format(pyname),
                     ' _current_{0}_vtab_i = (_current_{0}_vtab_i+1)%{1}'.format(
                        pyname, max_callbacks),
                     'self._{0}_vtab_i = vtab_i'.format(name),]
        # store the callable and wire the chosen C proxy into the struct
        setvallines = []
        for i, (pyname_i, cname_i) in enumerate(zip(pynames, cnames)):
            setvallines.append("elif vtab_i == {0}:".format(i))
            setvallines.append(" {pyname} = value".format(pyname=pyname_i))
            setvallines.append(" {inst_name}.{name} = {cname}".format(
                                inst_name=inst_name, name=name, cname=cname_i))
        setvallines[0] = setvallines[0][2:]
        extraset += setvallines
    else:
        msg = "The max number of callbacks for {0} must be >=1, got {1}."
        raise RuntimeError(msg.format(classname, max_callbacks))
    lines += indent(indent(extraset, join=False), join=False)
    lines.append('')
    # helper to release the slot; max_callbacks+1 marks "no slot held"
    lines += ["def _deref_{0}_callback(self):".format(name),
              ' "Warning: this can have dangerous side effects!"',
              ' cdef unsigned int vtab_i',
              ' {cached_name} = None'.format(cached_name=cached_name),
              " if self._{0}_vtab_i < {1}:".format(name, max_callbacks+1),
              '  vtab_i = self._{0}_vtab_i'.format(name),
              " self._{0}_vtab_i = {1}".format(name, max_callbacks+1), ]
    dereflines = []
    for i, pyname_i in enumerate(pynames):
        dereflines.append("elif vtab_i == {0}:".format(i))
        dereflines.append(" global {0}".format(pyname_i))
        dereflines.append(" {0} = None".format(pyname_i))
    dereflines[0] = dereflines[0][2:]
    lines += indent(indent(dereflines, join=False), join=False)
    lines += ['', ""]
    return lines
def _gen_function_pointer_wrapper(name, t, ts, classname='', max_callbacks=8):
    """Generate module-level helper code for a function pointer variable:
    a vtable counter plus, per callback index, one Python-side slot
    (initialized to None) and one C-callable proxy.
    """
    pyname, cname = _mangle_function_pointer_name(name, classname)
    # zero-padding width for the numbered slot suffixes
    pad = int(math.log10(max_callbacks)) + 1
    src = ["#\n# Function pointer helpers for {1}.{0}\n#".format(name, classname),
           "_current_{0}_vtab_i = 0".format(pyname), ""]
    for idx in range(max_callbacks):
        tag = "{0:0{1}}".format(idx, pad)
        py_slot = pyname + tag
        decl, body, rtn = ts.cython_py2c(py_slot, t, proxy_name=cname + tag)
        src.append(py_slot + " = None")
        src.append('')
        src.extend(rtn.splitlines())
        src.append('')
    src.extend(['', ''])
    return src
def _gen_argfill(args, defaults):
"""Generate argument list for a function, and return (argfill, names).
If any argument names or empty, the corresponding entry in names will
be '_n' for some integer n.
"""
counter = 0
taken = frozenset(a[0] for a in args)
names = []
afill = []
for (name, t), (kind, default) in zip(args, defaults):
if not name: # Empty name, generate a fresh dummy symbol
while 1:
name = '_%d'%counter
counter += 1
if name not in taken:
break
names.append(name)
if kind is Arg.NONE:
afillval = name
elif kind is Arg.LIT:
afillval = "{0}={1!r}".format(name, default)
elif kind is Arg.VAR:
afillval = "{0}={1}".format(name, default)
elif kind is Arg.TYPE:
raise ValueError("default argument value cannot be a type: "
"{0}".format(name))
else:
raise | |
"Define basic subroutines useful for all AI players"
from ..board import black, white, empty, Board, InvalidMoveError
import numpy as np
import unittest
class Playerlibrary(object):
"""
A library class that holds basic subroutines that are useful for all
kinds of artificial-intelligence-type (AI-type) players, e.g. the
function ``win_if_possible`` that checks if the game can be won in
the next move.
All the functions are written to take the same arguments as
``Player.make_move`` such that the call from within ``make_move``
looks like e.g. ``self.win_if_possible(gui)``.
"""
def line_getter_functions(self, gui, length=5):
return [lambda x,y: gui.board.get_column(x,y,length=length), lambda x,y: gui.board.get_row(x,y, length=length),
lambda x,y: gui.board.get_diagonal_upleft_to_lowright(x,y, length=length),
lambda x,y: gui.board.get_diagonal_lowleft_to_upright(x,y, length=length)]
def random_move(self, gui):
moves_left = gui.board.moves_left
while moves_left == gui.board.moves_left:
x = np.random.randint(gui.board.width)
y = np.random.randint(gui.board.height)
try:
gui.board[y,x] = self.color
except InvalidMoveError:
continue
def extend_one(self, gui):
"Place a stone next to another one but only if extendable to five."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# search pattern: one of own color and four empty
if len(np.where(line == empty)[0]) == 4 and len(np.where(line == self.color)[0]) == 1:
index_own_color = np.where(line == self.color)[0][0]
if index_own_color == 0:
gui.board[positions[1]] = self.color
return True
else:
gui.board[positions[index_own_color - 1]] = self.color
return True
return False
def block_open_four(self, gui):
"Block a line of four stones if at least one end open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search four of opponent's color and one empty
if len(np.where(line == empty)[0]) == 1 and len(np.where(line == -self.color)[0]) == 4:
index_of_empty = np.where(line == empty)[0][0]
gui.board[positions[index_of_empty]] = self.color
return True
return False
def block_doubly_open_two(self, gui):
"Block a line of two if both sides are open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# select pattern [<all empty>, <opponent's color>, <opponent's color>, <all empty>]
if ( line == (empty, -self.color, -self.color, empty, empty) ).all():
gui.board[positions[3]] = self.color
return True
elif ( line == (empty, empty, -self.color, -self.color, empty) ).all():
gui.board[positions[1]] = self.color
return True
return False
    def block_twice_to_three_or_more(self, gui):
        """Prevent opponent from closing two lines of three or more simultaneously.

        Scans all lines; whenever a line holds at least two opponent
        stones and only empty fields otherwise, its positions are
        remembered.  If a later such line (from a different direction)
        crosses a remembered one at an empty field, that crossing field
        is blocked.
        """
        line_getter_functions = self.line_getter_functions(gui)
        # positions (and directions) of previously seen dangerous lines
        line_positions = []
        getter_functions = []
        for i in range(gui.board.height):
            for j in range(gui.board.width):
                for f in line_getter_functions:
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # search two of opponent's color and three empty in two crossing lines at an empty position
                    opponent_stones_in_line = len(np.where(line == -self.color)[0])
                    # line must contain only opponent stones (>= 2) and empty fields
                    if opponent_stones_in_line >= 2 and len(np.where(line == empty)[0]) == 5 - opponent_stones_in_line:
                        for oldpos, old_getter in zip(line_positions, getter_functions):
                            for pos in positions:
                                # empty crossing point of two dangerous lines from different directions
                                if f != old_getter and pos in oldpos and gui.board[pos] == empty:
                                    gui.board[pos] = self.color
                                    return True
                        line_positions.append(positions)
                        getter_functions.append(f)
        return False
def block_open_three(self, gui):
"Block a line of three."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search three of opponent's color and two empty
if len(np.where(line == empty)[0]) == 2 and len(np.where(line == -self.color)[0]) == 3:
indices_opponent = np.where(line == -self.color)[0]
if not (indices_opponent[1] == indices_opponent[0] + 1 and \
indices_opponent[2] == indices_opponent[1] + 1):
continue
if 0 not in indices_opponent:
gui.board[positions[indices_opponent[0] - 1]] = self.color
return True
else:
gui.board[positions[3]] = self.color
return True
return False
def block_open_two(self, gui):
"Block a line of two."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search pattern [<all empty or bpundary>, opponent, opponent, <all empty or boundary>]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == -self.color)[0]) == 2:
indices_opponent = np.where(line == -self.color)[0]
if indices_opponent[1] == indices_opponent[0] + 1:
if indices_opponent[0] == 0:
gui.board[positions[3]] = self.color
return True
else:
gui.board[positions[indices_opponent[0]-1]] = self.color
return True
return False
def block_doubly_open_three(self, gui):
"Block a line of three but only if both sides are open."
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
if ( line == (empty, -self.color, -self.color, -self.color, empty) ).all():
gui.board[positions[0]] = self.color
return True
return False
def extend_three_to_four(self, gui):
"""
Extend a line of three stones to a line of four stones but only
if there is enough space to be completed to five.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search three of own color and two empty
if len(np.where(line == empty)[0]) == 2 and len(np.where(line == self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if 0 not in indices_empty:
gui.board[positions[indices_empty[0]]] = self.color
return True
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def block_to_doubly_open_four(self, gui):
"""
Prevent the opponent from getting a line of four with both ends
open.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui, length=6):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search pattern [empty, <extendable to 4 times opponent>, empty]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == -self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if not (line[0] == empty and line[-1] == empty):
continue
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def extend_three_to_doubly_open_four(self, gui):
"""
Extend a line of three stones to a line of four stones but only
if there is enough space to be completed to five ON BOTH SIDES.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui, length=6):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search pattern [empty, <extendable to 4 times own>, empty]
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 3:
indices_empty = np.where(line == empty)[0]
if not (line[0] == empty and line[-1] == empty):
continue
else:
gui.board[positions[indices_empty[1]]] = self.color
return True
return False
def extend_two_to_three(self, gui):
"""
Extend a line of two stones to a line of three stones but only
if there is enough space to be completed to five.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection: search two of own color and three empty
if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 2:
indices_empty = np.where(line == empty)[0]
gui.board[positions[indices_empty[np.random.randint(3)]]] = self.color
return True
return False
    def extend_twice_two_to_three(self, gui):
        """
        Extend two crossing lines of two stones to two lines of three
        stones but only if there is enough space to be completed to five.

        Lines of two own stones with three empty fields are remembered;
        when another such line from a different direction crosses a
        remembered one at an empty field, a stone is placed there,
        extending both lines at once.
        """
        # positions (and directions) of previously seen extendable lines
        line_positions = []
        getter_functions = []
        for f in self.line_getter_functions(gui):
            for i in range(gui.board.height):
                for j in range(gui.board.width):
                    try:
                        line, positions = f(i,j)
                    except IndexError:
                        continue
                    # search two of own color and three empty in two crossing lines at an empty position
                    if len(np.where(line == empty)[0]) == 3 and len(np.where(line == self.color)[0]) == 2:
                        for oldpos, old_getter in zip(line_positions, getter_functions):
                            for pos in positions:
                                # empty crossing point of two extendable lines from different directions
                                if f != old_getter and pos in oldpos and gui.board[pos] == empty:
                                    gui.board[pos] = self.color
                                    return True
                        line_positions.append(positions)
                        getter_functions.append(f)
        return False
def check_if_immediate_win_possible(self, gui):
"""
        Check if it is possible to place a stone such that the player wins
immediately.
Return the position to place the stone if possible, otherwise return None.
"""
for i in range(gui.board.height):
for j in range(gui.board.width):
for f in self.line_getter_functions(gui):
try:
line, positions = f(i,j)
except IndexError:
continue
# selection:
# - can only place stones where field is ``empty``
# - line must sum to "+" or "-" 4 (4 times black=+1 or white=-1 and once empty=0)
# place stone if that leads to winning the game
if empty in line and line.sum() == self.color * 4:
for pos in positions:
if gui.board[pos] == empty:
return pos
raise RuntimeError("Check the implementation of ``check_if_immediate_win_possible``.")
# control reaches this point | |
0, 0))))
self.assertTrue((2, 0, 1) in context._agent_manager._ghosted_agents)
ghosted_to_ranks = context._agent_manager._ghosted_agents[(2, 0, 1)].ghost_ranks
self.assertTrue(3 in ghosted_to_ranks)
self.assertTrue(g.graph.has_edge(context.ghost_agent((0, 0, 3)), context.agent((2, 0, 1))))
self.assertTrue(1 in ghosted_to_ranks)
self.assertTrue(g.graph.has_edge(context.agent((2, 0, 1)), context.ghost_agent((4, 0, 1))))
self.assertEqual(42, g.graph.edges[context.agent((2, 0, 1)),
context.ghost_agent((4, 0, 1))]['weight'])
# original 10 + moved 2,0,1 + requested 1,0,2 + 0,0,3 and 0,0,0 and 1,0,3 through
# synchronized edges
self.assertEqual(15, g.node_count)
elif rank == 1:
# print(g.graph.edges())
# 2,0,1 moved
self.assertIsNone(context.agent((2, 0, 1)))
self.assertIsNotNone(context.ghost_agent((2, 0, 1)))
# these were ghost agents through edges with 2,0,1
# but should now be removed
self.assertIsNone(context.ghost_agent((0, 0, 3)))
self.assertIsNone(context.ghost_agent((0, 0, 0)))
self.assertFalse(g.graph.has_edge(context.ghost_agent((4, 0, 1)), context.agent((0, 0, 3))))
self.assertFalse(g.graph.has_edge(context.ghost_agent((4, 0, 1)), context.agent((0, 0, 0))))
# ghosted from 0 to 1 because of 2,0,1 -> 4,0,1 edge
self.assertIsNotNone(context.ghost_agent((2, 0, 1)))
self.assertTrue(g.graph.has_edge(context.ghost_agent((2, 0, 1)), context.agent((4, 0, 1))))
elif rank == 3:
self.assertIsNone(context.agent((2, 0, 1)))
self.assertIsNotNone(context.ghost_agent((2, 0, 1)))
# Test: update edge(0,0,3 - 2,0,1) with new data
# ghost edge on 0 reflects change
if rank == 3:
g.update_edge(context.agent((0, 0, 3)), context.ghost_agent((2, 0, 1)), weight=12)
self.assertEqual(12, g.graph.edges[context.agent((0, 0, 3)),
context.ghost_agent((2, 0, 1))]['weight'])
context.synchronize(restore_agent)
if rank == 0:
self.assertEqual(12, g.graph.edges[context.ghost_agent((0, 0, 3)),
context.agent((2, 0, 1))]['weight'])
    def test_with_oob(self):
        """Network edges and ghost agents must survive out-of-bounds moves.

        4-rank test: each rank creates 20 agents on a shared grid and
        continuous space plus a directed network, requests some remote
        agents, wires edges between local agents and ghosts, then moves
        agent (2, 0, 1) across rank boundaries twice, checking after each
        synchronize that edges, ghosts, and ghosted attribute values
        follow the agent.
        """
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        context = ctx.SharedContext(comm)
        agents = []
        for i in range(20):
            a = EAgent(i, 0, rank, 1)
            agents.append(a)
            context.add(a)
        box = space.BoundingBox(xmin=0, xextent=90, ymin=0, yextent=120, zmin=0, zextent=0)
        cspace = space.SharedCSpace("shared_space", bounds=box, borders=BorderType.Sticky,
                                    occupancy=OccupancyType.Multiple, buffer_size=2, comm=comm, tree_threshold=100)
        grid = space.SharedGrid("shared_grid", bounds=box, borders=BorderType.Sticky,
                                occupancy=OccupancyType.Multiple, buffer_size=2, comm=comm)
        net = DirectedSharedNetwork('network', comm)
        context.add_projection(cspace)
        context.add_projection(grid)
        context.add_projection(net)
        # scatter each rank's agents uniformly over its local bounds
        random.init(42)
        bounds = grid.get_local_bounds()
        xs = random.default_rng.integers(low=bounds.xmin, high=bounds.xmin + bounds.xextent, size=20)
        ys = random.default_rng.integers(low=bounds.ymin, high=bounds.ymin + bounds.yextent, size=20)
        for i, agent in enumerate(agents):
            grid.move(agent, dpt(xs[i], ys[i]))
            cspace.move(agent, cpt(xs[i], ys[i]))
        # TEST:
        # 1. request agents from neighboring ranks
        # 2. make edges between local agent and ghosts
        # 3. move agents oob, such that
        #    a. former ghost is now local
        #    b. ghost is now on different rank
        # 4. do tests
        requests = []
        if rank == 0:
            requests.append(((1, 0, 1), 1))
            requests.append(((1, 0, 2), 2))
            requests.append(((2, 0, 1), 1))
        elif rank == 3:
            requests.append(((1, 0, 0), 0))
            requests.append(((4, 0, 2), 2))
            requests.append(((2, 0, 1), 1))
        context.request_agents(requests, restore_agent)
        if rank == 0:
            net.add_edge(agents[1], context.ghost_agent((2, 0, 1)), color='red')
            net.add_edge(agents[10], context.ghost_agent((1, 0, 1)))
        elif rank == 1:
            net.add_edge(agents[2], agents[1])
        elif rank == 3:
            net.add_edge(agents[1], context.ghost_agent((1, 0, 0)))
            net.add_edge(agents[5], context.ghost_agent((2, 0, 1)))
        context.synchronize(restore_agent)
        # TESTS edges
        if rank == 0:
            self.assertEqual(3, net.edge_count)
            self.assertTrue(net.graph.has_edge(agents[10], context.ghost_agent((1, 0, 1))))
            self.assertTrue(net.graph.has_edge(agents[1], context.ghost_agent((2, 0, 1))))
            self.assertTrue(net.graph.has_edge(context.ghost_agent((1, 0, 3)), agents[1]))
        elif rank == 1:
            self.assertEqual(4, net.edge_count)
            self.assertTrue(net.graph.has_edge(context.ghost_agent((10, 0, 0)), agents[1], ))
            self.assertTrue(net.graph.has_edge(agents[2], agents[1]))
            self.assertTrue(net.graph.has_edge(context.ghost_agent((1, 0, 0)), agents[2]))
            self.assertTrue(net.graph.has_edge(context.ghost_agent((5, 0, 3)), agents[2]))
            self.assertEqual('red', net.graph.edges[context.ghost_agent((1, 0, 0)), agents[2]]['color'])
        elif rank == 2:
            self.assertEqual(0, net.edge_count)
        elif rank == 3:
            self.assertEqual(2, net.edge_count)
            self.assertTrue(net.graph.has_edge(agents[1], context.ghost_agent((1, 0, 0))))
            self.assertTrue(net.graph.has_edge(agents[5], context.ghost_agent((2, 0, 1))))
        # Bounds:
        # print('{}: bounds: {}'.format(rank, grid.get_local_bounds()), flush=True)
        # 0: bounds: BoundingBox(xmin=0, xextent=45, ymin=0, yextent=60, zmin=0, zextent=0)
        # 1: bounds: BoundingBox(xmin=0, xextent=45, ymin=60, yextent=60, zmin=0, zextent=0)
        # 2: bounds: BoundingBox(xmin=45, xextent=45, ymin=0, yextent=60, zmin=0, zextent=0)
        # 3: bounds: BoundingBox(xmin=45, xextent=45, ymin=60, yextent=60, zmin=0, zextent=0)
        # Move (2, 0, 1) to 2's bounds
        if rank == 1:
            grid.move(agents[2], dpt(46, 35))
            cspace.move(agents[2], cpt(46.2, 35.1))
        context.synchronize(restore_agent)
        # after the oob move, (2, 0, 1) is local to rank 2; its edges and
        # edge attributes must have followed it there
        if rank == 0:
            self.assertEqual(3, net.edge_count)
            self.assertTrue(net.graph.has_edge(agents[10], context.ghost_agent((1, 0, 1))))
            self.assertTrue(net.graph.has_edge(agents[1], context.ghost_agent((2, 0, 1))))
            self.assertTrue(net.graph.has_edge(context.ghost_agent((1, 0, 3)), agents[1]))
        elif rank == 1:
            self.assertEqual(2, net.edge_count)
            self.assertTrue(net.graph.has_edge(context.ghost_agent((10, 0, 0)), agents[1]))
            self.assertTrue(net.graph.has_edge(context.ghost_agent((2, 0, 1)), agents[1]))
        elif rank == 2:
            agent_201 = context.agent((2, 0, 1))
            self.assertIsNotNone(agent_201)
            self.assertEqual(3, net.edge_count)
            self.assertTrue(net.graph.has_edge(context.ghost_agent((1, 0, 0)), agent_201))
            self.assertTrue(net.graph.has_edge(context.ghost_agent((5, 0, 3)), agent_201))
            self.assertTrue(net.graph.has_edge(agent_201, context.ghost_agent((1, 0, 1))))
            self.assertEqual('red', net.graph.edges[context.ghost_agent((1, 0, 0)), agent_201]['color'])
        elif rank == 3:
            self.assertEqual(2, net.edge_count)
            self.assertTrue(net.graph.has_edge(agents[1], context.ghost_agent((1, 0, 0))))
            self.assertTrue(net.graph.has_edge(agents[5], context.ghost_agent((2, 0, 1))))
        # mutate a ghosted agent's state on its home rank, and move
        # (2, 0, 1) again, this time to rank 3's bounds
        if rank == 0:
            agents[1].energy = 101
        elif rank == 2:
            # 201 to 3
            agent_201 = context.agent((2, 0, 1))
            grid.move(agent_201, dpt(46, 80))
            cspace.move(agent_201, cpt(46.2, 80.1))
        context.synchronize(restore_agent)
        # print(f'{rank}: {net.graph.edges()}', flush=True)
        if rank == 0:
            self.assertEqual(3, net.edge_count)
            self.assertTrue(net.graph.has_edge(agents[10], context.ghost_agent((1, 0, 1))))
            self.assertTrue(net.graph.has_edge(agents[1], context.ghost_agent((2, 0, 1))))
            self.assertTrue(net.graph.has_edge(context.ghost_agent((1, 0, 3)), agents[1]))
        elif rank == 1:
            self.assertEqual(2, net.edge_count)
            self.assertTrue(net.graph.has_edge(context.ghost_agent((10, 0, 0)), agents[1]))
            self.assertTrue(net.graph.has_edge(context.ghost_agent((2, 0, 1)), agents[1]))
        elif rank == 2:
            self.assertEqual(0, net.edge_count)
        elif rank == 3:
            self.assertEqual(4, net.edge_count)
            agent_201 = context.agent((2, 0, 1))
            agent_100 = context.ghost_agent((1, 0, 0))
            self.assertIsNotNone(agent_201)
            self.assertTrue(net.graph.has_edge(agents[1], agent_100))
            self.assertTrue(net.graph.has_edge(agents[5], agent_201))
            self.assertTrue(net.graph.has_edge(agent_201, context.ghost_agent((1, 0, 1))))
            self.assertTrue(net.graph.has_edge(agent_100, agent_201))
            # the energy update on rank 0 must have propagated to the ghost
            self.assertEqual(101, context.ghost_agent((1, 0, 0)).energy)
    def test_in_buffer(self):
        """Tests that a requested agent that also sits in a buffer region
        remains properly ghosted after it moves out of the buffer.
        """
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        context = ctx.SharedContext(comm)
        # Create 20 agents local to this rank and register them with the context.
        agents = []
        for i in range(20):
            a = EAgent(i, 0, rank, 1)
            agents.append(a)
            context.add(a)
        box = space.BoundingBox(xmin=0, xextent=90, ymin=0, yextent=120, zmin=0, zextent=0)
        cspace = space.SharedCSpace("shared_space", bounds=box, borders=BorderType.Sticky,
                                    occupancy=OccupancyType.Multiple, buffer_size=2, comm=comm, tree_threshold=100)
        grid = space.SharedGrid("shared_grid", bounds=box, borders=BorderType.Sticky,
                                occupancy=OccupancyType.Multiple, buffer_size=2, comm=comm)
        net = DirectedSharedNetwork('network', comm)
        context.add_projection(net)
        context.add_projection(cspace)
        context.add_projection(grid)
        # Scatter every agent at a random point within this rank's local bounds.
        random.init(42)
        bounds = grid.get_local_bounds()
        xs = random.default_rng.integers(low=bounds.xmin, high=bounds.xmin + bounds.xextent, size=20)
        ys = random.default_rng.integers(low=bounds.ymin, high=bounds.ymin + bounds.yextent, size=20)
        for i, agent in enumerate(agents):
            grid.move(agent, dpt(xs[i], ys[i]))
            cspace.move(agent, cpt(xs[i], ys[i]))
        # Bounds:
        # print('{}: bounds: {}'.format(rank, grid.get_local_bounds()))
        # 0: bounds: BoundingBox(xmin=0, xextent=45, ymin=0, yextent=60, zmin=0, zextent=0)
        # 1: bounds: BoundingBox(xmin=0, xextent=45, ymin=60, yextent=60, zmin=0, zextent=0)
        # 2: bounds: BoundingBox(xmin=45, xextent=45, ymin=0, yextent=60, zmin=0, zextent=0)
        # 3: bounds: BoundingBox(xmin=45, xextent=45, ymin=60, yextent=60, zmin=0, zextent=0)
        # TEST:
        # Request agent that's in buffer, then moves off of buffer.
        # Is still properly ghosted?
        if rank == 1:
            # Move (2, 0, 1) to y=60, rank 1's lower edge — within
            # buffer_size=2 of rank 0's local bounds (see bounds above).
            agent_201 = context.agent((2, 0, 1))
            grid.move(agent_201, dpt(10, 60))
            cspace.move(agent_201, cpt(10.2, 60.1))
        # synchronize is collective: every rank must call it.
        context.synchronize(restore_agent)
        if rank == 0:
            # The buffered agent is now ghosted on rank 0.
            agent_201 = context.ghost_agent((2, 0, 1))
            self.assertIsNotNone(agent_201)
        requests = []
        if rank == 0:
            # Explicitly request the agent that is already ghosted via the buffer.
            requests.append(((2, 0, 1), 1))
        context.request_agents(requests, restore_agent)
        if rank == 0:
            agent_201 = context.ghost_agent((2, 0, 1))
            self.assertIsNotNone(agent_201)
        if rank == 1:
            # move off of buffer
            agent_201 = context.agent((2, 0, 1))
            grid.move(agent_201, dpt(10, 66))
            cspace.move(agent_201, cpt(10.2, 66.1))
        context.synchronize(restore_agent)
        if rank == 0:
            # Still ghosted because of the explicit request, even though the
            # agent no longer lies in rank 0's buffer region.
            self.assertIsNotNone(context.ghost_agent((2, 0, 1)))
class SharedUndirectedNetworkTests(unittest.TestCase):
long_message = True
def test_add_remove(self):
# make 1 rank comm for basic add remove tests
new_group = MPI.COMM_WORLD.Get_group().Incl([0])
comm = MPI.COMM_WORLD.Create_group(new_group)
if comm != MPI.COMM_NULL:
g = UndirectedSharedNetwork('network', comm)
self.assertEqual('network', g.name)
self.assertFalse(g.is_directed)
self.assertEqual(0, g.node_count)
self.assertEqual(0, g.edge_count)
agents = [EAgent(x, 0, comm.Get_rank(), x) for x in range(10)]
g.add(agents[0])
g.add_nodes(agents[1:4])
self.assertEqual(4, g.node_count)
nodes = [x for x in g.graph.nodes]
self.assertEqual(nodes, [x for x in agents[0:4]])
g.add_edge(agents[0], agents[1])
g.add_edge(agents[0], agents[3])
g.add_edge(agents[5], agents[6], weight=12)
# 2 nodes added via edge
self.assertEqual(6, g.node_count)
self.assertEqual(3, g.edge_count)
edges = [x for x in g.graph.edges(agents[5])]
self.assertEqual(edges, [(agents[5], agents[6])])
edges = [x for x in g.graph.edges(agents[0])]
self.assertEqual(edges, [(agents[0], agents[1]), (agents[0], agents[3])])
edges = [x for x in g.graph.edges(agents[6])]
self.assertEqual(edges, [(agents[6], agents[5])])
edge = g.graph.edges[agents[5], agents[6]]
self.assertEqual(12, edge['weight'])
self.assertTrue(g.contains_edge(agents[0], agents[1]))
self.assertTrue(g.contains_edge(agents[1], agents[0]))
self.assertTrue(not g.contains_edge(agents[7], agents[6]))
g.remove(agents[0])
self.assertEqual(5, g.node_count)
self.assertEqual(1, g.edge_count)
g.add_edge(agents[4], agents[5])
self.assertEqual(2, g.num_edges(agents[5]))
self.assertEqual(1, g.num_edges(agents[4]))
# Note (5, 4) because getting edges by node from
# undirected network, returns the asked for node first
exp = {(agents[5], agents[6]), (agents[5], agents[4])}
for edge in g._edges(agents[5]):
exp.remove(edge)
self.assertEqual(0, len(exp))
def test_sync_1(self):
# Tests add, update and remove edge
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
context = ctx.SharedContext(comm)
g = UndirectedSharedNetwork('network', comm)
context.add_projection(g)
self.assertEqual(0, g.node_count)
agents = [EAgent(x, 0, rank, x) for x in range(10)]
for a in agents:
context.add(a)
self.assertEqual(rank, a.local_rank)
self.assertEqual(10, g.node_count)
requests = []
if rank == 0:
requests.append(((1, 0, 1), 1))
requests.append(((1, 0, 2), 2))
requests.append(((2, 0, 1), 1))
elif rank == 3:
requests.append(((1, 0, 0), 0))
requests.append(((4, 0, 2), 2))
requests.append(((2, 0, 1), 1))
context.request_agents(requests, restore_agent)
if rank == 0 or rank == 3:
self.assertEqual(13, g.node_count)
# Edges: 0: (0, 0, 0) -> (1, 0, 1)
# 0: (2, 0, 1) -> (0, 0, 0)
# 1: (0, 0, 1) -> (1, 0, 1)
# 3: (0, 0, 3) -> (2, 0, 1)
# 3: (1, 0, 0) -> (1, 0, 3)
if rank == 0:
other = context.ghost_agent((1, 0, 1))
g.add_edge(agents[0], other, weight=2)
self.assertEqual(1, g.edge_count)
edges = [x for x in g.graph.edges(agents[0], data=True)]
self.assertEqual(edges, [(agents[0], other, {'weight': 2})])
other = context.ghost_agent((2, 0, 1))
g.add_edge(other, agents[0], rate=2)
self.assertEqual(2, g.edge_count)
edges = [x for x in g.graph.edges(agents[0], data=True)]
self.assertEqual(edges[1], (agents[0], other, {'rate': 2}))
elif rank == 1:
g.add_edge(agents[0], agents[1], weight=3)
self.assertEqual(1, g.edge_count)
elif rank | |
<filename>gazoo_device/manager.py<gh_stars>0
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager module.
- detects devices
- creates devices
- get props and sets optional props
"""
import atexit
import copy
import datetime
import difflib
import fnmatch
import inspect
import json
import logging
import multiprocessing
import os
import queue
import re
import shutil
import signal
import subprocess
import textwrap
import time
from typing import Dict, Optional, Union
from gazoo_device import config
from gazoo_device import custom_types
from gazoo_device import device_detector
from gazoo_device import errors
from gazoo_device import extensions
from gazoo_device import gdm_logger
from gazoo_device.capabilities import event_parser_default
from gazoo_device.log_parser import LogParser
from gazoo_device.switchboard import communication_types
from gazoo_device.switchboard import switchboard
from gazoo_device.usb_port_map import UsbPortMap
from gazoo_device.utility import common_utils
from gazoo_device.utility import host_utils
from gazoo_device.utility import parallel_utils
from gazoo_device.utility import usb_utils
logger = gdm_logger.get_logger()
class Manager():
"""Manages the setup and communication of smart devices."""
  def __init__(self,
               device_file_name=None,
               device_options_file_name=None,
               testbeds_file_name=None,
               gdm_config_file_name=config.DEFAULT_GDM_CONFIG_FILE,
               log_directory=None,
               gdm_log_file=None,
               gdm_log_formatter=None,
               adb_path=None,
               debug_level=logging.DEBUG,
               stream_debug=False,
               stdout_logging=True,
               max_log_size=100000000):
    """Initializes the Manager.

    Args:
      device_file_name (str): devices config file path; forwarded to
        _load_configuration.
      device_options_file_name (str): device options file path; forwarded to
        _load_configuration.
      testbeds_file_name (str): testbeds file path; forwarded to
        _load_configuration.
      gdm_config_file_name (str): GDM config file path.
      log_directory (str): directory for log files; forwarded to
        _load_configuration.
      gdm_log_file (str): if set, GDM log output is also written to this file.
      gdm_log_formatter (logging.Formatter): formatter for gdm_log_file;
        a default GDM formatter is created when not provided.
      adb_path (str): adb path; forwarded to _load_configuration.
      debug_level (int): logging level; legacy string values ("debug",
        "developer", etc.) are accepted for backwards compatibility.
      stream_debug (bool): stream debug output via gdm_logger.
      stdout_logging (bool): if False, progress messages are silenced.
      max_log_size (int): maximum log size, in bytes.
    """
    # Map of device name -> open device instance.
    self._open_devices = {}
    self.max_log_size = max_log_size
    # b/141476623: exception queue must not share multiprocessing.Manager()
    common_utils.run_before_fork()
    self._exception_queue_manager = multiprocessing.Manager()
    common_utils.run_after_fork_in_parent()
    self._exception_queue = self._exception_queue_manager.Queue()
    # Backwards compatibility for older debug_level=string style __init__
    if not isinstance(debug_level, int):
      if debug_level in ["debug", "developer"]:
        debug_level = logging.DEBUG
      else:
        debug_level = logging.INFO
    logger.level = debug_level
    if stream_debug:
      gdm_logger.stream_debug()
    if not stdout_logging:
      gdm_logger.silence_progress_messages()
    if gdm_log_file:
      # Mirror GDM logging into the requested file; build a default
      # formatter only when the caller did not supply one.
      if not gdm_log_formatter:
        gdm_log_formatter = logging.Formatter(
            gdm_logger.FMT, datefmt=gdm_logger.DATEFMT)
      self.gdm_log_handler = logging.FileHandler(gdm_log_file)
      self.gdm_log_handler.setLevel(debug_level)
      self.gdm_log_handler.setFormatter(gdm_log_formatter)
      gdm_logger.add_handler(self.gdm_log_handler)
    # Placeholders; _load_configuration populates the real values.
    self.device_file_name = None
    self.device_options_file_name = None
    self.testbeds_file_name = None
    self.log_directory = None
    self._load_configuration(device_file_name, device_options_file_name,
                             testbeds_file_name, gdm_config_file_name,
                             log_directory, adb_path)
    # Register USR1 signal to get exception messages from exception_queue
    signal.signal(signal.SIGUSR1,
                  common_utils.MethodWeakRef(self._process_exceptions))
    # Weak method reference so the atexit registration does not keep this
    # Manager instance alive.
    atexit.register(common_utils.MethodWeakRef(self.close))
def backup_configs(self):
"""Backs up existing configuration files to a timestamped directory.
Raises:
DeviceError: unable to overwrite config files.
Notes:
Backs up configuration files to 'backup'
"""
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
if not os.path.exists(config.BACKUP_PARENT_DIRECTORY) or not os.access(
config.BACKUP_PARENT_DIRECTORY, os.X_OK):
raise errors.DeviceError(
"Device overwrite error. "
"Directory {} does not exist or is not executable. "
"Unable to overwrite configs".format(config.BACKUP_PARENT_DIRECTORY))
self.backup_directory = os.path.join(config.BACKUP_PARENT_DIRECTORY,
"backup-%s" % str(timestamp))
logger.info("Moving config files to the backup directory " +
self.backup_directory)
if not os.path.exists(self.backup_directory):
os.makedirs(self.backup_directory)
shutil.copyfile(self.device_file_name,
os.path.join(self.backup_directory, "devices.json"))
shutil.copyfile(self.device_options_file_name,
os.path.join(self.backup_directory, "device_options.json"))
shutil.copyfile(self.testbeds_file_name,
os.path.join(self.backup_directory, "testbeds.json"))
shutil.copyfile(self.gdm_config_file_name,
os.path.join(self.backup_directory, "gdm.json"))
  def close(self):
    """Stops logger and closes all devices.

    Safe to call more than once: the hasattr guards make the second call a
    no-op for already-released resources. Also registered with atexit in
    __init__, so it may run at interpreter shutdown.
    """
    self.close_open_devices()
    # Drain queued log records before silencing output so no messages are
    # lost during shutdown.
    gdm_logger.flush_queue_messages()
    gdm_logger.silence_progress_messages()
    if hasattr(self, "gdm_log_handler") and self.gdm_log_handler:
      # Detach and release the file handler created in __init__ (only
      # present when a gdm_log_file was requested).
      gdm_logger.remove_handler(self.gdm_log_handler)
      self.gdm_log_handler.close()
      del self.gdm_log_handler
    if hasattr(self, "_exception_queue"):
      del self._exception_queue
    if hasattr(self, "_exception_queue_manager"):
      # Shut down the multiprocessing.Manager() server process.
      self._exception_queue_manager.shutdown()
      del self._exception_queue_manager
def close_open_devices(self):
"""Closes all open devices."""
for device in list(self._open_devices.values()):
device.close()
def close_device(self, identifier):
"""Closes open device via identifier.
Args:
identifier (str): device identifier. Name, serial_number, etc
"""
device_name = self._get_device_name(identifier, raise_error=True)
if device_name not in self._open_devices:
return
else:
self._open_devices[device_name].close()
def create_device(self,
identifier,
new_alias=None,
log_file_name=None,
log_directory=None,
log_to_stdout=None,
skip_recover_device=False,
make_device_ready="on",
filters=None,
log_name_prefix="") -> custom_types.Device:
"""Returns created device object by identifier specified.
Args:
identifier (str): The identifier string to identify a single device.
For simulators, the identifier can be the simulator device type
new_alias (str): A string to replace device's alias kept in file.
log_file_name (str): A string log file name to use for log results.
log_directory (str): A directory path to use for storing log file.
log_to_stdout (bool): Enable streaming of log results to stdout
(DEPRECATED).
skip_recover_device (bool): Don't recover device if it fails ready
check.
make_device_ready (str): "on", "check_only", "off". Toggles
make_device_ready.
filters (list): paths to custom Parser filter files or directories to
use.
log_name_prefix (str): string to prepend to log filename.
Returns:
The device found and created by the identifier specified.
Raises:
ValueError: If identifier specified does not match a known device or
device is not currently connected.
DeviceError: Device not connected
"""
logger.debug("In create_device")
if identifier.endswith("sim"):
return self.create_device_sim(
device_type=identifier,
log_file_name=log_file_name,
log_directory=log_directory,
skip_recover_device=skip_recover_device,
make_device_ready="off",
filters=filters,
log_name_prefix=log_name_prefix)
if log_to_stdout is not None:
logger.warn(
"DEPRECATION WARNING: Support for the log_to_stdout argument is "
"ending soon. To continue seeing the same output, please set "
"debug_level to logging.INFO and remove log_to_stdout")
if log_file_name is not None:
logger.warn(
"DEPRECATION WARNING: Support for log_file_name argument is "
"ending soon. Please start using log_name_prefix argument instead.")
self._type_check("identifier", identifier)
device_name = self._get_device_name(identifier, raise_error=True)
if device_name in self._open_devices:
raise errors.DeviceError(
"Device {name} already created. Call manager.get_open_device('{name}')."
.format(name=device_name))
if new_alias is not None:
self.set_prop(device_name, "alias", new_alias)
device_config = self.get_device_configuration(device_name)
self._update_device_config(device_config, skip_recover_device,
make_device_ready, log_name_prefix, filters)
device_type = device_config["persistent"]["device_type"]
if not log_directory:
# sets the device log directory to manager's log_directory
log_directory = self.log_directory
logger.info("Creating {}".format(device_name))
device_class = self.get_supported_device_class(device_type)
track_device = device_type not in self.get_supported_auxiliary_device_types(
)
device_inst = self._get_device_class(device_class, device_config,
log_file_name, log_directory,
track_device)
try:
device_inst.make_device_ready(make_device_ready)
except errors.DeviceError:
# ensure connections are closed down.
device_inst.close()
raise
return device_inst
def create_device_sim(self,
device_type,
log_file_name=None,
log_directory=None,
skip_recover_device=False,
make_device_ready="off",
filters=None,
log_name_prefix="",
build_info_kwargs=None):
"""Returns created simulated object by device_type specified.
Args:
device_type (str): The device type of the simulator.
log_file_name (str): A string log file name to use for log results.
log_directory (str): A directory path to use for storing log file.
skip_recover_device (bool): Don't recover device if it fails ready
check.
make_device_ready (str): "on", "check_only", "off". Toggles
make_device_ready.
filters (list): paths to custom Parser filter files or directories to
use.
log_name_prefix (str): string to prepend to log filename.
build_info_kwargs (dict): build info args by name to pass to upgrade
method.
Returns:
Object: The device found and created by the device_type specified.
Raises:
ValueError: If identifier specified does not match a known device_type
DeviceError: Device not connected
"""
logger.info("In create_device_sim")
if not log_directory:
# sets the device log directory to manager's log_directory
log_directory = self.log_directory
device_config = {}
self._update_device_config(device_config, skip_recover_device,
make_device_ready, log_name_prefix, filters)
supported_device_class = self.get_supported_device_class(device_type)
device_class = self._get_device_sim_class(supported_device_class,
device_config, log_file_name,
log_directory, build_info_kwargs)
return device_class
def create_devices(self,
device_list=None,
device_type=None,
log_to_stdout=None,
category="gazoo",
make_device_ready="on",
log_name_prefix=""):
"""Returns list of created device objects from device_list or connected devices.
Args:
device_list (list): list of mobly configs.
device_type (str): filter to just return device instances of list
type.
log_to_stdout (bool): Enable streaming of log results to stdout
(DEPRECATED).
category (str): 'gazoo', 'other' or 'all' to filter connected devices.
make_device_ready (str): "on", "check_only", "off". Toggles
make_device_ready.
log_name_prefix (str): string to prepend to log filename.
Returns:
list: device instances successfully created.
Raises:
ValueError: If an identifier specified does not match a known device
or device is not currently connected.
"""
logger.debug("In create_devices")
if log_to_stdout is not None:
logger.warn(
"DEPRECATION WARNING: Support for the log_to_stdout argument is "
"ending soon. To continue seeing the same output, please set "
"debug_level to logging.INFO and remove log_to_stdout")
devices = []
if device_list is None:
device_list = self.get_connected_devices(category)
alias = None
identifier = None
for args in device_list:
if isinstance(args, dict): # translating potential mobly arguments
if "id" in args:
identifier = args["id"]
elif "name" in args:
identifier = args["name"]
if "label" in args:
alias = args["label"]
elif "alias" in args:
alias = args["alias"]
elif isinstance(args, str):
identifier = args
# check if this device is the right type:
if device_type is None or device_type.lower() == self.get_device_prop(
identifier, "device_type"):
devices.append(
self.create_device(
identifier,
alias,
make_device_ready=make_device_ready,
log_name_prefix=log_name_prefix))
return devices
def create_log_parser(self, log_filename, filter_list=None):
"""Creates a LogParser object given a specified device type and filter list.
Args:
log_filename (str): filename containing raw, log event data
filter_list (list): List of files or directories containing JSON
filter files.
Returns:
LogParser: object which creates an event file by parsing a log file
of the device type specified using the provided filter list
"""
parser = event_parser_default.EventParserDefault(
filters=filter_list,
event_file_path="unknown.txt",
device_name="unknown")
return LogParser(parser, log_filename)
def create_switchboard(
self,
communication_address,
communication_type,
device_name="unknown",
log_path=None,
force_slow=False,
event_parser=None,
**kwargs):
"""Creates a switchboard instance.
Args:
communication_address (str): primary device address for communication.
For example, "192.168.127.12", ADB serial number, or serial port path.
communication_type (str): identifier for the type of communication.
device_name (str): device identifier. Used in stdout.
log_path (str): path to write GDM device logs to.
force_slow (bool): send device input at human speed. Used for devices with
input speed | |
156, 213, 217);")
self.pushButton_10.setObjectName("pushButton_10")
self.textEdit_84 = QtWidgets.QTextEdit(self.frame_10)
self.textEdit_84.setGeometry(QtCore.QRect(0, 70, 561, 41))
self.textEdit_84.setStyleSheet("background-color: rgba(255, 255, 255, 0);\n"
"border:false;")
self.textEdit_84.setObjectName("textEdit_84")
self.textBrowser_20 = QtWidgets.QTextBrowser(self.frame_10)
self.textBrowser_20.setGeometry(QtCore.QRect(0, 20, 561, 41))
self.textBrowser_20.setStyleSheet("background-color: rgba(255, 255, 255, 0);\n"
"border:false;")
self.textBrowser_20.setObjectName("textBrowser_20")
self.label_9 = QtWidgets.QLabel(self.frame_10)
self.label_9.setGeometry(QtCore.QRect(430, 0, 131, 111))
self.label_9.setStyleSheet("background-color: rgba(255, 255, 255, 0);\n"
"border-radius: 8px;\n"
"border:false;")
self.label_9.setObjectName("label_9")
self.label_9.raise_()
self.textEdit_80.raise_()
self.textEdit_81.raise_()
self.textEdit_82.raise_()
self.textEdit_83.raise_()
self.textEdit_84.raise_()
self.textBrowser_20.raise_()
self.pushButton_10.raise_()
self.label = QtWidgets.QLabel(self.frame)
self.label.setGeometry(QtCore.QRect(0, -10, 991, 581))
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("ASD_Fotor.png"))
self.label.setObjectName("label")
self.label.raise_()
self.frame_12.raise_()
self.textEdit_18.raise_()
self.textEdit_16.raise_()
self.textEdit_19.raise_()
self.textEdit.raise_()
self.textEdit_2.raise_()
self.widget.raise_()
self.widget_2.raise_()
self.widget_3.raise_()
self.widget_4.raise_()
self.widget_5.raise_()
self.frame_7.raise_()
self.frame_8.raise_()
self.frame_9.raise_()
self.frame_10.raise_()
self.pushButton.raise_()
self.pushButton_2.raise_()
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.textEdit_13.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; font-weight:600; color:#000000;\"> Activity:</span></p></body></html>"))
self.checkBox_30.setText(_translate("Dialog", " Nature"))
self.checkBox_31.setText(_translate("Dialog", " Religious"))
self.checkBox_32.setText(_translate("Dialog", " Theatre"))
self.checkBox_33.setText(_translate("Dialog", " Shopping"))
self.checkBox_34.setText(_translate("Dialog", " Picnic"))
self.checkBox_35.setText(_translate("Dialog", " Sports"))
self.textEdit_14.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; font-weight:600; color:#6c6c6c;\"> </span><span style=\" font-size:14pt; font-weight:600; color:#000000;\">Rating:</span></p></body></html>"))
self.checkBox_36.setText(_translate("Dialog", " above 4.5"))
self.checkBox_37.setText(_translate("Dialog", " above 4.0"))
self.checkBox_38.setText(_translate("Dialog", " above 3.5"))
self.checkBox_39.setText(_translate("Dialog", " above 3.0"))
self.checkBox_40.setText(_translate("Dialog", " above 2.5"))
self.textEdit_15.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:24pt; color:#000000;\"> Filter :</span></p></body></html>"))
self.pushButton_3.setText(_translate("Dialog", "Search"))
self.textEdit_18.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:36pt; font-weight:600;\">Travel Planner</span></p></body></html>"))
self.textEdit_16.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:24pt; color:#fdfdff;\">Best-fit Plan</span></p></body></html>"))
self.textEdit_19.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:24pt; color:#fdfdff;\">Restaurants</span></p></body></html>"))
self.textEdit_4.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">4.7/5.0</p></body></html>"))
self.textEdit_5.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">$$$$</span></p></body></html>"))
self.textEdit_3.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">Tel:</span></p></body></html>"))
self.textBrowser.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; text-decoration: underline; color:#4275ad;\">Name</span></p></body></html>"))
self.textBrowser_7.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; text-decoration: underline; color:#4275ad;\">Name</span></p></body></html>"))
self.textEdit_27.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">4.7/5.0</p></body></html>"))
self.textEdit_28.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">$$$$</span></p></body></html>"))
self.textEdit_29.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">Tel:</span></p></body></html>"))
self.textBrowser_8.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; text-decoration: underline; color:#4275ad;\">Name</span></p></body></html>"))
self.textEdit_30.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">4.7/5.0</p></body></html>"))
self.textEdit_31.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">$$$$</span></p></body></html>"))
self.textEdit_32.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">Tel:</span></p></body></html>"))
self.textBrowser_10.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; text-decoration: underline; color:#4275ad;\">Name</span></p></body></html>"))
self.textEdit_36.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">4.7/5.0</p></body></html>"))
self.textEdit_37.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">$$$$</span></p></body></html>"))
self.textEdit_38.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">Tel:</span></p></body></html>"))
self.textBrowser_12.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; text-decoration: underline; color:#4275ad;\">Name</span></p></body></html>"))
self.textEdit_42.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">4.7/5.0</p></body></html>"))
self.textEdit_43.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">$$$$</span></p></body></html>"))
self.textEdit_44.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt;\">Tel:</span></p></body></html>"))
self.textEdit_65.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; color:#191819;\">Day</span></p></body></html>"))
self.textEdit_66.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; color:#000000;\">Rating</span></p></body></html>"))
self.textEdit_67.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; color:#000000;\">Phone</span></p></body></html>"))
self.textEdit_68.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; color:#000000;\">Type</span></p></body></html>"))
self.pushButton_7.setText(_translate("Dialog", "->"))
self.textEdit_69.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; color:#000000;\">Address</span></p></body></html>"))
self.textBrowser_17.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" text-decoration: underline; color:#405aae;\">Name</span></p></body></html>"))
self.label_6.setText(_translate("Dialog", " Photo"))
self.pushButton.setText(_translate("Dialog", "< Previous"))
self.pushButton_2.setText(_translate("Dialog", "Next >"))
self.textEdit_70.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:14pt; color:#191819;\">Day</span></p></body></html>"))
self.textEdit_71.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" | |
V30 47 C 13.107 12.648 5.739 0\n',
'M V30 48 C 13.377 13.456 4.479 0\n',
'M V30 49 C 13.104 12.616 3.239 0\n',
'M V30 50 H 12.995 15.351 8.695 0\n',
'M V30 51 H 14.647 12.422 7.586 0\n',
'M V30 52 H 13.724 11.889 5.786 0\n',
'M V30 53 H 14.302 13.752 4.474 0\n',
'M V30 54 H 12.809 14.243 4.469 0\n',
'M V30 55 H 13.29 13.134 2.44 0\n',
'M V30 56 H 13.685 11.837 3.234 0\n',
'M V30 57 N 13.515 6.89 13.304 0\n',
'M V30 58 C 15.133 9.221 13.731 0\n',
'M V30 59 C 14.027 8.885 14.496 0\n',
'M V30 60 C 13.212 7.715 14.173 0\n',
'M V30 61 C 12.648 5.739 13.107 0\n',
'M V30 62 C 13.456 4.479 13.377 0\n',
'M V30 63 C 12.616 3.239 13.104 0\n',
'M V30 64 H 15.351 8.695 12.995 0\n',
'M V30 65 H 12.422 7.586 14.647 0\n',
'M V30 66 H 11.889 5.786 13.724 0\n',
'M V30 67 H 13.752 4.474 14.302 0\n',
'M V30 68 H 14.243 4.469 12.809 0\n',
'M V30 69 H 13.134 2.44 13.29 0\n',
'M V30 70 H 11.837 3.234 13.685 0\n',
'M V30 71 N 11.285 6.89 11.496 0\n',
'M V30 72 C 9.667 9.221 11.069 0\n',
'M V30 73 C 10.773 8.885 10.304 0\n',
'M V30 74 C 11.588 7.715 10.627 0\n',
'M V30 75 C 12.152 5.739 11.693 0\n',
'M V30 76 C 11.344 4.479 11.423 0\n',
'M V30 77 C 12.184 3.239 11.696 0\n',
'M V30 78 H 9.449 8.695 11.805 0\n',
'M V30 79 H 12.378 7.586 10.153 0\n',
'M V30 80 H 12.911 5.786 11.076 0\n',
'M V30 81 H 11.048 4.474 10.498 0\n',
'M V30 82 H 10.557 4.469 11.991 0\n',
'M V30 83 H 11.666 2.44 11.51 0\n',
'M V30 84 H 12.963 3.234 11.115 0\n',
'M V30 85 N 11.285 17.91 13.304 0\n',
'M V30 86 C 9.667 15.579 13.731 0\n',
'M V30 87 C 10.773 15.915 14.496 0\n',
'M V30 88 C 11.588 17.085 14.173 0\n',
'M V30 89 C 12.152 19.061 13.107 0\n',
'M V30 90 C 11.344 20.321 13.377 0\n',
'M V30 91 C 12.184 21.561 13.104 0\n',
'M V30 92 H 9.449 16.105 12.995 0\n',
'M V30 93 H 12.378 17.214 14.647 0\n',
'M V30 94 H 12.911 19.014 13.724 0\n',
'M V30 95 H 11.048 20.326 14.302 0\n',
'M V30 96 H 10.557 20.331 12.809 0\n',
'M V30 97 H 11.666 22.36 13.29 0\n',
'M V30 98 H 12.963 21.566 13.685 0\n',
'M V30 99 N 13.515 17.91 11.496 0\n',
'M V30 100 C 15.133 15.579 11.069 0\n',
'M V30 101 C 14.027 15.915 10.304 0\n',
'M V30 102 C 13.212 17.085 10.627 0\n',
'M V30 103 C 12.648 19.061 11.693 0\n',
'M V30 104 C 13.456 20.321 11.423 0\n',
'M V30 105 C 12.616 21.561 11.696 0\n',
'M V30 106 H 15.351 16.105 11.805 0\n',
'M V30 107 H 12.422 17.214 10.153 0\n',
'M V30 108 H 11.889 19.014 11.076 0\n',
'M V30 109 H 13.752 20.326 10.498 0\n',
'M V30 110 H 14.243 20.331 11.991 0\n',
'M V30 111 H 13.134 22.36 11.51 0\n',
'M V30 112 H 11.837 21.566 11.115 0\n',
'M V30 113 N 6.89 11.496 11.285 0\n',
'M V30 114 C 9.221 11.069 9.667 0\n',
'M V30 115 C 8.885 10.304 10.773 0\n',
'M V30 116 C 7.715 10.627 11.588 0\n',
'M V30 117 C 5.739 11.693 12.152 0\n',
'M V30 118 C 4.479 11.423 11.344 0\n',
'M V30 119 C 3.239 11.696 12.184 0\n',
'M V30 120 H 8.695 11.805 9.449 0\n',
'M V30 121 H 7.586 10.153 12.378 0\n',
'M V30 122 H 5.786 11.076 12.911 0\n',
'M V30 123 H 4.474 10.498 11.048 0\n',
'M V30 124 H 4.469 11.991 10.557 0\n',
'M V30 125 H 2.44 11.51 11.666 0\n',
'M V30 126 H 3.234 11.115 12.963 0\n',
'M V30 127 N 6.89 13.304 13.515 0\n',
'M V30 128 C 9.221 13.731 15.133 0\n',
'M V30 129 C 8.885 14.496 14.027 0\n',
'M V30 130 C 7.715 14.173 13.212 0\n',
'M V30 131 C 5.739 13.107 12.648 0\n',
'M V30 132 C 4.479 13.377 13.456 0\n',
'M V30 133 C 3.239 13.104 12.616 0\n',
'M V30 134 H 8.695 12.995 15.351 0\n',
'M V30 135 H 7.586 14.647 12.422 0\n',
'M V30 136 H 5.786 13.724 11.889 0\n',
'M V30 137 H 4.474 14.302 13.752 0\n',
'M V30 138 H 4.469 12.809 14.243 0\n',
'M V30 139 H 2.44 13.29 13.134 0\n',
'M V30 140 H 3.234 13.685 11.837 0\n',
'M V30 141 N 17.91 11.496 13.515 0\n',
'M V30 142 C 15.579 11.069 15.133 0\n',
'M V30 143 C 15.915 10.304 14.027 0\n',
'M V30 144 C 17.085 10.627 13.212 0\n',
'M V30 145 C 19.061 11.693 12.648 0\n',
'M V30 146 C 20.321 11.423 13.456 0\n',
'M V30 147 C 21.561 11.696 12.616 0\n',
'M V30 148 H 16.105 11.805 15.351 0\n',
'M V30 149 H 17.214 10.153 12.422 0\n',
'M V30 150 H 19.014 11.076 11.889 0\n',
'M V30 151 H 20.326 10.498 13.752 0\n',
'M V30 152 H 20.331 11.991 14.243 0\n',
'M V30 153 H 22.36 11.51 13.134 0\n',
'M V30 154 H 21.566 11.115 11.837 0\n',
'M V30 155 N 17.91 13.304 11.285 0\n',
'M V30 156 C 15.579 13.731 9.667 0\n',
'M V30 157 C 15.915 14.496 10.773 0\n',
'M V30 158 C 17.085 14.173 11.588 0\n',
'M V30 159 C 19.061 13.107 12.152 0\n',
'M V30 160 C 20.321 13.377 11.344 0\n',
'M V30 161 C 21.561 13.104 12.184 0\n',
'M V30 162 H 16.105 12.995 9.449 0\n',
'M V30 163 H 17.214 14.647 12.378 0\n',
'M V30 164 H 19.014 13.724 12.911 0\n',
'M V30 165 H 20.326 14.302 11.048 0\n',
'M V30 166 H 20.331 12.809 10.557 0\n',
'M V30 167 H 22.36 13.29 11.666 0\n',
'M V30 168 H 21.566 13.685 12.963 0\n',
'M V30 END ATOM\n',
'M V30 BEGIN BOND\n',
'M V30 1 2 1 4\n',
'M V30 2 1 1 5\n',
'M V30 3 2 2 3\n',
'M V30 4 1 2 8\n',
'M V30 5 1 2 87\n',
'M V30 6 1 3 4\n',
'M V30 7 1 3 128\n',
'M V30 8 1 4 9\n',
'M V30 9 1 5 6\n',
'M V30 10 1 5 10\n',
'M V30 11 1 5 19\n',
'M V30 12 1 6 7\n',
'M V30 13 1 6 11\n',
'M V30 14 1 6 12\n',
'M V30 15 1 7 13\n',
'M V30 16 1 7 14\n',
'M V30 17 1 7 21\n',
'M V30 18 2 15 18\n',
'M V30 19 1 15 19\n',
'M V30 20 2 16 17\n',
'M V30 21 1 16 22\n',
'M V30 22 1 16 59\n',
'M V30 23 1 17 18\n',
'M V30 24 1 17 142\n',
'M V30 25 1 18 23\n',
'M V30 26 1 19 20\n',
'M V30 27 1 19 24\n',
'M V30 28 1 20 21\n',
'M V30 29 1 20 25\n',
'M V30 30 1 20 26\n',
'M V30 31 1 21 27\n',
'M V30 32 1 21 28\n',
'M V30 33 2 29 32\n',
'M V30 34 1 29 33\n',
'M V30 35 2 30 31\n',
'M V30 36 1 30 36\n',
'M V30 37 1 30 73\n',
'M V30 38 1 31 32\n',
'M V30 39 1 31 114\n',
'M V30 40 1 32 37\n',
'M V30 41 1 33 34\n',
'M V30 42 1 33 38\n',
'M V30 43 1 33 47\n',
'M V30 44 1 34 35\n',
'M V30 45 1 34 39\n',
'M V30 46 1 34 40\n',
'M V30 47 1 35 41\n',
'M V30 48 1 35 42\n',
'M V30 49 1 35 49\n',
'M V30 50 | |
If you specify nothing, the default is to assume the file is
located as follows: Output_Storage\Scenario_Name\sets\pysedsim_ref_set.ref, where output storage is named
appropriately in the input_file_name file.
:return:
'''
[num_scenarios, simulation_titles_list, imported_specs, main_input_files_dir, main_output_file_dir,
os_fold] = import_specs(file_name=input_file_name)
# Loop through as many optimization scenarios as user has specified.
tradeoff_plot_pref_dict = {}
plot_list = [] # names of all plots (subplots do not count; subplots make up a plot). Goes across scenarios.
opt_dicts_dict = {}
plot_list_dict = {}
num_plots_dict = {}
# --- Gather parallel-axis (tradeoff) plotting preferences for each scenario ---
# Reads the "Parallel Axis Plotting" worksheet of every scenario's input file
# and stores per-plot / per-axis preferences in nested dicts keyed by
# simulation title -> plot name -> subplot number -> axis.
# NOTE(review): relies on names defined earlier in this module
# (num_scenarios, simulation_titles_list, plot_list, plot_list_dict,
# opt_dicts_dict, num_plots_dict, tradeoff_plot_pref_dict, os_fold,
# main_output_file_dir, main_input_files_dir, imported_specs, input_file_name).
num_axes_dict = {}  # simulation title -> number of parallel axes per plot
sp_list = {}  # simulation title -> list of subplot numbers
start_row_par_axis_data = 11  # worksheet row where the per-axis data bands begin
for j in range(num_scenarios):
    simulation_title = simulation_titles_list[j]
    output_location = main_output_file_dir + os_fold + simulation_title + os_fold + 'sets'
    plot_list_dict[simulation_title] = []
    # 1. Create reference set file if not already created. Run this code before running any of the figure generation code
    # below.
    [Borg_dict, DPS_dict] = Reference_Set(input_file_name=input_file_name, create_ref_set='No')
    if Borg_dict['optimization approach'] == 'DPS':
        Borg_dict['n_vars'] = DPS_dict['total_vars'] # Store num decision variables in Borg_dict, copied from
        # DPS_dict.
    sp_list[simulation_title] = []
    # Import plot preferences
    opt_dicts_dict[simulation_title] = {'Borg_dict': Borg_dict, 'DPS_dict': DPS_dict}
    Input_Data_File = Load_Input_File(simulation_title, main_input_files_dir, imported_specs)
    # Read in preferences related to tradeoff plotting from the "Parallel Axis Plotting" worksheet in input file.
    try:
        tradeoff_plot_pref_dict[simulation_title] = {}
        # Cell B1: number of plots; cell B3: number of axes per plot.
        num_plots_dict[simulation_title] = Input_Data_File['Parallel Axis Plotting']['B1'].value
        num_plots = num_plots_dict[simulation_title]
        num_axes_dict[simulation_title] = Input_Data_File['Parallel Axis Plotting']['B3'].value
        num_axes = num_axes_dict[simulation_title]
        # Position of data in 'Parallel Axis Plotting' sheet for each axis
        # (each axis occupies a 7-row band starting at start_row_par_axis_data).
        axis_offset_list = [int(start_row_par_axis_data+7*i) for i in range(num_axes)]
        # NOTE(review): axes are keyed 'Axis 0'..'Axis N-1' (zero-based);
        # confirm downstream code and the worksheet use the same labels.
        axis_list = ['Axis ' + str(i) for i in range(num_axes)]
        axis_loc_dict = {}  # axis label -> axis position on the plot
        for i in range(len(axis_list)):
            axis_loc_dict[axis_list[i]] = i
        for p in range(num_plots):
            # Row 2 of plot column p holds the figure name.
            plot_name = Input_Data_File['Parallel Axis Plotting'].cell(row = 2, column = 2 + p).value
            if plot_name is None:
                plot_name = 'parallel_axis_plot' # default figure name
            if plot_name not in plot_list:
                plot_list.append(plot_name)
            if plot_name not in plot_list_dict[simulation_title]:
                plot_list_dict[simulation_title].append(plot_name)
            tradeoff_plot_pref_dict[simulation_title][plot_name] = {}
            #subplot_number = str(Input_Data_File['Parallel Axis Plotting'].cell(row = 3, column = 2 + p).value)
            subplot_number = 0 # For now, no subplots. Just single parallel axis plots.
            # NOTE(review): this overwrites (rather than appends to) the list
            # initialized above, so sp_list always ends up as [0] -- confirm
            # this is intended once real subplots are supported.
            sp_list[simulation_title] = [subplot_number]
            tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number] = {}
            if subplot_number is not None:
                # Rows 5-9 hold the subplot-level preferences for this plot column.
                tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number]['Color Axis Num'] = \
                    Input_Data_File['Parallel Axis Plotting'].cell(row = 5, column = 2 + p).value
                tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number]['Colormap Name'] = \
                    Input_Data_File['Parallel Axis Plotting'].cell(row = 6, column = 2 + p).value
                tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number]['Colorbar Title'] = \
                    Input_Data_File['Parallel Axis Plotting'].cell(row = 7, column = 2 + p).value
                tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number]['Policies to Brush'] = \
                    Input_Data_File['Parallel Axis Plotting'].cell(row = 8, column = 2 + p).value
                tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number]['Policy Labels'] = \
                    Input_Data_File['Parallel Axis Plotting'].cell(row = 9, column = 2 + p).value
                # For this subplot, load preferences for each axis.
                for axis in axis_list:
                    ax_ind = axis_list.index(axis)
                    row_ind = axis_offset_list[ax_ind]
                    # Only store preferences for axes that name an objective.
                    if Input_Data_File['Parallel Axis Plotting'].cell(row = row_ind, column = 2 + p).value is not None:
                        tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number][axis] = {}
                        tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number][axis]['objective_name'] = \
                            Input_Data_File['Parallel Axis Plotting'].cell(row = row_ind, column = 2 + p).value
                        tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number][axis]['unit_conv'] = \
                            Input_Data_File['Parallel Axis Plotting'].cell(row = row_ind+1, column = 2 + p).value
                        tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number][axis]['perc_conv'] = \
                            Input_Data_File['Parallel Axis Plotting'].cell(row = row_ind+2, column = 2 + p).value
                        tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number][axis]['invert'] = \
                            Input_Data_File['Parallel Axis Plotting'].cell(row = row_ind+3, column = 2 + p).value
                        tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number][axis]['brush_range']\
                            = Input_Data_File['Parallel Axis Plotting'].cell(row=row_ind + 4, column=2 + p).value
                        tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number][axis]['title'] = \
                            Input_Data_File['Parallel Axis Plotting'].cell(row = row_ind+5, column = 2 + p).value
                        # Subplot title: Same for every axis
                        tradeoff_plot_pref_dict[simulation_title][plot_name][subplot_number][axis][
                            'subplot_title'] = Input_Data_File['Parallel Axis Plotting'].cell(row=4, column=2 + p).value
    except KeyError:
        print('Proper Parallel Axis Plotting worksheet does not exist to generate plots')
# --- Generate each requested parallel-axis plot across all scenarios ---
# Uses the preference dicts populated above plus ref_set_file_name defined
# earlier in the file.
ref_set_pref_dict = {}
for plot_name in plot_list:
    # Create all of plots user specifies
    scenario_counter = 0
    for j in range(num_scenarios):
        simulation_title = simulation_titles_list[j]
        sub_plt_counter = 0
        # NOTE(review): ref_set_file_name is only filled in while it is None,
        # so every later scenario/plot reuses the first scenario's .ref file
        # -- confirm this is intended.
        if ref_set_file_name is None:
            ref_set_file_name = main_output_file_dir + os_fold + simulation_title + os_fold + 'sets' + os_fold + \
                                'pysedsim_ref_set.ref'
        objs_to_plot = opt_dicts_dict[simulation_title]['Borg_dict']['opt_dict']['Objective Names Ordered List']
        ref_set_pref_dict['ref_set_file_name'] = ref_set_file_name
        ref_set_pref_dict['num_objs'] = opt_dicts_dict[simulation_title]['Borg_dict']['n_objs']
        ref_set_pref_dict['num_dec_vars'] = opt_dicts_dict[simulation_title]['DPS_dict']['total_vars']
        ref_set_pref_dict['Borg_dict'] = opt_dicts_dict[simulation_title]['Borg_dict']
        for subplot_number in sp_list[simulation_title]:
            # Defaults: no unit conversion, no percent conversion, no axis inversion.
            ref_set_pref_dict['unit_conv'] = [1 for i in range(len(objs_to_plot))]
            ref_set_pref_dict['perc_conv'] = ['No' for i in range(len(objs_to_plot))]
            ref_set_pref_dict['invert'] = ['No' for i in range(len(objs_to_plot))]
            plot_dict = {}
            spn_str = subplot_number
            plot_dict['num_axes'] = num_axes
            ref_set_pref_dict['num_objs_to_plot'] = num_axes
            plot_dict['brush_range'] = [[] for i in range(ref_set_pref_dict['num_objs'])]
            plot_dict['plot_order'] = [i for i in range(ref_set_pref_dict['num_objs_to_plot'])]
            # Import preferences for colorbar if user provided an axis number for colorbar.
            if tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str]['Color Axis Num'] is not None:
                plot_dict['Color Axis Num'] = tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][
                    'Color Axis Num']
            if tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str]['Colormap Name'] is not None:
                plot_dict['Colormap Name'] = tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][
                    'Colormap Name']
            if tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str]['Colorbar Title'] is not None:
                plot_dict['Colorbar Title'] = tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][
                    'Colorbar Title']
            if tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str]['Policies to Brush'] is not None:
                plot_dict['Policies to Brush'] = tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][
                    'Policies to Brush']
            if tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str]['Policy Labels'] is not None:
                plot_dict['Policy Labels'] = tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][
                    'Policy Labels']
            # NOTE(review): axes are labeled 'Axis <i>' above, so an 'x_axis'
            # key may never exist here and this always falls to KeyError --
            # confirm against the preference-reading section.
            try:
                if tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str]['x_axis']['subplot_title'] is \
                        not None:
                    plot_dict['subplot_title'] = \
                        tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str]['x_axis']['subplot_title']
            except KeyError:
                pass
            # Map each plotted objective onto its axis, carrying over any
            # unit/percent/invert conversions and brushing ranges.
            for obj in objs_to_plot:
                for axis in axis_list:
                    try:
                        if obj == tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][axis]['objective_name']:
                            try:
                                ref_set_pref_dict['unit_conv'][objs_to_plot.index(obj)] = \
                                    tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][axis]['unit_conv']
                            except KeyError:
                                pass
                            try:
                                ref_set_pref_dict['perc_conv'][objs_to_plot.index(obj)] = \
                                    tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][axis]['perc_conv']
                            except KeyError:
                                pass
                            try:
                                ref_set_pref_dict['invert'][objs_to_plot.index(obj)] = \
                                    tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][axis]['invert']
                            except KeyError:
                                pass
                            # Order matters
                            plot_dict[axis + ' label'] = tradeoff_plot_pref_dict[simulation_title][plot_name][
                                spn_str][axis]['title']
                            plot_dict['plot_order'][axis_loc_dict[axis]] = objs_to_plot.index(obj)
                            # Python 2: worksheet text may arrive as str or unicode.
                            if type(tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][axis][
                                    'brush_range']) in [str, unicode]:
                                # User has specified a list of numbers for the brushing range, which will be
                                # imported as one long string, including commas.
                                ax_rng = tradeoff_plot_pref_dict[simulation_title][plot_name][spn_str][axis][
                                    'brush_range'].split(', ') # Create list of policies as strings
                                # Convert to list
                                plot_dict['brush_range'][objs_to_plot.index(obj)] = []
                                for z in range(len(ax_rng)):
                                    try:
                                        plot_dict['brush_range'][objs_to_plot.index(obj)].append(float(ax_rng[z]))
                                    except ValueError:
                                        # User provided a string (either "min, min" or "max, max"). Can't do float.
                                        plot_dict['brush_range'][objs_to_plot.index(obj)].append(str(ax_rng[z]))
                    except KeyError:
                        pass
            # Create the parallel axis plot
            parallel_axis_plot(ref_set_pref_dict, plot_name=plot_name, plot_dict=plot_dict)
            # To keep track of where in main figure to place each subplot.
            sub_plt_counter += 1
        # To keep track of where in main figure to place each subplot.
        scenario_counter += 1
def parallel_axis_plot(ref_set_pref_dict, plot_dict = None, parse_objs = None, objs_to_plot = None, plot_name=None):
    """Create a parallel-axis (tradeoff) plot for a reference set.

    ref_set_pref_dict: dict of reference-set preferences. Recognized keys
        include 'ref_set_file_name', 'num_objs', 'num_dec_vars', 'unit_conv',
        'perc_conv', 'invert', and -- when no file name is supplied --
        'ref_set_array', 'objective_values', 'dec_var_values'.
    plot_dict: dict of plot preferences ('num_axes', 'brush_range',
        'Policies to Brush', 'Policy Labels', ...). Mutated in place.
    parse_objs, objs_to_plot, plot_name: forwarded to reference-set
        processing / figure routines.
    """
    #import seaborn.apionly as sns
    # Load basic information about the simulated scenarios, including input file directory and simulation names.
    os_fold = Op_Sys_Folder_Operator()
    # Unpack ref_set_pref_dict
    try:
        ref_set_file_name = ref_set_pref_dict['ref_set_file_name']
        num_objs = ref_set_pref_dict['num_objs']
    except KeyError:
        ref_set_file_name = None
        num_objs = None
    try:
        num_dec_vars = ref_set_pref_dict['num_dec_vars']
    except KeyError:
        pass
    try:
        unit_conv = ref_set_pref_dict['unit_conv']
    except KeyError:
        unit_conv = None
    try:
        perc_conv = ref_set_pref_dict['perc_conv']
    except KeyError:
        perc_conv = None
    try:
        invert = ref_set_pref_dict['invert']
    except KeyError:
        invert = None
    try:
        num_axes = plot_dict['num_axes']
    except KeyError:
        print("User did not specify number of axes")
    if ref_set_file_name is not None:
        # User wishes to import reference set here.
        [ref_set_array, objective_values, dec_var_values] = Initial_Processing(num_objs, num_dec_vars, ref_set_file_name,
                                                                              parse_objs=parse_objs, perc_conv=perc_conv,
                                                                              invert=invert, unit_conv=unit_conv,
                                                                              reverse_sign_all_objs = 'Yes')
        # Get positive values of objective values as well
        # NOTE(review): this second call repeats the one above with identical
        # arguments, so obj_vals_pos duplicates objective_values -- confirm
        # whether a different sign/conversion option was intended here.
        obj_vals_pos = Initial_Processing(num_objs, num_dec_vars, ref_set_file_name,
                                          parse_objs=parse_objs, perc_conv=perc_conv,
                                          invert=invert, unit_conv=unit_conv, reverse_sign_all_objs = 'Yes')[1]
    else:
        # User has provided reference set information.
        ref_set_array = ref_set_pref_dict['ref_set_array']
        objective_values = ref_set_pref_dict['objective_values']
        dec_var_values = ref_set_pref_dict['dec_var_values']
    # In case user specified a policy number to be brushed instead of a brushing range, specify the objective values
    policies_to_brush = None
    try:
        if plot_dict['Policies to Brush'] is not None:
            # Create list of policy number strings
            if type(plot_dict['Policies to Brush']) in [str, unicode]:
                policies_to_brush = plot_dict['Policies to Brush'].split(', ')
            else:
                policies_to_brush = [plot_dict['Policies to Brush']]
            # Create list of policy number integers
            policies_to_brush = [int(policies_to_brush[pol]) for pol in range(len(policies_to_brush))]
            for pol in policies_to_brush:
                for obj in range(num_objs):
                    # Append objective value for specified policy twice, so range is limited to exactly this value.
                    plot_dict['brush_range'][obj].append(obj_vals_pos[obj][pol])
                    plot_dict['brush_range'][obj].append(obj_vals_pos[obj][pol])
            # Replace list of policy strings with policies_to_brush integer list
            plot_dict['Policies to Brush'] = policies_to_brush
    except KeyError:
        plot_dict['label_policies'] = 'No' # No policies to label if none are being brushed
    try:
        if plot_dict['Policy Labels'] is not None:
            # Create list of policy labels for those policies being highlighted
            plot_dict['label_policies'] = 'Yes'
            # Create list of policy number strings
            if type(plot_dict['Policy Labels']) in [str, unicode]:
                pol_names = plot_dict['Policy Labels'].split(', ')
            else:
                pol_names = None
            if pol_names is not None:
                plot_dict['Policy Labels'] = pol_names
            # Fixed axes-fraction coordinates for up to three policy labels.
            plot_dict['Label Locations'] = [(0.45, 0.05), (0.20, 0.95), (0.3, 0.8)]
            #(0.2, 0.95), (0.6, 0.95), (0.8, 0.95)
            plot_dict['Data Point Location'] = [(0.69, 0.33), (0.94, 0.71), (0.5,0.65)]
# ---- (extraction artifact removed: unrelated file fragment boundary) ----
# MIT 6.034 Lab 9: Boosting (Adaboost)
from tester import make_test, get_tests
from utils import *
lab_number = 9  # for tester.py
F = Fraction  # lazy alias

# --- Tests for initialize_weights: uniform weights over the training points ---

def initialize_2_getargs():  # TEST 1
    # A single training point gets the whole weight.
    return [["PointA"]]

initialize_2_expected = {"PointA": 1}

def initialize_2_testanswer(val, original_val=None):
    return val == initialize_2_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=initialize_2_getargs,
          testanswer=initialize_2_testanswer,
          expected_val=str(initialize_2_expected),
          name='initialize_weights')

def initialize_3_getargs():  # TEST 2
    # Twelve points, "-6" through "5".
    return [[str(n) for n in range(-6, 6)]]

initialize_3_expected = {str(n): F(1, 12) for n in range(-6, 6)}

def initialize_3_testanswer(val, original_val=None):
    return val == initialize_3_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=initialize_3_getargs,
          testanswer=initialize_3_testanswer,
          expected_val=str(initialize_3_expected),
          name='initialize_weights')
# TEST 0 FOR CALCULATE_ERROR_RATE - ALL POINTS CORRECTLY CLASSIFIED
# only one classifier

def calculate_error_rates_0_getargs():  # TEST 3
    point_weights = {str(n): F(1, 4) for n in range(4)}
    return [point_weights, {"classifier_0": []}]

calculate_error_rates_0_expected = {"classifier_0": 0}

def calculate_error_rates_0_testanswer(val, original_val=None):
    return val == calculate_error_rates_0_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=calculate_error_rates_0_getargs,
          testanswer=calculate_error_rates_0_testanswer,
          expected_val=str(calculate_error_rates_0_expected),
          name='calculate_error_rates')

# TEST 2 FOR CALCULATE_ERROR_RATE - SOME POINTS MISCLASSIFIED

def calculate_error_rates_2_getargs():  # TEST 4
    point_weights = {str(n): F(1, 8) for n in range(4)}
    point_weights["4"] = F(1, 2)
    return [point_weights,
            {"classifier_0": ["0", "1", "4"], "classifier_1": ["0", "1", "2", "3"]}]

calculate_error_rates_2_expected = {"classifier_0": F(3, 4), "classifier_1": F(1, 2)}

def calculate_error_rates_2_testanswer(val, original_val=None):
    return val == calculate_error_rates_2_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=calculate_error_rates_2_getargs,
          testanswer=calculate_error_rates_2_testanswer,
          expected_val=str(calculate_error_rates_2_expected),
          name='calculate_error_rates')
# --- Tests for pick_best_classifier ---

def pick_best_classifier_0_getargs():  # TEST 5
    # have a perfect test!
    return [{"classifier_0": 0,
             "classifier_1/10": F(1, 10),
             "classifier_1/2": F(1, 2),
             "classifier_9/10": F(9, 10)}]

pick_best_classifier_0_expected = "classifier_0"

def pick_best_classifier_0_testanswer(val, original_val=None):
    return val == pick_best_classifier_0_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=pick_best_classifier_0_getargs,
          testanswer=pick_best_classifier_0_testanswer,
          expected_val=str(pick_best_classifier_0_expected),
          name='pick_best_classifier')

def pick_best_classifier_1_getargs():  # TEST 6
    # have a pretty good test
    return [{"classifier_1/10": F(1, 10),
             "classifier_1/2": F(1, 2),
             "classifier_9/10": F(9, 10)}]

pick_best_classifier_1_expected = "classifier_1/10"

def pick_best_classifier_1_testanswer(val, original_val=None):
    return val == pick_best_classifier_1_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=pick_best_classifier_1_getargs,
          testanswer=pick_best_classifier_1_testanswer,
          expected_val=str(pick_best_classifier_1_expected),
          name='pick_best_classifier')

def pick_best_classifier_2_getargs():  # TEST 7
    # no good tests; raise error
    return [{"classifier_1/2": F(1, 2),
             "classifier_6/10": F(6, 10),
             "classifier_9/10": F(9, 10)}]

pick_best_classifier_2_expected = NoGoodClassifiersError

def pick_best_classifier_2_testanswer(val, original_val=None):
    return val == pick_best_classifier_2_expected

make_test(type='FUNCTION_EXPECTING_EXCEPTION',
          getargs=pick_best_classifier_2_getargs,
          testanswer=pick_best_classifier_2_testanswer,
          expected_val=str(pick_best_classifier_2_expected),
          name='pick_best_classifier')

def pick_best_classifier_2a_getargs():  # TEST 8
    # no good tests; raise error
    return [{"cl1": F(1, 2), "cl2": F(1, 2)}, False]

pick_best_classifier_2a_expected = NoGoodClassifiersError

def pick_best_classifier_2a_testanswer(val, original_val=None):
    return val == pick_best_classifier_2a_expected

make_test(type='FUNCTION_EXPECTING_EXCEPTION',
          getargs=pick_best_classifier_2a_getargs,
          testanswer=pick_best_classifier_2a_testanswer,
          expected_val=str(pick_best_classifier_2a_expected),
          name='pick_best_classifier')

def pick_best_classifier_2b_getargs():  # TEST 9
    # lowest error rate is 1/2, but best test is 9/10
    return [{"classifier_1/2": F(1, 2),
             "classifier_6/10": F(6, 10),
             "classifier_9/10": F(9, 10)}, False]

pick_best_classifier_2b_expected = "classifier_9/10"

def pick_best_classifier_2b_testanswer(val, original_val=None):
    return val == pick_best_classifier_2b_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=pick_best_classifier_2b_getargs,
          testanswer=pick_best_classifier_2b_testanswer,
          expected_val=str(pick_best_classifier_2b_expected),
          name='pick_best_classifier')

def pick_best_classifier_4_getargs():  # TEST 10
    # have perfectly wrong test
    return [{"classifier_1/10": F(1, 10),
             "classifier_6/10": F(6, 10),
             "classifier_9/10": F(9, 10),
             "classifier_1": 1}, False]

pick_best_classifier_4_expected = "classifier_1"

def pick_best_classifier_4_testanswer(val, original_val=None):
    return val == pick_best_classifier_4_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=pick_best_classifier_4_getargs,
          testanswer=pick_best_classifier_4_testanswer,
          expected_val=str(pick_best_classifier_4_expected),
          name='pick_best_classifier')

# check tie-breaking
def pick_best_classifier_5_getargs():  # TEST 11
    return [{"B": F(3, 10), "A": F(4, 10), "C": F(3, 10)}]

pick_best_classifier_5_expected = "B"

def pick_best_classifier_5_testanswer(val, original_val=None):
    return val == pick_best_classifier_5_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=pick_best_classifier_5_getargs,
          testanswer=pick_best_classifier_5_testanswer,
          expected_val=str(pick_best_classifier_5_expected)
                       + ' (Hint: This test checks tie-breaking.)',
          name='pick_best_classifier')

# check not comparing floats
def pick_best_classifier_6_getargs():  # TEST 12
    return [{"cl_1": F(2, 3), "cl_2": F(1, 3)}, False]

pick_best_classifier_6_expected = "cl_1"

def pick_best_classifier_6_testanswer(val, original_val=None):
    return val == pick_best_classifier_6_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=pick_best_classifier_6_getargs,
          testanswer=pick_best_classifier_6_testanswer,
          expected_val=str(pick_best_classifier_6_expected)
                       + " (Hint: Make sure you're using Fractions, and not comparing floats!)",
          name='pick_best_classifier')
# --- Tests for calculate_voting_power ---

def calculate_voting_power_0_getargs():  # TEST 13
    return [.001]

calculate_voting_power_0_expected = 3.453377389324277

def calculate_voting_power_0_testanswer(val, original_val=None):
    return approx_equal(val, calculate_voting_power_0_expected)

make_test(type='FUNCTION_ENCODED_ARGS', getargs=calculate_voting_power_0_getargs,
          testanswer=calculate_voting_power_0_testanswer,
          expected_val=str(calculate_voting_power_0_expected),
          name='calculate_voting_power')

def calculate_voting_power_3_getargs():  # TEST 14
    return [.3]

calculate_voting_power_3_expected = 0.42364893019360184

def calculate_voting_power_3_testanswer(val, original_val=None):
    return approx_equal(val, calculate_voting_power_3_expected)

make_test(type='FUNCTION_ENCODED_ARGS', getargs=calculate_voting_power_3_getargs,
          testanswer=calculate_voting_power_3_testanswer,
          expected_val=str(calculate_voting_power_3_expected),
          name='calculate_voting_power')

def calculate_voting_power_4_getargs():  # TEST 15
    return [.7]

calculate_voting_power_4_expected = -0.4236489301936017

def calculate_voting_power_4_testanswer(val, original_val=None):
    return approx_equal(val, calculate_voting_power_4_expected)

make_test(type='FUNCTION_ENCODED_ARGS', getargs=calculate_voting_power_4_getargs,
          testanswer=calculate_voting_power_4_testanswer,
          expected_val=str(calculate_voting_power_4_expected),
          name='calculate_voting_power')

# perfect classifier -> INF
def calculate_voting_power_5_getargs():  # TEST 16
    return [0]

calculate_voting_power_5_expected = INF

def calculate_voting_power_5_testanswer(val, original_val=None):
    return val == calculate_voting_power_5_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=calculate_voting_power_5_getargs,
          testanswer=calculate_voting_power_5_testanswer,
          expected_val=str(calculate_voting_power_5_expected),
          name='calculate_voting_power')

# perfectly wrong classifier -> -INF
def calculate_voting_power_6_getargs():  # TEST 17
    return [1]

calculate_voting_power_6_expected = -INF

def calculate_voting_power_6_testanswer(val, original_val=None):
    return val == calculate_voting_power_6_expected

make_test(type='FUNCTION_ENCODED_ARGS', getargs=calculate_voting_power_6_getargs,
          testanswer=calculate_voting_power_6_testanswer,
          expected_val=str(calculate_voting_power_6_expected),
          name='calculate_voting_power')
# --- Tests for get_overall_misclassifications ---

def get_overall_misclassifications_0_getargs():  # TEST 18
    return [[("h1", 1)], ['ptA', 'ptB'], {'h1': ['ptA', 'ptB'], 'h2': ['ptA']}]

get_overall_misclassifications_0_expected = {'ptA', 'ptB'}

def get_overall_misclassifications_0_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_0_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_0_getargs,
          testanswer=get_overall_misclassifications_0_testanswer,
          expected_val=str(get_overall_misclassifications_0_expected),
          name='get_overall_misclassifications')

# All classifiers included in H
# h with voting power of 0
# H misclassifies A
def get_overall_misclassifications_1_getargs():  # TEST 19
    return [[("h1", 1), ("h2", 0)], ['A', 'B'], {'h1': ['A'], 'h2': ['B']}]

get_overall_misclassifications_1_expected = {'A'}

def get_overall_misclassifications_1_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_1_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_1_getargs,
          testanswer=get_overall_misclassifications_1_testanswer,
          expected_val=str(get_overall_misclassifications_1_expected),
          name='get_overall_misclassifications')

# Not all points misclassified by any classifier
# H misclassifies A & B
def get_overall_misclassifications_2_getargs():  # TEST 20
    return [[("h1", .5), ("h2", .3), ("h3", .76)], ['A', 'B', 'C', 'D'],
            {'h1': ['A'], 'h2': ['A', 'B'], 'h3': ['B', 'C']}]

get_overall_misclassifications_2_expected = {'A', 'B'}

def get_overall_misclassifications_2_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_2_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_2_getargs,
          testanswer=get_overall_misclassifications_2_testanswer,
          expected_val=str(get_overall_misclassifications_2_expected),
          name='get_overall_misclassifications')

# No points misclassified by h3
# H misclassifies C
def get_overall_misclassifications_3_getargs():  # TEST 21
    return [[("h1", .5), ("h2", -.3), ("h3", .76)], ['A', 'B', 'C'],
            {'h1': ['A', 'C'], 'h2': ['A', 'B'], 'h3': []}]

get_overall_misclassifications_3_expected = {'C'}

def get_overall_misclassifications_3_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_3_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_3_getargs,
          testanswer=get_overall_misclassifications_3_testanswer,
          expected_val=str(get_overall_misclassifications_3_expected),
          name='get_overall_misclassifications')

# All negative voting powers
# H misclassifies A,B,D
def get_overall_misclassifications_4_getargs():  # TEST 22
    return [[("h1", -.5), ("h2", -.3), ("h3", -.45)], ['A', 'B', 'C', 'D'],
            {'h1': ['A', 'C'], 'h2': ['B', 'C'], 'h3': ['D']}]

get_overall_misclassifications_4_expected = {'A', 'B', 'D'}

def get_overall_misclassifications_4_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_4_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_4_getargs,
          testanswer=get_overall_misclassifications_4_testanswer,
          expected_val=str(get_overall_misclassifications_4_expected),
          name='get_overall_misclassifications')

# misclassified training point is not listed in misclassifications
# same classifier used multiple times
def get_overall_misclassifications_5_getargs():  # TEST 23
    return [[("h1", -0.549), ("h2", 0.347), ("h1", -0.255)],
            ['A', 'B', 'C', 'D'],
            {'h1': ['A', 'B', 'C'], 'h2': ['A', 'C'], 'h3': ['B', 'C']}]

get_overall_misclassifications_5_expected = {'D'}

def get_overall_misclassifications_5_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_5_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_5_getargs,
          testanswer=get_overall_misclassifications_5_testanswer,
          expected_val=str(get_overall_misclassifications_5_expected)
                       + ' (Hint: What happens if a training point is misclassified by '
                       + 'H, but not misclassified by any weak classifier?)',
          name='get_overall_misclassifications')

# one point misclassified, vote is a tie
# (No, this particular situation would not happen in Adaboost.)
def get_overall_misclassifications_6_getargs():  # TEST 24
    return [[("h1", 0.5), ("h2", 0.5)], ['A', 'B'],
            {'h1': ['A'], 'h2': []}]

get_overall_misclassifications_6_expected = {'A'}

def get_overall_misclassifications_6_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_6_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_6_getargs,
          testanswer=get_overall_misclassifications_6_testanswer,
          expected_val=str(get_overall_misclassifications_6_expected)
                       + ' (Hint: This test checks what happens when the vote is a tie.)',
          name='get_overall_misclassifications')

# violates triangle sum property
def get_overall_misclassifications_7_getargs():  # TEST 25
    return [[("h1", 0.5), ("h2", 0.2), ("h3", 0.2)], ['A', 'B', 'C', 'D'],
            {'h1': ['A'], 'h2': ['B'], 'h3': ['C']}]

get_overall_misclassifications_7_expected = {'A'}

def get_overall_misclassifications_7_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_7_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_7_getargs,
          testanswer=get_overall_misclassifications_7_testanswer,
          expected_val=str(get_overall_misclassifications_7_expected)
                       + " (Hint: Make sure you're summing voting powers, not just "
                       + 'counting classifiers.)',
          name='get_overall_misclassifications')

# recitation problem from 2012 Q4; all points correctly classified
def get_overall_misclassifications_8_getargs():  # TEST 26
    H = [('<6', 0.693), ('<2', 0.549), ('>4', 0.805)]
    classifier_to_misclassified = {'<6': ['C'], '<4': ['C', 'B', 'E'],
                                   '<2': ['B', 'E'], '>2': ['A', 'C', 'D'],
                                   '>4': ['A', 'D'], '>6': ['A', 'B', 'D', 'E']}
    return [H, ['A', 'B', 'C', 'D', 'E'], classifier_to_misclassified]

get_overall_misclassifications_8_expected = set()

def get_overall_misclassifications_8_testanswer(val, original_val=None):
    return val == get_overall_misclassifications_8_expected

make_test(type='FUNCTION_ENCODED_ARGS',
          getargs=get_overall_misclassifications_8_getargs,
          testanswer=get_overall_misclassifications_8_testanswer,
          expected_val=str(get_overall_misclassifications_8_expected),
          name='get_overall_misclassifications')
#same classifier used multiple times
def get_overall_misclassifications_9_getargs() : #TEST 27
H = [('good_h', 0.1), ('bad_h1', 0.14), ('good_h', 0.1), ('bad_h2', 0.14),
('good_h', 0.1), ('bad_h3', 0.04)]
classifier_to_misclassified = {'good_h': ['A'], 'bad_h1': ['B', 'C'],
'bad_h2': ['C', 'D'], 'bad_h3': ['B', 'D']}
return [H, list('ABCD'), classifier_to_misclassified]
get_overall_misclassifications_9_expected = | |
import os
import numpy
import scipy
import scipy.optimize
from cryspy.A_functions_base.symmetry_elements import \
calc_asymmetric_unit_cell_indexes
from cryspy.A_functions_base.mempy import \
calc_mem_col, \
calc_mem_chi, \
calc_symm_elem_points_by_index_points, \
get_uniform_density_col, \
renormailize_density_col, \
save_spin_density_into_file,\
form_basins,\
calc_point_susceptibility, \
get_uniform_density_chi,\
renormailize_density_chi, \
calc_model_value_by_precalculated_data, \
calc_chi_atoms
from cryspy.A_functions_base.unit_cell import \
calc_volume_uc_by_unit_cell_parameters, \
calc_sthovl_by_unit_cell_parameters, \
calc_eq_ccs_by_unit_cell_parameters
from cryspy.A_functions_base.structure_factor import \
calc_f_nucl_by_dictionary
from cryspy.A_functions_base.flip_ratio import \
calc_iint, calc_flip_ratio_by_iint, \
calc_asymmetry_by_iint
from cryspy.A_functions_base.extinction import \
calc_extinction_sphere
from cryspy.A_functions_base.orbital_functions import \
calc_density_spherical
from cryspy.A_functions_base.matrix_operations import \
calc_vv_as_v1_v2_v1
from cryspy.A_functions_base.function_1_error_simplex import \
error_estimation_simplex
def mempy_reconstruction_by_dictionary(dict_crystal, dict_mem_parameters, l_dict_diffrn, dict_in_out,
parameter_lambda:float=1.e-5, iteration_max:int=1000, parameter_lambda_min:float=1.e-9, delta_density:float=1.e-5):
# **Input information about mem parameters**
print("*******************************************")
print("MEM reconstruction by CrysPy (module MEMPy)")
print("*******************************************\n")
print("MEM iteration parameters")
print("------------------------")
print(f" starting lambda parameter: {parameter_lambda*1e6:.3f}*10^-6")
print(f" maximal number of iterations: {iteration_max:}")
print(f" minimal lambda parameter: {parameter_lambda_min*1e6:}*10^-6")
print(f" delta_density: {delta_density*1e5:}*10^-5\n")
dict_in_out_keys = dict_in_out.keys()
print("Density reconstruction")
print("----------------------")
n_abc = dict_mem_parameters["points_abc"]
print(f"Unit cell is devided on points {n_abc[0]:} x {n_abc[1]:} x {n_abc[2]:}.")
channel_plus_minus = dict_mem_parameters["channel_plus_minus"]
channel_chi = dict_mem_parameters["channel_chi"]
if channel_plus_minus:
magnetization_plus = dict_mem_parameters["magnetization_plus"]
magnetization_minus = dict_mem_parameters["magnetization_minus"]
file_spin_density = dict_mem_parameters["file_spin_density"]
dict_in_out["magnetization_plus"] = magnetization_plus
dict_in_out["magnetization_minus"] = magnetization_minus
if channel_chi:
flag_uniform_prior_density = dict_mem_parameters["flag_uniform_prior_density"]
flag_only_magnetic_basins = dict_mem_parameters["flag_only_magnetic_basins"]
file_magnetization_density = dict_mem_parameters["file_magnetization_density"]
flag_asymmetry = dict_mem_parameters["flag_asymmetry"]
gof_desired = dict_mem_parameters["gof_desired"]
# **Input information about crystal**
unit_cell_parameters = dict_crystal["unit_cell_parameters"]
full_symm_elems = dict_crystal["full_symm_elems"]
volume_unit_cell = calc_volume_uc_by_unit_cell_parameters(unit_cell_parameters, flag_unit_cell_parameters=False)[0]
reduced_symm_elems = dict_crystal["reduced_symm_elems"]
centrosymmetry = dict_crystal["centrosymmetry"]
if centrosymmetry:
centrosymmetry_position = dict_crystal["centrosymmetry_position"]
else:
centrosymmetry_position = None
translation_elems = dict_crystal["translation_elems"]
atom_label = dict_crystal["atom_label"]
atom_fract_xyz = dict_crystal["atom_fract_xyz"]
atom_multiplicity = dict_crystal["atom_multiplicity"]
if channel_chi:
atom_para_label = dict_crystal["atom_para_label"]
atom_para_susceptibility = dict_crystal["atom_para_susceptibility"]
atom_para_sc_chi = dict_crystal["atom_para_sc_chi"]
# **Index in asymmetric unit cell**
print("Calculation of asymmetric unit cell...", end="\r")
index_auc, point_multiplicity = calc_asymmetric_unit_cell_indexes(n_abc, full_symm_elems)
symm_elem_auc = calc_symm_elem_points_by_index_points(index_auc, n_abc)
print(f"Number of points in asymmetric unit cell is {index_auc.shape[1]:}.", end="\n")
# **Basin devision**
if channel_chi and flag_only_magnetic_basins:
print("Devision of asymmetric unit cell on bassins...", end="\r")
flag_atom_para = numpy.any(numpy.expand_dims(atom_label, axis=1) == numpy.expand_dims(atom_para_label, axis=0), axis=1)
flag_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, atom_distance_auc_chi, atom_symm_elems_auc_chi = \
form_basins(symm_elem_auc, full_symm_elems, unit_cell_parameters, atom_label[flag_atom_para],
atom_fract_xyz[:,flag_atom_para], atom_multiplicity[flag_atom_para], atom_para_label)
dict_in_out["atom_multiplicity_channel_chi"] = atom_multiplicity_auc_chi
print(f"Magnetic basins occupy entire unit cell. \n(flag_only_magnetic_basins: {flag_only_magnetic_basins:})\n")
elif channel_chi:
print("Devision of asymmetric unit cell on bassins...", end="\r")
flag_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, atom_distance_auc_chi, atom_symm_elems_auc_chi = \
form_basins(symm_elem_auc, full_symm_elems, unit_cell_parameters, atom_label,
atom_fract_xyz, atom_multiplicity, atom_para_label)
dict_in_out["atom_multiplicity_channel_chi"] = atom_multiplicity_auc_chi
print(f"Magnetic basins occupy area around magnetic atoms. \n(flag_only_magnetic_basins: {flag_only_magnetic_basins:})\n")
if channel_chi:
index_auc_chi = index_auc[:, flag_chi]
point_multiplicity_chi = point_multiplicity[flag_chi]
dict_in_out["point_multiplicity_channel_chi"] = point_multiplicity_chi
symm_elem_auc_chi = symm_elem_auc[:, flag_chi]
dict_in_out["symm_elem_channel_chi"] = symm_elem_auc_chi
if channel_plus_minus and channel_chi:
flag_col = numpy.logical_not(flag_chi)
index_auc_col = index_auc[:, flag_col]
point_multiplicity_col = point_multiplicity[flag_col]
symm_elem_auc_col = symm_elem_auc[:, flag_col]
dict_in_out["point_multiplicity_channel_plus_minus"] = point_multiplicity_col
dict_in_out["symm_elem_channel_plus_minus"] = symm_elem_auc_col
elif channel_plus_minus:
index_auc_col = numpy.copy(index_auc)
point_multiplicity_col = numpy.copy(point_multiplicity)
symm_elem_auc_col = numpy.copy(symm_elem_auc)
dict_in_out["point_multiplicity_channel_plus_minus"] = point_multiplicity_col
dict_in_out["symm_elem_channel_plus_minus"] = symm_elem_auc_col
print(f"channel_plus_minus: {channel_plus_minus:}")
print(f"channel_chi: {channel_chi:}\n")
if channel_plus_minus:
print(f"Magnetization of unit cell: {magnetization_plus+magnetization_minus:.3f} mu_B")
print(f"(positive channel {magnetization_plus:.3f} mu_B, negative channel {magnetization_minus:.3f} mu_B)")
print(f"\nNumber of density points for channel_plus_minus is {index_auc_col.shape[1]}.")
if channel_chi:
print(f"Number of density points for channel_chi is {index_auc_chi.shape[1]}.")
# **Susceptibility tensor $(3\times 3)$ for each point in magnetic basin**
if channel_chi:
print("Calculation of restriction on susceptibility...", end="\r")
point_susceptibility = calc_point_susceptibility(
unit_cell_parameters, atom_symm_elems_auc_chi, atom_label_auc_chi,
atom_para_label, atom_para_susceptibility, atom_para_sc_chi, full_symm_elems, symm_elem_auc_chi)
dict_in_out["susceptibility_channel_chi"] = point_susceptibility
print(80*" ", end="\r")
# **Prior density**
number_unit_cell = numpy.prod(n_abc)
print("\nCalculation of prior density... ", end="\r")
if channel_chi:
if flag_uniform_prior_density:
density_chi_prior = get_uniform_density_chi(point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
print("Prior density in channel chi is uniform. ")
else:
density_chi_prior = numpy.zeros_like(atom_distance_auc_chi)
for label in atom_para_label:
flag_atom = atom_label_auc_chi==label
dict_shell = dict_crystal[f"shell_{label:}"]
kappa = float(dict_crystal["mag_atom_kappa"][dict_crystal["mag_atom_label"] == label])
den_atom = calc_density_spherical(
atom_distance_auc_chi[flag_atom], dict_shell["core_population"], dict_shell["core_coeff"], dict_shell["core_zeta"],
dict_shell["core_n"], kappa)
density_chi_prior[flag_atom] = den_atom
density_chi_prior = renormailize_density_chi(density_chi_prior, point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
print("Prior density in channel chi is core. ")
if channel_plus_minus:
density_col_prior = get_uniform_density_col(point_multiplicity_col, volume_unit_cell, number_unit_cell)
print("Prior density in channel plus-minus is uniform. ")
# **Input information about experiments**
flag_use_precalculated_data = False
l_exp_value_sigma = []
l_mem_chi, l_mem_col = [], []
print(f"Number of experiments is {len(l_dict_diffrn):}. ")
for dict_diffrn in l_dict_diffrn:
if "dict_in_out_"+dict_diffrn["type_name"] in dict_in_out_keys:
diffrn_dict_in_out = dict_in_out["dict_in_out_"+dict_diffrn["type_name"]]
else:
diffrn_dict_in_out = {}
dict_in_out["dict_in_out_"+dict_diffrn["type_name"]] = diffrn_dict_in_out
index_hkl = dict_diffrn["index_hkl"]
h_ccs = dict_diffrn["magnetic_field"]
eh_ccs = dict_diffrn["matrix_u"][6:]
print(f"Preliminary calculation for experiment {dict_diffrn['name']:}...", end="\r")
diffrn_dict_in_out["index_hkl"] = index_hkl
diffrn_dict_in_out_keys = diffrn_dict_in_out.keys()
if channel_plus_minus:
if "dict_in_out_col" in diffrn_dict_in_out_keys:
dict_in_out_col = diffrn_dict_in_out["dict_in_out_col"]
else:
dict_in_out_col = {}
diffrn_dict_in_out["dict_in_out_col"] = dict_in_out_col
mem_col = calc_mem_col(
index_hkl, unit_cell_parameters, eh_ccs, full_symm_elems, symm_elem_auc_col,
volume_unit_cell, number_unit_cell,
point_multiplicity=point_multiplicity_col,
dict_in_out=dict_in_out_col, flag_use_precalculated_data=flag_use_precalculated_data)
diffrn_dict_in_out["mem_col"] = mem_col
l_mem_col.append(mem_col)
if channel_chi:
if "dict_in_out_chi" in diffrn_dict_in_out_keys:
dict_in_out_chi = diffrn_dict_in_out["dict_in_out_chi"]
else:
dict_in_out_chi = {}
diffrn_dict_in_out["dict_in_out_chi"] = dict_in_out_chi
mem_chi = calc_mem_chi(
index_hkl, unit_cell_parameters, h_ccs, full_symm_elems, symm_elem_auc_chi,
point_susceptibility, volume_unit_cell, number_unit_cell,
point_multiplicity=point_multiplicity_chi,
dict_in_out=dict_in_out_chi, flag_use_precalculated_data=flag_use_precalculated_data)
diffrn_dict_in_out["mem_chi"] = mem_chi
l_mem_chi.append(mem_chi)
f_nucl, dder = calc_f_nucl_by_dictionary(
dict_crystal, diffrn_dict_in_out, flag_use_precalculated_data=flag_use_precalculated_data)
diffrn_dict_in_out["f_nucl"] = f_nucl
flip_ratio_es = dict_diffrn["flip_ratio_es"]
if flag_asymmetry:
asymmetry_e = (flip_ratio_es[0] -1.)/(flip_ratio_es[0] + 1.)
asymmetry_s = numpy.sqrt(2.)*flip_ratio_es[1] * numpy.sqrt(numpy.square(flip_ratio_es[0]) + 1.)/numpy.square(flip_ratio_es[0] + 1.)
asymmetry_es = numpy.stack([asymmetry_e, asymmetry_s], axis=0)
l_exp_value_sigma.append(asymmetry_es)
else:
l_exp_value_sigma.append(flip_ratio_es)
exp_value_sigma = numpy.concatenate(l_exp_value_sigma, axis=1)
if channel_plus_minus:
mem_col = numpy.concatenate(l_mem_col, axis=1)
if channel_chi:
mem_chi = numpy.concatenate(l_mem_chi, axis=1)
print(f"Total number of reflections is {exp_value_sigma.shape[1]: }. ")
if flag_asymmetry:
print("Density reconstruction is based on asymmetry parameters.")
else:
print("Density reconstruction is based on flip ratios. ")
# **Preaparation to MEM itertion procedure**
if channel_plus_minus:
density_col = numpy.copy(density_col_prior)
density_col_next = numpy.copy(density_col_prior)
if channel_chi:
density_chi = numpy.copy(density_chi_prior)
density_chi_next = numpy.copy(density_chi_prior)
# **MEM iteration**
print("\nMEM iteration procedure")
print("-----------------------")
print(f"Desired GoF is {gof_desired:.2f}.")
c_desired = gof_desired
c_previous = numpy.inf
if channel_plus_minus:
der_c_den_col_previous = numpy.zeros_like(density_col_prior)
if channel_chi:
der_c_den_chi_previous = numpy.zeros_like(density_chi_prior)
iteration = 0
flag_next = True
while flag_next:
iteration += 1
if channel_plus_minus:
density_col = numpy.copy(density_col_next)
if channel_chi:
density_chi = numpy.copy(density_chi_next)
l_model_value = []
l_der_model_den_pm, l_der_model_den_chi = [], []
for dict_diffrn in l_dict_diffrn:
diffrn_dict_in_out = dict_in_out["dict_in_out_"+dict_diffrn['type_name']]
index_hkl = diffrn_dict_in_out["index_hkl"]
f_m_perp = numpy.zeros(index_hkl.shape, dtype=complex)
if channel_plus_minus:
mem_col_exp = diffrn_dict_in_out["mem_col"]
hh = numpy.expand_dims(numpy.expand_dims(magnetization_plus * density_col[0] + magnetization_minus * density_col[1], axis=0), axis=1)
f_m_perp_col = (hh*mem_col_exp).sum(axis=2)
f_m_perp += f_m_perp_col
if channel_chi:
mem_chi_exp = diffrn_dict_in_out["mem_chi"]
f_m_perp_chi = (density_chi*mem_chi_exp).sum(axis=2)
f_m_perp += f_m_perp_chi
beam_polarization = dict_diffrn["beam_polarization"]
flipper_efficiency = dict_diffrn["flipper_efficiency"]
matrix_u = dict_diffrn["matrix_u"]
flip_ratio_es = dict_diffrn["flip_ratio_es"]
f_nucl = diffrn_dict_in_out["f_nucl"]
wavelength = dict_diffrn["wavelength"]
sthovl = calc_sthovl_by_unit_cell_parameters(index_hkl, unit_cell_parameters, flag_unit_cell_parameters=False)[0]
cos_2theta = numpy.cos(2*numpy.arcsin(sthovl*wavelength))
extinction_model = dict_diffrn["extinction_model"]
extinction_radius = dict_diffrn["extinction_radius"]
extinction_mosaicity = dict_diffrn["extinction_mosaicity"]
func_extinction = lambda f_sq, flag_f_sq: calc_extinction_sphere(
f_sq, extinction_radius, extinction_mosaicity, volume_unit_cell, cos_2theta, wavelength,
extinction_model, flag_f_sq=False, flag_radius=False,
flag_mosaicity=False,
flag_volume_unit_cell=False,
flag_cos_2theta=False,
flag_wavelength=False)
iint_plus, iint_minus, dder_plus, dder_minus = calc_iint(
beam_polarization, flipper_efficiency, f_nucl, f_m_perp, matrix_u, func_extinction = func_extinction,
flag_beam_polarization = False, flag_flipper_efficiency = False,
flag_f_nucl = False, flag_f_m_perp = True,
dict_in_out = dict_in_out, flag_use_precalculated_data = flag_use_precalculated_data)
diffrn_dict_in_out["flip_ratio"] = iint_plus/iint_minus
der_int_plus_fm_perp_real = dder_plus["f_m_perp_real"]
der_int_plus_fm_perp_imag = dder_plus["f_m_perp_imag"]
der_int_minus_fm_perp_real = dder_minus["f_m_perp_real"]
der_int_minus_fm_perp_imag = dder_minus["f_m_perp_imag"]
if flag_asymmetry:
model_exp, dder_model_exp = calc_asymmetry_by_iint(
iint_plus, iint_minus, c_lambda2=None, iint_2hkl=None,
flag_iint_plus=True, flag_iint_minus=True,
flag_c_lambda2=False, flag_iint_2hkl=False)
else:
model_exp, dder_model_exp = calc_flip_ratio_by_iint(
iint_plus, iint_minus, c_lambda2=None, iint_2hkl=None,
flag_iint_plus=True, flag_iint_minus=True,
flag_c_lambda2=False, flag_iint_2hkl=False)
l_model_value.append(model_exp)
der_model_int_plus = numpy.expand_dims(dder_model_exp["iint_plus"], axis=0)
der_model_int_minus = numpy.expand_dims(dder_model_exp["iint_minus"], axis=0)
if channel_plus_minus:
der_model_den_pm_exp = (
(mem_col_exp.real*numpy.expand_dims(
der_model_int_plus*der_int_plus_fm_perp_real +
der_model_int_minus*der_int_minus_fm_perp_real, axis=2)
).sum(axis=0) +
(mem_col_exp.imag*numpy.expand_dims(
der_model_int_plus*der_int_plus_fm_perp_imag +
der_model_int_minus*der_int_minus_fm_perp_imag, axis=2)
).sum(axis=0))
l_der_model_den_pm.append(der_model_den_pm_exp)
if channel_chi:
der_model_den_chi_exp = (
(mem_chi_exp.real*numpy.expand_dims(
der_model_int_plus*der_int_plus_fm_perp_real +
der_model_int_minus*der_int_minus_fm_perp_real, axis=2)
).sum(axis=0) +
(mem_chi_exp.imag*numpy.expand_dims(
der_model_int_plus*der_int_plus_fm_perp_imag +
der_model_int_minus*der_int_minus_fm_perp_imag, axis=2)
).sum(axis=0))
l_der_model_den_chi.append(der_model_den_chi_exp)
model_value = numpy.concatenate(l_model_value, axis=0)
diff_value = (exp_value_sigma[0]-model_value)/exp_value_sigma[1]
c = numpy.square(diff_value).sum(axis=0)/diff_value.shape[0]
if channel_plus_minus:
der_model_den_pm = numpy.concatenate(l_der_model_den_pm, axis=0)
der_c_den_pm = (-2.)/diff_value.shape[0] * (
numpy.expand_dims((diff_value/exp_value_sigma[1]),axis=1) *
der_model_den_pm).sum(axis=0)
der_c_den_col = numpy.stack([magnetization_plus * der_c_den_pm, magnetization_minus * der_c_den_pm], axis=0)
if channel_chi:
der_model_den_chi = numpy.concatenate(l_der_model_den_chi, axis=0)
der_c_den_chi = (-2.)/diff_value.shape[0] * (
numpy.expand_dims((diff_value/exp_value_sigma[1]),axis=1) *
der_model_den_chi).sum(axis=0)
if c > c_previous:
parameter_lambda = 0.5 * parameter_lambda
c = c_previous
if channel_plus_minus:
density_col = numpy.copy(density_col_previous)
der_c_den_col = der_c_den_col_previous
if channel_chi:
density_chi = numpy.copy(density_chi_previous)
der_c_den_chi = der_c_den_chi_previous
else:
c_previous = c
parameter_lambda = 1.03 * parameter_lambda
if channel_plus_minus:
density_col_previous = numpy.copy(density_col)
der_c_den_col_previous = der_c_den_col
if channel_chi:
density_chi_previous = numpy.copy(density_chi)
der_c_den_chi_previous = der_c_den_chi
print(f"Iteration {iteration:5}, lambda {parameter_lambda*1e6:.3f}*10^-6, chi_sq: {c:.2f} ", end='\r')
if channel_plus_minus:
coeff = (parameter_lambda*number_unit_cell/(c_desired*volume_unit_cell))/point_multiplicity_col
hh = (density_col+delta_density)*numpy.exp(-coeff*der_c_den_col)-delta_density
hh = numpy.where(hh>0, hh, 0)
density_col_next = renormailize_density_col(hh, point_multiplicity_col, volume_unit_cell, number_unit_cell)
if channel_chi:
coeff = (parameter_lambda*number_unit_cell/(c_desired*volume_unit_cell))*atom_multiplicity_auc_chi/point_multiplicity_chi
hh = (density_chi+delta_density)*numpy.exp(-coeff*der_c_den_chi)-delta_density
hh = numpy.where(hh>0, hh, 0)
density_chi_next = renormailize_density_chi(hh, point_multiplicity_chi, atom_label_auc_chi, atom_multiplicity_auc_chi, volume_unit_cell, number_unit_cell)
if iteration >= iteration_max:
flag_next = False
print(f"Maximal number of iteration is reached ({iteration:}). ", end='\n')
if parameter_lambda < parameter_lambda_min:
flag_next = False
print(f"Minimal value of parameter lambda {parameter_lambda*1e6:.3f}*10^-6 is reached at iteration {iteration:}. ", end='\n')
if c <= c_desired:
flag_next = False
print(f"Desired | |
from functools import reduce
from operator import __mul__
import pytest
from treevalue.tree import func_treelize, TreeValue, method_treelize, classmethod_treelize, delayed
# noinspection DuplicatedCode
@pytest.mark.unittest
class TestTreeFuncFunc:
def test_tree_value_type(self):
    """Exercise ``func_treelize``'s ``return_type`` together with its
    ``rise``, ``subside``, ``missing``, mode, and ``inherit`` options."""
    class _MyTreeValue(TreeValue):
        pass

    @func_treelize(return_type=_MyTreeValue)
    def ssum(*args):
        return sum(args)

    t1 = TreeValue({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
    t2 = TreeValue({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 44}})
    tr1 = ssum(t1, t2)
    # The result compares unequal to a plain TreeValue but equal to the
    # requested subclass, and the subclass propagates into subtrees.
    assert tr1 != TreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
    assert tr1 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
    assert isinstance(tr1, _MyTreeValue)
    assert isinstance(tr1.x, _MyTreeValue)

    @func_treelize(return_type=_MyTreeValue)
    def ssum2(*args):
        return sum(args), reduce(__mul__, args, 1)

    # Without ``rise``, each leaf holds the whole (sum, product) tuple.
    tr2 = ssum2(t1, t2)
    assert tr2 == _MyTreeValue({'a': (12, 11), 'b': (24, 44), 'x': {'c': (36, 99), 'd': (48, 176)}})

    @func_treelize(return_type=_MyTreeValue, rise=True)
    def ssum3(*args):
        return sum(args), reduce(__mul__, args, 1)

    # With ``rise=True`` the tuple structure is lifted out of the tree,
    # yielding one tree per tuple element.
    tr3, tr4 = ssum3(t1, t2)
    assert tr3 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
    assert tr4 == _MyTreeValue({'a': 11, 'b': 44, 'x': {'c': 99, 'd': 176}})

    @func_treelize(return_type=_MyTreeValue, subside=True, rise=dict(template=(None, None)))
    def ssum4(args):
        return sum(args), reduce(__mul__, args, 1)

    # ``subside=True`` accepts the list-of-trees argument form.
    tr5, tr6 = ssum4([t1, t2])
    assert tr5 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
    assert tr6 == _MyTreeValue({'a': 11, 'b': 44, 'x': {'c': 99, 'd': 176}})

    @func_treelize()
    def ssum5(a, b, c):
        return a + b * c

    t3 = TreeValue({'a': 31, 'b': 12, 'x': {'c': 43, 'd': 24}})
    # Constants and trees may be mixed, positionally and as keywords.
    assert ssum5(1, c=3, b=5) == 16
    assert ssum5(t2, c=t1, b=t3) == TreeValue({
        'a': 42,
        'b': 46,
        'x': {
            'c': 162,
            'd': 140,
        }
    })
    assert ssum5(t2, c=2, b=t3) == TreeValue({
        'a': 73,
        'b': 46,
        'x': {
            'c': 119,
            'd': 92,
        }
    })

    @func_treelize('outer', missing=lambda: 1)
    def ssum6(a, b, c):
        return a + b * c

    # t4 lacks the 'x.d' key: the default mode raises KeyError, while
    # 'outer' with ``missing`` fills the gap (here with 1).
    t4 = TreeValue({'a': 31, 'b': 12, 'x': {'c': 43}})
    with pytest.raises(KeyError):
        ssum5(t2, c=2, b=t4)
    assert ssum6(t2, c=2, b=t4) == TreeValue({
        'a': 73,
        'b': 46,
        'x': {
            'c': 119,
            'd': 46,
        }
    })

    @func_treelize('left')
    def ssum7(a, b, c):
        return a + b * c

    # 'left' mode also raises on the key missing from t4.
    with pytest.raises(KeyError):
        ssum7(t2, c=2, b=t4)

    @func_treelize(inherit=False)
    def ssum8(a, b, c):
        return a + b * c

    # With ``inherit=False``, mixing a plain constant with trees raises.
    with pytest.raises(TypeError):
        ssum8(t2, c=2, b=t1)
def test_tree_value_type_none(self):
    """With ``return_type=None`` the treelized call discards its result."""
    @func_treelize(return_type=None)
    def ssum(*args):
        # Plain sum over leaf values; the wrapper drops the output.
        return sum(args)

    left = TreeValue({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
    right = TreeValue({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 44}})
    outcome = ssum(left, right)
    assert outcome is None
def test_tree_value_type_invalid(self):
    """A ``return_type`` that is not a TreeValue subclass must raise TypeError."""
    class _MyTreeValue:
        # Deliberately NOT derived from TreeValue.
        pass

    with pytest.raises(TypeError):
        # noinspection PyTypeChecker
        @func_treelize(return_type=_MyTreeValue)
        def ssum(*args):
            return sum(args)

    # A non-class value is rejected as well.
    with pytest.raises(TypeError):
        # noinspection PyTypeChecker
        @func_treelize(return_type=233)
        def ssum(*args):
            return sum(args)
def test_method_treelize(self):
    """Exercise ``method_treelize`` on dunder methods, on attribute access
    of leaf objects, and on the ``self_copy``/``rise`` warning path."""
    class TreeNumber(TreeValue):
        @method_treelize()
        def _attr_extern(self, key):
            # Delegate unknown attribute access to each leaf value.
            return getattr(self, key)

        @method_treelize('outer', missing=0)
        def __add__(self, other):
            return self + other

        @method_treelize('outer', missing=0)
        def __radd__(self, other):
            return other + self

        @method_treelize('outer', missing=0)
        def __sub__(self, other):
            return self - other

        @method_treelize('outer', missing=0)
        def __rsub__(self, other):
            return other - self

        @method_treelize()
        def __pos__(self):
            return +self

        @method_treelize()
        def __neg__(self):
            return -self

        @method_treelize()
        def __call__(self, *args, **kwargs):
            return self(*args, **kwargs)

        @method_treelize(return_type=TreeValue)
        def damn_it(self, x):
            return self + x

    t1 = TreeNumber({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
    t2 = TreeNumber({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 5}})
    # Arithmetic (including reflected forms with plain ints) is leaf-wise.
    assert (t1 + t2 + 1) == TreeNumber({'a': 13, 'b': 25, 'x': {'c': 37, 'd': 10}})
    assert (t1 - t2) == TreeNumber({'a': -10, 'b': -20, 'x': {'c': -30, 'd': -1}})
    assert (1 - t2) == TreeNumber({'a': -10, 'b': -21, 'x': {'c': -32, 'd': -4}})
    # ``return_type=TreeValue`` downgrades the result to the base class.
    assert t1.damn_it(2) == TreeValue({'a': 3, 'b': 4, 'x': {'c': 5, 'd': 6}})

    class P:
        def __init__(self, value):
            self.__value = value

        @property
        def value(self):
            return self.__value

        def vv(self):
            return self.__value + 1

    # Property and method access on leaf objects is lifted into trees
    # (presumably via the _attr_extern hook above — see library docs).
    ttt = TreeNumber({"a": P(1), "b": P(2), "x": {"c": P(3), "d": P(4)}})
    assert ttt.value == TreeNumber({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
    assert ttt.vv() == TreeNumber({'a': 2, 'b': 3, 'x': {'c': 4, 'd': 5}})

    # Combining ``self_copy=True`` with ``rise=True`` must warn.
    with pytest.warns(UserWarning):
        class MyTreeValue(TreeValue):
            @method_treelize(self_copy=True, rise=True)
            def __iadd__(self, other):
                return self + other
def test_classmethod_treelize(self):
    """Classmethod treelization passes ``cls`` through to every leaf call."""
    class Utils:
        @classmethod
        @classmethod_treelize('outer', missing=0, return_type=TreeValue)
        def add(cls, a, b):
            return cls, a + b

        @classmethod
        @classmethod_treelize(return_type=TreeValue)
        def add2(cls, a, b):
            return cls, a + b

    # Plain scalars behave like an ordinary classmethod call.
    assert Utils.add(1, 2) == (Utils, 3)
    # Each leaf receives the owning class alongside the computed value.
    assert Utils.add(TreeValue({'a': 1, 'b': 2}), 2) == TreeValue({'a': (Utils, 3), 'b': (Utils, 4)})
    assert Utils.add2(TreeValue({'a': 1, 'b': 2}), TreeValue({'a': 12, 'b': 22})) == TreeValue(
        {'a': (Utils, 13), 'b': (Utils, 24)})

    class MyTreeValue(TreeValue):
        @classmethod
        @classmethod_treelize()
        def plus(cls, x, y):
            return x + y

    # On a TreeValue subclass, the result adopts that subclass.
    assert MyTreeValue.plus(TreeValue({'a': 1, 'b': 2}), 2) == MyTreeValue({'a': 3, 'b': 4})
def test_missing(self):
    """``missing`` supplies a default leaf wherever a key is absent."""
    @func_treelize(mode='outer', missing=lambda: [])
    def append(arr: list, *args):
        # Collect every truthy argument into the accumulator list.
        for item in args:
            if item:
                arr.append(item)
        return arr

    # All trees share the same key set; the empty accumulator tree grows
    # a fresh list (from ``missing``) at every key.
    base = TreeValue({})
    first = TreeValue({'a': 2, 'b': 7, 'x': {'c': 4, 'd': 9}})
    second = TreeValue({'a': 4, 'b': 48, 'x': {'c': -11, 'd': 54}})
    third = TreeValue({'a': 9, 'b': -12, 'x': {'c': 3, 'd': 4}})
    assert append(base, first, second, third) == TreeValue({
        'a': [2, 4, 9],
        'b': [7, 48, -12],
        'x': {
            'c': [4, -11, 3],
            'd': [9, 54, 4],
        }
    })

    # Key sets now differ between trees; 'outer' mode unions them and
    # falsy/missing leaves are simply skipped by the collector.
    base = TreeValue({})
    first = TreeValue({'a': 2, 'x': {'c': 4, 'd': 9}})
    second = TreeValue({'a': 4, 'b': 48, 'x': {'d': 54}})
    third = TreeValue({'b': -12, 'x': 7, 'y': {'e': 3, 'f': 4}})
    assert append(base, first, second, third) == TreeValue({
        'a': [2, 4],
        'b': [48, -12],
        'x': {
            'c': [4, 7],
            'd': [9, 54, 7],
        },
        'y': {
            'e': [3],
            'f': [4],
        },
    })
def test_delay_support(self):
    """Delayed leaves must be resolved before the treelized function runs,
    for both positional and keyword invocation."""
    @func_treelize(return_type=TreeValue)
    def f(x, y, z):
        return x + y * 2 + z * 3

    def build_trees():
        # Fresh trees each time: delayed leaves are resolved on access,
        # so each invocation needs its own unevaluated copies.
        first = TreeValue({
            'a': 1,
            'b': delayed(lambda x: x ** 2, 3),
            'c': {'x': 2, 'y': delayed(lambda: 4)},
        })
        second = TreeValue({
            'a': delayed(lambda x: x + 1, first.a),
            'b': delayed(lambda: first.c.y),
            'c': delayed(lambda: 5),
        })
        return first, second

    expected = TreeValue({
        'a': 23, 'b': 35,
        'c': {'x': 30, 'y': 32},
    })

    # Positional arguments.
    first, second = build_trees()
    assert f(first, second, delayed(lambda: 6)) == expected

    # Keyword arguments.
    first, second = build_trees()
    assert f(x=first, y=second, z=delayed(lambda: 6)) == expected
def test_delayed_treelize(self):
t1 = TreeValue({
'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4},
})
t2 = TreeValue({
'a': 11, 'b': 23, 'x': {'c': 35, 'd': 47},
})
cnt_1 = 0
@func_treelize(delayed=True)
def total(a, b):
nonlocal cnt_1
cnt_1 += 1
return a + b
# positional
t3 = total(t1, t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}
})
assert cnt_1 == 4
# keyword
cnt_1 = 0
t3 = total(a=t1, b=t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}
})
assert cnt_1 == 4
# positional, with constant
cnt_1 = 0
t3 = total(1, t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 36, 'd': 48})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}
})
assert cnt_1 == 4
# keyword, with constant
cnt_1 = 0
t3 = total(b=1, a=t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 36, 'd': 48})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}
})
assert cnt_1 == 4
# positional, with delay
cnt_1 = 0
t4 = TreeValue({'v': delayed(lambda: t1)})
t5 = TreeValue({'v': delayed(lambda: t2)})
t6 = total(t4, t5)
assert cnt_1 == 0
assert t6.v.a == 12
assert cnt_1 == 1
assert t6.v.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t6 == TreeValue({
'v': {'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}},
})
assert cnt_1 == 4
# | |
# composer/utils/_time_conversion.py
# Copyright 2021 MosaicML. All Rights Reserved.
import textwrap
import warnings
from typing import Optional, Union
from composer.core import Time, TimeUnit
def convert(
    time: Union[str, Time],
    unit: Union[TimeUnit, str],
    *,
    steps_per_epoch: Optional[int] = None,
    samples_per_epoch: Optional[int] = None,
    dataset_num_tokens: Optional[int] = None,
    max_training_duration: Optional[Union[str, Time]] = None,
) -> Time:
    r"""Convert a :class:`Time` instance into the specified ``unit``.

    Additional keyword parameters are required depending on the conversion:

    * ``EPOCH`` -> ``BATCH`` / ``SAMPLE`` / ``TOKEN`` requires
      ``steps_per_epoch``, ``samples_per_epoch``, or ``dataset_num_tokens``,
      respectively.
    * ``BATCH`` -> ``EPOCH`` requires ``steps_per_epoch``;
      ``BATCH`` <-> ``SAMPLE`` requires both ``steps_per_epoch`` and
      ``samples_per_epoch``.
    * ``TOKEN`` -> ``EPOCH`` requires ``dataset_num_tokens``.
    * Converting to or from ``DURATION`` requires ``max_training_duration``,
      plus whatever the intermediate conversion needs.
    * All other combinations are unsupported and raise :class:`ValueError`.

    Args:
        time (str or Time): The time to convert.
        unit (Union[TimeUnit, str]): The desired unit to convert the time instance into.
        steps_per_epoch (int, optional): The number of optimization steps per epoch.
        samples_per_epoch (int, optional): The number of samples per epoch.
        dataset_num_tokens (int, optional): The number of tokens in the dataset. Required only if
            converting to or from :attr:`TimeUnit.TOKEN`.
        max_training_duration (str or Time, optional): The total training duration. Required only
            if converting to or from :attr:`TimeUnit.DURATION`.

    Raises:
        ValueError: If it is not possible to perform the conversion.

    Returns:
        Time: The time, in the specified ``unit``.
    """
    warnings.warn(textwrap.dedent("""TimeDeprecationWarning: Time conversion is deprecated.
Instead, please use closed-loop calculations that depend on the current training progress
(available via state.timer) and the total training duration (available via state.max_duration)"""),
                  category=DeprecationWarning)
    if isinstance(time, str):
        time = Time.from_timestring(time)
    unit = TimeUnit(unit)
    if unit == time.unit:
        # No conversion required; return a copy in the same unit.
        return Time(time.value, time.unit)
    if unit == TimeUnit.DURATION or time.unit == TimeUnit.DURATION:
        # DURATION conversions are routed through the unit of
        # ``max_training_duration``, regardless of the other unit involved.
        if max_training_duration is None:
            raise ValueError("max_training_duration is required to convert to or from DURATION")
        if isinstance(max_training_duration, str):
            max_training_duration = Time.from_timestring(max_training_duration)
        max_training_duration_unit = max_training_duration.unit
        if unit == TimeUnit.DURATION:
            # Express ``time`` in the training duration's unit, then take the ratio.
            time_in_max_duration_unit = convert(time,
                                                max_training_duration_unit,
                                                steps_per_epoch=steps_per_epoch,
                                                samples_per_epoch=samples_per_epoch,
                                                dataset_num_tokens=dataset_num_tokens)
            return _convert_to_duration(time_in_max_duration_unit, max_training_duration=max_training_duration)
        else:
            # Express the training duration in the target unit, then scale the fraction.
            max_training_duration_in_units = convert(max_training_duration,
                                                     unit,
                                                     steps_per_epoch=steps_per_epoch,
                                                     samples_per_epoch=samples_per_epoch,
                                                     dataset_num_tokens=dataset_num_tokens)
            converted_time = _convert_from_duration(time, max_training_duration=max_training_duration_in_units)
            return converted_time
    if time.unit == TimeUnit.EPOCH:
        if unit == TimeUnit.BATCH:
            if steps_per_epoch is None:
                raise ValueError("steps_per_epoch is required to convert from EPOCH to BATCH")
            return _convert_epoch_to_batch(time, steps_per_epoch=steps_per_epoch)
        if unit == TimeUnit.SAMPLE:
            if samples_per_epoch is None:
                raise ValueError("samples_per_epoch is required to convert from EPOCH to SAMPLE")
            return _convert_epoch_to_sample(time, samples_per_epoch=samples_per_epoch)
        if unit == TimeUnit.TOKEN:
            if dataset_num_tokens is None:
                raise ValueError("dataset_num_tokens is required to convert from EPOCH to TOKEN")
            return _convert_epoch_to_token(time, dataset_num_tokens=dataset_num_tokens)
    if time.unit == TimeUnit.BATCH:
        if unit == TimeUnit.EPOCH:
            if steps_per_epoch is None:
                # Fixed: message previously named the wrong parameter and direction.
                raise ValueError("steps_per_epoch is required to convert from BATCH to EPOCH")
            return _convert_batch_to_epoch(time, steps_per_epoch=steps_per_epoch)
        if unit == TimeUnit.SAMPLE:
            if steps_per_epoch is None:
                raise ValueError("steps_per_epoch is required to convert from BATCH to SAMPLE")
            if samples_per_epoch is None:
                raise ValueError("samples_per_epoch is required to convert from BATCH to SAMPLE")
            return _convert_batch_to_sample(time, steps_per_epoch=steps_per_epoch, samples_per_epoch=samples_per_epoch)
    if time.unit == TimeUnit.SAMPLE:
        if unit == TimeUnit.EPOCH:
            if samples_per_epoch is None:
                # Fixed: message previously said "SAMPLE to SAMPLE".
                raise ValueError("samples_per_epoch is required to convert from SAMPLE to EPOCH")
            return _convert_sample_to_epoch(time, samples_per_epoch=samples_per_epoch)
        if unit == TimeUnit.BATCH:
            if samples_per_epoch is None:
                # Fixed: these two messages previously described the reverse conversion.
                raise ValueError("samples_per_epoch is required to convert from SAMPLE to BATCH")
            if steps_per_epoch is None:
                raise ValueError("steps_per_epoch is required to convert from SAMPLE to BATCH")
            return _convert_sample_to_batch(time, steps_per_epoch=steps_per_epoch, samples_per_epoch=samples_per_epoch)
    if time.unit == TimeUnit.TOKEN:
        if unit == TimeUnit.EPOCH:
            if dataset_num_tokens is None:
                raise ValueError("dataset_num_tokens is required to convert from TOKEN to EPOCH")
            return _convert_token_to_epoch(time, dataset_num_tokens=dataset_num_tokens)
    raise ValueError(f"Unable to convert from {time.unit} to {unit}")
def _convert_epoch_to_batch(time: Time[int], *, steps_per_epoch: int) -> Time[int]:
    """Convert an epoch-denominated ``time`` to :attr:`TimeUnit.BATCH`.

    Args:
        time (Time): A time whose unit must be :attr:`TimeUnit.EPOCH`.
        steps_per_epoch (int): How many optimization steps make up one epoch.

    Raises:
        RuntimeError: If ``time.unit != TimeUnit.EPOCH``.

    Returns:
        Time: The equivalent time, in :attr:`TimeUnit.BATCH`.
    """
    if time.unit != TimeUnit.EPOCH:
        raise RuntimeError(f"Time {time} units are not epochs.")
    num_batches = time.value * steps_per_epoch
    return Time(num_batches, TimeUnit.BATCH)
def _convert_epoch_to_sample(time: Time[int], *, samples_per_epoch: int) -> Time[int]:
    """Convert an epoch-denominated ``time`` to :attr:`TimeUnit.SAMPLE`.

    Args:
        time (Time): A time whose unit must be :attr:`TimeUnit.EPOCH`.
        samples_per_epoch (int): How many samples are trained in one epoch.

    Raises:
        RuntimeError: If ``time.unit != TimeUnit.EPOCH``.

    Returns:
        Time: The equivalent time, in :attr:`TimeUnit.SAMPLE`.
    """
    if time.unit != TimeUnit.EPOCH:
        raise RuntimeError(f"Time {time} units are not epochs.")
    num_samples = time.value * samples_per_epoch
    return Time(num_samples, TimeUnit.SAMPLE)
def _convert_epoch_to_token(time: Time[int], *, dataset_num_tokens: int) -> Time[int]:
    """Convert an epoch-denominated ``time`` to :attr:`TimeUnit.TOKEN`.

    .. note::
        The conversion is valid only if the dataloader yields all batches
        (i.e. ``drop_last`` is ``False``).

    Args:
        time (Time): A time whose unit must be :attr:`TimeUnit.EPOCH`.
        dataset_num_tokens (int): The number of tokens in the dataset.

    Raises:
        RuntimeError: If ``time.unit != TimeUnit.EPOCH``.

    Returns:
        Time: The equivalent time, in :attr:`TimeUnit.TOKEN`.
    """
    if time.unit != TimeUnit.EPOCH:
        raise RuntimeError(f"Time {time} units are not epochs.")
    num_tokens = time.value * dataset_num_tokens
    return Time(num_tokens, TimeUnit.TOKEN)
def _convert_batch_to_epoch(time: Time[int], *, steps_per_epoch: int) -> Time[int]:
    """Convert a batch-denominated ``time`` to :attr:`TimeUnit.EPOCH`.

    Partial epochs are truncated (floor division).

    Args:
        time (Time): A time whose unit must be :attr:`TimeUnit.BATCH`.
        steps_per_epoch (int): How many optimization steps make up one epoch.

    Raises:
        RuntimeError: If ``time.unit != TimeUnit.BATCH``.

    Returns:
        Time: The equivalent time, in :attr:`TimeUnit.EPOCH`.
    """
    if time.unit != TimeUnit.BATCH:
        raise RuntimeError(f"Time {time} units are not batches.")
    num_epochs = time.value // steps_per_epoch
    return Time(num_epochs, TimeUnit.EPOCH)
def _convert_batch_to_sample(
    time: Time[int],
    *,
    steps_per_epoch: int,
    samples_per_epoch: int,
) -> Time[int]:
    """Convert ``time`` into :attr:`TimeUnit.SAMPLE`. Requires that ``time.unit == TimeUnit.BATCH``.

    Args:
        time (Time): The time
        steps_per_epoch (int): The number of optimization steps per epoch.
        samples_per_epoch (int): The number of samples per epoch.

    Raises:
        RuntimeError: Raised if ``time.unit != TimeUnit.BATCH``
        ValueError: Raised if the batch size is not constant, i.e.
            ``samples_per_epoch`` is not divisible by ``steps_per_epoch``.

    Returns:
        Time: The time, in :attr:`TimeUnit.SAMPLE`.
    """
    if time.unit != TimeUnit.BATCH:
        raise RuntimeError(f"Time {time} units are not batches.")
    if samples_per_epoch % steps_per_epoch != 0:
        # BUG FIX: the message previously contained a literal '%%' (a leftover
        # from %-formatting); this is a plain string, so a single '%' is correct.
        raise ValueError("Cannot determine the batch size as samples_per_epoch % steps_per_epoch != 0")
    batch_size = samples_per_epoch // steps_per_epoch
    return Time(time.value * batch_size, TimeUnit.SAMPLE)
def _convert_sample_to_epoch(time: Time[int], *, samples_per_epoch: int) -> Time[int]:
    """Convert a sample-denominated ``time`` to :attr:`TimeUnit.EPOCH`.

    Partial epochs are truncated (floor division).

    Args:
        time (Time): A time whose unit must be :attr:`TimeUnit.SAMPLE`.
        samples_per_epoch (int): The number of samples per epoch.

    Raises:
        RuntimeError: If ``time.unit != TimeUnit.SAMPLE``.

    Returns:
        Time: The equivalent time, in :attr:`TimeUnit.EPOCH`.
    """
    if time.unit != TimeUnit.SAMPLE:
        raise RuntimeError(f"Time {time} units are not samples.")
    num_epochs = time.value // samples_per_epoch
    return Time(num_epochs, TimeUnit.EPOCH)
def _convert_sample_to_batch(
    time: Time[int],
    *,
    steps_per_epoch: int,
    samples_per_epoch: int,
) -> Time[int]:
    """Convert ``time`` into :attr:`TimeUnit.BATCH`. Requires that ``time.unit == TimeUnit.SAMPLE``.

    Args:
        time (Time): The time
        steps_per_epoch (int): The number of optimization steps per epoch.
        samples_per_epoch (int): The number of samples per epoch.

    Raises:
        RuntimeError: Raised if ``time.unit != TimeUnit.SAMPLE``
        ValueError: Raised if the batch size is not constant, i.e.
            ``samples_per_epoch`` is not divisible by ``steps_per_epoch``.

    Returns:
        Time: The time, in :attr:`TimeUnit.BATCH`.
    """
    if time.unit != TimeUnit.SAMPLE:
        raise RuntimeError(f"Time {time} units are not samples.")
    if samples_per_epoch % steps_per_epoch != 0:
        # BUG FIX: the message previously contained a literal '%%' (a leftover
        # from %-formatting); this is a plain string, so a single '%' is correct.
        raise ValueError("Cannot determine the batch size as samples_per_epoch % steps_per_epoch != 0")
    batch_size = samples_per_epoch // steps_per_epoch
    return Time(time.value // batch_size, TimeUnit.BATCH)
def _convert_token_to_epoch(time: Time[int], *, dataset_num_tokens: int) -> Time[int]:
    """Convert a token-denominated ``time`` to :attr:`TimeUnit.EPOCH`.

    .. note::
        The conversion is valid only if the dataloader yields all batches
        (i.e. ``drop_last`` == ``False``).

    Args:
        time (Time): A time whose unit must be :attr:`TimeUnit.TOKEN`.
        dataset_num_tokens (int): The number of tokens in the dataset.

    Raises:
        RuntimeError: If ``time.unit != TimeUnit.TOKEN``.

    Returns:
        Time: The equivalent time, in :attr:`TimeUnit.EPOCH`.
    """
    if time.unit != TimeUnit.TOKEN:
        raise RuntimeError(f"Time {time} units are not tokens.")
    num_epochs = time.value // dataset_num_tokens
    return Time(num_epochs, TimeUnit.EPOCH)
def _convert_to_duration(time: Time, *, max_training_duration: Union[str, Time[int]]) -> Time[float]:
    """Express ``time`` as a fraction of the total training run (:attr:`TimeUnit.DURATION`).

    Args:
        time (Time): The time to convert.
        max_training_duration (str or Time): The total training duration.

    Returns:
        Time: ``time`` in :attr:`TimeUnit.DURATION`.
    """
    # Already a duration: hand back a copy rather than the caller's instance.
    if time.unit == TimeUnit.DURATION:
        return Time(time.value, time.unit)
    total = max_training_duration
    if isinstance(total, str):
        total = Time.from_timestring(total)
    return time / total
def _convert_from_duration(time: Time[float], *, max_training_duration: Union[str, Time[int]]) -> Time:
"""Convert ``time`` | |
I1IiiI - iIii1I11I1II1
if ( OOo000 == LISP_DDT_ACTION_DELEGATION_HOLE ) :
lisp_send_negative_map_reply ( O0o00000o0O . lisp_sockets , OOoO . eid ,
OOoO . group , O0o00000o0O . nonce , O0o00000o0O . itr , O0o00000o0O . sport , 15 , None , False )
O0o00000o0O . dequeue_map_request ( )
if 1 - 1: O0 . Ii1I % Ii1I + II111iiii . oO0o
if 24 - 24: o0oOOo0O0Ooo . I1Ii111 % O0
if ( OOo000 == LISP_DDT_ACTION_NOT_AUTH ) :
if ( O0o00000o0O . tried_root ) :
lisp_send_negative_map_reply ( O0o00000o0O . lisp_sockets , OOoO . eid ,
OOoO . group , O0o00000o0O . nonce , O0o00000o0O . itr , O0o00000o0O . sport , 0 , None , False )
O0o00000o0O . dequeue_map_request ( )
else :
lisp_send_ddt_map_request ( O0o00000o0O , True )
if 67 - 67: I1IiiI * Ii1I
if 64 - 64: OOooOOo
if 90 - 90: iII111i . OoOoOO00 + i1IIi % ooOoO0o * I11i + OoooooooOO
if ( OOo000 == LISP_DDT_ACTION_MS_NOT_REG ) :
if ( OOoO . referral_set . has_key ( IiII1iiI ) ) :
ii = OOoO . referral_set [ IiII1iiI ]
ii . updown = False
if 2 - 2: o0oOOo0O0Ooo . II111iiii
if ( len ( OOoO . referral_set ) == 0 ) :
O0o00000o0O . dequeue_map_request ( )
else :
lisp_send_ddt_map_request ( O0o00000o0O , False )
if 9 - 9: I1Ii111 - II111iiii + OoOoOO00 . OoO0O00
if 33 - 33: Oo0Ooo
if 12 - 12: i11iIiiIii . Oo0Ooo / OoOoOO00 + iII111i . Ii1I + ooOoO0o
if ( OOo000 in ( LISP_DDT_ACTION_NODE_REFERRAL ,
LISP_DDT_ACTION_MS_REFERRAL ) ) :
if ( O0o00000o0O . eid . is_exact_match ( iiI . eid ) ) :
if ( not O0o00000o0O . tried_root ) :
lisp_send_ddt_map_request ( O0o00000o0O , True )
else :
lisp_send_negative_map_reply ( O0o00000o0O . lisp_sockets ,
OOoO . eid , OOoO . group , O0o00000o0O . nonce , O0o00000o0O . itr ,
O0o00000o0O . sport , 15 , None , False )
O0o00000o0O . dequeue_map_request ( )
if 66 - 66: IiII
else :
lisp_send_ddt_map_request ( O0o00000o0O , False )
if 41 - 41: II111iiii + Oo0Ooo / iII111i . IiII / iII111i / I1IiiI
if 78 - 78: o0oOOo0O0Ooo % OoOoOO00 . O0
if 41 - 41: iIii1I11I1II1 . OOooOOo - Oo0Ooo % OOooOOo
if ( OOo000 == LISP_DDT_ACTION_MS_ACK ) : O0o00000o0O . dequeue_map_request ( )
if 90 - 90: i11iIiiIii + OoooooooOO - i11iIiiIii + OoooooooOO
return
# NOTE(review): the `if <n> - <n>:` statements below are obfuscation filler;
# every condition evaluates to 0 (falsy), so their bodies never execute.
if 23 - 23: i11iIiiIii - IiII - I1ii11iIi11i + I1ii11iIi11i % I1IiiI
if 79 - 79: II111iiii / OoooooooOO
if 35 - 35: i1IIi + IiII + II111iiii % OOooOOo
if 25 - 25: I11i + i11iIiiIii + O0 - Ii1I
if 69 - 69: I11i . OoOoOO00 / OOooOOo / i1IIi . II111iiii
if 17 - 17: I1Ii111
if 2 - 2: O0 % OoOoOO00 + oO0o
if 24 - 24: iII111i + iII111i - OoooooooOO % OoooooooOO * O0
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
    """Decode an Encapsulated Control Message and hand the inner Map-Request on.

    Drops the packet (with a log line) when the ECM or the inner control
    header cannot be decoded, or when the inner message is not a Map-Request.
    """
    ecm = lisp_ecm(0)
    packet = ecm.decode(packet)
    if packet is None:
        lprint("Could not decode ECM packet")
        return

    ecm.print_ecm()

    header = lisp_control_header()
    if header.decode(packet) is None:
        lprint("Could not decode control header")
        return

    packet_type = header.type
    # Only Map-Requests are expected inside an ECM.
    if packet_type != LISP_MAP_REQUEST:
        lprint("Received ECM without Map-Request inside")
        return

    inner_sport = ecm.udp_sport
    lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
                             ecm.source, inner_sport, ecm.ddt, -1)
    return
# NOTE(review): obfuscation filler — each `if <n> - <n>:` condition is always
# 0 (falsy), so none of these statements ever execute.
if 83 - 83: I1Ii111 * II111iiii
if 28 - 28: I11i - Oo0Ooo + iIii1I11I1II1 + O0 * Ii1I + I1IiiI
if 13 - 13: iII111i
if 42 - 42: I1Ii111 - I1IiiI % I1IiiI * I1IiiI
if 70 - 70: O0 / I1IiiI / I1IiiI
if 71 - 71: OOooOOo - Oo0Ooo + IiII * oO0o
if 90 - 90: OoOoOO00 * I1ii11iIi11i
if 16 - 16: i1IIi - OoO0O00
if 61 - 61: o0oOOo0O0Ooo + OoOoOO00 - ooOoO0o + ooOoO0o % ooOoO0o % II111iiii
if 16 - 16: I1IiiI . Ii1I
def lisp_send_map_register ( lisp_sockets , packet , map_register , ms ) :
if 80 - 80: OOooOOo * O0 / iIii1I11I1II1 / IiII / OoOoOO00
if 15 - 15: I1ii11iIi11i * iII111i + i11iIiiIii
if 68 - 68: i1IIi / oO0o * I1ii11iIi11i - OoOoOO00 + Oo0Ooo / O0
if 1 - 1: ooOoO0o - Oo0Ooo + I1Ii111
if 90 - 90: I1Ii111 * O0 . iII111i - Oo0Ooo % iIii1I11I1II1
if 7 - 7: I1ii11iIi11i % o0oOOo0O0Ooo % O0 % iIii1I11I1II1
if 10 - 10: OoooooooOO - iII111i . i1IIi % oO0o . OoooooooOO + OOooOOo
oO0o0 = ms . map_server
if ( lisp_decent_push_configured and oO0o0 . is_multicast_address ( ) and
( ms . map_registers_multicast_sent == 1 or ms . map_registers_sent == 1 ) ) :
oO0o0 = copy . deepcopy ( oO0o0 )
oO0o0 . address = 0x7f000001
I11i1iIiiIiIi = bold ( "Bootstrap" , False )
i11ii = ms . map_server . print_address_no_iid ( )
lprint ( "{} mapping system for peer-group {}" . format ( I11i1iIiiIiIi , i11ii ) )
if 59 - 59: I1IiiI * OoooooooOO % OOooOOo / I11i
if 77 - 77: II111iiii - IiII % OOooOOo
if 22 - 22: OoooooooOO / oO0o
if 78 - 78: oO0o * I11i . i1IIi % i1IIi + i1IIi / OOooOOo
if 66 - 66: OoooooooOO % o0oOOo0O0Ooo / I11i * I1Ii111
if 12 - 12: I1Ii111
packet = lisp_compute_auth ( packet , map_register , ms . password )
if 17 - 17: I1Ii111 % oO0o + O0
if 15 - 15: o0oOOo0O0Ooo - OoooooooOO % ooOoO0o % oO0o / i11iIiiIii / Oo0Ooo
if 59 - 59: iII111i + O0 - I1ii11iIi11i * I1ii11iIi11i + iIii1I11I1II1
if 41 - 41: iIii1I11I1II1 . O0 - ooOoO0o / OoOoOO00 % iIii1I11I1II1 + IiII
if 23 - 23: OoOoOO00 + ooOoO0o . i11iIiiIii
if ( ms . ekey != None ) :
iI1IiI = ms . ekey . zfill ( 32 )
i1Oo = "0" * 8
o0o0oO0OOO = chacha . ChaCha ( iI1IiI , i1Oo ) . encrypt ( packet [ 4 : : ] )
packet = packet [ 0 : 4 ] + o0o0oO0OOO
oOo = bold ( "Encrypt" , False )
lprint ( "{} Map-Register with key-id {}" . format ( oOo , ms . ekey_id ) )
if 39 - 39: OoOoOO00 - I1ii11iIi11i / I1Ii111
if 48 - 48: IiII - oO0o + I11i % o0oOOo0O0Ooo
oOOOo = ""
if ( lisp_decent_pull_xtr_configured ( ) ) :
oOOOo = ", decent-index {}" . format ( bold ( ms . dns_name , False ) )
if 16 - 16: OoOoOO00 * iII111i . O0
if 60 - 60: IiII . I11i * Oo0Ooo . i1IIi
lprint ( "Send Map-Register to map-server {}{}{}" . format ( oO0o0 . print_address ( ) , ", ms-name '{}'" . format ( ms . ms_name ) , oOOOo ) )
| |
links:
if link.get('title', '') == 'Opendap':
opendap_url = link.get('href', None)
if not opendap_url:
continue
date_property = properties.get('date', None)
if date_property:
split_date = date_property.split('/')
# remove trailing symbols from times
start_time = datetime.strptime(split_date[0].split('.')[0].split('+')[0],
_TIMESTAMP_FORMAT)
end_time = datetime.strptime(split_date[1].split('.')[0].split('+')[0],
_TIMESTAMP_FORMAT)
else:
title = properties.get('title', None)
if title:
start_time, end_time = get_timestrings_from_string(title)
if start_time:
start_time = datetime.strptime(start_time, _TIMESTAMP_FORMAT)
if end_time:
end_time = datetime.strptime(end_time, _TIMESTAMP_FORMAT)
else:
end_time = start_time
if start_time:
start_time = pd.Timestamp(datetime.strftime(start_time, _TIMESTAMP_FORMAT))
end_time = pd.Timestamp(datetime.strftime(end_time, _TIMESTAMP_FORMAT))
features.append((start_time, end_time, opendap_url))
def get_time_ranges_from_data(self, dataset_name: str, start_time: str, end_time: str) -> \
        List[Tuple[datetime, datetime]]:
    """Blocking facade: run :meth:`_get_time_ranges_from_data` in a session."""
    return self._run_with_session(
        self._get_time_ranges_from_data, dataset_name, start_time, end_time)
async def _get_time_ranges_from_data(self, session, dataset_name: str, start_time: str,
                                     end_time: str) -> List[Tuple[datetime, datetime]]:
    """Return the (start, end) times of every .nc feature of ``dataset_name``
    between ``start_time`` and ``end_time``."""
    dataset_id = await self._get_dataset_id(session, dataset_name)
    query = dict(parentIdentifier=dataset_id,
                 startDate=start_time,
                 endDate=end_time,
                 drsId=dataset_name,
                 fileFormat='.nc')
    features = await self._get_feature_list(session, query)
    # The first two entries of a feature tuple are its start and end time.
    return [feature[0:2] for feature in features]
def get_dataset_id(self, dataset_name: str) -> str:
    """Return the dataset's identifier (its uuid, or fid as fallback); blocking."""
    return self._run_with_session(self._get_dataset_id, dataset_name)
async def _get_dataset_id(self, session, dataset_name: str) -> str:
    """Return the dataset's 'uuid' entry, falling back to its 'fid'."""
    await self._ensure_in_data_sources(session, [dataset_name])
    data_source = self._data_sources[dataset_name]
    return data_source.get('uuid', data_source['fid'])
async def _get_opendap_url(self, session, request: Dict):
    """Return the opendap URL of the first matching .nc feature, or None."""
    request['fileFormat'] = '.nc'
    feature_list = await self._get_feature_list(session, request)
    if feature_list:
        # The third element of a feature tuple is its opendap URL.
        return feature_list[0][2]
    return None
def get_data_chunk(self, request: Dict, dim_indexes: Tuple) -> Optional[bytes]:
    """Blocking facade for :meth:`_get_data_chunk`."""
    return self._run_with_session(self._get_data_chunk, request, dim_indexes)
async def _get_data_chunk(self, session, request: Dict, dim_indexes: Tuple) -> Optional[bytes]:
    """Read one chunk of the first requested variable via opendap.

    Returns the chunk as raw bytes, or None when the opendap URL, the
    dataset, or the data itself cannot be obtained.
    """
    var_name = request['varNames'][0]
    opendap_url = await self._get_opendap_url(session, request)
    if not opendap_url:
        return None
    dataset = await self._get_opendap_dataset(session, opendap_url)
    if not dataset:
        return None
    # Cast to the data type recorded for this variable in the data sources.
    await self._ensure_all_info_in_data_sources(session, [request.get('drsId')])
    var_infos = self._data_sources[request['drsId']].get('variable_infos', {})
    data_type = var_infos.get(var_name, {}).get('data_type')
    data = await self._get_data_from_opendap_dataset(dataset, session, var_name, dim_indexes)
    if data is None:
        return None
    chunk_array = np.array(data, copy=False, dtype=data_type)
    return chunk_array.flatten().tobytes()
async def _fetch_data_source_list_json(self, session, base_url, query_args,
                                       max_wanted_results=100000) -> Dict:
    """Collect opensearch features into a catalogue keyed by feature identifier."""

    def _extender(inner_catalogue: dict, feature_list: List[Dict]):
        # Features without an identifier cannot be keyed and are skipped.
        for fc in feature_list:
            fc_id = fc.get("properties", {}).get("identifier", None)
            if fc_id:
                inner_catalogue[fc_id] = _get_feature_dict_from_feature(fc)

    catalogue = {}
    await self._fetch_opensearch_feature_list(session, base_url, catalogue, _extender,
                                              query_args, max_wanted_results)
    return catalogue
async def _fetch_opensearch_feature_list(self, session, base_url, extension, extender,
                                         query_args, max_wanted_results=100000):
    """
    Return JSON value read from Opensearch web service.

    Pages through the endpoint; each page of features is handed to
    ``extender(extension, feature_list)``, so the caller controls how the
    results are accumulated in ``extension``.
    :return:
    """
    start_page = 1
    initial_maximum_records = min(1000, max_wanted_results)
    maximum_records = 10000
    # The first request doubles as a probe for the total number of results.
    total_results = await self._fetch_opensearch_feature_part_list(session, base_url,
                                                                   query_args, start_page,
                                                                   initial_maximum_records,
                                                                   extension, extender,
                                                                   None, None)
    # Everything fit into the probe request, or the caller wants few results.
    if total_results < initial_maximum_records or max_wanted_results < 1000:
        return
    # num_results = maximum_records
    num_results = 0
    # Refetch everything below; drop the probe's partial results first.
    extension.clear()
    while num_results < total_results:
        if 'startDate' in query_args and 'endDate' in query_args:
            # we have to clear the extension of any previous values to avoid duplicate values
            # extension.clear()
            # NOTE(review): pop() mutates the caller-supplied query_args, so
            # the date filters are gone after the first loop iteration —
            # confirm this single pass over date sub-ranges is intended.
            start_time = datetime.strptime(query_args.pop('startDate'), _TIMESTAMP_FORMAT)
            end_time = datetime.strptime(query_args.pop('endDate'), _TIMESTAMP_FORMAT)
            # Split the full period into sub-ranges of roughly <= 1000 results.
            num_days_per_delta = \
                max(1,
                    int(np.ceil((end_time - start_time).days /
                                (total_results / 1000))))
            # seconds=-1 keeps adjacent sub-ranges from overlapping at the boundary.
            delta = relativedelta(days=num_days_per_delta, seconds=-1)
            tasks = []
            current_time = start_time
            while current_time < end_time:
                task_start = current_time.strftime(_TIMESTAMP_FORMAT)
                current_time += delta
                if current_time > end_time:
                    current_time = end_time
                task_end = current_time.strftime(_TIMESTAMP_FORMAT)
                tasks.append(self._fetch_opensearch_feature_part_list(session, base_url,
                                                                      query_args, start_page,
                                                                      maximum_records,
                                                                      extension,
                                                                      extender,
                                                                      task_start, task_end))
            await asyncio.gather(*tasks)
            num_results = total_results
        else:
            tasks = []
            # do not have more than 4 open connections at the same time
            while len(tasks) < 4 and num_results < total_results:
                tasks.append(self._fetch_opensearch_feature_part_list(session, base_url,
                                                                      query_args, start_page,
                                                                      maximum_records,
                                                                      extension,
                                                                      extender, None, None))
                start_page += 1
                num_results += maximum_records
            await asyncio.gather(*tasks)
async def _fetch_opensearch_feature_part_list(
        self, session, base_url, query_args, start_page, maximum_records,
        extension, extender, start_date, end_date
) -> int:
    """Fetch one opensearch result page and feed it to ``extender``.

    Args:
        session: The aiohttp session to request with.
        base_url: The opensearch endpoint.
        query_args: Base query parameters; paging args are added on top.
        start_page: 1-based result page to request.
        maximum_records: Page size.
        extension: Accumulator passed through to ``extender``.
        extender: Callable ``(extension, feature_list)`` or None.
        start_date / end_date: Optional date filters added to the query.

    Returns:
        int: The server-reported ``totalResults``, or 0 when all attempts failed.
    """
    paging_query_args = dict(query_args or {})
    paging_query_args.update(startPage=start_page,
                             maximumRecords=maximum_records,
                             httpAccept='application/geo+json')
    if start_date:
        paging_query_args.update(startDate=start_date)
    if end_date:
        paging_query_args.update(endDate=end_date)
    url = base_url + '?' + urllib.parse.urlencode(paging_query_args)
    # The number of retry attempts scales with the page index.
    num_reattempts = start_page * 2
    attempt = 0
    while attempt < num_reattempts:
        resp = await self.get_response(session, url)
        if resp:
            json_text = await resp.read()
            json_dict = json.loads(json_text.decode('utf-8'))
            if extender:
                feature_list = json_dict.get("features", [])
                extender(extension, feature_list)
            return json_dict['totalResults']
        attempt += 1
        if 'startDate' in paging_query_args and \
                'endDate' in paging_query_args:
            _LOG.debug(f'Did not read page {start_page} with start date '
                       f'{paging_query_args["startDate"]} and '
                       f'end date {paging_query_args["endDate"]} at '
                       f'attempt # {attempt}')
        else:
            _LOG.debug(f'Did not read page {start_page} '
                       f'at attempt {attempt}')
        # BUG FIX: time.sleep() blocks the event loop and stalls every
        # concurrent page fetch; use the non-blocking asyncio sleep instead.
        await asyncio.sleep(4)
    return 0
async def _set_variable_infos(self, opensearch_url: str, dataset_id: str,
                              dataset_name: str, session, data_source):
    """Populate ``data_source`` with 'dimensions', 'variable_infos',
    'attributes' and 'time_dimension_size', derived from one representative
    feature of the dataset."""
    attributes = {}
    dimensions = {}
    variable_infos = {}
    # time_dimension_size starts out as the total number of .nc files.
    feature, time_dimension_size = \
        await self._fetch_feature_and_num_nc_files_at(session,
                                                      opensearch_url,
                                                      dict(parentIdentifier=dataset_id,
                                                           drsId=dataset_name),
                                                      1)
    # we need to do this to determine whether we are using the old
    # or the new version of the odp
    if 'uuid' not in data_source or data_source['uuid'] == data_source['fid']:
        time_dimension_size = data_source['num_files']
    if feature is not None:
        variable_infos, attributes = \
            await self._get_variable_infos_from_feature(feature, session)
        # Derive dimension sizes from the variables' shapes; the first
        # variable that mentions a dimension wins.
        for variable_info in variable_infos:
            for index, dimension in enumerate(variable_infos[variable_info]['dimensions']):
                if dimension not in dimensions:
                    dimensions[dimension] = variable_infos[variable_info]['shape'][index]
        # Total time steps = number of files * 'time' length per file.
        if 'time' in dimensions:
            time_dimension_size *= dimensions['time']
    data_source['dimensions'] = dimensions
    data_source['variable_infos'] = variable_infos
    data_source['attributes'] = attributes
    data_source['time_dimension_size'] = time_dimension_size
async def _fetch_feature_and_num_nc_files_at(self, session, base_url, query_args, index) -> \
        Tuple[Optional[Dict], int]:
    """Fetch one feature plus the total .nc file count at result page ``index``.

    Two records are requested and the second is preferred, because the first
    and the last file may use a different time chunking than the rest.
    """
    params = dict(query_args or {})
    params.update(startPage=index,
                  maximumRecords=2,
                  httpAccept='application/geo+json',
                  fileFormat='.nc')
    url = base_url + '?' + urllib.parse.urlencode(params)
    resp = await self.get_response(session, url)
    if not resp:
        return None, 0
    json_dict = json.loads((await resp.read()).decode('utf-8'))
    feature_list = json_dict.get("features", [])
    total_results = json_dict.get("totalResults", 0)
    if len(feature_list) > 1:
        return feature_list[1], total_results
    if feature_list:
        return feature_list[0], total_results
    return None, 0
async def _fetch_meta_info(self,
                           session,
                           datasource_id: str,
                           odd_url: str,
                           metadata_url: str) -> Dict:
    """Assemble a metadata dict from the odd document, the descxml document
    and the drs metadata, then harmonize singular/plural field names."""
    meta_info_dict = {}
    if odd_url:
        meta_info_dict = await self._extract_metadata_from_odd_url(session, odd_url)
    if metadata_url:
        desc_metadata = await self._extract_metadata_from_descxml_url(session, metadata_url)
        # ODD values win; descxml only fills fields not already present.
        for key, value in desc_metadata.items():
            meta_info_dict.setdefault(key, value)
    await self._set_drs_metadata(session, datasource_id, meta_info_dict)
    for singular, plural in (('file_format', 'file_formats'),
                             ('platform_id', 'platform_ids'),
                             ('sensor_id', 'sensor_ids'),
                             ('processing_level', 'processing_levels'),
                             ('time_frequency', 'time_frequencies')):
        _harmonize_info_field_names(meta_info_dict, singular, plural)
    return meta_info_dict
async def _set_drs_metadata(self, session, datasource_id, metainfo_dict):
    """Add per-drs 'variables' and 'uuids' mappings to ``metainfo_dict``."""
    data_source_list = await self._fetch_data_source_list_json(
        session, OPENSEARCH_CEDA_URL, {'parentIdentifier': datasource_id},
        max_wanted_results=20)
    for data_source_value in data_source_list.values():
        drs_id = data_source_value.get('title', 'All Files')
        # The aggregate 'All Files' entry carries no drs-specific metadata.
        if drs_id == 'All Files':
            continue
        variables = data_source_value.get('variables', None)
        if variables:
            metainfo_dict.setdefault('variables', {})[drs_id] = variables
        uuid = data_source_value.get('uuid', None)
        if uuid:
            metainfo_dict.setdefault('uuids', {})[drs_id] = uuid
async def _extract_metadata_from_descxml_url(self, session, descxml_url: str = None) -> dict:
    """Download a descxml document and parse metadata from it; {} on failure."""
    if not descxml_url:
        return {}
    resp = await self.get_response(session, descxml_url)
    if not resp:
        return {}
    descxml = etree.XML(await resp.read())
    try:
        return _extract_metadata_from_descxml(descxml)
    except etree.ParseError:
        _LOG.info(f'Cannot read metadata from {descxml_url} due to parsing error.')
        return {}
async def _extract_metadata_from_odd_url(self, session: aiohttp.ClientSession,
                                         odd_url: str = None) -> dict:
    """Download an opensearch description document and parse metadata from it;
    returns {} when no URL is given or the download fails."""
    if not odd_url:
        return {}
    resp = await self.get_response(session, odd_url)
    if not resp:
        return {}
    return _extract_metadata_from_odd(etree.XML(await resp.read()))
def _determine_fill_value(self, dtype):
if np.issubdtype(dtype, np.integer):
return np.iinfo(dtype).max
if np.issubdtype(dtype, np.inexact):
return np.nan
async def _get_variable_infos_from_feature(self, feature: dict, session) -> (dict, dict):
    """Open the feature's opendap endpoint and collect per-variable metadata.

    Returns a tuple ``(variable_infos, global_attributes)``; both are empty
    dicts when the opendap dataset cannot be opened.
    """
    feature_info = _extract_feature_info(feature)
    opendap_url = f"{feature_info[4]['Opendap']}"
    dataset = await self._get_opendap_dataset(session, opendap_url)
    if not dataset:
        _LOG.warning(f'Could not extract information about variables and attributes '
                     f'from {opendap_url}')
        return {}, {}
    variable_infos = {}
    for key in dataset.keys():
        # Normalize variable names: opendap may deliver '.' escaped as '%2E'.
        fixed_key = key.replace('%2E', '_').replace('.', '_')
        data_type = dataset[key].dtype.name
        variable_infos[fixed_key] = copy.deepcopy(dataset[key].attributes)
        variable_infos[fixed_key]['orig_data_type'] = data_type
        if '_FillValue' in variable_infos[fixed_key]:
            variable_infos[fixed_key]['fill_value'] = variable_infos[fixed_key]['_FillValue']
            variable_infos[fixed_key].pop('_FillValue')
        else:
            # No declared fill value: widen the dtype (if a wider one is
            # known) so a sentinel can be assigned via _determine_fill_value.
            if data_type in _DTYPES_TO_DTYPES_WITH_MORE_BYTES:
                data_type = _DTYPES_TO_DTYPES_WITH_MORE_BYTES[data_type]
                variable_infos[fixed_key]['fill_value'] = \
                    self._determine_fill_value(np.dtype(data_type))
            else:
                warnings.warn(f'Variable "{fixed_key}" has no fill value, '
                              f'cannot set one. For parts where no data is'
                              f'available you will see random values. This'
                              f'is usually the case when data is missing '
                              f'for a time step.',
                              category=CciOdpWarning)
        if '_ChunkSizes' in variable_infos[fixed_key]:
            # '_ChunkSizes' may be a scalar int or one entry per dimension.
            variable_infos[fixed_key]['chunk_sizes'] = variable_infos[fixed_key]['_ChunkSizes']
            if type(variable_infos[fixed_key]['chunk_sizes']) == int:
                variable_infos[fixed_key]['file_chunk_sizes'] = \
                    variable_infos[fixed_key]['chunk_sizes']
            else:
                variable_infos[fixed_key]['file_chunk_sizes'] = \
                    copy.deepcopy(variable_infos[fixed_key]['chunk_sizes'])
            variable_infos[fixed_key].pop('_ChunkSizes')
        variable_infos[fixed_key]['data_type'] = data_type
        variable_infos[fixed_key]['dimensions'] = list(dataset[key].dimensions)
        variable_infos[fixed_key]['file_dimensions'] = \
            copy.deepcopy(variable_infos[fixed_key]['dimensions'])
        variable_infos[fixed_key]['size'] = dataset[key].size
        variable_infos[fixed_key]['shape'] = list(dataset[key].shape)
    return variable_infos, dataset.attributes
def get_opendap_dataset(self, url: str):
    """Blocking facade for :meth:`_get_opendap_dataset`."""
    return self._run_with_session(self._get_opendap_dataset, url)
async def _get_result_dict(self, session, url: str):
    """Fetch and cache the 'dds' and 'das' documents for an opendap URL."""
    if url in self._result_dicts:
        return self._result_dicts[url]
    res_dict = {}
    await asyncio.gather(
        self._get_content_from_opendap_url(url, 'dds', res_dict, session),
        self._get_content_from_opendap_url(url, 'das', res_dict, session))
    if 'das' in res_dict:
        # Unbounded valid ranges come back as +/-Infinity, which the das
        # parser cannot digest; strip those lines before caching.
        das = res_dict['das']
        das = das.replace(' Float32 valid_min -Infinity;\n', '')
        res_dict['das'] = das.replace(' Float32 valid_max Infinity;\n', '')
    self._result_dicts[url] = res_dict
    return res_dict
async def _get_opendap_dataset(self, session, url: str):
res_dict = await self._get_result_dict(session, url)
if 'dds' not in res_dict or 'das' not in res_dict:
_LOG.warning('Could not open opendap url. No dds or das file provided.')
return
if res_dict['dds'] == '':
_LOG.warning('Could not open opendap | |
import copy
import datetime
import re
from wikibaseintegrator.wbi_config import config
from wikibaseintegrator.wbi_jsonparser import JsonParser
class BaseDataType:
    """
    The base class for all Wikibase data types, they inherit from it
    """
    # Wikibase datatype identifier written into each snak; subclasses override.
    DTYPE = 'base-data-type'
    # SPARQL template for finding statements by value; formatted with the
    # wikibase URL ({wb_url}), property id ({pid}) and value ({value}).
    sparql_query = '''
SELECT * WHERE {{
?item_id <{wb_url}/prop/{pid}> ?s .
?s <{wb_url}/prop/statement/{pid}> '{value}' .
}}
'''
def __init__(self, value, prop_nr, **kwargs):
    """
    Constructor, will be called by all data types.
    :param value: Data value of the Wikibase data snak
    :type value: str or int or tuple
    :param prop_nr: The property number a Wikibase snak belongs to
    :type prop_nr: A string with a prefixed 'P' and several digits e.g. 'P715' (Drugbank ID) or an int
    :param data_type: The Wikibase data type declaration of this snak
    :type data_type: str
    :param snak_type: The snak type of the Wikibase data snak, three values possible, depending if the value is a known (value), not existent (novalue) or
        unknown (somevalue). See Wikibase documentation.
    :type snak_type: a str of either 'value', 'novalue' or 'somevalue'
    :param references: A one level nested list with reference Wikibase snaks of base type BaseDataType,
        e.g. references=[[<BaseDataType>, <BaseDataType>], [<BaseDataType>]]
        This will create two references, the first one with two statements, the second with one
    :type references: A one level nested list with instances of BaseDataType or children of it.
    :param qualifiers: A list of qualifiers for the Wikibase mainsnak
    :type qualifiers: A list with instances of BaseDataType or children of it.
    :param is_reference: States if the snak is a reference, mutually exclusive with qualifier
    :type is_reference: boolean
    :param is_qualifier: States if the snak is a qualifier, mutually exclusive with reference
    :type is_qualifier: boolean
    :param rank: The rank of a Wikibase mainsnak, should determine the status of a value
    :type rank: A string of one of three allowed values: 'normal', 'deprecated', 'preferred'
    :param check_qualifier_equality: When comparing two objects, test if qualifiers are equals between them. Default to true.
    :type check_qualifier_equality: boolean
    :param if_exists: Replace or append the statement. You can force an append if the statement already exists.
    :type if_exists: A string of one of three allowed values: 'REPLACE', 'APPEND', 'FORCE_APPEND', 'KEEP'
    :return:
    """
    self.value = value
    # Optional settings are popped from kwargs so subclasses can forward
    # remaining keyword arguments without clashes.
    self.data_type = kwargs.pop('data_type', self.DTYPE)
    self.snak_type = kwargs.pop('snak_type', 'value')
    self.references = kwargs.pop('references', None)
    self.qualifiers = kwargs.pop('qualifiers', None)
    self.is_reference = kwargs.pop('is_reference', None)
    self.is_qualifier = kwargs.pop('is_qualifier', None)
    self.rank = kwargs.pop('rank', 'normal')
    self.check_qualifier_equality = kwargs.pop('check_qualifier_equality', True)
    self.if_exists = kwargs.pop('if_exists', 'REPLACE')
    self._statement_ref_mode = 'KEEP_GOOD'
    # Normalize references: a single snak is shorthand for [[snak]].
    if not self.references:
        self.references = []
    else:
        if isinstance(self.references, BaseDataType):
            self.references = [[self.references]]
        for ref_list in self.references:
            # NOTE(review): rebinding ref_list only affects the loop-local
            # name — a bare BaseDataType inside self.references is validated
            # here but NOT wrapped into a list in the stored attribute.
            # Confirm this is intended.
            if isinstance(ref_list, BaseDataType):
                ref_list = [ref_list]
            for reference in ref_list:
                if not isinstance(reference, BaseDataType):
                    raise ValueError('A reference must be an instance of class BaseDataType.')
                if reference.is_reference is False:
                    raise ValueError('A reference can\'t be declared as is_reference=False')
                elif reference.is_reference is None:
                    reference.is_reference = True
    # Normalize qualifiers: a single snak is shorthand for [snak].
    if not self.qualifiers:
        self.qualifiers = []
    else:
        if isinstance(self.qualifiers, BaseDataType):
            self.qualifiers = [self.qualifiers]
        for qualifier in self.qualifiers:
            if not isinstance(qualifier, BaseDataType):
                raise ValueError('A qualifier must be an instance of class BaseDataType.')
            if qualifier.is_qualifier is False:
                raise ValueError('A qualifier can\'t be declared as is_qualifier=False')
            elif qualifier.is_qualifier is None:
                qualifier.is_qualifier = True
    # Accept either an int or a (optionally 'P'-prefixed) digit string.
    if isinstance(prop_nr, int):
        self.prop_nr = 'P' + str(prop_nr)
    else:
        pattern = re.compile(r'^P?([0-9]+)$')
        matches = pattern.match(prop_nr)
        if not matches:
            raise ValueError(f'Invalid prop_nr, format must be "P[0-9]+", got {prop_nr}')
        else:
            self.prop_nr = 'P' + str(matches.group(1))
    # Internal ID and hash are issued by the Wikibase instance
    self.id = ''
    self.hash = ''
    self.json_representation = {
        'snaktype': self.snak_type,
        'property': self.prop_nr,
        'datavalue': {},
        'datatype': self.data_type
    }
    # Validate the collected settings last, once everything is normalized.
    if self.snak_type not in ['value', 'novalue', 'somevalue']:
        raise ValueError(f'{self.snak_type} is not a valid snak type')
    if self.if_exists not in ['REPLACE', 'APPEND', 'FORCE_APPEND', 'KEEP']:
        raise ValueError(f'{self.if_exists} is not a valid if_exists value')
    if self.value is None and self.snak_type == 'value':
        raise ValueError('Parameter \'value\' can\'t be \'None\' if \'snak_type\' is \'value\'')
    if self.is_qualifier and self.is_reference:
        raise ValueError('A claim cannot be a reference and a qualifer at the same time')
    if (len(self.references) > 0 or len(self.qualifiers) > 0) and (self.is_qualifier or self.is_reference):
        raise ValueError('Qualifiers or references cannot have references or qualifiers')
def has_equal_qualifiers(self, other):
    """Return True when this claim and ``other`` carry matching qualifiers.

    The counts must agree, and every qualifier of this claim must equal at
    least one qualifier of ``other``.
    """
    # Work on deep copies so comparison can never touch the live qualifiers.
    own = copy.deepcopy(self.get_qualifiers())
    theirs = copy.deepcopy(other.get_qualifiers())
    if len(own) != len(theirs):
        return False
    return all(any(mine == candidate for candidate in theirs) for mine in own)
def __eq__(self, other):
equal_qualifiers = self.has_equal_qualifiers(other)
equal_values = self.get_value() == other.get_value() and self.get_prop_nr() == other.get_prop_nr()
if not (self.check_qualifier_equality and other.check_qualifier_equality) and equal_values:
return True
elif equal_values and equal_qualifiers:
return True
else:
return False
    @property
    def statement_ref_mode(self):
        # Current reference-handling mode for this statement (see the setter
        # for the allowed values).
        return self._statement_ref_mode
    @statement_ref_mode.setter
    def statement_ref_mode(self, value):
        """Set the reference mode for a statement, always overrides the global reference state."""
        # Only a fixed set of modes is understood by the write logic; reject
        # anything else early with the full list in the error message.
        valid_values = ['STRICT_KEEP', 'STRICT_KEEP_APPEND', 'STRICT_OVERWRITE', 'KEEP_GOOD', 'CUSTOM']
        if value not in valid_values:
            raise ValueError('Not an allowed reference mode, allowed values {}'.format(' '.join(valid_values)))
        self._statement_ref_mode = value
    def get_value(self):
        """Return the raw value of this claim."""
        return self.value
    def get_sparql_value(self):
        """Return the value as used in SPARQL queries; this base
        implementation simply returns the raw value."""
        return self.value
def set_value(self, value):
if value is None and self.snak_type not in {'novalue', 'somevalue'}:
raise ValueError("If 'value' is None, snak_type must be novalue or somevalue")
if self.snak_type in {'novalue', 'somevalue'}:
del self.json_representation['datavalue']
elif 'datavalue' not in self.json_representation:
self.json_representation['datavalue'] = {}
self.value = value
    def get_references(self):
        """Return the list of reference blocks attached to this claim."""
        return self.references
def set_references(self, references):
if len(references) > 0 and (self.is_qualifier or self.is_reference):
raise ValueError("Qualifiers or references cannot have references")
# Force clean duplicate references
temp_references = []
for reference in references:
if reference not in temp_references:
temp_references.append(reference)
references = temp_references
self.references = references
    def get_qualifiers(self):
        """Return the list of qualifiers attached to this claim."""
        return self.qualifiers
    def set_qualifiers(self, qualifiers):
        """Attach qualifier snaks to this claim."""
        # TODO: introduce a check to prevent duplicate qualifiers, those are not allowed in Wikibase
        # Qualifiers and references are plain snaks and must not nest further.
        if len(qualifiers) > 0 and (self.is_qualifier or self.is_reference):
            raise ValueError("Qualifiers or references cannot have qualifiers")
        self.qualifiers = qualifiers
    def get_rank(self):
        """Return the statement rank, or '' for qualifiers/references
        (which carry no rank in the Wikibase data model)."""
        if self.is_qualifier or self.is_reference:
            return ''
        else:
            return self.rank
def set_rank(self, rank):
if self.is_qualifier or self.is_reference:
raise ValueError("References or qualifiers do not have ranks")
valid_ranks = ['normal', 'deprecated', 'preferred']
if rank not in valid_ranks:
raise ValueError(f"{rank} not a valid rank")
self.rank = rank
    def get_id(self):
        """Return the claim (statement) id assigned by the Wikibase instance."""
        return self.id
    def set_id(self, claim_id):
        """Set the claim (statement) id."""
        self.id = claim_id
    def set_hash(self, claim_hash):
        """Set the snak hash issued by the Wikibase instance."""
        self.hash = claim_hash
    def get_hash(self):
        """Return the snak hash."""
        return self.hash
    def get_prop_nr(self):
        """Return the property number (e.g. 'P31')."""
        return self.prop_nr
    def set_prop_nr(self, prop_nr):
        """Set the property number; it must already carry the 'P' prefix."""
        if prop_nr[0] != 'P':
            raise ValueError("Invalid property number")
        self.prop_nr = prop_nr
    def get_json_representation(self):
        """Return the JSON structure for this claim in API form.

        For qualifiers/references the result is a ``{prop_nr: [snak]}``
        mapping; for main statements it is a full statement dict including
        rank, qualifiers, references and (when known) the statement id, or
        a removal marker when ``self.remove`` is set.
        """
        if self.is_qualifier or self.is_reference:
            tmp_json = {
                self.prop_nr: [self.json_representation]
            }
            # Only qualifiers carry their snak hash inline.
            if self.hash != '' and self.is_qualifier:
                self.json_representation.update({'hash': self.hash})
            return tmp_json
        else:
            # Assemble reference blocks: each block groups its snaks by property.
            ref_json = []
            for count, ref in enumerate(self.references):
                snaks_order = []
                snaks = {}
                ref_json.append({
                    'snaks': snaks,
                    'snaks-order': snaks_order
                })
                for sub_ref in ref:
                    prop_nr = sub_ref.get_prop_nr()
                    # set the hash for the reference block
                    if sub_ref.get_hash() != '':
                        ref_json[count].update({'hash': sub_ref.get_hash()})
                    tmp_json = sub_ref.get_json_representation()
                    # if more reference values with the same property number, append to its specific property list.
                    if prop_nr in snaks:
                        snaks[prop_nr].append(tmp_json[prop_nr][0])
                    else:
                        snaks.update(tmp_json)
                    snaks_order.append(prop_nr)
            # Group qualifiers by property number, preserving insertion order.
            qual_json = {}
            qualifiers_order = []
            for qual in self.qualifiers:
                prop_nr = qual.get_prop_nr()
                if prop_nr in qual_json:
                    qual_json[prop_nr].append(qual.get_json_representation()[prop_nr][0])
                else:
                    qual_json.update(qual.get_json_representation())
                qualifiers_order.append(qual.get_prop_nr())
            # The presence of a 'remove' attribute marks the claim for deletion.
            if hasattr(self, 'remove'):
                statement = {
                    'remove': ''
                }
            else:
                statement = {
                    'mainsnak': self.json_representation,
                    'type': 'statement',
                    'rank': self.rank
                }
            if qual_json:
                statement['qualifiers'] = qual_json
            if qualifiers_order:
                statement['qualifiers-order'] = qualifiers_order
            if ref_json:
                statement['references'] = ref_json
            if self.id != '':
                statement.update({'id': self.id})
            return statement
    @classmethod
    @JsonParser
    def from_json(cls, json_representation):
        """Construct an instance from its JSON representation.

        The actual parsing is performed by the ``JsonParser`` decorator;
        the body is intentionally empty.
        """
        pass
def equals(self, that, include_ref=False, fref=None):
"""
Tests for equality of two statements.
If comparing references, the order of the arguments matters!!!
self is the current statement, the next argument is the new statement.
Allows passing in a function to use to compare the references 'fref'. Default is equality.
fref accepts two arguments 'oldrefs' and 'newrefs', each of which are a list of references,
where each reference is a list of statements
"""
if not include_ref:
# return the result of BaseDataType.__eq__, which is testing for equality of value and qualifiers
return self == that
else:
if self != that:
return False
if fref is None:
return BaseDataType.refs_equal(self, that)
else:
return fref(self, that)
@staticmethod
def refs_equal(olditem, newitem):
"""
| |
<filename>fcts/servers.py
import typing
from utils import Zbot, MyContext
import time
import emoji
import copy
import discord
from discord.ext import commands
from math import ceil
# Option-name families: the "config change" command uses these lists to route
# a given option to the parser/validator that understands its value type
# (roles, booleans, text channels, emojis, ...).
roles_options = ["clear","slowmode","mute","kick","ban","warn","say","welcome_roles","muted_role",'partner_role','update_mentions','verification_role','voice_roles']
bool_options = ["enable_xp","anti_caps_lock","enable_fun","help_in_dm","compress_help"]
textchan_options = ["welcome_channel","bot_news","poll_channels","modlogs_channel","noxp_channels","partner_channel"]
vocchan_options = ["membercounter","voice_channel"]
category_options = ["voice_category"]
text_options = ["welcome","leave","levelup_msg","description","voice_channel_format"]
prefix_options = ['prefix']
emoji_option = ['vote_emojis', 'morpion_emojis']
numb_options = []
raid_options = ["anti_raid"]
xp_type_options = ['xp_type']
color_options = ['partner_color']
xp_rate_option = ['xp_rate']
levelup_channel_option = ["levelup_channel"]
ttt_display_option = ["ttt_display"]
class Servers(commands.Cog):
    """Cog in charge of all the bot configuration management for your server. As soon as an option is searched, modified or deleted, this cog will handle the operations."""
def __init__(self, bot: Zbot):
self.bot = bot
self.default_language = 'en'
self.embed_color = discord.Colour(0x3fb9ef)
self.log_color = 1793969
self.file = "servers"
self.raids_levels = ["None","Smooth","Careful","High","(╯°□°)╯︵ ┻━┻"]
self.table = 'servers_beta' if bot.beta else 'servers'
self.default_opt = {"rr_max_number":7,
"rss_max_number":10,
"roles_react_max_number":20,
"language":1,
"description":"",
"clear":"",
"slowmode":"",
"mute":"",
"kick":"",
"ban":"",
"warn":"",
"say":"",
"hunter":"",
"welcome_channel":'',
"welcome":"",
"leave":"",
"welcome_roles":"",
"bot_news":'',
"save_roles":0,
"poll_channels":"",
"modlogs_channel":"",
"enable_xp":0,
"levelup_msg":'',
"levelup_channel":'any',
"noxp_channels":'',
"xp_rate":1.0,
"xp_type":0,
"anti_caps_lock":0,
"enable_fun":1,
"prefix":'!',
"membercounter":"",
"anti_raid":0,
"vote_emojis":":thumbsup:;:thumbsdown:;",
"morpion_emojis":":red_circle:;:blue_circle:;",
"help_in_dm":0,
"muted_role":"",
"partner_channel":'',
"partner_color":10949630,
'partner_role':'',
'update_mentions':'',
'verification_role':'',
'voice_roles':'',
'voice_channel':'',
'voice_category':'',
'voice_channel_format':'{random}',
'compress_help':0,
'ttt_display': 2}
self.optionsList = ["prefix","language","description","clear","slowmode","mute","kick","ban","warn","say","welcome_channel","welcome","leave","welcome_roles","bot_news","update_mentions","poll_channels","partner_channel","partner_color","partner_role","modlogs_channel","verification_role","enable_xp","levelup_msg","levelup_channel","noxp_channels","xp_rate","xp_type","anti_caps_lock","enable_fun","membercounter","anti_raid","vote_emojis","morpion_emojis","help_in_dm","compress_help","muted_role","voice_roles","voice_channel","voice_category","voice_channel_format","ttt_display"]
self.membercounter_pending = {}
@commands.Cog.listener()
async def on_ready(self):
self.table = 'servers_beta' if self.bot.beta else 'servers'
async def get_bot_infos(self, botID: int):
"""Return every options of the bot"""
if not self.bot.database_online:
return list()
cnx = self.bot.cnx_frm
cursor = cnx.cursor(dictionary=True)
query = ("SELECT * FROM `bot_infos` WHERE `ID`={}".format(botID))
cursor.execute(query)
liste = list()
for x in cursor:
liste.append(x)
return liste
async def edit_bot_infos(self, botID: int, values=[()]):
if type(values)!=list:
raise ValueError
v = list()
cnx = self.bot.cnx_frm
cursor = cnx.cursor()
for x in values:
if isinstance(x, bool):
v.append("`{x[0]}`={x[1]}".format(x=x))
else:
v.append("""`{x[0]}`="{x[1]}" """.format(x=x))
query = ("UPDATE `bot_infos` SET {v} WHERE `ID`='{id}'".format(v=",".join(v),id=botID))
cursor.execute(query)
cnx.commit()
cursor.close()
return True
    async def get_languages(self, ignored_guilds: typing.List[int], return_dict: bool = False):
        """Return stats on used languages.

        Guilds present in the bot but missing from the table are counted as
        using the default language. Returns a dict {language: count} when
        return_dict is True, else a list of (language, count) tuples.
        """
        if not self.bot.database_online:
            return list()
        cnx = self.bot.cnx_frm
        cursor = cnx.cursor(dictionary=True)
        query = ("SELECT `language`,`ID` FROM `{}`".format(self.table))
        cursor.execute(query)
        liste = list()
        # Only count guilds the bot is currently in (minus the ignored ones).
        guilds = [x.id for x in self.bot.guilds if x.id not in ignored_guilds]
        for x in cursor:
            if x['ID'] in guilds:
                liste.append(x['language'])
        # Guilds without a DB row fall back to the default language.
        for _ in range(len(guilds)-len(liste)):
            liste.append(self.bot.get_cog('Languages').languages.index(self.default_language))
        # NOTE(review): the cursor is never closed here -- confirm intended.
        if return_dict:
            langs = dict()
            for e, l in enumerate(self.bot.get_cog('Languages').languages):
                langs[l] = liste.count(e)
        else:
            langs = list()
            for e, l in enumerate(self.bot.get_cog('Languages').languages):
                langs.append((l, liste.count(e)))
        return langs
    async def get_xp_types(self, ignored_guilds: typing.List[int], return_dict: bool = False):
        """Return stats on used xp types.

        Mirrors get_languages: guilds missing from the table count as using
        the default xp type. Returns {type: count} or [(type, count), ...].
        """
        if not self.bot.database_online:
            return list()
        cnx = self.bot.cnx_frm
        cursor = cnx.cursor(dictionary=True)
        query = ("SELECT `xp_type`,`ID` FROM `{}`".format(self.table))
        cursor.execute(query)
        liste = list()
        guilds = [x.id for x in self.bot.guilds if x.id not in ignored_guilds]
        for x in cursor:
            if x['ID'] in guilds:
                liste.append(x['xp_type'])
        # Guilds without a DB row fall back to the default xp type.
        for _ in range(len(guilds)-len(liste)):
            liste.append(self.default_opt['xp_type'])
        # NOTE(review): the cursor is never closed here -- confirm intended.
        if return_dict:
            types = dict()
            for e, l in enumerate(self.bot.get_cog('Xp').types):
                types[l] = liste.count(e)
        else:
            types = list()
            for e, l in enumerate(self.bot.get_cog('Xp').types):
                types.append((l, liste.count(e)))
        return types
async def staff_finder(self, user: discord.Member, option: str):
"""Check is user is part of a staff"""
if option not in roles_options:
raise TypeError
if await self.bot.get_cog('Admin').check_if_god(user):
return True
if not self.bot.database_online or not isinstance(user, discord.Member):
return False
staff = str(await self.get_option(user.guild.id,option)).split(";")
staff = [x for x in staff if len(x) > 10 and x.isnumeric()]
if len(staff) == 0:
return False
for r in user.roles:
if str(r.id) in staff:
return True
raise commands.CommandError("User doesn't have required roles")
async def get_option(self, ID: int, name: str) -> typing.Optional[str]:
"""return the value of an option
Return None if this option doesn't exist or if no value has been set"""
if isinstance(ID, discord.Guild):
ID = ID.id
elif ID is None or not self.bot.database_online:
return None
l = await self.get_server([name],criters=["ID="+str(ID)],Type=list)
if l == []:
return None
elif l[0][0] == '':
return self.default_opt[name]
else:
return l[0][0]
async def get_server(self, columns=[], criters=["ID > 1"], relation="AND", Type=dict):
"""return every options of a server"""
await self.bot.wait_until_ready()
if type(columns)!=list or type(criters)!=list:
raise ValueError
cnx = self.bot.cnx_frm
cursor = cnx.cursor(dictionary = (Type==dict))
if columns == []:
cl = "*"
else:
cl = "`"+"`,`".join(columns)+"`"
relation = " "+relation+" "
query = ("SELECT {} FROM `{}` WHERE {}".format(cl,self.table,relation.join(criters)))
cursor.execute(query)
liste = list()
for x in cursor:
if isinstance(x, dict):
for k, v in x.items():
if v == '':
x[k] = self.default_opt[k]
liste.append(x)
cursor.close()
return liste
async def modify_server(self, ID: int, values=[()]):
"""Update a server config in the database"""
if type(values)!=list:
raise ValueError
v = list()
v2 = dict()
cnx = self.bot.cnx_frm
cursor = cnx.cursor()
for e, x in enumerate(values):
v.append(f"`{x[0]}` = %(v{e})s")
v2[f'v{e}'] = x[1]
query = ("UPDATE `{t}` SET {v} WHERE `ID`='{id}'".format(t=self.table, v=",".join(v), id=ID))
cursor.execute(query, v2)
cnx.commit()
cursor.close()
return True
async def delete_option(self, ID: int, opt):
"""reset an option"""
if opt not in self.default_opt.keys():
raise ValueError
value = self.default_opt[opt]
if opt == 'language':
await self.bot.get_cog('Languages').change_cache(ID,value)
elif opt == 'prefix':
self.bot.get_cog('Utilities').update_prefix(ID,value)
return await self.modify_server(ID,values=[(opt,value)])
async def add_server(self, ID: int):
"""add a new server to the db"""
if isinstance(ID, str):
if not ID.isnumeric():
raise ValueError
cnx = self.bot.cnx_frm
cursor = cnx.cursor()
query = ("INSERT INTO `{}` (`ID`) VALUES ('{}')".format(self.table,ID))
cursor.execute(query)
cnx.commit()
return True
    async def is_server_exist(self, ID: int):
        """Check if a server is already in the db, creating its row if not."""
        # Reading the 'ID' column is a cheap existence probe.
        i = await self.get_option(ID,"ID")
        if i is None:
            g = self.bot.get_guild(ID)
            if g is None:
                raise Exception("Guild not found")
            # Announce newly-registered servers in the bot's log channel.
            emb = self.bot.get_cog("Embeds").Embed(desc="New server in the database :tada: `{}` ({})".format(g.name,g.id),color=self.log_color).update_timestamp()
            await self.bot.get_cog("Embeds").send([emb])
            return await self.add_server(ID)
        return True
async def delete_server(self, ID: int):
"""remove a server from the db"""
if not isinstance(ID, int):
raise ValueError
cnx = self.bot.cnx_frm
cursor = cnx.cursor()
query = ("DELETE FROM `{}` WHERE `ID`='{}'".format(self.table,ID))
cursor.execute(query)
cnx.commit()
cursor.close()
return True
    @commands.group(name='config')
    @commands.guild_only()
    async def sconfig_main(self, ctx: MyContext):
        """Function for setting the bot on a server
        ..Doc server.html#config-options"""
        if ctx.bot.database_online:
            await self.is_server_exist(ctx.guild.id)
        if ctx.invoked_subcommand is None:
            # No explicit subcommand: guess what the user meant and
            # re-dispatch the rewritten message through the bot.
            msg = copy.copy(ctx.message)
            subcommand_passed = ctx.message.content.replace(ctx.prefix+"config ","")
            if subcommand_passed is None:
                # NOTE(review): str.replace never returns None, so this branch
                # looks unreachable; an empty string falls into the final else.
                msg.content = ctx.prefix + "config help"
            elif subcommand_passed.isnumeric():
                # Bare number -> treat it as "config see <page>".
                msg.content = ctx.prefix + "config see " + subcommand_passed
            elif subcommand_passed.split(" ")[0] in self.optionsList:
                # "<option>" alone shows it; "<option> <value>" changes it.
                if len(subcommand_passed.split(" "))==1:
                    msg.content = ctx.prefix + "config see " + subcommand_passed
                else:
                    msg.content = ctx.prefix + "config change " + subcommand_passed
            else:
                msg.content = ctx.prefix + "config help"
            new_ctx = await self.bot.get_context(msg)
            await self.bot.invoke(new_ctx)
    @sconfig_main.command(name="help")
    @commands.cooldown(1, 2, commands.BucketType.guild)
    async def sconfig_help(self, ctx: MyContext):
        """Get help about this command"""
        # Translated help text; the last registered prefix is shown to the user.
        msg = await self.bot._(ctx.guild, "server.config-help", p=(await self.bot.get_prefix(ctx.message))[-1])
        await ctx.send(msg.format(ctx.guild.owner.name))
    @sconfig_main.command(name="del")
    @commands.cooldown(1, 2, commands.BucketType.guild)
    async def sconfig_del(self, ctx: MyContext, option: str):
        """Reset an option to zero"""
        # Requires the Manage Server permission (or bot-admin override).
        if not (ctx.channel.permissions_for(ctx.author).manage_guild or await self.bot.get_cog("Admin").check_if_god(ctx)):
            return await ctx.send(await self.bot._(ctx.guild.id, "server.need-manage-server"))
        if not ctx.bot.database_online:
            return await ctx.send(await self.bot._(ctx.guild.id,"cases.no_database"))
        await self.sconfig_del2(ctx, option)
@sconfig_main.command(name="change")
@commands.cooldown(1, 2, commands.BucketType.guild)
async def sconfig_change(self, ctx: MyContext, option:str, *, value: str):
"""Allows you to modify an option"""
if not (ctx.channel.permissions_for(ctx.author).manage_guild or await self.bot.get_cog("Admin").check_if_god(ctx)):
return await ctx.send(await self.bot._(ctx.guild.id, "server.need-manage-server"))
if not ctx.bot.database_online:
return await ctx.send(await self.bot._(ctx.guild.id,"cases.no_database"))
if value == 'del':
await self.sconfig_del2(ctx, option)
return
try:
if option in roles_options:
await self.conf_roles(ctx, option, value)
elif option in bool_options:
await self.conf_bool(ctx, option, value)
elif option in textchan_options:
await self.conf_textchan(ctx, option, value)
elif option in category_options:
await self.conf_category(ctx, option, value)
elif option in text_options:
await self.conf_text(ctx, option, value)
elif option in numb_options:
await self.conf_numb(ctx, option, value)
elif option in vocchan_options:
await self.conf_vocal(ctx, option, value)
elif option == "language":
await self.conf_lang(ctx, option, value)
elif option in prefix_options:
await self.conf_prefix(ctx, option, value)
elif option in raid_options:
await self.conf_raid(ctx, option, value)
elif option in emoji_option:
await self.conf_emoji(ctx, option, value)
elif option in xp_type_options:
await self.conf_xp_type(ctx, option, value)
elif option in color_options:
await self.conf_color(ctx, option, value)
elif option in xp_rate_option:
await self.conf_xp_rate(ctx, option, value)
elif option in levelup_channel_option:
await self.conf_levelup_chan(ctx, option, value)
elif option in ttt_display_option:
await self.conf_tttdisplay(ctx, option, value)
else:
await ctx.send(await self.bot._(ctx.guild.id, "server.option-notfound"))
return
except Exception as e:
await self.bot.get_cog("Errors").on_error(e,ctx)
await ctx.send(await self.bot._(ctx.guild.id, "server.internal-error"))
    async def sconfig_del2(self, ctx: MyContext, option: str):
        # Internal worker shared by "config del" and "config change <opt> del".
        try:
            t = await self.delete_option(ctx.guild.id,option)
            if t:
                msg = await self.bot._(ctx.guild.id, "server.value-deleted", option=option)
            else:
                msg = await self.bot._(ctx.guild.id, "server.internal-error")
            await ctx.send(msg)
            # Log the reset both to the log channel and to the local logger.
            m = "Reset option in server {}: {}".format(ctx.guild.id,option)
            emb = self.bot.get_cog("Embeds").Embed(desc=m,color=self.log_color).update_timestamp().set_author(ctx.guild.me)
            await self.bot.get_cog("Embeds").send([emb])
            self.bot.log.debug(m)
        except ValueError:
            # delete_option raises ValueError for unknown option names.
            await ctx.send(await self.bot._(ctx.guild.id, "server.option-notfound"))
        except Exception as e:
            await self.bot.get_cog("Errors").on_error(e,ctx)
            await ctx.send(await self.bot._(ctx.guild.id, "server.internal-error"))
async def send_embed(self, guild: discord.Guild, option: str, value: str):
| |
ln_option_full = ln_option.replace( '?', line_number ).split()
command = [ editor, *ln_option_full, epath ]
po = subprocess.Popen( command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL )
po.wait()
if compressed:
with open( epath ) as ifd, gzip.open( path.as_posix(), 'wt' ) as ofd:
for line in ifd:
ofd.write( line )
os.remove( epath )
else:
t = f"Text Editor for raw index in config file '{editor}' not found."
self.conf.do_popup( t )
return True
# -----------------------------------------------------
# Left-Click in main body of table.
elif click == 'Left':
if col_num < 3: # Ignore click in title or 'same' column
return True
# -----------------------------------------------------
# Ignore click in empty column
if src not in self.src_list:
return True
# -----------------------------------------------------
if not row[ 'tags' ]: # Click in column headers, show offset graph
file = self.fb.get_file_from_canonical( self.canonical )
if not file:
self.conf.do_nastygram( 'canonical2file', None )
return
fullpath = Path( self.fb.Music_File_Root, file )
if not fullpath.is_file():
self.conf.do_nastygram( 'music_file_root', fullpath )
return
page_count = self.pdf.get_page_count( fullpath )
self.show_offset_graph( src, local, page_count )
return True
# -----------------------------------------------------
contents = row['values'][int(col[1:])-1]
row_num = row[ 'tags' ][0]
title = self.table_data[ row_num ][ 0 ]
if not contents:
return True # Ignore click in empty cell
sheet, page = contents.split( '->' )
sheet=sheet.strip()
page=page.strip()
# ----------------------------------------------------------------
# WRW 15 Apr 2022 - Try popup of PDF instead of going to PDF Tab.
if True:
self.do_pdf_popup( self.canonical, page, sheet )
return True
else:
file = self.fb.get_file_from_canonical( self.canonical )
if not file:
self.conf.do_nastygram( 'canonical2file', None )
return
fullpath = Path( self.fb.Music_File_Root, file )
if not fullpath.is_file():
self.conf.do_nastygram( 'music_file_root', fullpath )
return True
self.pdf.show_music_file( file=fullpath, page=page, force=True ) # Click in Management Table
self.display_pdf.select()
self.display_pdf.set_focus()
# print( title, src, local, sheet, page, file )
self.meta.show( id='IndexDiffClick',
mode='N', # Not using this code
file=file,
title = title,
canonical = self.canonical,
src = src,
local = local,
sheet = sheet,
page=page,
page_count = self.pdf.get_info()[ 'page_count' ],
)
return True
# -------------------------------------------
# Button does same thing as click on left click in select mode
elif event == 'index-diff-select-button':
selected_rows = values[ "index-diff-table" ]
if len( selected_rows ) < 2 : # Nothing selected
return True
titles_to_edit = [ self.title_by_row[ row ] for row in selected_rows ]
self.do_titles_to_edit_window( titles_to_edit )
self.index_diff_table.update( select_rows = [] )
return True
# -------------------------------------------
# Didn't recognize any of our events.
return False
# --------------------------------------------------------------------------
# Called with data for one title. Have to check all entries for one title together.
# Need to rearrange into canonical, sheet, src order so can see all sources for one canonical together.
# src_list is sorted list of all srcs covered by canoical formed from set()
# Returns a truncated title, an indication of page mismatch (same == '*'), and list of pages offsets
# for srcs covering title, blanks if not, and a set of srcs covering this specific title.
# WRW 1 Apr 2022 - include title and short_title in results. Was just short_title as title.
def inspect_data( self, title, src_list, data ):
# -------------------------------------------
# Title may appear in more than one canonical. Only check for match in the indexes for one canonical at a time.
# First group by canonical.
sp_by_srcs = {}
pages = []
srcs = set()
# Build array of sheet and page indexed by src and array of all page numbers for title.
# Page numbers should all be the same for a given title and canonical. If they are
# not it is because of mismatch in the index from different srcs.
for item in data:
title = title
sheet = item['sheet']
src = item['src']
local = item['local']
page = self.fb.get_page_from_sheet( sheet, src, local ) # Applies sheet offset to get page from sheet
if not page:
print( f"ERROR-DEV: get_page_from_sheet() returned None, title: '{title}', sheet: '{sheet}', src: '{src}', local: '{local}', skipping.", file=sys.stderr )
continue
sp_by_srcs[ src ] = { 'sheet' : sheet, 'page': page } # Coverage of title by src
pages.append( page ) # Page numbers of title from each src
srcs.add( src ) # srcs covering title
same = all( x == pages[0] for x in pages ) # Test to see if all pages are the same
same_flag = ' ' if same else '*'
res_by_src = {}
for src in src_list: # Traverse all srcs covering canonical
if src in sp_by_srcs: # Is this title covered by src
item = sp_by_srcs[ src ] # Yes, pick up sheet/page
res_by_src[ src ] = f"{item['sheet']:>3}->{item['page']:>3}" # and include it in result.
else:
res_by_src[ src ] = '' # Otherwise include empty string in result
short_title = title[0:40] # *** Truncating title here. 31 Mar 2022 was 30
most_common_page = most_common( pages ) # Likely correct page.
res = { 'title': title,
'short_title' : short_title,
'same' : same_flag,
'res_by_src' : res_by_src,
'srcs' : srcs,
'most_common' : most_common_page,
}
return res
# print( f"{short_title:>50} {same_flag}: {index_list}" )
# --------------------------------------------------------------------------
# Round m to next larger multiple of n: k = m + n - m % n
def show_offset_graph( self, src, local, page_count ):
# -------------------------------------------------------
# For offset graph
o_min = 9999999
o_max = -9999999
offsets = self.fb.get_offsets( src, local )
for o in offsets:
o_min = min( o_min, o[ 'offset' ] )
o_max = max( o_max, o[ 'offset' ] )
y_range = max( 5, o_max, abs( o_min ) ) # Make sure have some graph even when 0 range.
y_range = y_range + 2 - y_range % 2 # Round up to next highest multiple of 2
y_margin = 20 # top and bottom
x_margin = 20 # left and right
y_scale = 10 # apply to offsets to make them more aparent
y_size = 2* y_range * y_scale
x_size = page_count
x_total = x_size + 2 * x_margin
y_total = y_size + 2 * y_margin
# -------------------------------------------------------
# For scatter plot
# x and y are so close that can use one value for max of both
scat_max = -9999999
scatter_data = []
for p in self.scatter_by_src[ src ]:
x = int(p[ 'sheet' ]) # x, y assigned here.
y = int(p[ 'page'])
scatter_data.append( (x,y) )
scat_max = max( scat_max, x )
scat_max = max( scat_max, y )
scat_range = scat_max
scat_x_size = scat_max
scat_y_size = scat_max
scat_x_total = scat_x_size + 2 * x_margin
scat_y_total = scat_y_size + 2 * y_margin
# -------------------------------------------------------
# Build graph in absolute coordinates. Put y0 in middle.
layout = [[
self.sg.Graph( (x_total, y_total ),
key = 'sheet-offset-graph',
graph_bottom_left=(0, -y_total/2 ),
graph_top_right=(x_total, y_total/2),
background_color = "#f0f0ff",
enable_events = True,
# motion_events = True, # not distributed copy yet
# expand_x = True,
# expand_y = True,
pad= ((4, 4), (8,8)),
),
self.sg.Graph( (scat_x_total, scat_y_total ),
key = 'sheet-offset-scatter-chart',
graph_bottom_left=(0, 0 ),
graph_top_right=(scat_x_total, scat_y_total),
background_color = "#f0f0ff",
enable_events = True,
# motion_events = True, # not distributed copy yet
# expand_x = True,
# expand_y = True,
pad= ((4, 4), (8,8)),
)
],
[ self.sg.Button('Close', key='sheet-offset-close', font=("Helvetica", 10), pad=((2,2),(2,1)), ) ]
]
window = self.sg.Window( "Sheet-Offset Graph - PDF page number = sheet number + offset",
layout,
icon= BL_Icon,
finalize=True,
element_justification = 'right',
modal = True,
)
graph = window['sheet-offset-graph']
scatter = window['sheet-offset-scatter-chart'] # WRW 5 Apr 2022 - a bit of an experiment
# -------------------------------------------------------
# Draw axis in absolute coordinates for graph
graph.draw_line(
( x_margin, 0),
( x_margin + x_size, 0),
color = '#a0a0a0',
width = 1,
)
graph.draw_line(
( x_margin, -y_size/2 ),
( x_margin, y_size/2 ),
color = '#a0a0a0',
width = 1,
)
# -------------------------------------------------------
# Draw axis in absolute coordinates for scatter
scatter.draw_line(
( x_margin, y_margin),
( x_margin + scat_x_size, y_margin),
color = '#a0a0a0',
width = 1,
)
scatter.draw_line(
( x_margin, y_margin),
( x_margin, scat_y_size + y_margin),
color = '#a0a0a0',
width = 1,
)
# | |
#
# Copyright 2009-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import hashlib
import urllib2
import urllib
import csv
from django import http
from django import shortcuts
from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.core import urlresolvers
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import ListView
from django.views.decorators.cache import never_cache
from django.views.decorators.vary import vary_on_headers
import databrowse
from databrowse.datastructures import *
import forms
from models import *
import models
import operations
import rdf
CONFIG = settings.CONFIG
class HttpResponseSeeOther(http.HttpResponseRedirect):
    """Redirect response using HTTP status 303 (See Other)."""
    status_code = 303
def homepage(request):
    """Render the databrowse homepage listing every browsable model."""
    browsable_models = [
        Trial, Intervention, Condition,
        Country, City, State, Location,
        Eligibility, Keyword, Mesh_term,
        Condition_browse, Intervention_browse,
        Reference, Link, Investigator, Responsible_party,
        Outcome, Arm_group,
        Contact, Address, Facility, Oversight_info,
        Overall_official, Sponsor, Sponsor_group,
        Provenance,
    ]
    flat_page_model = FlatPage
    # The databrowse site needs its root URL set before EasyModel wrappers
    # are constructed.
    databrowse.site.root_url = CONFIG['ROOT']
    easy_models = [EasyModel(databrowse.site, model) for model in browsable_models]
    return shortcuts.render_to_response('databrowse/homepage.html',
                                        {'model_list': easy_models,
                                         'root_url': databrowse.site.root_url,
                                         'flat_page_model': flat_page_model})
#def gen_view(request, **kwargs):
# return list_detail.object_detail(request, **kwargs)
@vary_on_headers('Accept', 'User-Agent')
def multi_format_object_detail(request, **kwargs):
    """A view that handles multiple format output.

    By default, the view uses the object_detail generic view. If the HTTP
    request parameter ``format`` is set to rdf, the output is served from
    the D2R server instead.
    """
    # For pubentry, the type is passed in but we really don't need it. If we
    # don't remove it, object_detail is going to complain about the extra
    # field 'type'.
    kwargs.pop('type', None)
    if 'uid' in kwargs:
        uid_or_slug = kwargs.pop('uid')
    else:
        uid_or_slug = kwargs['slug']
    # Serve the RDF page if the user explicitly wants RDF.
    if request.GET.get('format', '').lower() == 'rdf':
        rdf_url = '%s%s/%s' % (CONFIG['RDF_SERVER'],
                               kwargs['extra_context']['model'].name(),
                               uid_or_slug)
        # For debugging purposes, if redirect=1 is specified, we redirect to
        # the d2r server. Otherwise we fetch the RDF output from the d2r
        # server and return it to the user.
        if request.GET.get('redirect'):
            # BUG FIX: HttpResponse takes 'status', not 'status_code';
            # passing status_code= raised TypeError at runtime.
            response = http.HttpResponse(content="", status=303)
            response["Location"] = rdf_url
            return response
        else:
            rdf_content = rdf.load_rdf_from_url(rdf_url)
            return http.HttpResponse(rdf_content, mimetype='text/rdf+xml', status=303)
    # If the client is an RDF browser, serve the RDF format page.
    if request.META.get('HTTP_ACCEPT', '').lower().find('rdf') != -1:
        # TODO: the following is a temporary solution until we find a better way
        return rdf_view(request, request.path.replace('/resource/', ''))
    # If template_name is not set, use the default base_detail.html template.
    if not kwargs.get('template_name'):
        kwargs['template_name'] = 'base_detail.html'
    # NOTE(review): list_detail is not among this module's visible imports;
    # confirm it is provided elsewhere (django.views.generic.list_detail).
    return list_detail.object_detail(request, **kwargs)
#return gen_view(request, **kwargs)
#def cfxml(request, modelname, slug):
# """show xml view for a given slug. (currently not working)"""
# rtv = ""
# t = template.loader.get_template('trial_xml.xml')
#
# if modelname == "trial":
# trials = Trial.objects.filter(author__slug=slug)
## else:
## papers = PubAuthor.objects.filter(author__slug=slug)
# for t in trials:
# trial = t.pubentry
# if not trial.pid:
# trial.pid = trial.title.replace(" ", "")
# c = template.Context({ 'object': trial })
# rtv += t.render(c) + "\n\n"
# else:
# if modelname == "provenance":
# papers = Trial.objects.filter(provenance__slug=slug)
# elif modelname == "xxx":
# papers = xxx.objects.filter(series__slug=slug)
# else:
# return http.HttpResponse("not yet implemented for " + modelname)
#
# for t in trials:
# trial = t.pubentry
# if not trial.pid:
# trial.pid = trial.title.replace(" ", "")
# c = template.Context({ 'object': trial })
# rtv += t.render(c) + "\n\n"
#
# return http.HttpResponse(rtv, mimetype="text/plain")
# Displaying a search form is simple enough that it is safe to disable
# cross-site request forgery (CSRF) protection for this view.
@csrf_exempt
def search_form(request, object_type):
    """Render the keyword-search form and handle its submission.

    On a valid POST, redirect to the 'search' URL for the requested
    object type; otherwise (GET, or an invalid POST) render the form,
    bound with its validation errors when applicable.
    """
    if request.method == 'POST':
        form = forms.SearchForm(request.POST)
        if form.is_valid():
            target = urlresolvers.reverse(
                'search', args=[object_type, form.cleaned_data['keyword']])
            return http.HttpResponseRedirect(target)
    else:
        form = forms.SearchForm()
    context = {'form': form, 'root_url': CONFIG['ROOT']}
    return shortcuts.render_to_response(
        'search_form.html', context,
        context_instance=template.RequestContext(request))
class SearchResultView(ListView):
    """List view showing objects whose label contains a search keyword.

    Renders with the stock databrowse model-detail template so results
    look like a regular databrowse model listing.
    """

    template_name = 'databrowse/model_detail.html'

    def get_queryset(self):
        '''Return objects of the requested type matching the keyword.

        object_type: One of (pub, author, journal, series, school, keyword).
        keyword: The keyword to search for.
        '''
        object_type = self.kwargs['object_type']
        keyword = self.kwargs['keyword']
        # Model classes are the capitalised form of the URL segment
        # (e.g. 'author' -> models.Author).
        model = getattr(models, object_type.capitalize())
        matched_objects = model.objects.filter(label__icontains=keyword)
        return matched_objects

    def get_context_data(self, **kwargs):
        context = super(SearchResultView, self).get_context_data(**kwargs)
        self.queryset = self.get_queryset()
        model = getattr(models, self.kwargs['object_type'].capitalize())
        # Wrap the model and queryset in databrowse "easy" helpers so the
        # databrowse template can render them.
        easy_model = EasyModel(databrowse.site, model)
        # NOTE(review): QuerySet._clone(klass=...) is a private Django API;
        # confirm it still accepts `klass` on the Django version in use.
        easy_qs = self.queryset._clone(klass=EasyQuerySet)
        easy_qs._easymodel = easy_model
        databrowse.site.root_url = CONFIG['ROOT']
        extra_context = {'model': easy_model,
                         'root_url': databrowse.site.root_url,
                         'request': self.request,
                         'objectlist': easy_qs}
        context.update(extra_context)
        return context
@csrf_exempt
def upload_xml(request):
    """Show the XML-selection form and dispatch to the processing view.

    On a valid POST, redirect to the 'processxml' URL for the chosen
    file URL, forwarding the selected encoding as a query parameter so
    the processing view can decode the file correctly.
    """
    if request.method == 'POST':
        form = forms.XMLSelectForm(request.POST)
        if form.is_valid():
            redirect_url = '%s?encoding=%s' % (
                urlresolvers.reverse(
                    'processxml', args=[form.cleaned_data['url']]),
                form.cleaned_data['encoding'])
            return http.HttpResponseRedirect(redirect_url)
    else:
        form = forms.XMLSelectForm()
    return shortcuts.render_to_response(
        'form_upload_xml.html',
        {'form': form, 'root_url': CONFIG['ROOT']},
        context_instance=template.RequestContext(request))
def load_external_source(request, source_name):
"""Loads an external source."""
## Loading DBpedia
if source_name == 'dbpedia':
for m in External_resource.objects.filter(source_name='dbpedia'):
m.delete()
## Loading diseases
inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'dbpedia_disease.csv'
inputfile = file(inputfilename,'r')
csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
row = csv_reader.next()
if not csv_reader:
return http.HttpResponse("Error reading csv file")
else:
id_index = 0
try:
id_index = row.index('id')
except:
return http.HttpResponse("Error finding the right column in the csv file")
while row:
try:
row = csv_reader.next()
id = row[id_index]
resource_url = 'http://dbpedia.org/resource/' + id
resource_label = id.replace('_',' ')
resource_format = 'RDF_HTML'
related_model_name = 'Condition'
label = id + ' (dbpedia disease resource)'
if len(label)>127:
label = hashlib.md5(label).hexdigest()
external_resource, created = models.External_resource.objects.get_or_create(
label = label,
source_id = id,
source_label = resource_label,
source_name = source_name,
source_url = resource_url,
source_format = resource_format,
related_model_name =related_model_name,
)
except StopIteration:
row = None
## Loading drugs
inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'dbpedia_drugs.csv'
inputfile = file(inputfilename,'r')
csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
row = csv_reader.next()
if not csv_reader:
return http.HttpResponse("Error reading csv file")
else:
id_index = 0
try:
id_index = row.index('id')
except:
return http.HttpResponse("Error finding the right column in the csv file")
while row:
try:
row = csv_reader.next()
id = row[id_index]
resource_url = 'http://dbpedia.org/resource/' + id
resource_label = id.replace('_',' ')
resource_format = 'RDF_HTML'
related_model_name = 'Intervention'
label = id + ' (dbpedia drug resource)'
if len(label)>127:
label = hashlib.md5(label).hexdigest()
external_resource, created = models.External_resource.objects.get_or_create(
label = label,
source_label = resource_label,
source_id = id,
source_name = source_name,
source_url = resource_url,
source_format = resource_format,
related_model_name =related_model_name,
)
except StopIteration:
row = None
return http.HttpResponse("{'status':'OK'}")
## Loading Drugbank
elif source_name == 'drugbank':
for m in External_resource.objects.filter(source_name='drugbank'):
m.delete()
inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drugs.csv'
inputfile = file(inputfilename,'r')
csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
row = csv_reader.next()
if not csv_reader:
return http.HttpResponse("Error reading csv file")
else:
id_index = 0
name_index = 1
try:
id_index = row.index('id')
name_index = row.index('name')
except:
return http.HttpResponse("Error finding the right column(s) in the csv file")
while row:
try:
row = csv_reader.next()
id = row[id_index]
name = row[name_index]
resource_url = 'http://www4.wiwiss.fu-berlin.de/drugbank/resource/drugs/' + id
resource_label = name
resource_format = 'RDF_HTML'
related_model_name = 'Intervention'
label = id + ' (drugbank drug resource)'
if len(label)>127:
label = hashlib.md5(label).hexdigest()
external_resource, created = models.External_resource.objects.get_or_create(
label = label,
source_label = resource_label,
source_url = resource_url,
source_format = resource_format,
source_name = source_name,
related_model_name =related_model_name,
)
except StopIteration:
row = None
# alternative names
inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drug_brandnames.csv'
inputfile = file(inputfilename,'r')
csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
row = csv_reader.next()
if not csv_reader:
return http.HttpResponse("Error reading csv file")
else:
id_index = 0
name_index = 1
try:
id_index = row.index('id')
name_index = row.index('name')
except:
return http.HttpResponse("Error finding the right column(s) in the csv file")
while row:
try:
row = csv_reader.next()
id = row[id_index]
altname = unicode(row[name_index],errors='ignore')
alt_name, created = models.Alt_name.objects.get_or_create(
label = hashlib.md5(source_name+id+altname).hexdigest(),
source = source_name,
id = id,
altname = altname,
)
except StopIteration:
row = None
inputfilename = CONFIG.get('SOURCES_FILE_LOC') + 'drugbank_drug_synonyms.csv'
inputfile = file(inputfilename,'r')
csv_reader = csv.reader(inputfile, delimiter=',', quotechar='"')
row = csv_reader.next()
if not csv_reader:
return http.HttpResponse("Error reading csv file")
else:
id_index = 0
name_index = 1
try:
id_index = row.index('id')
name_index = row.index('name')
except:
return http.HttpResponse("Error finding the right | |
`1`") # noqa: E501
self._id = id
@property
def ifaces(self):
"""Gets the ifaces of this SubnetsSubnetPoolsPool. # noqa: E501
List of interface members in this pool. # noqa: E501
:return: The ifaces of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: list[SubnetsSubnetPoolIface]
"""
return self._ifaces
@ifaces.setter
def ifaces(self, ifaces):
"""Sets the ifaces of this SubnetsSubnetPoolsPool.
List of interface members in this pool. # noqa: E501
:param ifaces: The ifaces of this SubnetsSubnetPoolsPool. # noqa: E501
:type: list[SubnetsSubnetPoolIface]
"""
if ifaces is None:
raise ValueError("Invalid value for `ifaces`, must not be `None`") # noqa: E501
self._ifaces = ifaces
@property
def name(self):
"""Gets the name of this SubnetsSubnetPoolsPool. # noqa: E501
The name of the pool. It must be unique throughout the given subnet.It's a required field with POST method. # noqa: E501
:return: The name of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SubnetsSubnetPoolsPool.
The name of the pool. It must be unique throughout the given subnet.It's a required field with POST method. # noqa: E501
:param name: The name of this SubnetsSubnetPoolsPool. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 32:
raise ValueError("Invalid value for `name`, length must be less than or equal to `32`") # noqa: E501
if name is not None and len(name) < 1:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
if name is not None and not re.search('^[0-9a-zA-Z_-]*$', name): # noqa: E501
raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/^[0-9a-zA-Z_-]*$/`") # noqa: E501
self._name = name
@property
def ranges(self):
"""Gets the ranges of this SubnetsSubnetPoolsPool. # noqa: E501
List of IP address ranges in this pool. # noqa: E501
:return: The ranges of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: list[SubnetsSubnetPoolRange]
"""
return self._ranges
@ranges.setter
def ranges(self, ranges):
"""Sets the ranges of this SubnetsSubnetPoolsPool.
List of IP address ranges in this pool. # noqa: E501
:param ranges: The ranges of this SubnetsSubnetPoolsPool. # noqa: E501
:type: list[SubnetsSubnetPoolRange]
"""
if ranges is None:
raise ValueError("Invalid value for `ranges`, must not be `None`") # noqa: E501
self._ranges = ranges
@property
def rebalance_policy(self):
"""Gets the rebalance_policy of this SubnetsSubnetPoolsPool. # noqa: E501
Rebalance policy.. # noqa: E501
:return: The rebalance_policy of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: str
"""
return self._rebalance_policy
@rebalance_policy.setter
def rebalance_policy(self, rebalance_policy):
"""Sets the rebalance_policy of this SubnetsSubnetPoolsPool.
Rebalance policy.. # noqa: E501
:param rebalance_policy: The rebalance_policy of this SubnetsSubnetPoolsPool. # noqa: E501
:type: str
"""
if rebalance_policy is None:
raise ValueError("Invalid value for `rebalance_policy`, must not be `None`") # noqa: E501
allowed_values = ["auto", "manual"] # noqa: E501
if rebalance_policy not in allowed_values:
raise ValueError(
"Invalid value for `rebalance_policy` ({0}), must be one of {1}" # noqa: E501
.format(rebalance_policy, allowed_values)
)
self._rebalance_policy = rebalance_policy
@property
def rules(self):
"""Gets the rules of this SubnetsSubnetPoolsPool. # noqa: E501
Names of the rules in this pool. # noqa: E501
:return: The rules of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: list[str]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this SubnetsSubnetPoolsPool.
Names of the rules in this pool. # noqa: E501
:param rules: The rules of this SubnetsSubnetPoolsPool. # noqa: E501
:type: list[str]
"""
if rules is None:
raise ValueError("Invalid value for `rules`, must not be `None`") # noqa: E501
self._rules = rules
@property
def sc_auto_unsuspend_delay(self):
"""Gets the sc_auto_unsuspend_delay of this SubnetsSubnetPoolsPool. # noqa: E501
Time delay in seconds before a node which has been automatically unsuspended becomes usable in SmartConnect responses for pool zones. # noqa: E501
:return: The sc_auto_unsuspend_delay of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: int
"""
return self._sc_auto_unsuspend_delay
@sc_auto_unsuspend_delay.setter
def sc_auto_unsuspend_delay(self, sc_auto_unsuspend_delay):
"""Sets the sc_auto_unsuspend_delay of this SubnetsSubnetPoolsPool.
Time delay in seconds before a node which has been automatically unsuspended becomes usable in SmartConnect responses for pool zones. # noqa: E501
:param sc_auto_unsuspend_delay: The sc_auto_unsuspend_delay of this SubnetsSubnetPoolsPool. # noqa: E501
:type: int
"""
if sc_auto_unsuspend_delay is None:
raise ValueError("Invalid value for `sc_auto_unsuspend_delay`, must not be `None`") # noqa: E501
if sc_auto_unsuspend_delay is not None and sc_auto_unsuspend_delay > 86400: # noqa: E501
raise ValueError("Invalid value for `sc_auto_unsuspend_delay`, must be a value less than or equal to `86400`") # noqa: E501
if sc_auto_unsuspend_delay is not None and sc_auto_unsuspend_delay < 0: # noqa: E501
raise ValueError("Invalid value for `sc_auto_unsuspend_delay`, must be a value greater than or equal to `0`") # noqa: E501
self._sc_auto_unsuspend_delay = sc_auto_unsuspend_delay
@property
def sc_connect_policy(self):
"""Gets the sc_connect_policy of this SubnetsSubnetPoolsPool. # noqa: E501
SmartConnect client connection balancing policy. # noqa: E501
:return: The sc_connect_policy of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: str
"""
return self._sc_connect_policy
@sc_connect_policy.setter
def sc_connect_policy(self, sc_connect_policy):
"""Sets the sc_connect_policy of this SubnetsSubnetPoolsPool.
SmartConnect client connection balancing policy. # noqa: E501
:param sc_connect_policy: The sc_connect_policy of this SubnetsSubnetPoolsPool. # noqa: E501
:type: str
"""
if sc_connect_policy is None:
raise ValueError("Invalid value for `sc_connect_policy`, must not be `None`") # noqa: E501
allowed_values = ["round_robin", "conn_count", "throughput", "cpu_usage"] # noqa: E501
if sc_connect_policy not in allowed_values:
raise ValueError(
"Invalid value for `sc_connect_policy` ({0}), must be one of {1}" # noqa: E501
.format(sc_connect_policy, allowed_values)
)
self._sc_connect_policy = sc_connect_policy
@property
def sc_dns_zone(self):
"""Gets the sc_dns_zone of this SubnetsSubnetPoolsPool. # noqa: E501
SmartConnect zone name for the pool. # noqa: E501
:return: The sc_dns_zone of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: str
"""
return self._sc_dns_zone
@sc_dns_zone.setter
def sc_dns_zone(self, sc_dns_zone):
"""Sets the sc_dns_zone of this SubnetsSubnetPoolsPool.
SmartConnect zone name for the pool. # noqa: E501
:param sc_dns_zone: The sc_dns_zone of this SubnetsSubnetPoolsPool. # noqa: E501
:type: str
"""
if sc_dns_zone is None:
raise ValueError("Invalid value for `sc_dns_zone`, must not be `None`") # noqa: E501
if sc_dns_zone is not None and len(sc_dns_zone) > 2048:
raise ValueError("Invalid value for `sc_dns_zone`, length must be less than or equal to `2048`") # noqa: E501
if sc_dns_zone is not None and len(sc_dns_zone) < 0:
raise ValueError("Invalid value for `sc_dns_zone`, length must be greater than or equal to `0`") # noqa: E501
if sc_dns_zone is not None and not re.search('^$|^[a-zA-Z0-9-]+(\\.[a-zA-Z0-9-]*)*$', sc_dns_zone): # noqa: E501
raise ValueError("Invalid value for `sc_dns_zone`, must be a follow pattern or equal to `/^$|^[a-zA-Z0-9-]+(\\.[a-zA-Z0-9-]*)*$/`") # noqa: E501
self._sc_dns_zone = sc_dns_zone
@property
def sc_dns_zone_aliases(self):
"""Gets the sc_dns_zone_aliases of this SubnetsSubnetPoolsPool. # noqa: E501
List of SmartConnect zone aliases (DNS names) to the pool. # noqa: E501
:return: The sc_dns_zone_aliases of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: list[str]
"""
return self._sc_dns_zone_aliases
@sc_dns_zone_aliases.setter
def sc_dns_zone_aliases(self, sc_dns_zone_aliases):
"""Sets the sc_dns_zone_aliases of this SubnetsSubnetPoolsPool.
List of SmartConnect zone aliases (DNS names) to the pool. # noqa: E501
:param sc_dns_zone_aliases: The sc_dns_zone_aliases of this SubnetsSubnetPoolsPool. # noqa: E501
:type: list[str]
"""
if sc_dns_zone_aliases is None:
raise ValueError("Invalid value for `sc_dns_zone_aliases`, must not be `None`") # noqa: E501
self._sc_dns_zone_aliases = sc_dns_zone_aliases
@property
def sc_failover_policy(self):
"""Gets the sc_failover_policy of this SubnetsSubnetPoolsPool. # noqa: E501
SmartConnect IP failover policy. # noqa: E501
:return: The sc_failover_policy of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: str
"""
return self._sc_failover_policy
@sc_failover_policy.setter
def sc_failover_policy(self, sc_failover_policy):
"""Sets the sc_failover_policy of this SubnetsSubnetPoolsPool.
SmartConnect IP failover policy. # noqa: E501
:param sc_failover_policy: The sc_failover_policy of this SubnetsSubnetPoolsPool. # noqa: E501
:type: str
"""
if sc_failover_policy is None:
raise ValueError("Invalid value for `sc_failover_policy`, must not be `None`") # noqa: E501
allowed_values = ["round_robin", "conn_count", "throughput", "cpu_usage"] # noqa: E501
if sc_failover_policy not in allowed_values:
raise ValueError(
"Invalid value for `sc_failover_policy` ({0}), must be one of {1}" # noqa: E501
.format(sc_failover_policy, allowed_values)
)
self._sc_failover_policy = sc_failover_policy
@property
def sc_subnet(self):
"""Gets the sc_subnet of this SubnetsSubnetPoolsPool. # noqa: E501
Name of SmartConnect service subnet for this pool. # noqa: E501
:return: The sc_subnet of this SubnetsSubnetPoolsPool. # noqa: E501
:rtype: str
"""
return self._sc_subnet
@sc_subnet.setter
def sc_subnet(self, sc_subnet):
| |
<gh_stars>1-10
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This module contains all taurus tango attribute"""
__all__ = ["TangoAttribute", "TangoAttributeEventListener", "TangoAttrValue"]
__docformat__ = "restructuredtext"
# -*- coding: utf-8 -*-
import re
import time
import threading
import weakref
import PyTango
import numpy
from functools import partial
from taurus import Manager
from taurus.external.pint import Quantity, UR, UndefinedUnitError
from taurus.core.taurusattribute import TaurusAttribute
from taurus.core.taurusbasetypes import (TaurusEventType,
TaurusSerializationMode,
SubscriptionState, TaurusAttrValue,
DataFormat, DataType)
from taurus.core.taurusoperation import WriteAttrOperation
from taurus.core.util.event import EventListener
from taurus.core.util.log import (debug, taurus4_deprecation,
deprecation_decorator)
from taurus.core.tango.enums import (EVENT_TO_POLLING_EXCEPTIONS,
FROM_TANGO_TO_NUMPY_TYPE,
DevState)
from .util.tango_taurus import (description_from_tango,
display_level_from_tango,
quality_from_tango,
standard_display_format_from_tango,
quantity_from_tango_str,
str_2_obj, data_format_from_tango,
data_type_from_tango)
class TangoAttrValue(TaurusAttrValue):
    """A TaurusAttrValue specialization to decode PyTango.DeviceAttribute
    objects
    """

    def __init__(self, attr=None, pytango_dev_attr=None, config=None):
        # config parameter is kept for backwards compatibility only
        TaurusAttrValue.__init__(self)
        if config is not None:
            from taurus.core.util.log import deprecated
            deprecated(dep='"config" kwarg', alt='"attr"', rel='4.0')
            attr = config
        if attr is None:
            self._attrRef = None
        else:
            # Weak proxy so this value object does not keep the
            # TangoAttribute alive.
            self._attrRef = weakref.proxy(attr)
        self.config = self._attrRef  # bck-compat
        self._pytango_dev_attr = p = pytango_dev_attr
        if p is None:
            # No raw PyTango value supplied: keep an empty DeviceAttribute.
            self._pytango_dev_attr = p = PyTango.DeviceAttribute()
            return
        if self._attrRef is None:
            return
        # DevUChar is treated as numerical even though PyTango does not
        # classify it as such.
        numerical = (PyTango.is_numerical_type(self._attrRef._tango_data_type,
                                               inc_array=True) or
                     p.type == PyTango.CmdArgType.DevUChar
                     )
        if p.has_failed:
            self.error = PyTango.DevFailed(*p.get_err_stack())
        else:
            # spectra and images can be empty without failing
            if p.is_empty and self._attrRef.data_format != DataFormat._0D:
                dtype = FROM_TANGO_TO_NUMPY_TYPE.get(
                    self._attrRef._tango_data_type)
                if self._attrRef.data_format == DataFormat._1D:
                    shape = (0,)
                elif self._attrRef.data_format == DataFormat._2D:
                    shape = (0, 0)
                p.value = numpy.empty(shape, dtype=dtype)
                if not (numerical or self._attrRef.type == DataType.Boolean):
                    # generate a nested empty list of given shape
                    p.value = []
                    for _ in xrange(len(shape) - 1):
                        p.value = [p.value]
        rvalue = p.value
        wvalue = p.w_value
        if numerical:
            # Numerical values are wrapped in pint Quantities carrying the
            # units configured for the attribute.
            units = self._attrRef._units
            if rvalue is not None:
                rvalue = Quantity(rvalue, units=units)
            if wvalue is not None:
                wvalue = Quantity(wvalue, units=units)
        elif isinstance(rvalue, PyTango._PyTango.DevState):
            # Map PyTango DevState to the taurus DevState enum.
            rvalue = DevState[str(rvalue)]
        self.rvalue = rvalue
        self.wvalue = wvalue
        self.time = p.time  # TODO: decode this into a TaurusTimeVal
        self.quality = quality_from_tango(p.quality)

    def __getattr__(self, name):
        # Fall back first to the attribute object, then to the raw PyTango
        # DeviceAttribute, emitting a deprecation warning on success.
        try:
            ret = getattr(self._attrRef, name)
        except AttributeError:
            try:
                ret = getattr(self._pytango_dev_attr, name)
            except AttributeError:
                raise AttributeError('%s has no attribute %s'
                                     % (self.__class__.__name__, name))
        # return the attr but only after warning
        from taurus.core.util.log import deprecated
        deprecated(dep='TangoAttrValue.%s' % name,
                   alt='TangoAttribute.%s' % name, rel='4.0')
        return ret

    # --------------------------------------------------------
    # This is for backwards compat with the API of taurus < 4
    #
    @taurus4_deprecation(alt='.rvalue')
    def _get_value(self):
        """for backwards compat with taurus < 4"""
        debug(repr(self))
        try:
            return self.__fix_int(self.rvalue.magnitude)
        except AttributeError:
            # rvalue is not a Quantity: return it untouched
            return self.rvalue

    @taurus4_deprecation(alt='.rvalue')
    def _set_value(self, value):
        """for backwards compat with taurus < 4"""
        debug('Setting %r to %s' % (value, self.name))
        if self.rvalue is None:  # we do not have a previous rvalue
            import numpy
            dtype = numpy.array(value).dtype
            if numpy.issubdtype(dtype, int) or numpy.issubdtype(dtype, float):
                # Without a previous value the units are unknown, so a bare
                # number cannot be interpreted unambiguously.
                msg = 'Refusing to set ambiguous value (deprecated .value API)'
                raise ValueError(msg)
            else:
                self.rvalue = value
        elif hasattr(self.rvalue, 'units'):  # we do have it and is a Quantity
            self.rvalue = Quantity(value, units=self.rvalue.units)
        else:  # we do have a previous value and is not a quantity
            self.rvalue = value

    value = property(_get_value, _set_value)

    @taurus4_deprecation(alt='.wvalue')
    def _get_w_value(self):
        """for backwards compat with taurus < 4"""
        debug(repr(self))
        try:
            return self.__fix_int(self.wvalue.magnitude)
        except AttributeError:
            # wvalue is not a Quantity: return it untouched
            return self.wvalue

    @taurus4_deprecation(alt='.wvalue')
    def _set_w_value(self, value):
        """for backwards compat with taurus < 4"""
        debug('Setting %r to %s' % (value, self.name))
        if self.wvalue is None:  # we do not have a previous wvalue
            import numpy
            dtype = numpy.array(value).dtype
            if numpy.issubdtype(dtype, int) or numpy.issubdtype(dtype, float):
                msg = 'Refusing to set ambiguous value (deprecated .value API)'
                raise ValueError(msg)
            else:
                self.wvalue = value
        elif hasattr(self.wvalue, 'units'):  # we do have it and is a Quantity
            self.wvalue = Quantity(value, units=self.wvalue.units)
        else:  # we do have a previous value and is not a quantity
            self.wvalue = value

    w_value = property(_get_w_value, _set_w_value)

    @property
    @taurus4_deprecation(alt='.error')
    def has_failed(self):
        # Truthy when an error was recorded during decoding.
        return self.error

    def __fix_int(self, value):
        """cast value to int if it is an integer.
        Works on scalar and non-scalar values
        """
        if self._attrRef.type is None or self._attrRef.type != DataType.Integer:
            return value
        try:
            return int(value)
        except TypeError:
            # Non-scalar: cast element-wise via numpy.
            import numpy
            return numpy.array(value, dtype='int')
class TangoAttribute(TaurusAttribute):
    """Taurus attribute backed by a Tango attribute (scheme 'tango')."""

    # Placeholder strings used when a configuration value is not available.
    no_cfg_value = '-----'
    no_unit = 'No unit'
    no_standard_unit = 'No standard unit'
    no_display_unit = 'No display unit'
    no_description = 'No description'
    not_specified = 'Not specified'
    no_min_value = no_max_value = not_specified
    no_min_alarm = no_max_alarm = not_specified
    no_min_warning = no_max_warning = not_specified
    no_delta_t = no_delta_val = not_specified
    no_rel_change = no_abs_change = not_specified
    no_archive_rel_change = no_archive_abs_change = not_specified
    no_archive_period = not_specified

    # helper class property that stores a reference to the corresponding
    # factory
    _factory = None

    _scheme = 'tango'
    _description = 'A Tango Attribute'
    def __init__(self, name, parent, **kwargs):
        """Create the attribute, query its Tango configuration from *parent*
        (the device) when possible, and subscribe to configuration events.
        """
        # the last attribute value
        self.__attr_value = None
        # the last attribute error
        self.__attr_err = None
        # the change event identifier
        self.__chg_evt_id = None
        # current event subscription state
        self.__subscription_state = SubscriptionState.Unsubscribed
        self.__subscription_event = threading.Event()
        # the parent's HW object (the PyTango Device obj)
        self.__dev_hw_obj = None
        # unit for which a decode warning has already been issued
        self.__already_warned_unit = None
        self.call__init__(TaurusAttribute, name, parent, **kwargs)
        attr_info = None
        if parent:
            attr_name = self.getSimpleName()
            try:
                attr_info = parent.attribute_query(attr_name)
            except (AttributeError, PyTango.DevFailed):
                # if PyTango could not connect to the dev
                attr_info = None
        # Set default values in case the attrinfoex is None
        self.writable = False
        dis_level = PyTango.DispLevel.OPERATOR
        self.display_level = display_level_from_tango(dis_level)
        self.tango_writable = PyTango.AttrWriteType.READ
        self._units = self._unit_from_tango(PyTango.constants.UnitNotSpec)
        # decode the Tango configuration attribute (adds extra members)
        self._pytango_attrinfoex = None
        self._decodeAttrInfoEx(attr_info)
        # subscribe to configuration events (unsubscription done at cleanup)
        self.__cfg_evt_id = None
        self._subscribeConfEvents()
    def cleanUp(self):
        """Unsubscribe from configuration events and drop PyTango references
        before delegating cleanup to the base class.
        """
        self.trace("[TangoAttribute] cleanUp")
        self._unsubscribeConfEvents()
        TaurusAttribute.cleanUp(self)
        self.__dev_hw_obj = None
        self._pytango_attrinfoex = None
def __getattr__(self, name):
try:
return getattr(self._pytango_attrinfoex, name)
except AttributeError:
raise Exception('TangoAttribute does not have the attribute %s'
% name)
def getNewOperation(self, value):
attr_value = PyTango.AttributeValue()
attr_value.name = self.getSimpleName()
attr_value.value = self.encode(value)
op = WriteAttrOperation(self, attr_value)
return op
#-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
# PyTango connection
#-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
def isNumeric(self, inc_array=False):
tgtype = self._tango_data_type
return PyTango.is_numerical_type(tgtype, inc_array=inc_array)
def isInteger(self, inc_array=False):
tgtype = self._tango_data_type
return PyTango.is_int_type(tgtype, inc_array=inc_array)
def isFloat(self, inc_array=False):
tgtype = self._tango_data_type
return PyTango.is_float_type(tgtype, inc_array=inc_array)
def isBoolean(self, inc_array=False):
tgtype = self._tango_data_type
# PyTango.is_bool_type is not implemented in Tango7 and neither in
# some Tango8, at least 8.1.1. Avoid to use is_bool_type method
# while taurus is still compatible with these versions.
# PyTango.is_bool_type(tgtype, inc_array=inc_array)
if tgtype == PyTango.CmdArgType.DevBoolean:
return True
if inc_array and tgtype == PyTango.CmdArgType.DevVarBooleanArray:
return True
return False
def isState(self):
tgtype = self._tango_data_type
return tgtype == PyTango.CmdArgType.DevState
    def getFormat(self, cache=True):
        """Return the display format string; the *cache* flag is accepted for
        API compatibility but ignored (the format is always read from the
        decoded configuration)."""
        return self.format
def encode(self, value):
"""Translates the given value into a tango compatible value according to
the attribute data type.
Raises `pint.DimensionalityError` if value is a Quantity and it
cannot be expressed in the units of the attribute set in the DB
"""
if isinstance(value, Quantity):
# convert to units of the attr in the DB (or raise an exception)
magnitude = value.to(self._units).magnitude
else:
magnitude = value
fmt = self.getDataFormat()
tgtype = self._tango_data_type
if fmt == DataFormat._0D:
if tgtype == PyTango.CmdArgType.DevDouble:
attrvalue = float(magnitude)
elif tgtype == PyTango.CmdArgType.DevFloat:
# We encode to float, but rounding to Tango::DevFloat precision
# see: http://sf.net/p/sardana/tickets/162
attrvalue = float(numpy.float32(magnitude))
elif PyTango.is_int_type(tgtype):
# changed as a partial workaround to a problem in PyTango
# writing to DevULong64 attributes (see ALBA RT#29793)
attrvalue = long(magnitude)
elif tgtype == PyTango.CmdArgType.DevBoolean:
try:
attrvalue = bool(int(magnitude))
except:
attrvalue = str(magnitude).lower() == 'true'
elif tgtype == PyTango.CmdArgType.DevUChar:
attrvalue = int(magnitude)
elif tgtype in (PyTango.CmdArgType.DevState,
PyTango.CmdArgType.DevEncoded):
attrvalue = magnitude
else:
attrvalue = str(magnitude)
elif fmt in (DataFormat._1D, DataFormat._2D):
if PyTango.is_int_type(tgtype):
# cast to integer because the | |
<gh_stars>0
import logging
import os
from decimal import Decimal
from time import sleep
import requests
from hexbytes import HexBytes
from web3 import Web3
from web3 import contract
from web3.contract import Contract
from config.constants import BASE_CURRENCIES
from config.constants import GAS_LIMITS
from config.constants import MULTICHAIN_CONFIG
from config.enums import Network
from src.harvester import IHarvester
from src.misc_utils import hours
from src.misc_utils import seconds_to_blocks
from src.tx_utils import get_effective_gas_price
from src.tx_utils import get_gas_price_of_tx
from src.tx_utils import get_priority_fee
from src.web3_utils import confirm_transaction
from src.utils import get_abi
from src.discord_utils import get_hash_from_failed_tx_error
from src.web3_utils import get_last_harvest_times
from src.token_utils import get_token_price
from src.discord_utils import send_error_to_discord
from src.discord_utils import send_success_to_discord
# NOTE(review): calling basicConfig at import time configures the root logger
# for every consumer of this module; consider moving this into the entry point.
logging.basicConfig(level=logging.INFO)
# Maximum tolerated gap between harvests; 120 hours, in the unit returned by
# src.misc_utils.hours (presumably seconds — confirm).
MAX_TIME_BETWEEN_HARVESTS = hours(120)
HARVEST_THRESHOLD = 0.0005  # min ratio of want to total vault AUM required to harvest
NUM_FLASHBOTS_BUNDLES = 6
class GeneralHarvester(IHarvester):
    def __init__(
        self,
        chain: Network = Network.Ethereum,
        web3: Web3 = None,
        # NOTE(review): os.getenv defaults are evaluated once at import time;
        # environment changes after import will not be picked up.
        keeper_acl: str = os.getenv("KEEPER_ACL"),
        keeper_address: str = os.getenv("KEEPER_ADDRESS"),
        keeper_key: str = os.getenv("KEEPER_KEY"),
        base_oracle_address: str = os.getenv("ETH_USD_CHAINLINK"),
        use_flashbots: bool = False,
        discord_url: str = None,
    ):
        """Build contract handles for the keeper ACL and the base USD oracle,
        and (on Ethereum/Fantom) preload each strategy's last harvest time.
        """
        self.logger = logging.getLogger(__name__)
        self.chain = chain
        self.web3 = web3
        self.keeper_key = keeper_key
        self.keeper_address = keeper_address
        self.keeper_acl: Contract = self.web3.eth.contract(
            address=self.web3.toChecksumAddress(keeper_acl),
            abi=get_abi(self.chain, "keeper_acl"),
        )
        self.base_usd_oracle: Contract = self.web3.eth.contract(
            address=self.web3.toChecksumAddress(base_oracle_address),
            abi=get_abi(self.chain, "oracle"),
        )
        # Times of last harvest
        if self.chain in [Network.Ethereum, Network.Fantom]:
            # Scan back only as far as a harvest could still be considered
            # recent, to bound the log query.
            self.last_harvest_times = get_last_harvest_times(
                self.web3,
                self.keeper_acl,
                start_block=self.web3.eth.block_number
                - seconds_to_blocks(MAX_TIME_BETWEEN_HARVESTS),
                chain=self.chain,
            )
        else:
            # Don't care about poly/arbitrum
            self.last_harvest_times = {}
        self.use_flashbots = use_flashbots
        self.discord_url = discord_url
def is_time_to_harvest(
self,
strategy: contract.Contract,
harvest_interval_threshold: int = MAX_TIME_BETWEEN_HARVESTS,
) -> bool:
"""Calculates the time between harvests for the supplied strategy and returns true if
it has been longer than the supplied harvest_interval_threshold which is measured in seconds
Args:
strategy (contract): Vault strategy web3 contract object
harvest_interval_threshold (int, optional):
Amount of time in seconds that is acceptable to not have harvested within.
Defaults to MAX_TIME_BETWEEN_HARVESTS.
Returns:
bool: True if time since last harvest is > harvest_interval_threshold, else False
"""
# Only care about harvest gas costs on eth
if self.chain not in [Network.Ethereum, Network.Fantom]:
return True
try:
last_harvest = self.last_harvest_times[strategy.address]
current_time = self.web3.eth.get_block("latest")["timestamp"]
self.logger.info(
f"Time since last harvest: {(current_time - last_harvest) / 3600}"
)
return current_time - last_harvest > harvest_interval_threshold
except KeyError:
return True
    def harvest(
        self,
        strategy: contract.Contract,
    ):
        """Orchestration function that harvests outstanding rewards.

        Args:
            strategy (contract)

        Raises:
            ValueError: If the keeper isn't whitelisted, throw an error and alert user.
        """
        strategy_name = strategy.functions.getName().call()
        # TODO: update for ACL
        if not self.__is_keeper_whitelisted("harvest"):
            raise ValueError("Keeper ACL is not whitelisted for calling harvest")
        want_address = strategy.functions.want().call()
        want = self.web3.eth.contract(
            address=want_address,
            abi=get_abi(self.chain, "erc20"),
        )
        vault_balance = want.functions.balanceOf(strategy.address).call()
        self.logger.info(f"vault balance: {vault_balance}")
        # Scale the simulated harvest amount to whole-token units for logging.
        want_to_harvest = (
            self.estimate_harvest_amount(strategy)
            / 10 ** want.functions.decimals().call()
        )
        self.logger.info(f"estimated want change: {want_to_harvest}")
        # TODO: figure out how to handle profit estimation
        # current_price_eth = self.get_current_rewards_price()
        # self.logger.info(f"current rewards price per token (ETH): {current_price_eth}")
        gas_fee = self.estimate_gas_fee(strategy.address)
        self.logger.info(f"estimated gas cost: {gas_fee}")
        # for now we'll just harvest every hour
        # NOTE(review): vault_balance / want_to_harvest / gas_fee are only
        # logged; is_profitable() currently ignores them and returns True.
        should_harvest = self.is_profitable()
        self.logger.info(f"Should we harvest: {should_harvest}")
        if should_harvest:
            self.__process_harvest(
                strategy=strategy,
                strategy_name=strategy_name,
            )
    def harvest_no_return(
        self,
        strategy: contract,
    ):
        """Harvest via the ACL's harvestNoReturn call (no want amount is
        reported back, so no harvest-amount estimation is done).

        Args:
            strategy (contract)

        Raises:
            ValueError: If the keeper isn't whitelisted for harvestNoReturn.
        """
        strategy_name = strategy.functions.getName().call()
        # TODO: update for ACL
        if not self.__is_keeper_whitelisted("harvestNoReturn"):
            raise ValueError(
                "Keeper ACL is not whitelisted for calling harvestNoReturn"
            )
        want_address = strategy.functions.want().call()
        want = self.web3.eth.contract(
            address=want_address,
            abi=get_abi(self.chain, "erc20"),
        )
        vault_balance = want.functions.balanceOf(strategy.address).call()
        self.logger.info(f"vault balance: {vault_balance}")
        # TODO: figure out how to handle profit estimation
        # current_price_eth = self.get_current_rewards_price()
        # self.logger.info(f"current rewards price per token (ETH): {current_price_eth}")
        gas_fee = self.estimate_gas_fee(strategy.address, returns=False)
        self.logger.info(f"estimated gas cost: {gas_fee}")
        # for now we'll just harvest every hour
        should_harvest = self.is_profitable()
        self.logger.info(f"Should we harvest: {should_harvest}")
        if should_harvest:
            self.__process_harvest(
                strategy=strategy,
                strategy_name=strategy_name,
            )
    def harvest_rewards_manager(
        self,
        strategy: contract,
    ):
        """Harvest through the chain's rewards manager contract.

        NOTE(review): this permanently rebinds self.keeper_acl to the rewards
        manager for the lifetime of this instance, so later harvest()/tend()
        calls will also go through it — confirm that is intentional.

        Args:
            strategy (contract)

        Raises:
            ValueError: If the keeper isn't whitelisted on the rewards manager.
        """
        strategy_name = strategy.functions.getName().call()
        self.keeper_acl = self.web3.eth.contract(
            address=self.web3.toChecksumAddress(
                MULTICHAIN_CONFIG[self.chain]["rewards_manager"]
            ),
            abi=get_abi(self.chain, "rewards_manager"),
        )
        if not self.__is_keeper_whitelisted("rewards_manager"):
            raise ValueError(f"Keeper is not whitelisted for {strategy_name}")
        want_address = strategy.functions.want().call()
        want = self.web3.eth.contract(
            address=want_address,
            abi=get_abi(self.chain, "erc20"),
        )
        vault_balance = want.functions.balanceOf(strategy.address).call()
        self.logger.info(f"vault balance: {vault_balance}")
        gas_fee = self.estimate_gas_fee(strategy.address)
        self.logger.info(f"estimated gas cost: {gas_fee}")
        # No profitability gate here: always proceeds to harvest.
        self.__process_harvest(
            strategy=strategy,
            strategy_name=strategy_name,
        )
    def harvest_mta(
        self,
        voter_proxy: contract,
    ):
        """Call harvestMta on the Mstable voter proxy when deemed profitable.

        Args:
            voter_proxy (contract): Mstable voter proxy contract

        Raises:
            ValueError: If the keeper isn't whitelisted for harvestMta.
        """
        # TODO: update for ACL
        if not self.__is_keeper_whitelisted("harvestMta"):
            raise ValueError("Keeper ACL is not whitelisted for calling harvestMta")
        gas_fee = self.estimate_gas_fee(voter_proxy.address, function="harvestMta")
        self.logger.info(f"estimated gas cost: {gas_fee}")
        should_harvest_mta = self.is_profitable()
        self.logger.info(f"Should we call harvestMta: {should_harvest_mta}")
        if should_harvest_mta:
            self.__process_harvest_mta(voter_proxy)
    def tend(self, strategy: contract):
        """Send a tend transaction for the supplied strategy.

        Args:
            strategy (contract)

        Raises:
            ValueError: If the keeper isn't whitelisted for tend.
        """
        strategy_name = strategy.functions.getName().call()
        # TODO: update for ACL
        if not self.__is_keeper_whitelisted("tend"):
            raise ValueError("Keeper ACL is not whitelisted for calling tend")
        # TODO: figure out how to handle profit estimation
        # current_price_eth = self.get_current_rewards_price()
        # self.logger.info(f"current rewards price per token (ETH): {current_price_eth}")
        gas_fee = self.estimate_gas_fee(strategy.address, function="tend")
        self.logger.info(f"estimated gas cost: {gas_fee}")
        # Tend is unconditional — no profitability gate, unlike harvest().
        self.__process_tend(
            strategy=strategy,
            strategy_name=strategy_name,
        )
    def tend_then_harvest(self, strategy: contract):
        """Tend the strategy, pause one minute (presumably to let the tend tx
        confirm — TODO confirm), then harvest it."""
        self.tend(strategy)
        sleep(60)
        self.harvest(strategy)
def estimate_harvest_amount(self, strategy: contract) -> Decimal:
want = self.web3.eth.contract(
address=strategy.functions.want().call(),
abi=get_abi(self.chain, "erc20"),
)
want_gained = self.keeper_acl.functions.harvest(strategy.address).call(
{"from": self.keeper_address}
)
# call badger api to get prices
currency = BASE_CURRENCIES[self.chain]
if self.chain == Network.Fantom:
price_per_want = get_token_price(
want.address, currency, self.chain, use_staging=True
)
else:
price_per_want = get_token_price(want.address, currency, self.chain)
self.logger.info(f"price per want: {price_per_want} {currency}")
self.logger.info(f"want gained: {want_gained}")
if type(want_gained) is list:
want_gained = 0
return price_per_want * want_gained
def is_profitable(self) -> bool:
# TODO: Implement this
# harvest if ideal want change is > 0.05% of total vault assets
# should_harvest = want_to_harvest / vault_balance >= HARVEST_THRESHOLD
return True
def __is_keeper_whitelisted(self, function: str) -> bool:
"""Checks if the bot we're using is whitelisted for the strategy.
Returns:
bool: True if our bot is whitelisted to make function calls, False otherwise.
"""
if function in ["harvest", "harvestMta"]:
key = self.keeper_acl.functions.HARVESTER_ROLE().call()
elif function == "tend":
key = self.keeper_acl.functions.TENDER_ROLE().call()
elif function == "rewards_manager":
key = self.keeper_acl.functions.KEEPER_ROLE().call()
return self.keeper_acl.functions.hasRole(key, self.keeper_address).call()
    def __process_tend(
        self,
        strategy: contract = None,
        strategy_name: str = None,
    ):
        """Send a tend tx, wait for confirmation, and report to Discord.

        Args:
            strategy (contract, optional): Defaults to None.
            strategy_name (str, optional): Defaults to None.
        """
        try:
            tx_hash = self.__send_tend_tx(strategy)
            succeeded, _ = confirm_transaction(self.web3, tx_hash)
            if succeeded:
                gas_price_of_tx = get_gas_price_of_tx(
                    self.web3, self.base_usd_oracle, tx_hash, self.chain
                )
                self.logger.info(f"got gas price of tx: {gas_price_of_tx}")
                send_success_to_discord(
                    tx_type=f"Tend {strategy_name}",
                    tx_hash=tx_hash,
                    gas_cost=gas_price_of_tx,
                    chain=self.chain,
                    url=self.discord_url,
                )
            elif tx_hash != HexBytes(0):
                # Tx was sent (HexBytes(0) marks a failed send) but was not
                # confirmed; report it without a gas cost.
                send_success_to_discord(
                    tx_type=f"Tend {strategy_name}",
                    tx_hash=tx_hash,
                    chain=self.chain,
                    url=self.discord_url,
                )
        except Exception as e:
            self.logger.error(f"Error processing tend tx: {e}")
            send_error_to_discord(
                strategy_name,
                "Tend",
                error=e,
                chain=self.chain,
                keeper_address=self.keeper_address,
            )
    def __process_harvest(
        self,
        strategy: contract = None,
        strategy_name: str = None,
        harvested: Decimal = None,
        returns: bool = True,
    ):
        """Private function to create, broadcast, confirm tx on eth and then send
        transaction to Discord for monitoring

        Args:
            strategy (contract, optional): Defaults to None.
            strategy_name (str, optional): Defaults to None.
            harvested (Decimal, optional): Amount harvested; currently unused
                by this method. Defaults to None.
            returns (bool, optional): Whether to call the ACL's returning
                harvest variant. Defaults to True.
        """
        try:
            tx_hash, max_target_block = self.__send_harvest_tx(
                strategy, returns=returns
            )
            succeeded, msg = confirm_transaction(
                self.web3, tx_hash, max_block=max_target_block
            )
            if succeeded:
                # If successful, update last harvest
                # time to make sure we don't double harvest
                self.update_last_harvest_time(strategy.address)
                gas_price_of_tx = get_gas_price_of_tx(
                    self.web3, self.base_usd_oracle, tx_hash, self.chain
                )
                self.logger.info(f"got gas price of tx: {gas_price_of_tx}")
                send_success_to_discord(
                    tx_type=f"Harvest {strategy_name}",
                    tx_hash=tx_hash,
                    gas_cost=gas_price_of_tx,
                    chain=self.chain,
                    url=self.discord_url,
                )
            elif tx_hash != HexBytes(0):
                # Sent but unconfirmed (HexBytes(0) marks a failed send).
                if not self.use_flashbots:
                    # And if pending
                    self.update_last_harvest_time(strategy.address)
                send_success_to_discord(
                    tx_type=f"Harvest {strategy_name}",
                    tx_hash=tx_hash,
                    chain=self.chain,
                    url=self.discord_url,
                )
            else:
                send_error_to_discord(
                    strategy_name,
                    "Harvest",
                    tx_hash=tx_hash,
                    message=msg,
                    chain=self.chain,
                    keeper_address=self.keeper_address,
                )
        except Exception as e:
            self.logger.error(f"Error processing harvest tx: {e}")
            send_error_to_discord(
                strategy_name,
                "Harvest",
                error=e,
                chain=self.chain,
                keeper_address=self.keeper_address,
            )
    def __process_harvest_mta(
        self,
        voter_proxy: contract,
    ):
        """Private function to create, broadcast, confirm tx on eth and then send
        transaction to Discord for monitoring

        Args:
            voter_proxy (contract): Mstable voter proxy contract
        """
        try:
            tx_hash = self.__send_harvest_mta_tx(voter_proxy)
            succeeded, _ = confirm_transaction(self.web3, tx_hash)
            if succeeded:
                # If successful, update the last harvest time
                # so we don't double harvest.
                self.update_last_harvest_time(voter_proxy.address)
                gas_price_of_tx = get_gas_price_of_tx(
                    self.web3, self.base_usd_oracle, tx_hash, self.chain
                )
                self.logger.info(f"got gas price of tx: {gas_price_of_tx}")
                send_success_to_discord(
                    tx_type="Harvest MTA",
                    tx_hash=tx_hash,
                    gas_cost=gas_price_of_tx,
                    chain=self.chain,
                    url=self.discord_url,
                )
            elif tx_hash != HexBytes(0):
                # Sent but unconfirmed (HexBytes(0) marks a failed send).
                send_success_to_discord(
                    tx_type="Harvest MTA",
                    tx_hash=tx_hash,
                    chain=self.chain,
                    url=self.discord_url,
                )
        except Exception as e:
            self.logger.error(f"Error processing harvestMta tx: {e}")
            send_error_to_discord(
                "",
                "Harvest MTA",
                error=e,
                chain=self.chain,
                keeper_address=self.keeper_address,
            )
def __send_harvest_tx(self, strategy: contract, returns: bool = True) -> HexBytes:
"""Sends transaction to ETH node for confirmation.
Args:
strategy (contract)
Raises:
Exception: If we have an issue sending transaction (unable to communicate with
node, etc.) we log the error and return a tx_hash of 0x00.
Returns:
HexBytes: Transaction hash for transaction that was sent.
"""
max_target_block = None
tx_hash = HexBytes(0)
try:
tx = self.__build_transaction(strategy.address, returns=returns)
signed_tx = self.web3.eth.account.sign_transaction(
tx, private_key=self.keeper_key
)
tx_hash = signed_tx.hash
if not self.use_flashbots:
self.web3.eth.send_raw_transaction(signed_tx.rawTransaction)
else:
bundle = [
{"signed_transaction": signed_tx.rawTransaction},
]
block_number = self.web3.eth.block_number
for i | |
from center of images.
Parameters
----------
crop_size : tuple, list or ndarray of int
(z,y,x)-shape of central crop along three axes(z,y,x order is used).
crop_mask : bool
if True, crop the mask in the same way.
Returns
-------
batch
"""
crop_size = np.asarray(crop_size).reshape(-1)
crop_halfsize = np.rint(crop_size / 2)
img_shapes = [np.asarray(self.get(i, 'images').shape)
for i in range(len(self))]
if any(np.any(shape < crop_size) for shape in img_shapes):
raise ValueError(
"Crop size must be smaller than size of inner 3D images")
cropped_images = []
cropped_masks = []
for i in range(len(self)):
image = self.get(i, 'images')
cropped_images.append(make_central_crop(image, crop_size))
if crop_mask and self.masks is not None:
mask = self.get(i, 'masks')
cropped_masks.append(make_central_crop(mask, crop_size))
self._bounds = np.cumsum([0] + [crop_size[0]] * len(self))
self.images = np.concatenate(cropped_images, axis=0)
if crop_mask and self.masks is not None:
self.masks = np.concatenate(cropped_masks, axis=0)
# recalculate origin, refresh nodules_info, leave only relevant nodules
self.origin = self.origin + self.spacing * crop_halfsize
if self.nodules is not None:
self._refresh_nodules_info()
self._filter_nodules_info()
return self
def flip(self):
""" Invert the order of slices for each patient
Returns
-------
batch
Examples
--------
>>> batch = batch.flip()
"""
message = ("There is no implementation of flip method for class " +
"CTIMagesMaskedBatch. Nothing happened")
logger.warning(message)
return self
@action
def binarize_mask(self, threshold=0.35):
""" Binarize masks by threshold.
Parameters
----------
threshold : float
threshold for masks binarization.
"""
self.masks *= np.asarray(self.masks > threshold, dtype=np.int)
return self
    @action
    def predict_on_scan(self, model, strides=(16, 32, 32), crop_shape=(32, 64, 64),
                        batch_size=4, targets_mode='segmentation', data_format='channels_last',
                        show_progress=True, model_type='tf', dst='masks'):
        """ Get predictions of the model on data contained in batch.

        Transforms scan data into patches of shape crop_shape and then feed
        this patches sequentially into model with name specified by
        argument 'model'; after that loads predicted masks or probabilities
        into 'masks' component of the current batch and returns it.

        Parameters
        ----------
        model : str
            name of model that will be used for predictions
            or callable (model_type must be 'callable').
        strides : tuple, list or ndarray of int
            (z,y,x)-strides for patching operation.
        crop_shape : tuple, list or ndarray of int
            (z,y,x)-shape of crops.
        batch_size : int
            number of patches to feed in model in one iteration.
        targets_mode: str
            type of targets 'segmentation', 'regression' or 'classification'.
        data_format: str
            format of neural network input data,
            can be 'channels_first' or 'channels_last'.
        show_progress : bool
            if True, wrap the prediction loop in a tqdm notebook progress bar.
        model_type : str
            represents type of model that will be used for prediction.
            Possible values are 'keras', 'tf' or 'callable'.
        dst : str
            batch component the assembled prediction is written into.

        Returns
        -------
        CTImagesMaskedBatch.
        """
        if model_type not in ('tf', 'keras', 'callable'):
            raise ValueError("Argument 'model_type' must be one of ['tf', 'keras', 'callable']")
        # Resolve a model name to the actual model object when needed.
        if model_type in ('keras', 'tf') and isinstance(model, str):
            _model = self.get_model_by_name(model)
        elif callable(model):
            _model = model
        else:
            raise ValueError("Argument 'model' must be str or callable. "
                             + " If callable then 'model_type' argument's value "
                             + "must be set to 'callable'")
        crop_shape = np.asarray(crop_shape).reshape(-1)
        strides = np.asarray(strides).reshape(-1)
        # Cut the whole scan into overlapping 3D patches.
        patches_arr = self.get_patches(patch_shape=crop_shape,
                                       stride=strides,
                                       padding='reflect')
        # Add a channels axis where the network expects it.
        if data_format == 'channels_first':
            patches_arr = patches_arr[:, np.newaxis, ...]
        elif data_format == 'channels_last':
            patches_arr = patches_arr[..., np.newaxis]
        predictions = []
        iterations = range(0, patches_arr.shape[0], batch_size)
        if show_progress:
            iterations = tqdm_notebook(iterations) # pylint: disable=bad-option-value
        # Feed patches through the model in chunks of batch_size.
        for i in iterations:
            if model_type == 'tf':
                _prediction = _model.predict(feed_dict={'images': patches_arr[i: i + batch_size, ...]})
            elif model_type == 'keras':
                _prediction = _model.predict(patches_arr[i: i + batch_size, ...])
            elif model_type == 'callable':
                _prediction = _model(patches_arr[i: i + batch_size, ...])
            current_prediction = np.asarray(_prediction)
            if targets_mode == 'classification':
                # Broadcast each per-patch probability to a full-size patch.
                current_prediction = np.stack([np.ones(shape=(crop_shape)) * prob
                                               for prob in current_prediction.ravel()])
            if targets_mode == 'regression':
                # Convert (center, size, prob) regression output into masks.
                current_prediction = create_mask_reg(current_prediction[:, :3],
                                                     current_prediction[:, 3:6],
                                                     current_prediction[:, 6],
                                                     crop_shape, 0.01)
            predictions.append(current_prediction)
        patches_mask = np.concatenate(predictions, axis=0)
        patches_mask = np.squeeze(patches_mask)
        # Reassemble per-patch predictions back into full-scan masks.
        self.load_from_patches(patches_mask, stride=strides,
                               scan_shape=tuple(self.images_shape[0, :]),
                               data_attr=dst)
        return self
    def unpack(self, component='images', **kwargs):
        """ Basic way for unpacking components from batch.

        Parameters
        ----------
        component : str
            component to unpack, can be 'images' or 'masks'.
        data_format : 'channels_last' or 'channels_first' or None
            Reflects where to put channels dimension: right after batch dimension or after all spatial axes
            or do not put it all if None.
        kwargs : dict
            key-word arguments that will be passed in callable if
            component argument refers to method of batch class.

        Returns
        -------
        ndarray(batch_size, ...) or None
        """
        if not hasattr(self, component):
            return None
        if component in ('images', 'masks'):
            data_format = kwargs.get('data_format')
            # Fast path: all items share a shape, so a single reshape of the
            # flat component array works; otherwise gather item-by-item.
            if np.all(self.images_shape == self.images_shape[0, :]):
                value = self.get(None, component).reshape(-1, *self.images_shape[0, :])
            else:
                value = np.stack([self.get(i, component) for i in range(len(self))])
            if data_format is None:
                pass
            elif data_format == 'channels_last':
                value = value[..., np.newaxis]
            elif data_format == 'channels_first':
                value = value[:, np.newaxis, ...]
        else:
            # Non-array components: call methods (e.g. '*_targets'),
            # return plain attributes as-is.
            attr_value = getattr(self, component)
            if callable(attr_value):
                value = attr_value(**kwargs)
            else:
                value = attr_value
        return value
def classification_targets(self, threshold=10, **kwargs):
""" Unpack data from batch in format suitable for classification task.
Parameters
----------
threshold : int
minimum number of '1' pixels in mask to consider it cancerous.
Returns
-------
ndarray(batch_size, 1)
targets for classification task: labels corresponding to cancerous
nodules ('1') and non-cancerous nodules ('0').
"""
masks_labels = np.asarray([self.get(i, 'masks').sum() > threshold
for i in range(len(self))], dtype=np.int)
return masks_labels[..., np.newaxis]
def regression_targets(self, threshold=10, **kwargs):
""" Unpack data from batch in format suitable for regression task.
Parameters
----------
threshold : int
minimum number of '1' pixels in mask to consider it cancerous.
Returns
-------
ndarray(batch_size, 7)
targets for regression task: cancer center, size
and label(1 for cancerous and 0 for non-cancerous). Note that in case
of non-cancerous crop first 6 column of output array will be set to zero.
"""
nodules = self.nodules
sizes = np.zeros(shape=(len(self), 3), dtype=np.float)
centers = np.zeros(shape=(len(self), 3), dtype=np.float)
for item_pos, _ in enumerate(self.indices):
item_nodules = nodules[nodules.patient_pos == item_pos]
if len(item_nodules) == 0:
continue
mask_nod_indices = item_nodules.nodule_size.max(axis=1).argmax()
nodule_sizes = (item_nodules.nodule_size / self.spacing[item_pos, :]
/ self.images_shape[item_pos, :])
nodule_centers = (item_nodules.nodule_center / self.spacing[item_pos, :]
/ self.images_shape[item_pos, :])
sizes[item_pos, :] = nodule_sizes[mask_nod_indices, :]
centers[item_pos, :] = nodule_centers[mask_nod_indices, :]
labels = self.unpack('classification_targets', threshold=threshold)
reg_targets = np.concatenate([centers, sizes, labels], axis=1)
return reg_targets
def segmentation_targets(self, data_format='channels_last', **kwargs):
""" Unpack data from batch in format suitable for regression task.
Parameters
----------
data_format : str
data_format shows where to put new axis for channels dimension:
can be 'channels_last' or 'channels_first'.
Returns
-------
ndarray(batch_size, ...)
batch array with masks.
"""
return self.unpack('masks', data_format=data_format)
@staticmethod
def make_data_tf(batch, model=None, mode='segmentation', is_training=True, **kwargs):
""" Prepare data in batch for training neural network implemented in tensorflow.
Parameters
----------
mode : str
mode can be one of following 'classification', 'regression'
or 'segmentation'. Default is 'segmentation'.
data_format : str
data format batch data. Can be 'channels_last'
or 'channels_first'. Default is 'channels_last'.
is_training : bool
whether model is in training or prediction mode. Default is True.
threshold : int
threshold value of '1' pixels in masks to consider it cancerous.
Default is 10.
Returns
-------
dict or None
feed dict and fetches for training neural network.
"""
inputs = batch.unpack('images', **kwargs)
if mode in ['segmentation', 'classification', 'regression']:
labels = batch.unpack(mode + '_targets', **kwargs)
else:
raise ValueError("Argument 'mode' must have one of values: "
+ "'segmentation', 'classification' or 'regression'")
feed_dict = dict(images=inputs, labels=labels) if is_training else dict(images=inputs)
return dict(feed_dict=feed_dict, fetches=None)
@staticmethod
def make_data_keras(batch, model=None, mode='segmentation', is_training=True, **kwargs):
""" Prepare data in batch for training neural network implemented in keras.
Parameters
----------
mode : str
mode can be one of following 'classification', 'regression'
or 'segmentation'. Default is 'segmentation'.
data_format : str
data format batch data. Can be 'channels_last'
or 'channels_first'. Default is 'channels_last'.
is_training : bool
whether model is in training or prediction mode. Default is True.
threshold : int
threshold value of '1' pixels in masks to consider it cancerous.
Default is 10.
Returns
-------
dict or None
kwargs for keras model train method:
{'x': ndarray(...), 'y': ndarrray(...)} for training neural network.
"""
inputs = batch.unpack('images', **kwargs)
if mode in ['segmentation', 'classification', 'regression']:
labels = batch.unpack(mode + '_targets', **kwargs)
else:
raise ValueError("Argument 'mode' must have one of values: "
+ "'segmentation', 'classification' or 'regression'")
return dict(x=inputs, y=labels) if is_training else dict(x=inputs)
@action
def mix_images(self, p=0.8, mode='sum', mix_masks=True):
""" Mix images and masks.
Parameters
----------
p : float in (0, 1)
weight of the | |
= open(self._manip_file, 'rb')
self._front = ABS_PATH + '/files/front.jpg'
self.front = open(self._front, 'rb')
self._side = ABS_PATH + '/files/side.jpg'
self.side = open(self._side, 'rb')
self._top = ABS_PATH + '/files/top.jpg'
self.top = open(self._top, 'rb')
self._choice0sm = ABS_PATH + '/files/choice0sm.jpg'
self.choice0sm = open(self._choice0sm, 'rb')
self._choice0big = ABS_PATH + '/files/choice0big.jpg'
self.choice0big = open(self._choice0big, 'rb')
self._choice1sm = ABS_PATH + '/files/choice1sm.jpg'
self.choice1sm = open(self._choice1sm, 'rb')
self._choice1big = ABS_PATH + '/files/choice1big.jpg'
self.choice1big = open(self._choice1big, 'rb')
def tearDown(self):
"""
"""
super(Ortho3DTests, self).tearDown()
self.manip.close()
self.front.close()
self.side.close()
self.top.close()
self.choice0big.close()
self.choice0sm.close()
self.choice1big.close()
self.choice1sm.close()
class Ortho3DLabelFacesTests(Ortho3DTests):
    """Tests for the label-ortho-faces question and answer record types."""

    def add_item(self, bank):
        """Create an item with a manip + ortho-view-set question and a
        face-values answer, then return the persisted item."""
        form = bank.get_item_form_for_create([])
        form.display_name = 'a test item!'
        form.description = 'for testing with'
        new_item = bank.create_item(form)
        question_form = bank.get_question_form_for_create(new_item.ident,
                                                          [self.question_type])
        question_form.display_name = 'Question for ' + new_item.display_name.text
        question_form.description = ''
        question_form.set_text('Please answer the question.')
        question_form.set_manip(DataInputStream(self.manip))
        question_form.set_ortho_view_set(front_view=DataInputStream(self.front),
                                         side_view=DataInputStream(self.side),
                                         top_view=DataInputStream(self.top))
        new_question = bank.create_question(question_form)
        answer_form = bank.get_answer_form_for_create(new_item.ident,
                                                      [self.answer_type])
        answer_form.display_name = 'Answer for ' + new_item.display_name.text
        answer_form.set_face_values(front_face_value=self.answer['frontFaceValue'],
                                    side_face_value=self.answer['sideFaceValue'],
                                    top_face_value=self.answer['topFaceValue'])
        new_answer = bank.create_answer(answer_form)
        item = bank.get_item(new_item.ident)
        return item

    def setUp(self):
        """Create one label-faces item and an assessment taken for it."""
        super(Ortho3DLabelFacesTests, self).setUp()
        self.question_type = Type('question-record-type%3Alabel-ortho-faces%40ODL.MIT.EDU')
        self.answer_type = Type('answer-record-type%3Alabel-ortho-faces%40ODL.MIT.EDU')
        # Expected (correct) face values used by add_item and the tests below.
        self.answer = {
            "frontFaceValue": 1,
            "sideFaceValue": 2,
            "topFaceValue": 3
        }
        self._item = self.add_item(self._bank)
        self._taken = self.create_taken_for_items(self._bank, [self._item])

    def tearDown(self):
        """No extra cleanup beyond the parent class."""
        super(Ortho3DLabelFacesTests, self).tearDown()

    def test_can_submit_wrong_answer(self):
        """A wrong-face-values response is stored exactly as submitted."""
        wrong_response = {
            "frontFaceValue": 0,
            "sideFaceValue": 1,
            "topFaceValue": 2
        }
        first_section = self._bank.get_first_assessment_section(self._taken.ident)
        null_response = self._bank.get_response(first_section.ident, self._item.ident)
        # Before any submission, reading the response must raise IllegalState.
        try:
            null_response.object_map
        except errors.IllegalState:
            pass
        else:
            self.fail('null response did not throw illegal state')
        response_form = self._bank.get_response_form(assessment_section_id=first_section.ident,
                                                     item_id=self._item.ident)
        response_form.set_face_values(front_face_value=wrong_response['frontFaceValue'],
                                      side_face_value=wrong_response['sideFaceValue'],
                                      top_face_value=wrong_response['topFaceValue'])
        self._bank.submit_response(first_section.ident, self._item.ident, response_form)
        response = self._bank.get_response(first_section.ident, self._item.ident).object_map
        for key, val in response['integerValues'].items():
            self.assertEqual(
                val,
                wrong_response[key]
            )

    def test_can_submit_right_answer(self):
        """The correct face values round-trip through submit/get response."""
        first_section = self._bank.get_first_assessment_section(self._taken.ident)
        null_response = self._bank.get_response(first_section.ident, self._item.ident)
        try:
            null_response.object_map
        except errors.IllegalState:
            pass
        else:
            self.fail('Did not raise illegal state on unanswered response')
        response_form = self._bank.get_response_form(assessment_section_id=first_section.ident,
                                                     item_id=self._item.ident)
        response_form.set_face_values(front_face_value=self.answer['frontFaceValue'],
                                      side_face_value=self.answer['sideFaceValue'],
                                      top_face_value=self.answer['topFaceValue'])
        self._bank.submit_response(first_section.ident, self._item.ident, response_form)
        response = self._bank.get_response(first_section.ident, self._item.ident).object_map
        for key, val in response['integerValues'].items():
            self.assertEqual(
                val,
                self.answer[key]
            )

    def test_taken_question_contains_expected_files(self):
        """Manip and the three ortho views served by the taken question match
        the uploaded source files byte-for-byte."""
        first_section = self._bank.get_first_assessment_section(self._taken.ident)
        question = self._bank.get_question(first_section.ident, self._item.ident)
        self.reset_files()
        test_cases = [(question.get_manip(), self.manip),
                      (question.get_front_view(), self.front),
                      (question.get_side_view(), self.side),
                      (question.get_top_view(), self.top)]
        for case in test_cases:
            self.assertEqual(
                case[1].read(),
                case[0].read()
            )

    def test_responding_to_one_question_makes_next_one_available(self):
        """After answering the first question, get_first_unanswered_question
        advances to a different question."""
        self.reset_files()
        self._item2 = self.add_item(self._bank)
        self._taken2 = self.create_taken_for_items(self._bank, [self._item, self._item2])
        first_section = self._bank.get_first_assessment_section(self._taken2.ident)
        first_question = self._bank.get_first_unanswered_question(first_section.ident)
        response_form = self._bank.get_response_form(assessment_section_id=first_section.ident,
                                                     item_id=first_question.ident)
        response_form.set_face_values(front_face_value=self.answer['frontFaceValue'],
                                      side_face_value=self.answer['sideFaceValue'],
                                      top_face_value=self.answer['topFaceValue'])
        self._bank.submit_response(first_section.ident, first_question.ident, response_form)
        second_question = self._bank.get_first_unanswered_question(first_section.ident)
        self.assertNotEqual(
            str(first_question.ident),
            str(second_question.ident)
        )

    def test_can_set_first_angle_on_item(self):
        """firstAngle defaults to falsy and can be enabled via question update."""
        self.assertFalse(self._item.object_map['question']['firstAngle'])
        q_edit_form = self._bank.get_question_form_for_update(self._item.ident)
        q_edit_form.set_first_angle_projection(True)
        # NOTE(review): update_question's return value is unused; the item is
        # re-read from the bank below to verify persistence.
        updated_q = self._bank.update_question(q_edit_form)
        item = self._bank.get_item(self._item.ident)
        item_map = item.object_map
        self.assertTrue(item_map['question']['firstAngle'])

    def test_can_update_single_ovs_view(self):
        """A single view of the ortho-view-set can be replaced independently."""
        question = self._bank.get_item(self._item.ident).get_question()
        top_view = question.get_top_view()
        self.check_files(top_view, self.top)
        top_view.close()
        update_form = self._bank.get_question_form_for_update(self._item.ident)
        update_form.set_ovs_view(DataInputStream(self.choice0big), 'topView')
        self._bank.update_question(update_form)
        question = self._bank.get_item(self._item.ident).get_question()
        top_view = question.get_top_view()
        self.check_files(top_view, self.choice0big)
        top_view.close()
class Ortho3DMultiChoiceTests(Ortho3DTests):
    def add_item(self, bank):
        """Create a multi-choice-ortho item with an empty question and answer
        (files and choice ids are attached by the individual tests)."""
        form = bank.get_item_form_for_create([])
        form.display_name = 'a test item!'
        form.description = 'for testing with'
        new_item = bank.create_item(form)
        question_form = bank.get_question_form_for_create(new_item.ident,
                                                          [self.question_type])
        question_form.display_name = 'Question for ' + new_item.display_name.text
        question_form.description = ''
        question_form.set_text('Please answer the question.')
        new_question = bank.create_question(question_form)
        answer_form = bank.get_answer_form_for_create(new_item.ident,
                                                      [self.answer_type])
        answer_form.display_name = 'Answer for ' + new_item.display_name.text
        new_answer = bank.create_answer(answer_form)
        item = bank.get_item(new_item.ident)
        return item
def get_asset_content_by_genus_type(self, asset_id, genus_type):
mgr = get_manager(self.req, 'repository')
repo = mgr.get_repository(self._bank.ident)
asset = repo.get_asset(asset_id)
ac = None
for content in asset.get_asset_contents():
if content.genus_type == genus_type:
return content
return ac
    def setUp(self):
        """Create one multi-choice-ortho item and an assessment taken for it."""
        super(Ortho3DMultiChoiceTests, self).setUp()
        self.question_type = Type('question-record-type%3Amulti-choice-ortho%40ODL.MIT.EDU')
        self.answer_type = Type('answer-record-type%3Amulti-choice-ortho%40ODL.MIT.EDU')
        # Expected correct choice index used by the tests below.
        self.answer = {
            "choiceId": 2
        }
        self._item = self.add_item(self._bank)
        self._taken = self.create_taken_for_items(self._bank, [self._item])
    def tearDown(self):
        """No extra cleanup beyond the parent class (file handles, bank)."""
        super(Ortho3DMultiChoiceTests, self).tearDown()
    def test_can_set_manip_with_right_answer_choice_files(self):
        """Setting a manip together with its answer-choice views stores all
        three contents on the manip asset."""
        question_map = self._item.object_map['question']
        self.assertEqual(
            {},
            question_map['fileIds']
        )
        update_form = self._bank.get_question_form_for_update(self._item.ident)
        update_form.set_manip(DataInputStream(self.manip),
                              DataInputStream(self.choice0sm),
                              DataInputStream(self.choice0big),
                              'manip')
        self._bank.update_question(update_form)
        question = self._bank.get_item(self._item.ident).get_question()
        question_manip = question.get_manip()
        self.check_files(question_manip, self.manip)
        question_manip.close()
        repo = self.get_repo(self._bank.ident)
        manip_asset = repo.get_asset(question.get_manip_id())
        # manip + small choice view + large choice view = 3 contents.
        self.assertEqual(
            manip_asset.get_asset_contents().available(),
            3
        )
    def test_can_set_manip_without_right_answer_choice_files(self):
        """Setting only the manip (no choice views) stores a single content."""
        question_map = self._item.object_map['question']
        self.assertEqual(
            {},
            question_map['fileIds']
        )
        update_form = self._bank.get_question_form_for_update(self._item.ident)
        update_form.set_manip(DataInputStream(self.manip),
                              name='manip')
        self._bank.update_question(update_form)
        question = self._bank.get_item(self._item.ident).get_question()
        question_manip = question.get_manip()
        self.check_files(question_manip, self.manip)
        question_manip.close()
        repo = self.get_repo(self._bank.ident)
        manip_asset = repo.get_asset(question.get_manip_id())
        # Only the manip itself was uploaded.
        self.assertEqual(
            manip_asset.get_asset_contents().available(),
            1
        )
    def test_invalid_argument_thrown_if_non_datainputstream_passed_in(self):
        """set_manip / set_ortho_choice reject raw file handles — every file
        argument must be wrapped in DataInputStream."""
        update_form = self._bank.get_question_form_for_update(self._item.ident)
        self.assertRaises(errors.InvalidArgument,
                          update_form.set_manip,
                          self.manip)
        self.assertRaises(errors.InvalidArgument,
                          update_form.set_ortho_choice,
                          self.choice0sm,
                          DataInputStream(self.choice0big))
        self.assertRaises(errors.InvalidArgument,
                          update_form.set_ortho_choice,
                          DataInputStream(self.choice0sm),
                          self.choice0big)
def test_can_set_choice_files(self):
    """Attaching small/large ortho-view files creates one choice whose
    URLs stream two distinct asset contents."""
    question_map = self._item.object_map['question']
    self.assertEqual(
        {},
        question_map['fileIds']
    )
    update_form = self._bank.get_question_form_for_update(self._item.ident)
    update_form.set_ortho_choice(small_asset_data=DataInputStream(self.choice0sm),
                                 large_asset_data=DataInputStream(self.choice0big),
                                 name='Choice 1')
    self._bank.update_question(update_form)
    question = self._bank.get_item(self._item.ident).get_question()
    choice_files = question.get_files()
    self.assertEqual(
        len(choice_files['choices']),
        1
    )
    # Look up the stored asset contents by genus type so their ids can be
    # compared against the streaming URLs below.
    asset_id = Id(question.object_map['choices'][0]['assetId'])
    small_ac = self.get_asset_content_by_genus_type(asset_id,
                                                    OV_SET_SMALL_ASSET_CONTENT_TYPE)
    large_ac = self.get_asset_content_by_genus_type(asset_id,
                                                    OV_SET_LARGE_ASSET_CONTENT_TYPE)
    choice = choice_files['choices'][0]
    # Both views must be streamable and must not point at the same content.
    self.is_streamable_url(choice['largeOrthoViewSet'])
    self.is_streamable_url(choice['smallOrthoViewSet'])
    self.assertNotEqual(
        choice['largeOrthoViewSet'],
        choice['smallOrthoViewSet']
    )
    self.assertEqual(
        choice['name'],
        'Choice 1'
    )
    # Each URL embeds the id of the matching asset content.
    self.assertIn(
        str(large_ac.ident),
        choice['largeOrthoViewSet']
    )
    self.assertIn(
        str(small_ac.ident),
        choice['smallOrthoViewSet']
    )
def test_can_set_answer(self):
    """An answer can reference a question choice by its id.

    Fix: ``get_answers().next()`` is Python 2-only; use the builtin
    ``next()`` (already used for ``get_answer_ids()`` above) so the
    test also runs under Python 3.
    """
    update_form = self._bank.get_question_form_for_update(self._item.ident)
    update_form.set_ortho_choice(small_asset_data=DataInputStream(self.choice0sm),
                                 large_asset_data=DataInputStream(self.choice0big),
                                 name='Choice 1')
    self._bank.update_question(update_form)
    choice_id = self._bank.get_item(self._item.ident).get_question().object_map['choices'][0]['id']
    answer_id = next(self._item.get_answer_ids())
    update_form = self._bank.get_answer_form_for_update(answer_id)
    update_form.add_choice_id(choice_id)
    self._bank.update_answer(update_form)
    answer_map = next(self._bank.get_item(self._item.ident).get_answers()).object_map
    self.assertEqual(
        choice_id,
        answer_map['choiceIds'][0]
    )
def test_can_set_review_options(self):
    """Each whetherCorrect review flag can be disabled independently,
    and flags already cleared stay cleared."""
    def update_offered(offered_id, payload):
        # Apply set_review_whether_correct(**payload) to the offered.
        offering_form = self._bank.get_assessment_offered_form_for_update(offered_id)
        offering_form.set_review_whether_correct(**payload)
        return self._bank.update_assessment_offered(offering_form)
    assessment2 = self.create_assessment_for_items(self._bank, [self._item])
    reviewable_type = Type(**{
        'authority': 'MOODLE.ORG',
        'namespace': 'assessment-offered-record-type',
        'identifier': 'review-options'
    })
    offering_form = self._bank.get_assessment_offered_form_for_create(assessment2.ident,
                                                                      [reviewable_type])
    new_offered = self._bank.create_assessment_offered(offering_form)
    offered = self._bank.get_assessment_offered(new_offered.ident).object_map
    # All four flags default to True.
    for option, val in offered['reviewOptions']['whetherCorrect'].items():
        self.assertTrue(val)
    # Clear the flags one at a time and check the cumulative state.
    update_offered(new_offered.ident, {'during_attempt': False})
    offered = self._bank.get_assessment_offered(new_offered.ident).object_map
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['duringAttempt'])
    self.assertTrue(offered['reviewOptions']['whetherCorrect']['afterAttempt'])
    self.assertTrue(offered['reviewOptions']['whetherCorrect']['beforeDeadline'])
    self.assertTrue(offered['reviewOptions']['whetherCorrect']['afterDeadline'])
    update_offered(new_offered.ident, {'after_attempt': False})
    offered = self._bank.get_assessment_offered(new_offered.ident).object_map
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['duringAttempt'])
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['afterAttempt'])
    self.assertTrue(offered['reviewOptions']['whetherCorrect']['beforeDeadline'])
    self.assertTrue(offered['reviewOptions']['whetherCorrect']['afterDeadline'])
    update_offered(new_offered.ident, {'before_deadline': False})
    offered = self._bank.get_assessment_offered(new_offered.ident).object_map
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['duringAttempt'])
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['afterAttempt'])
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['beforeDeadline'])
    self.assertTrue(offered['reviewOptions']['whetherCorrect']['afterDeadline'])
    update_offered(new_offered.ident, {'after_deadline': False})
    offered = self._bank.get_assessment_offered(new_offered.ident).object_map
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['duringAttempt'])
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['afterAttempt'])
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['beforeDeadline'])
    self.assertFalse(offered['reviewOptions']['whetherCorrect']['afterDeadline'])
def test_can_set_learning_objectives(self):
    """Learning-objective ids set on the form round-trip into the item map.

    Fix: drop the unused ``updated_item`` local -- persistence is verified
    by re-fetching the item, not via update_item's return value.
    """
    form = self._bank.get_item_form_for_update(self._item.ident)
    id_list = ['bank@123:MIT']
    id_list_obj = [Id(id_list[0])]
    form.set_learning_objectives(id_list_obj)
    self._bank.update_item(form)
    item_map = self._bank.get_item(self._item.ident).object_map
    self.assertEqual(
        item_map['learningObjectiveIds'],
        id_list
    )
def test_can_query_by_learning_objective(self):
    """Items are matchable by learning-objective id; other ids match nothing.

    Fixes: ``results.next()`` is Python 2-only -- use builtin ``next()``;
    the unused ``updated_item`` local is dropped.
    """
    form = self._bank.get_item_form_for_update(self._item.ident)
    id_list = ['bank@123:MIT']
    id_list_obj = [Id(id_list[0])]
    form.set_learning_objectives(id_list_obj)
    self._bank.update_item(form)
    querier = self._bank.get_item_query()
    querier.match_learning_objective_id(id_list_obj[0], match=True)
    results = self._bank.get_items_by_query(querier)
    self.assertEqual(
        results.available(),
        1
    )
    self.assertEqual(
        str(next(results).ident),
        str(self._item.ident)
    )
    # A learning objective nobody references must return zero items.
    querier = self._bank.get_item_query()
    querier.match_learning_objective_id(Id('foo@bar:baz'), match=True)
    results = self._bank.get_items_by_query(querier)
    self.assertEqual(
        results.available(),
        0
    )
def test_can_set_max_attempts_on_offered(self):
    """maxAttempts can be set at creation time and changed via update."""
    reviewable_type = Type(**{
        'authority': 'MOODLE.ORG',
        'namespace': 'assessment-offered-record-type',
        'identifier': 'review-options'
    })
    assessment2 = self.create_assessment_for_items(self._bank, [self._item])
    offering_form = self._bank.get_assessment_offered_form_for_create(assessment2.ident,
                                                                      [reviewable_type])
    offering_form.set_max_attempts(2)
    new_offered = self._bank.create_assessment_offered(offering_form)
    offered = self._bank.get_assessment_offered(new_offered.ident).object_map
    self.assertEqual(
        offered['maxAttempts'],
        2
    )
    # Now bump the limit through an update form and re-check.
    offering_form = self._bank.get_assessment_offered_form_for_update(new_offered.ident)
    offering_form.set_max_attempts(5)
    new_offered = self._bank.update_assessment_offered(offering_form)
    offered = self._bank.get_assessment_offered(new_offered.ident).object_map
    self.assertEqual(
        offered['maxAttempts'],
        5
    )
def test_exception_thrown_if_taker_attempts_more_than_max_allowed(self):
    """taking = # takens...but can submit multiple times to the same taken"""
    # Fix: the unused ``taken2`` local is dropped; only the creation side
    # effect matters for the first (allowed) attempt.
    reviewable_type = Type(**{
        'authority': 'MOODLE.ORG',
        'namespace': 'assessment-offered-record-type',
        'identifier': 'review-options'
    })
    assessment2 = self.create_assessment_for_items(self._bank, [self._item])
    offering_form = self._bank.get_assessment_offered_form_for_create(assessment2.ident,
                                                                      [reviewable_type])
    offering_form.set_max_attempts(1)
    new_offered = self._bank.create_assessment_offered(offering_form)
    # First taken consumes the single allowed attempt.
    form = self._bank.get_assessment_taken_form_for_create(new_offered.ident, [])
    self._bank.create_assessment_taken(form)
    # A second taken must be refused.
    form = self._bank.get_assessment_taken_form_for_create(new_offered.ident, [])
    self.assertRaises(errors.PermissionDenied, self._bank.create_assessment_taken, form)
class QuestionLOTests(DLKitTestCase):
    """Learning objectives set on an item propagate to its question map."""

    def add_item(self, bank):
        """Create an item with self._lo as its learning objective, plus a
        bare question and answer, and return the re-fetched item."""
        form = bank.get_item_form_for_create([])
        form.display_name = 'a test item!'
        form.description = 'for testing with'
        form.set_learning_objectives([Id(self._lo)])
        new_item = bank.create_item(form)
        question_form = bank.get_question_form_for_create(new_item.ident,
                                                          [])
        question_form.display_name = 'Question for ' + new_item.display_name.text
        question_form.description = ''
        # Creation side effects are what matter; return values are unused.
        new_question = bank.create_question(question_form)
        answer_form = bank.get_answer_form_for_create(new_item.ident,
                                                      [])
        answer_form.display_name = 'Answer for ' + new_item.display_name.text
        new_answer = bank.create_answer(answer_form)
        item = bank.get_item(new_item.ident)
        return item

    def setUp(self):
        super(QuestionLOTests, self).setUp()
        self._lo = 'foo%3A1%40MIT'  # URL-encoded learning objective id
        self._bank = self._get_test_bank()
        self._item = self.add_item(self._bank)

    def tearDown(self):
        """Delegate all cleanup to the base DLKit test case."""
        super(QuestionLOTests, self).tearDown()

    def test_question_object_map_has_item_learning_objective_ids(self):
        question = self._item.get_question()
        data = question.object_map
        self.assertEqual(
            data['learningObjectiveIds'],
            [self._lo]
        )
class QTITests(DLKitTestCase):
    """Import of a QTI multiple-choice XML fixture into item/question/answer
    records."""

    def setUp(self):
        super(QTITests, self).setUp()
        self._bank = self._get_test_bank()
        # don't use the 'b' flag when reading, because we need it as string, not bytes
        self.test_file_1 = open(ABS_PATH + '/files/qti_multi_choice.xml', 'r')
        # Choice id of the correct answer inside the QTI fixture file.
        self.right_answer_choice_id = 'id8d815c87-4f7e-4ac6-b4ea-77124057eb33'
        self.item_types = [Type('item-record-type%3Aqti%40ODL.MIT.EDU'),
                           Type('osid-object%3Amulti-language%40ODL.MIT.EDU')]
        self.question_types = [Type('question-record-type%3Aqti%40ODL.MIT.EDU'),
                               Type('osid-object%3Amulti-language%40ODL.MIT.EDU')]
        self.answer_types = [Type('answer-record-type%3Aqti%40ODL.MIT.EDU'),
                             Type('answer-record-type%3Amulti-language-answer-with-feedback%40ODL.MIT.EDU')]

    def tearDown(self):
        """Close the fixture file opened in setUp, then run base cleanup."""
        self.test_file_1.close()
        super(QTITests, self).tearDown()

    def test_can_import_qti_multi_choice_xml(self):
        # The same XML feeds both the question and the (correct) answer form.
        qti_xml = self.test_file_1.read()
        form = self._bank.get_item_form_for_create(self.item_types)
        form.display_name = 'qti multiple choice test'
        item = self._bank.create_item(form)
        q_form = self._bank.get_question_form_for_create(item.ident, self.question_types)
        q_form.load_from_qti_item(qti_xml)
        self._bank.create_question(q_form)
        a_form = self._bank.get_answer_form_for_create(item.ident, self.answer_types)
        a_form.load_from_qti_item(qti_xml, correct=True)
        self._bank.create_answer(a_form)
        item = self._bank.get_item(item.ident)
        item_map = item.object_map
        self.assertEqual(
            item_map['answers'][0]['choiceIds'][0],
            self.right_answer_choice_id
        )
        self.assertEqual(
            len(item_map['question']['choices']),
            4
        )
        choice_ids = [c['id'] for c in item_map['question']['choices']]
        self.assertIn(self.right_answer_choice_id, choice_ids)
class SearchItemPaginationTests(DLKitTestCase):
def create_item(self, name="my new item"):
    """Create and return a bank item whose display name is *name*."""
    form = self._bank.get_item_form_for_create([])
    form.description = 'Test item'
    form.display_name = str(name)
    return self._bank.create_item(form)
def setUp(self):
    super(SearchItemPaginationTests, self).setUp()
    self._bank = self._get_test_bank()
    # Items named '1' .. '19' (19 items; the range end is exclusive).
    for i in range(1, 20):
        self.create_item(name=str(i))
def tearDown(self):
    """Delegate all cleanup to the base DLKit test case."""
    super(SearchItemPaginationTests, self).tearDown()
def test_specifying_start_and_end_returns_right_objects(self):
    """limit_result_set(1, 5) pages the keyword matches for '1'.

    Fix: ``items_found.next()`` is Python 2-only; use builtin ``next()``.
    """
    querier = self._bank.get_item_query()
    querier.match_keyword('1', WORDIGNORECASE_STRING_MATCH_TYPE, True)
    searcher = self._bank.get_item_search()
    searcher.limit_result_set(1, 5)  # should return 5 results, numbered 1, 10, 11, 12, 13
    results = self._bank.get_items_by_search(querier, searcher)
    self.assertEqual(
        results.get_result_size(),
        5
    )
    items_found = results.get_items()
    self.assertEqual(
        items_found.available(),
        5
    )
    # Presumably results come back ordered by display name -- these are the
    # first five names matching keyword '1' in lexical order.
    expected_names = ['1', '10', '11', '12', '13']
    for expected_name in expected_names:
        self.assertEqual(
            next(items_found).display_name.text,
            expected_name
        )
def test_null_start_and_end_throws_exception(self):
    """limit_result_set requires both endpoints to be non-null."""
    searcher = self._bank.get_item_search()
    for start, end in ((1, None), (None, 5), (None, None)):
        self.assertRaises(errors.NullArgument, searcher.limit_result_set, start, end)
def test_end_less_than_start_throws_exception(self):
    # An end index below the start index is rejected outright.
    searcher = self._bank.get_item_search()
    self.assertRaises(errors.InvalidArgument, searcher.limit_result_set, 5, 1)
def | |
from __future__ import absolute_import
from __future__ import print_function
__author__ = 'marafi'
import os
# from numba import jit
def ExtractGroundMotion(Folder_Location):
    """Scan *Folder_Location* for ground-motion files and load them.

    Recognized file-name patterns (the ground-motion id sits between the
    parentheses):
        SortedEQFile_(<id>).*    acceleration history, whitespace-separated
        DtFile_(<id>).*          time step (first line)
        NumPointsFile_(<id>).*   number of points (first line)

    Returns ``(GMids, GMFiles, Dt, NumPoints, GMData)`` where the last
    four are dicts keyed by ground-motion id.

    Fixes over the previous version: the helper functions are defined once
    instead of on every loop iteration, files are closed via ``with``, and
    the builtin ``id`` is no longer shadowed.
    """
    import numpy as np

    def get_gm_id(filename):
        # The id is the text between the first '(' and the first ')'.
        return filename[filename.find('(') + 1:filename.find(')')]

    def extract_first_line(filename):
        with open(Folder_Location + '//' + filename, 'r') as handle:
            return handle.readline()

    def extract_table(filename):
        with open(Folder_Location + '//' + filename, 'r') as handle:
            lines = handle.readlines()
        return np.array([float(point) for line in lines for point in line.split()])

    GMids = []
    GMFiles = {}
    GMData = {}
    Dt = {}
    NumPoints = {}
    files = [f for f in os.listdir(Folder_Location)
             if os.path.isfile(os.path.join(Folder_Location, f))]
    for filename in files:
        if filename.startswith('SortedEQFile_'):
            gm_id = get_gm_id(filename)
            GMids.append(gm_id)
            GMFiles[gm_id] = filename
            GMData[gm_id] = extract_table(filename)
        if filename.startswith('DtFile_('):
            Dt[get_gm_id(filename)] = float(extract_first_line(filename))
        if filename.startswith('NumPointsFile_('):
            NumPoints[get_gm_id(filename)] = int(extract_first_line(filename))
    return GMids, GMFiles, Dt, NumPoints, GMData
def ExtractGroundMotionIds(Folder_Location):
    """Return the ids of all ground motions found in *Folder_Location*.

    Ids are read from ``NumPointsFile_(<id>)...`` file names.  Fix: the
    helper is defined once (previously it was redefined on every loop
    iteration) and the scan is a single comprehension.
    """
    def get_gm_id(filename):
        # The id is the text between the first '(' and the first ')'.
        return filename[filename.find('(') + 1:filename.find(')')]

    return [get_gm_id(f) for f in os.listdir(Folder_Location)
            if os.path.isfile(os.path.join(Folder_Location, f))
            and f.startswith('NumPointsFile_(')]
def ExtractOneGroundMotion(Folder_Location, GMid):
    """Load one ground motion's history, time step and point count.

    *Folder_Location* must include the trailing path separator, since file
    names are appended by plain string concatenation.  Each of the three
    files is tried with a '.dat' extension first, then '.txt'.

    Fix: the bare ``except:`` clauses are narrowed to ``OSError`` so that
    only a missing/unreadable '.dat' file triggers the '.txt' fallback;
    other errors (e.g. malformed numbers) now propagate.
    """
    import numpy as np

    def load(prefix):
        # Try '.dat' first, fall back to '.txt' when the file is absent.
        try:
            return np.loadtxt(Folder_Location + (prefix % GMid) + '.dat')
        except OSError:
            return np.loadtxt(Folder_Location + (prefix % GMid) + '.txt')

    GMData = load('SortedEQFile_(%s)')
    Dt = load('DtFile_(%s)')
    NumPoints = load('NumPointsFile_(%s)')
    return Dt, NumPoints, GMData
def ExtractGroundMotion2(Folder_Location):
    """Like ExtractGroundMotion, but returns a single result object that
    additionally carries magnitude (M) and distance (R) metadata when a
    parameter file can be read via GetGMParameters2."""
    GMids = []
    GMFiles = {}
    GMData = {}
    Dt = {}
    NumPoints = {}
    # for subdir, dirs, files in os.walk(Folder_Location, True):
    files = [f for f in os.listdir(Folder_Location) if os.path.isfile(os.path.join(Folder_Location, f))]
    for file in files:
        # NOTE(review): these helpers are redefined on every iteration and
        # the files they open are never explicitly closed.
        def GetGMid(file):
            # Id is the text between the first '(' and the first ')'.
            index = file.find('(')
            indexend = file.find(')')
            return file[index+1:indexend]
        def ExtractFirstLine(file):
            line = open(Folder_Location+'//'+file,'r').readline()
            return line
        def ExtractTable(file):
            # Flatten all whitespace-separated numbers into one array.
            lines = open(Folder_Location+'//'+file,'r').readlines()
            GMData = []
            for line in lines:
                for point in line.split():
                    GMData.append(float(point))
            import numpy as np
            return np.array(GMData)
        if file.startswith('SortedEQFile_'):
            id = GetGMid(file)
            GMids.append(id)
            GMFiles[id]=file
            GMData[id]=ExtractTable(file)
        if file.startswith('DtFile_('):
            id = GetGMid(file)
            Dt[id]=float(ExtractFirstLine(file))
        if file.startswith('NumPointsFile_('):
            id = GetGMid(file)
            NumPoints[id]=int(ExtractFirstLine(file))
    try:
        #Get Mad and R from GM Parameter File
        Output = GetGMParameters2(Folder_Location)
    except:
        # Bare except is load-bearing here: it also covers GetGMParameters2
        # being undefined or the parameter file missing; fall back to
        # empty metadata.
        class output():
            pass
        Output = output()
        Output.M = None
        Output.R = None
    class Ouput:  # sic -- misspelled class name kept; it never escapes this scope
        def __init__(self):
            self.GMids = GMids
            self.GMFiles = GMFiles
            self.Dt = Dt
            self.NumPoints = NumPoints
            self.GMData = GMData
            self.M = Output.M
            self.R = Output.R
    O = Ouput()
    return O
# @jit
# def FindSa(GMData, Dt, T, Zeta):
# import numpy as np
#
# #Using Newmarks: Linear System
# #Using Linear Acceleration Method
# #From Chopra, Page 177
#
# #Mass
# m = 1
# wn = 2*np.pi/T
#
# #Stiffness
# k = wn**2.0*m
#
# #Damping 2%
# c=Zeta*2*m*wn
#
# gamma = 0.5
# beta = 1./4.
#
# u = np.zeros(len(GMData))
# v = np.zeros(len(GMData))
# a = np.zeros(len(GMData))
# p = np.array(GMData)
#
# a[0]=p[0]/m
#
# a1 = 1/beta/Dt**2*m+gamma/beta/Dt*c
# a2 = 1/beta/Dt*m+(gamma/beta-1)*c
# a3 = (1./2./beta-1.)*m+Dt*(gamma/2./beta-1.)*c
#
# khat = k + a1
#
# for i in range(1,len(GMData)):
# phat = p[i] + a1*u[i-1] + a2*v[i-1] + a3*a[i-1]
# u[i] = phat/khat
# v[i] = gamma/beta/Dt*(u[i]-u[i-1]) + (1-gamma/beta)*v[i-1]+Dt*(1.-gamma/2./beta)*a[i-1]
# a[i] = 1./beta/Dt**2.0*(u[i]-u[i-1])-1/beta/Dt*v[i-1]-(1/beta/2-1)*a[i-1]
#
# a = a-p
#
# return max(abs(max(u)),abs(min(u))),max(abs(max(v)),abs(min(v))),max(abs(max(a)),abs(min(a)))
import numba
import numpy as np
# nopython=True means an error will be raised
# if fast compilation is not possible.
@numba.jit(nopython=True)
def FindSa(GMData, Dt, T, Zeta):
    """Peak displacement, velocity and acceleration response of an elastic
    SDOF oscillator (period T, damping ratio Zeta) under the base
    acceleration history GMData sampled at time step Dt.

    Newmark time stepping, Chopra p. 177.  NOTE(review): gamma=1/2 with
    beta=1/4 is the *average*-acceleration variant, despite the 'Linear
    Acceleration' note below -- confirm which was intended.
    """
    # Using Newmarks: Linear System
    # Using Linear Acceleration Method
    # From Chopra, Page 177
    # Unit mass; stiffness and damping follow from the target period.
    m = 1
    wn = 2*np.pi/T
    #Stiffness
    k = wn**2.0*m
    # Viscous damping coefficient for the requested damping ratio Zeta
    c=Zeta*2*m*wn
    gamma = 0.5
    beta = 1./4.
    u = np.zeros(len(GMData))
    v = np.zeros(len(GMData))
    a = np.zeros(len(GMData))
    p = GMData #np.array(GMData)
    a[0]=p[0]/m
    # Newmark integration constants (constant over the whole record).
    a1 = 1/beta/Dt**2*m+gamma/beta/Dt*c
    a2 = 1/beta/Dt*m+(gamma/beta-1)*c
    a3 = (1./2./beta-1.)*m+Dt*(gamma/2./beta-1.)*c
    khat = k + a1
    for i in range(1,len(GMData)):
        phat = p[i] + a1*u[i-1] + a2*v[i-1] + a3*a[i-1]
        u[i] = phat/khat
        v[i] = gamma/beta/Dt*(u[i]-u[i-1]) + (1-gamma/beta)*v[i-1]+Dt*(1.-gamma/2./beta)*a[i-1]
        a[i] = 1./beta/Dt**2.0*(u[i]-u[i-1])-1/beta/Dt*v[i-1]-(1/beta/2-1)*a[i-1]
    # Subtract the input series to obtain the returned acceleration measure.
    a = a-p
    a = np.abs(a)
    u = np.abs(u)
    v = np.abs(v)
    return np.max(u), np.max(v), np.max(a)
# nopython=True means an error will be raised
# if fast compilation is not possible.
@numba.jit
def FindSaQuick(GMData, Dt, T, Zeta):
    """Peak absolute acceleration only (cf. FindSa, which also returns the
    displacement and velocity peaks).

    NOTE(review): decorated with plain ``@numba.jit`` (no nopython=True),
    so it may silently fall back to object mode -- confirm intent.
    """
    # Using Newmarks: Linear System
    # Using Linear Acceleration Method
    # From Chopra, Page 177
    # Unit mass; stiffness and damping follow from the target period.
    m = 1
    wn = 2*np.pi/T
    #Stiffness
    k = wn**2.0*m
    # Viscous damping coefficient for the requested damping ratio Zeta
    c=Zeta*2*m*wn
    gamma = 0.5
    beta = 1./4.
    u = np.zeros(len(GMData))
    v = np.zeros(len(GMData))
    a = np.zeros(len(GMData))
    p = GMData #np.array(GMData)
    a[0]=p[0]/m
    # Newmark integration constants (constant over the whole record).
    a1 = 1/beta/Dt**2*m+gamma/beta/Dt*c
    a2 = 1/beta/Dt*m+(gamma/beta-1)*c
    a3 = (1./2./beta-1.)*m+Dt*(gamma/2./beta-1.)*c
    khat = k + a1
    for i in range(1,len(GMData)):
        phat = p[i] + a1*u[i-1] + a2*v[i-1] + a3*a[i-1]
        u[i] = phat/khat
        v[i] = gamma/beta/Dt*(u[i]-u[i-1]) + (1-gamma/beta)*v[i-1]+Dt*(1.-gamma/2./beta)*a[i-1]
        a[i] = 1./beta/Dt**2.0*(u[i]-u[i-1])-1/beta/Dt*v[i-1]-(1/beta/2-1)*a[i-1]
    # Subtract the input series to obtain the returned acceleration measure.
    a = a-p
    a = np.abs(a)
    u = np.abs(u)
    v = np.abs(v)
    return np.max(a)
# @numba.jit
def FindSaHistory(GMData, Dt, T, Zeta):
    """Acceleration response *history* of an elastic SDOF oscillator
    (period T, damping ratio Zeta) under base acceleration GMData sampled
    at time step Dt.  Newmark time stepping with gamma=1/2, beta=1/4
    (Chopra, p. 177); the input series is subtracted before returning,
    matching the sibling FindSa functions.
    """
    gamma = 0.5
    beta = 1./4.
    mass = 1
    wn = 2*np.pi/T                 # natural circular frequency
    k = wn**2.0*mass               # stiffness for unit mass
    c = Zeta*2*mass*wn             # viscous damping coefficient
    npts = len(GMData)
    u = np.zeros(npts)
    v = np.zeros(npts)
    a = np.zeros(npts)
    p = GMData #np.array(GMData)
    a[0] = p[0]/mass
    # Newmark integration constants, fixed over the record.
    a1 = 1/beta/Dt**2*mass+gamma/beta/Dt*c
    a2 = 1/beta/Dt*mass+(gamma/beta-1)*c
    a3 = (1./2./beta-1.)*mass+Dt*(gamma/2./beta-1.)*c
    khat = k + a1
    for step in range(1, npts):
        phat = p[step] + a1*u[step-1] + a2*v[step-1] + a3*a[step-1]
        u[step] = phat/khat
        v[step] = gamma/beta/Dt*(u[step]-u[step-1]) + (1-gamma/beta)*v[step-1]+Dt*(1.-gamma/2./beta)*a[step-1]
        a[step] = 1./beta/Dt**2.0*(u[step]-u[step-1])-1/beta/Dt*v[step-1]-(1/beta/2-1)*a[step-1]
    a = a - p
    return np.array(a)
def ComputeResponseSpectrum(GMData, Dt, Periods, Zeta=0.05):
    """Acceleration response spectrum: the peak |acceleration| of an
    elastic SDOF oscillator at each period in *Periods* (damping ratio
    *Zeta*), returned as a list aligned with *Periods*."""
    # FindSa returns (Sd, Sv, Sa); only the acceleration ordinate is kept.
    return [FindSa(GMData, Dt, period, Zeta)[2] for period in Periods]
def FindSaOutput(GMData, Dt, T, Zeta):
    """Run an elastic SDOF oscillator (Newmark, gamma=1/2, beta=1/4;
    Chopra p. 177) and return an object carrying spectral ordinates:

    Sd / Sv / Sa  -- peak |displacement| / |velocity| / |acceleration|
    TimeAtMaxSa   -- fraction through the record where |acceleration| peaks
    """
    import numpy as np
    gamma = 0.5
    beta = 1./4.
    # Unit-mass oscillator properties derived from period and damping.
    m = 1
    wn = 2*np.pi/T
    k = wn**2.0*m
    c = Zeta*2*m*wn
    npts = len(GMData)
    u = np.zeros(npts)
    v = np.zeros(npts)
    a = np.zeros(npts)
    p = np.array(GMData)
    a[0] = p[0]/m
    # Newmark integration constants, fixed over the record.
    a1 = 1/beta/Dt**2*m+gamma/beta/Dt*c
    a2 = 1/beta/Dt*m+(gamma/beta-1)*c
    a3 = (1./2./beta-1.)*m+Dt*(gamma/2./beta-1.)*c
    khat = k + a1
    for step in range(1, npts):
        phat = p[step] + a1*u[step-1] + a2*v[step-1] + a3*a[step-1]
        u[step] = phat/khat
        v[step] = gamma/beta/Dt*(u[step]-u[step-1]) + (1-gamma/beta)*v[step-1]+Dt*(1.-gamma/2./beta)*a[step-1]
        a[step] = 1./beta/Dt**2.0*(u[step]-u[step-1])-1/beta/Dt*v[step-1]-(1/beta/2-1)*a[step-1]
    a = a - p

    class Output():
        """Spectral ordinates for this run."""
        def __init__(self):
            self.Sd = max(abs(u))
            self.Sv = max(abs(v))
            self.Sa = max(abs(a))
            # Index of the first occurrence of the peak, as a record fraction.
            self.TimeAtMaxSa = float(list(abs(a)).index(self.Sa))/len(a)
    return Output()
def RunElasticSDOF(GMData, Dt, T, Zeta):
    """Time-history analysis of an elastic SDOF oscillator (period T,
    damping ratio Zeta, unit mass) under base acceleration GMData sampled
    at time step Dt.  Returns an object with the full response histories
    and their peak absolute values.  (The ``Accelecation`` attribute name
    typo is kept for caller compatibility.)
    """
    import numpy as np
    #Using Newmarks: Linear System
    #Using Linear Acceleration Method
    #From Chopra, Page 177
    # Unit mass; stiffness and damping follow from T and Zeta.
    m = 1
    wn = 2*np.pi/T
    #Stiffness
    k = wn**2.0*m
    # Viscous damping coefficient for damping ratio Zeta
    c=Zeta*2*m*wn
    gamma = 0.5
    beta = 1./4.
    u = np.zeros(len(GMData))
    v = np.zeros(len(GMData))
    a = np.zeros(len(GMData))
    p = np.array(GMData)
    a[0]=p[0]/m
    # Newmark integration constants, fixed over the record.
    a1 = 1/beta/Dt**2*m+gamma/beta/Dt*c
    a2 = 1/beta/Dt*m+(gamma/beta-1)*c
    a3 = (1./2./beta-1.)*m+Dt*(gamma/2./beta-1.)*c
    khat = k + a1
    for i in range(1,len(GMData)):
        phat = p[i] + a1*u[i-1] + a2*v[i-1] + a3*a[i-1]
        u[i] = phat/khat
        v[i] = gamma/beta/Dt*(u[i]-u[i-1]) + (1-gamma/beta)*v[i-1]+Dt*(1.-gamma/2./beta)*a[i-1]
        a[i] = 1./beta/Dt**2.0*(u[i]-u[i-1])-1/beta/Dt*v[i-1]-(1/beta/2-1)*a[i-1]
    # NOTE(review): sibling functions use ``a - p``; with m == 1 this is identical.
    a = a-p/m
    class Output:
        def __init__(self):
            self.Displacement = u
            self.Velocity = v
            self.Accelecation = a  # sic -- spelling kept for existing callers
            self.GMData = GMData
            self.TimeStep = Dt
            self.Period = T
            self.Zeta = Zeta
            self.MaxU = max(abs(u))
            self.MaxV = max(abs(v))
            self.MaxA = max(abs(a))
    O = Output()
    return O
def RunElastoPlasticSDOF(GMData, Dt, T, Dy, Zeta=0.05):
    """Time-history analysis of an elastic-perfectly-plastic SDOF
    oscillator with yield displacement Dy, using Newmark time stepping
    (gamma=1/2, beta=1/4) with Newton-Raphson equilibrium iteration.
    Returns an object with the response histories, the resisting-force
    history and peak absolute values.
    """
    def getFs(k, delta_y, u, fOLD, uOLD):
        # Resisting-force update: elastic increment from the previous state,
        # capped at +/- k*delta_y.  Returns (force, tangent stiffness);
        # the tangent stiffness drops to 0 once the element has yielded.
        Fmax = k*delta_y
        fnew = fOLD + k*(u-uOLD)
        if abs(fnew) > Fmax:
            return Fmax*fnew/abs(fnew), 0.0
        else:
            return fnew, k
    import numpy as np
    #Mass
    g = 386.4  # NOTE(review): unused in this function -- kept as-is
    m = 1
    wn = 2*np.pi/T
    #Stiffness
    k = wn**2.0*m
    # Viscous damping coefficient for damping ratio Zeta
    c=Zeta*2*m*wn
    #Using Average Acceleration Method
    gamma = 0.5
    beta = 1./4.
    #How many iterations in cycle
    maxj = 1000
    #Arrays
    u = np.zeros(len(GMData))
    v = np.zeros(len(GMData))
    a = np.zeros(len(GMData))
    p = np.array(GMData)
    phat = np.zeros(len(GMData))
    fs = np.zeros(len(GMData))
    kt = np.zeros(len(GMData))
    kthat = np.zeros(len(GMData))
    Rhat = np.zeros((len(GMData),maxj))
    #Initial Calculations
    a[0]=(p[0])/m
    fs[0]=0.0
    kt[0]=k
    # Newmark integration constants, fixed over the record.
    a1 = 1/beta/Dt**2*m+gamma/beta/Dt*c
    a2 = 1/beta/Dt*m+(gamma/beta-1)*c
    a3 = (1./2./beta-1.)*m+Dt*(gamma/2./beta-1.)*c
    #Convergence
    tol = 1*10**-5
    for i in range(1,len(GMData)):
        # Start the step from the previous converged state.
        u[i] = u[i-1]
        fs[i]=fs[i-1]
        kt[i]=kt[i-1]
        phat[i] = p[i] + a1*u[i-1]+ a2*v[i-1] + a3*a[i-1]
        # Newton-Raphson on the equilibrium residual until below tol
        # (or maxj iterations are exhausted).
        for j in range(0,maxj):
            Rhat[i,j] = phat[i] - fs[i] - a1*u[i]
            if abs(Rhat[i,j]) < tol:
                break
            kthat[i] = kt[i] + a1
            deltau = Rhat[i,j]/kthat[i]
            u[i] = u[i] + deltau
            fs[i], kt[i] = getFs(k, Dy, u[i], fs[i-1], u[i-1])
        v[i]=gamma/beta/Dt*(u[i]-u[i-1])+(1.0-gamma/beta)*v[i-1]+Dt*(1-gamma/2.0/beta)*a[i-1]
        a[i]=1/beta/(Dt**2.0)*(u[i]-u[i-1])-1.0/(beta*Dt)*v[i-1]-(1.0/2.0/beta-1.0)*a[i-1]
    ########################## Plot Push Over ##########################
    t = np.arange(0,len(GMData),1.0)*Dt
    # Subtract the input series, matching the sibling functions.
    a = a-p
    class Output:
        def __init__(self):
            self.Displacement = u
            self.Velocity = v
            self.Accelecation = a  # sic -- spelling kept for existing callers
            self.Reaction = fs
            self.GMData = GMData
            self.TimeStep = Dt
            self.time = t
            self.Period = T
            self.Zeta = Zeta
            self.MaxU = max(abs(u))
            self.MaxV = max(abs(v))
            self.MaxA = max(abs(a))
    O = Output()
    return O
def FindSDI_NumericalIntegration(GMData, Dt, T, Dy):
import numpy as np
# def | |
<gh_stars>1-10
# coding: utf-8
import typing
import weakref
from math import floor
import pyglet
import time
from cocos.collision_model import AARectShape
from pyglet.window import mouse
import cocos
from cocos import collision_model
from cocos import euclid
from cocos.audio.pygame import mixer
from cocos.layer import ScrollableLayer
from synergine2.config import Config
from synergine2.exceptions import SynergineException
from synergine2.log import get_logger
from synergine2.terminals import Terminal
from synergine2.terminals import TerminalPackage
from synergine2_cocos2d.actor import Actor
from synergine2_cocos2d.const import SELECTION_COLOR_RGB
from synergine2_cocos2d.const import DEFAULT_SELECTION_COLOR_RGB
from synergine2_cocos2d.exception import InteractionNotFound
from synergine2_cocos2d.exception import OuterWorldPosition
from synergine2_cocos2d.gl import draw_rectangle
from synergine2_cocos2d.gl import rectangle_positions_type
from synergine2_cocos2d.interaction import InteractionManager
from synergine2_cocos2d.layer import LayerManager
from synergine2_cocos2d.middleware import MapMiddleware
from synergine2_cocos2d.middleware import TMXMiddleware
from synergine2_cocos2d.user_action import UserAction
from synergine2_cocos2d.util import ensure_dir_exist
from synergine2_xyz.physics import Physics
from synergine2_xyz.xyz import XYZSubjectMixin
class GridManager(object):
    """Converts between pixel coordinates and grid-cell coordinates for a
    world of ``world_width`` x ``world_height`` cells, each
    ``cell_width`` x ``cell_height`` pixels."""
    def __init__(
        self,
        cell_width: int,
        cell_height: int,
        world_width: int,
        world_height: int,
    ) -> None:
        self.cell_width = cell_width
        self.cell_height = cell_height
        self.world_width = world_width
        self.world_height = world_height

    def get_grid_position(self, pixel_position: typing.Tuple[int, int]) -> typing.Tuple[int, int]:
        """Map a pixel position to its (cell_x, cell_y) grid cell.

        Raises OuterWorldPosition when the cell lies outside the world.
        """
        pixel_x, pixel_y = pixel_position
        cell_x = int(floor(pixel_x / self.cell_width))
        cell_y = int(floor(pixel_y / self.cell_height))
        # NOTE(review): the upper bound uses '>' not '>=', so cell_x ==
        # world_width is accepted -- confirm whether that is intended.
        if cell_x > self.world_width or cell_y > self.world_height or cell_x < 0 or cell_y < 0:
            raise OuterWorldPosition('Position "{}" is outer world ({}x{})'.format(
                (cell_x, cell_y),
                self.world_width,
                self.world_height,
            ))
        return cell_x, cell_y

    def get_world_position_of_grid_position(self, grid_position: typing.Tuple[int, int]) -> typing.Tuple[int, int]:
        """Pixel coordinates of the center of the given grid cell."""
        return grid_position[0] * self.cell_width + (self.cell_width // 2),\
               grid_position[1] * self.cell_height + (self.cell_height // 2)

    def get_rectangle_positions(
        self,
        grid_position: typing.Tuple[int, int],
    ) -> rectangle_positions_type:
        """
        A<---D
        |    |
        B--->C
        :param grid_position: grid position to exploit
        :return: grid pixel corner positions, in A, D, C, B order
        """
        grid_x, grid_y = grid_position
        a = grid_x * self.cell_width, grid_y * self.cell_height + self.cell_height
        b = grid_x * self.cell_width, grid_y * self.cell_height
        c = grid_x * self.cell_width + self.cell_width, grid_y * self.cell_height
        d = grid_x * self.cell_width + self.cell_width, grid_y * self.cell_height + self.cell_height
        return a, d, c, b
class MinMaxRect(cocos.cocosnode.CocosNode):
    """Screen-space elastic rectangle drawn during drag-selection."""
    def __init__(self, layer_manager: LayerManager):
        super(MinMaxRect, self).__init__()
        self.layer_manager = layer_manager
        self.color3 = (20, 20, 20)     # outline color (RGB)
        self.color3f = (0, 0, 0, 0.2)  # translucent fill (RGBA)
        self.vertexes = [(0.0, 0.0), (0.0, 0.0), (0.0, 0.0), (0.0, 0.0)]
        self.visible = False

    def adjust_from_w_minmax(self, wminx, wmaxx, wminy, wmaxy):
        """Set the vertexes from world-space min/max coordinates."""
        # assumes world-to-screen preserves coordinate ordering
        sminx, sminy = self.layer_manager.scrolling_manager.world_to_screen(wminx, wminy)
        smaxx, smaxy = self.layer_manager.scrolling_manager.world_to_screen(wmaxx, wmaxy)
        self.vertexes = [(sminx, sminy), (sminx, smaxy), (smaxx, smaxy), (smaxx, sminy)]

    def draw(self):
        # Nothing to draw unless a drag-selection is in progress.
        if not self.visible:
            return
        draw_rectangle(
            self.vertexes,
            self.color3,
            self.color3f,
        )

    def set_vertexes_from_minmax(self, minx, maxx, miny, maxy):
        """Set the vertexes from screen-space min/max coordinates."""
        self.vertexes = [(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny)]
class FinishedCallback(Exception):
    """Raised by Callback.execute to signal that the callback's lifetime
    is over and it should be removed from the scheduler list."""
    pass
class Callback(object):
    """A scheduled function that runs for *duration* seconds, optionally
    after an initial *delay*, with optional start/end notifications.

    ``execute()`` is expected to be called repeatedly (once per frame);
    it raises FinishedCallback when done so the caller can discard it.
    """
    def __init__(
        self,
        func: typing.Callable[[], None],
        duration: float,
        delay: float=None,
        end_callback: typing.Callable[[], None]=None,
        start_callback: typing.Callable[[], None]=None,
    ) -> None:
        self.func = func
        self.duration = duration
        # Started timestamp; doubles as the delay timer while require_delay is set
        self.started = None  # type: float
        self.require_delay = False
        self.delay = delay
        if delay is not None:
            self.require_delay = True
        self.end_callback = end_callback
        self.start_callback = start_callback

    def execute(self) -> None:
        # Very first call: fire the start notification.
        if self.started is None and self.start_callback:
            self.start_callback()
        # Delay phase: arm the timer on the first call, then wait it out.
        if self.require_delay and not self.started:
            self.started = time.time()
            return
        elif self.require_delay and time.time() - self.started < self.delay:
            return
        elif self.require_delay:
            # Delay elapsed: reset so the duration timer starts fresh below.
            self.started = None
            self.require_delay = False
        if self.started is None:
            self.started = time.time()
        if time.time() - self.started <= self.duration:
            self.func()
        elif not self.duration:
            # Zero/None duration: run exactly once, then finish.
            self.func()
            if self.end_callback is not None:
                self.end_callback()
            raise FinishedCallback()
        else:
            # Duration exceeded: signal completion without running func again.
            if self.end_callback is not None:
                self.end_callback()
            raise FinishedCallback()
class EditLayer(cocos.layer.Layer):
is_event_handler = True
def __init__(
    self,
    config: Config,
    layer_manager: LayerManager,
    grid_manager: GridManager,
    worldview,
    bindings=None,
    fastness=None,
    autoscroll_border=10,
    autoscroll_fastness=None,
    wheel_multiplier=None,
    zoom_min=None,
    zoom_max=None,
    zoom_fastness=None,
    mod_modify_selection=None,
    mod_restricted_mov=None,
):
    """Interactive edit layer: selection, drag, scroll and zoom state.

    ``bindings`` maps input keys to action names; a press counter per
    action is kept in ``self.buttons``/``self.modifiers``.
    NOTE(review): the loop below iterates ``bindings`` directly, so the
    default ``bindings=None`` would raise TypeError -- confirm callers
    always pass a dict.
    """
    # TODO: Clean init params
    super().__init__()
    self.config = config
    self.logger = get_logger('EditLayer', config)
    self.layer_manager = layer_manager
    self.grid_manager = grid_manager
    self.bindings = bindings
    # One pressed-state slot per bound action name.
    buttons = {}
    modifiers = {}
    for k in bindings:
        buttons[bindings[k]] = 0
        modifiers[bindings[k]] = 0
    self.buttons = buttons
    self.modifiers = modifiers
    self.fastness = fastness
    self.autoscroll_border = autoscroll_border
    self.autoscroll_fastness = autoscroll_fastness
    self.wheel_multiplier = wheel_multiplier
    self.zoom_min = zoom_min
    self.zoom_max = zoom_max
    self.zoom_fastness = zoom_fastness
    self.mod_modify_selection = mod_modify_selection
    self.mod_restricted_mov = mod_restricted_mov
    # Weak refs avoid reference cycles with the scroller / worldview.
    self.weak_scroller = weakref.ref(self.layer_manager.scrolling_manager)
    self.weak_worldview = weakref.ref(worldview)
    self.wwidth = worldview.width
    self.wheight = worldview.height
    # Interaction-state flags, reset as gestures start and end.
    self.autoscrolling = False
    self.drag_selecting = False
    self.drag_moving = False
    self.restricted_mov = False
    self.wheel = 0
    self.dragging = False
    self.keyscrolling = False
    self.keyscrolling_descriptor = (0, 0)
    self.wdrag_start_point = (0, 0)
    self.elastic_box = None  # type: MinMaxRect
    self.elastic_box_wminmax = 0, 0, 0, 0
    self.selection = {}  # type: typing.Dict[Actor, AARectShape]
    self.screen_mouse = (0, 0)
    self.world_mouse = (0, 0)
    self.sleft = None
    self.sright = None
    self.sbottom = None
    self.s_top = None
    self.user_action_pending = None  # type: UserAction
    # opers that change cshape must ensure it goes to False,
    # selection opers must ensure it goes to True
    self.selection_in_collman = True
    # TODO: Hardcoded here, should be obtained from level properties or calc
    # from available actors or current actors in worldview
    gsize = 32 * 1.25
    self.collision_manager = collision_model.CollisionManagerGrid(
        -gsize,
        self.wwidth + gsize,
        -gsize,
        self.wheight + gsize,
        gsize,
        gsize,
    )
    self.schedule(self.update)
    self.selectable_actors = []
    self.callbacks = []  # type: typing.List[Callback]
def append_callback(
    self,
    callback: typing.Callable[[], None],
    duration: float,
    delay: float=None,
    start_callback: typing.Callable[[], None]=None,
    end_callback: typing.Callable[[], None]=None,
) -> None:
    """Schedule *callback* to run for *duration* seconds, optionally
    after *delay*, with optional start/end notifications."""
    scheduled = Callback(
        callback,
        duration,
        delay=delay,
        start_callback=start_callback,
        end_callback=end_callback,
    )
    self.callbacks.append(scheduled)
def set_selectable(self, actor: Actor) -> None:
    """Register *actor* as selectable and track it in the collision grid."""
    self.selectable_actors.append(actor)
    self.collision_manager.add(actor)
def unset_selectable(self, actor: Actor) -> None:
    """Stop tracking *actor* for selection and collisions."""
    self.selectable_actors.remove(actor)
    self.collision_manager.remove_tricky(actor)
def draw(self, *args, **kwargs) -> None:
    """Per-frame hook: refresh collision shapes, draw the selection and
    pending-interaction overlays, then run scheduled callbacks."""
    self.draw_update_cshapes()
    self.draw_selection()
    self.draw_interactions()
    self.execute_callbacks()
def execute_callbacks(self) -> None:
    """Run every scheduled callback; drop those signalling completion."""
    # Iterate a snapshot so finished callbacks can be removed safely.
    for scheduled in list(self.callbacks):
        try:
            scheduled.execute()
        except FinishedCallback:
            self.callbacks.remove(scheduled)
def draw_update_cshapes(self) -> None:
    """Re-register actors whose collision shape is stale."""
    for actor in self.selectable_actors:
        if actor.need_update_cshape:
            # Remove and re-add so the collision grid re-buckets the actor.
            if self.collision_manager.knows(actor):
                self.collision_manager.remove_tricky(actor)
            actor.update_cshape()
            self.collision_manager.add(actor)
def draw_selection(self) -> None:
    """Outline the grid cell of every selected actor, using the actor's
    own selection color when its subject defines one."""
    for actor, cshape in self.selection.items():
        grid_position = self.grid_manager.get_grid_position(actor.position)
        rect_positions = self.grid_manager.get_rectangle_positions(grid_position)
        draw_rectangle(
            self.layer_manager.scrolling_manager.world_to_screen_positions(rect_positions),
            # Per-subject color first, then the configured default.
            actor.subject.properties.get(
                SELECTION_COLOR_RGB,
                self.config.get(DEFAULT_SELECTION_COLOR_RGB, (0, 81, 211))
            ),
        )
def draw_interactions(self) -> None:
    """Let the interaction matching the pending user action draw its
    preview; silently skip when no interaction handles that action."""
    if self.user_action_pending:
        try:
            interaction = self.layer_manager.interaction_manager.get_for_user_action(self.user_action_pending)
            interaction.draw_pending()
        except InteractionNotFound:
            pass
def on_enter(self):
    """When the layer enters the scene, lazily create the shared elastic
    selection box and attach it above the world content."""
    super().on_enter()
    scene = self.get_ancestor(cocos.scene.Scene)
    if self.elastic_box is None:
        # NOTE(review): source indentation was lost; scene.add is placed
        # inside the guard so the box is attached exactly once — confirm.
        self.elastic_box = MinMaxRect(self.layer_manager)
        scene.add(self.elastic_box, z=10)
def update(self, dt):
    """Per-frame state update: keyboard scrolling, drag-moving the current
    selection onto grid positions, and zooming around the mouse.

    :param dt: elapsed time (seconds) since the previous frame
    """
    # Net keyboard direction on each axis (-1, 0 or +1); dz is net zoom.
    mx = self.buttons['right'] - self.buttons['left']
    my = self.buttons['up'] - self.buttons['down']
    dz = self.buttons['zoomin'] - self.buttons['zoomout']
    # scroll
    if self.autoscrolling:
        self.update_autoscroll(dt)
    else:
        # care for keyscrolling: only active when nothing is selected and a
        # direction key is held.
        new_keyscrolling = ((len(self.selection) == 0) and
                            (mx != 0 or my != 0))
        new_keyscrolling_descriptor = (mx, my)
        if ((new_keyscrolling != self.keyscrolling) or
                (new_keyscrolling_descriptor != self.keyscrolling_descriptor)):
            self.keyscrolling = new_keyscrolling
            self.keyscrolling_descriptor = new_keyscrolling_descriptor
            fastness = 1.0
            if mx != 0 and my != 0:
                # Diagonal movement: normalize speed.
                fastness *= 0.707106  # 1/sqrt(2)
            self.autoscrolling_sdelta = (0.5 * fastness * mx, 0.5 * fastness * my)
        if self.keyscrolling:
            self.update_autoscroll(dt)
    # selection move
    if self.drag_moving:
        # update positions relative to where the drag started
        wx, wy = self.world_mouse
        dx = wx - self.wdrag_start_point[0]
        dy = wy - self.wdrag_start_point[1]
        if self.restricted_mov:
            # Axis-locked drag: keep only the dominant component.
            if abs(dy) > abs(dx):
                dx = 0
            else:
                dy = 0
        dpos = euclid.Vector2(dx, dy)
        for actor in self.selection:
            old_pos = self.selection[actor].center
            new_pos = old_pos + dpos
            try:
                # Snap the dragged actor to the nearest grid cell.
                grid_pos = self.grid_manager.get_grid_position(new_pos)
                grid_pixel_pos = self.grid_manager.get_world_position_of_grid_position(grid_pos)
                actor.update_position(grid_pixel_pos)
            except OuterWorldPosition:
                # don't update position when dragged outside the world
                pass
    scroller = self.weak_scroller()
    # zoom
    zoom_change = (dz != 0 or self.wheel != 0)
    if zoom_change:
        if self.mouse_into_world():
            wzoom_center = self.world_mouse
            szoom_center = self.screen_mouse
        else:
            # decay to scroller unadorned (no zoom-to-cursor correction)
            wzoom_center = None
        if self.wheel != 0:
            # Wheel zoom uses a fixed per-tick dt (~1/60 s) and is consumed.
            dt_dz = 0.01666666 * self.wheel
            self.wheel = 0
        else:
            dt_dz = dt * dz
        zoom = scroller.scale + dt_dz * self.zoom_fastness
        if zoom < self.zoom_min:
            zoom = self.zoom_min
        elif zoom > self.zoom_max:
            zoom = self.zoom_max
        scroller.scale = zoom
        if wzoom_center is not None:
            # postprocess toward 'world point under mouse the same before
            # and after zoom' ; other restrictions may prevent fully comply
            wx1, wy1 = self.layer_manager.scrolling_manager.screen_to_world(*szoom_center)
            fx = scroller.restricted_fx + (wzoom_center[0] - wx1)
            fy = scroller.restricted_fy + (wzoom_center[1] - wy1)
            scroller.set_focus(fx, fy)
def update_mouse_position(self, sx, sy):
    """Record the mouse in screen and world coordinates and arm border
    autoscroll when the pointer enters the configured border strip."""
    self.screen_mouse = sx, sy
    self.world_mouse = self.layer_manager.scrolling_manager.screen_to_world(sx, sy)
    # handle autoscroll
    border = self.autoscroll_border
    if border is None:
        return
    # sleft / sright / sbottom / s_top already include the border strip.
    scroller = self.weak_scroller()
    self.update_view_bounds()
    sdx = 0.0
    if sx < self.sleft:
        sdx = sx - self.sleft
    elif sx > self.sright:
        sdx = sx - self.sright
    sdy = 0.0
    if sy < self.sbottom:
        sdy = sy - self.sbottom
    elif sy > self.s_top:
        sdy = sy - self.s_top
    self.autoscrolling = (sdx, sdy) != (0.0, 0.0)
    if self.autoscrolling:
        # Normalize the penetration depth by the border width.
        self.autoscrolling_sdelta = (sdx / border, sdy / border)
def update_autoscroll(self, dt):
fraction_sdx, fraction_sdy = self.autoscrolling_sdelta
scroller = self.weak_scroller()
worldview = self.weak_worldview()
f = self.autoscroll_fastness
wdx = (fraction_sdx * f * dt) / scroller.scale / worldview.scale
wdy = (fraction_sdy | |
None:
# That tx finished. No current tx.
return None
assert tx.depth == 0
return tx
except IndexError:
return None
@property
def accounts(self):
    """Every address currently present in the world state."""
    return [known_address for known_address in self._world_state]
@property
def normal_accounts(self):
    """Addresses holding no code (externally owned accounts)."""
    return [addr for addr in self.accounts if len(self.get_code(addr)) == 0]
@property
def contract_accounts(self):
    """Addresses holding non-empty code (contract accounts)."""
    return [addr for addr in self.accounts if len(self.get_code(addr)) > 0]
@property
def deleted_accounts(self):
    """Addresses that have been marked for deletion via delete_account."""
    return self._deleted_accounts
def delete_account(self, address):
    """Mark *address* for deletion; unknown addresses are ignored."""
    if address not in self._world_state:
        return
    self._deleted_accounts.add(address)
def get_storage_data(self, storage_address, offset):
    """
    Read a value from a storage slot on the specified account.

    :param storage_address: an account address
    :param offset: the storage slot to use.
    :type offset: int or BitVec
    :return: the (simplified) slot value; missing slots read as 0
    :rtype: int or BitVec
    """
    slot_value = self._world_state[storage_address]["storage"].get(offset, 0)
    return simplify(slot_value)
def set_storage_data(self, storage_address, offset, value):
    """
    Write a value to a storage slot of the specified account.

    :param storage_address: an account address
    :param offset: the storage slot to use.
    :type offset: int or BitVec
    :param value: the value to write
    :type value: int or BitVec
    """
    account_storage = self._world_state[storage_address]["storage"]
    account_storage[offset] = value
def get_storage_items(self, address):
    """
    Gets all items in an account storage

    :param address: account address
    :return: all items in account storage. items are tuple of (index, value). value can be symbolic
    :rtype: list[(storage_index, storage_value)]
    """
    storage = self._world_state[address]["storage"]
    items = []
    # Walk the chain of symbolic array writes down to the base ArrayVariable;
    # each intermediate node records one (index, value) store.
    array = storage.array
    while not isinstance(array, ArrayVariable):
        items.append((array.index, array.value))
        array = array.array
    return items
def has_storage(self, address):
    """
    True if something has been written to the storage.

    Note that if a slot has been erased from the storage this function may
    lose any meaning.
    """
    node = self._world_state[address]["storage"].array
    # Scan the symbolic write chain for at least one store node.
    while not isinstance(node, ArrayVariable):
        if isinstance(node, ArrayStore):
            return True
        node = node.array
    return False
def get_storage(self, address):
    """
    Gets the storage of an account

    :param address: account address
    :return: account storage
    :rtype: bytearray or ArrayProxy
    """
    return self._world_state[address]["storage"]
def _set_storage(self, address, storage):
    """Private auxiliary function to replace the storage"""
    # Bypasses the shape/type checks performed by create_account.
    self._world_state[address]["storage"] = storage
def get_nonce(self, address):
    """Return the nonce of *address*, applying EIP-161 defaults.

    NOTE(review): a second ``get_nonce`` defined further down in this class
    shadows this implementation (losing the symbolic-address check and the
    EIP-161 default of 1 for contract accounts) — confirm which is intended.

    :raises ValueError: if *address* is symbolic
    """
    if issymbolic(address):
        raise ValueError(f"Cannot retrieve the nonce of symbolic address {address}")
    elif address not in self._world_state:
        # assume that the caller is a regular account, so initialize its nonce to zero
        ret = 0
    elif "nonce" not in self._world_state[address]:
        if self._world_state[address]["code"]:
            # this is a contract account, so set the nonce to 1 per EIP 161
            ret = 1
        else:
            ret = 0
    else:
        ret = self._world_state[address]["nonce"]
    return ret
def increase_nonce(self, address):
    """Bump the nonce of *address* by one and return the new value."""
    bumped = self.get_nonce(address) + 1
    self._world_state[address]["nonce"] = bumped
    return bumped
def set_balance(self, address, value):
    """Set the balance (in Wei) of an existing account."""
    # NOTE(review): this is the only accessor coercing the address with
    # int(); sibling methods index with the raw address — confirm the
    # asymmetry is intentional.
    self._world_state[int(address)]["balance"] = value
def get_balance(self, address):
    """Balance in Wei of *address*; 0 for unknown accounts."""
    account = self._world_state.get(address)
    if account is None:
        return 0
    return account["balance"]
def add_to_balance(self, address, value):
    """Credit *value* Wei to an account that must already exist."""
    assert address in self._world_state
    account = self._world_state[address]
    account["balance"] = account["balance"] + value
def send_funds(self, sender, recipient, value):
    """Move *value* Wei from sender to recipient (no balance check)."""
    sender_account = self._world_state[sender]
    sender_account["balance"] -= value
    recipient_account = self._world_state[recipient]
    recipient_account["balance"] += value
def get_code(self, address):
    """Runtime bytecode of *address*; empty bytes for unknown accounts."""
    account = self._world_state.get(address)
    return bytes() if account is None else account["code"]
def set_code(self, address, data):
    """Install runtime bytecode on an account; may only be done once.

    :param data: concrete (bytes) or symbolic (Array) code
    :raises EVMException: if the account already has code
    """
    assert data is not None and isinstance(data, (bytes, Array))
    if self._world_state[address]["code"]:
        raise EVMException("Code already set")
    self._world_state[address]["code"] = data
def has_code(self, address):
    """True if the account at *address* carries non-empty bytecode."""
    return bool(len(self._world_state[address]["code"]))
def get_nonce(self, address):
    """Return the nonce of *address*.

    BUG FIX: this duplicate definition shadowed the richer ``get_nonce``
    defined earlier in the class, silently dropping the symbolic-address
    check and the EIP-161 defaults. Those semantics are restored here so the
    shadowing is harmless: unknown addresses default to 0, and contract
    accounts with no recorded nonce default to 1.

    :param address: concrete account address
    :raises ValueError: if *address* is symbolic
    """
    if issymbolic(address):
        raise ValueError(f"Cannot retrieve the nonce of symbolic address {address}")
    if address not in self._world_state:
        # Unknown address: treat as a fresh externally-owned account.
        return 0
    account = self._world_state[address]
    if "nonce" not in account:
        # EIP-161: contract accounts start at nonce 1, EOAs at 0.
        return 1 if account["code"] else 0
    return account["nonce"]
def log(self, address, topics, data):
    """Append a LOG record emitted by *address* and mirror it to the logger."""
    self._logs.append(EVMLog(address, data, topics))
    logger.info("LOG %r %r", data, topics)
def log_storage(self, addr):
    # Debugging hook; intentionally a no-op in this implementation.
    pass

def add_refund(self, value):
    # Gas refund accounting is not modelled; intentionally a no-op.
    pass

def block_prevhash(self):
    # Previous block hash is not tracked; always 0.
    return 0

def block_coinbase(self):
    # Miner/coinbase address of the simulated block.
    return self._coinbase

def block_timestamp(self):
    # Timestamp of the simulated block.
    return self._timestamp

def block_number(self):
    # Number of the simulated block.
    return self._blocknumber

def block_difficulty(self):
    # Difficulty of the simulated block.
    return self._difficulty

def block_gaslimit(self):
    # Gas limit of the simulated block.
    return self._gaslimit
def block_hash(self, block_number=None, force_recent=True):
    """
    Calculates a block's hash

    :param block_number: the block number for which to calculate the hash, defaulting to the most recent block
    :param force_recent: if True (the default) return zero for any block that is in the future or older than 256 blocks
    :return: the block hash
    """
    if block_number is None:
        block_number = self.block_number() - 1
    # We are not maintaining an actual -block-chain- so we just generate
    # some hashes for each virtual block
    value = sha3.keccak_256((repr(block_number) + "NONCE").encode()).hexdigest()
    value = int(value, 16)
    if force_recent:
        # 0 is left on the stack if the looked for block number is greater or equal
        # than the current block number or more than 256 blocks behind the current
        # block. (Current block hash is unknown from inside the tx)
        # NOTE(review): bnmax evaluates to min(current_block, 256), and the
        # second OR arm tests block_number < bnmax — that only matches
        # "more than 256 behind" while current_block <= 256; confirm against
        # the Yellow Paper BLOCKHASH semantics.
        bnmax = Operators.ITEBV(256, self.block_number() > 256, 256, self.block_number())
        value = Operators.ITEBV(
            256,
            Operators.OR(block_number >= self.block_number(), block_number < bnmax),
            0,
            value,
        )
    return value
def tx_origin(self):
    """Caller of the current human (top-level) transaction, or None if idle."""
    human_tx = self.current_human_transaction
    return human_tx.caller if human_tx else None
def tx_gasprice(self):
    """Gas price of the current human (top-level) transaction, or None if idle."""
    human_tx = self.current_human_transaction
    return human_tx.price if human_tx else None
@property
def depth(self):
    """Current call-stack depth (number of nested frames)."""
    return len(self._callstack)
def new_address(self, sender=None, nonce=None):
    """Create a fresh 160bit address"""
    if sender is not None and nonce is None:
        nonce = self.get_nonce(sender)
    new_address = self.calculate_new_address(sender, nonce)
    if sender is None and new_address in self:
        # Random address collided with an existing account; retry — the
        # recursive call re-rolls a fresh random address.
        return self.new_address(sender, nonce)
    return new_address
@staticmethod
def calculate_new_address(sender=None, nonce=None):
if sender is None:
# Just choose a random address for regular accounts:
new_address = random.randint(100, pow(2, 160))
elif issymbolic(sender):
# TODO(<NAME>): In the interim before we come up with a better solution,
# consider breaking Yellow Paper comability and just returning
# a random contract address here
raise EthereumError(
"Manticore does not yet support contracts with symbolic addresses creating new contracts"
)
else:
if nonce is None:
# assume that the sender is a contract account, which is initialized with a nonce of 1
nonce = 1
new_address = int(sha3.keccak_256(rlp.encode([sender, nonce])).hexdigest()[24:], 16)
return new_address
def execute(self):
    """Advance the current VM one execution step, handling tx boundaries.

    :raises TerminateState: when there is no VM left to run
    """
    self._process_pending_transaction()
    if self.current_vm is None:
        raise TerminateState("Trying to execute an empty transaction", testcase=False)
    try:
        self.current_vm.execute()
    except StartTx:
        # A nested transaction was issued; it is picked up on the next step.
        pass
    except EndTx as ex:
        # Transaction finished (success or revert); fold its result back in.
        self._close_transaction(ex.result, ex.data, rollback=ex.is_rollback())
def create_account(self, address=None, balance=0, code=None, storage=None, nonce=None):
    """
    Low level account creation. No transaction is done.

    :param address: the address of the account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible.
    :param balance: the initial balance of the account in Wei
    :param code: the runtime code of the account, if a contract
    :param storage: storage array
    :param nonce: the nonce for the account; contracts should have a nonce greater than or equal to 1
    :return: the address of the created account
    :raises EthereumError: on bad code/address types or when the account exists
    """
    if code is None:
        code = bytes()
    else:
        if not isinstance(code, (bytes, Array)):
            raise EthereumError("Wrong code type")
    # nonce default to initial nonce
    if nonce is None:
        # As per EIP 161, contract accounts are initialized with a nonce of 1
        nonce = 1 if code else 0
    if address is None:
        address = self.new_address()
    if not isinstance(address, int):
        raise EthereumError("You must provide an address")
    if address in self.accounts:
        # FIXME account may have been created via selfdestruct destination
        # or CALL and may contain some ether already, though if it was a
        # selfdestructed address, it can not be reused
        raise EthereumError("The account already exists")
    if storage is None:
        # Uninitialized values in a storage are 0 by spec
        storage = self.constraints.new_array(
            index_bits=256,
            value_bits=256,
            name=f"STORAGE_{address:x}",
            avoid_collisions=True,
            default=0,
        )
    else:
        if isinstance(storage, ArrayProxy):
            if storage.index_bits != 256 or storage.value_bits != 256:
                raise TypeError("An ArrayProxy 256bits -> 256bits is needed")
        else:
            if any((k < 0 or k >= 1 << 256 for k, v in storage.items())):
                raise TypeError(
                    "Need a dict like object that maps 256 bits keys to 256 bits values"
                )
            # Hopefully here we have a mapping from 256b to 256b
    self._world_state[address] = {}
    self._world_state[address]["nonce"] = nonce
    self._world_state[address]["balance"] = balance
    self._world_state[address]["storage"] = storage
    self._world_state[address]["code"] = code
    # adds hash of new address so SHA3-based address discovery sees it
    data = binascii.unhexlify("{:064x}{:064x}".format(address, 0))
    value = sha3.keccak_256(data).hexdigest()
    value = int(value, 16)
    self._publish("on_concrete_sha3", data, value)
    return address
def create_contract(self, price=0, address=None, caller=None, balance=0, init=None, gas=None):
"""
Create a contract account. Sends a transaction to initialize the contract
:param address: the address of the new account, if known. If omitted, a new address will be generated as closely to the Yellow Paper as possible.
:param balance: the initial balance of the account in Wei
:param init: the initialization code of the contract
The way that the Solidity compiler expects the constructor arguments to
be passed is by appending the arguments to the byte code produced by the
Solidity compiler. The arguments | |
# source repository: dgerod/more-dmps
import numpy as np
from cs import CanonicalSystem
class TdwFormulation(object):
    """DMP transformation system in the form used by Schaal 2012."""

    def __init__(self, ay=None, by=None):
        # Defaults per Schaal 2012: ay = 25, by = ay / 4 (critical damping).
        self.ay = 25. if ay is None else ay
        self.by = self.ay / 4. if by is None else by

    def acceleration(self, x, dx, start, goal, tau, f, s):
        """Acceleration of the system at state (x, dx) under forcing term f."""
        spring_damper = self.by * (goal - x) - dx / tau
        return (self.ay * spring_damper + (goal - start) * f) * tau

    def fs(self, x, dx, ddx, start, goal, tau, s):
        """Forcing term required to reproduce the state (x, dx, ddx)."""
        return (ddx - self.ay * (self.by * (goal - x) - dx) / (goal - start))
class OriginalFormulation(object):
    """Original DMP transformation system (spring-damper plus forcing term)."""

    def __init__(self, K=100., D=None):
        self.K = K
        # Critically damped by default: D = 2 * sqrt(K).
        self.D = 2.0 * np.sqrt(self.K) if D is None else D

    def acceleration(self, x, dx, start, goal, tau, f, s):
        """Acceleration of the system at state (x, dx) under forcing term f."""
        return (self.K * (goal - x) - self.D * dx + (goal - start) * f) / tau

    def fs(self, x, dx, ddx, start, goal, tau, s):
        """Forcing term required to reproduce the state (x, dx, ddx)."""
        return (tau * ddx - self.K * (goal - x) + self.D * dx) / (goal - start)
class ImprovedFormulation(object):
    """Improved DMP transformation system with goal-scaled phase term s."""

    def __init__(self, K=100., D=None):
        self.K = K
        # Critically damped by default: D = 2 * sqrt(K).
        self.D = 2.0 * np.sqrt(self.K) if D is None else D

    def acceleration(self, x, dx, start, goal, tau, f, s):
        """Acceleration of the system at state (x, dx) under forcing term f."""
        return (self.K * (goal - x) - self.D * dx
                - self.K * (goal - start) * s + self.K * f) / tau

    def fs(self, x, dx, ddx, start, goal, tau, s):
        """Forcing term required to reproduce the state (x, dx, ddx)."""
        return ((tau ** 2 * ddx + self.D * dx * tau) / self.K) \
            - (goal - x) + (goal - start) * s
# -----------------------------------------------------------------------------
class DMPs_discrete(object):
def __init__(self, dims, bfs, ts=None, dt=.01,
             y0=0, goal=1, w=None,
             ay=None, by=None, **kwargs):
    '''
    dims int: number of dynamic motor primitives
    bfs int: number of basis functions per DMP
    ts object: transformation system (defaults to TdwFormulation)
    dt float: timestep for simulation
    y0 list: initial state of DMPs
    goal list: goal state of DMPs
    w list: tunable parameters, control amplitude of basis functions
    ay int: gain on attractor term y dynamics
    by int: gain on attractor term y dynamics
    '''
    self.dmps = dims
    self.bfs = bfs
    self.dt = dt
    # set up the Transformation System
    if ts is None:
        ts = TdwFormulation()
    self.ts = ts
    # start and goal: scalars are broadcast to one value per DMP
    if isinstance(y0, (int, float)):
        y0 = np.ones(self.dmps) * y0
    self.y0 = y0
    if isinstance(goal, (int, float)):
        goal = np.ones(self.dmps) * goal
    self.goal = goal
    if w is None:
        # default is f = 0 (zero weights produce a zero forcing term)
        w = np.zeros((self.dmps, self.bfs))
    self.w = w
    # recorded / predicted forcing-term trajectories (filled by learn/plan)
    self.f_desired = np.array([])
    self.f_predicted = np.array([])
    self.f = np.zeros(self.dmps)
    # set up the CS (canonical system driving the phase variable)
    self.cs = CanonicalSystem(pattern='discrete', dt=self.dt, **kwargs)
    self.time_steps = int(self.cs.run_time / self.dt)
    # set up the DMP system
    self.reset_state()
    self.prep_centers_and_variances()
    self.check_offset()
def prep_centers_and_variances(self):
    '''
    Space the Gaussian basis-function centers evenly (in phase) over the
    run time and derive matching variances.
    '''
    # Desired center spacings along the canonical variable x: spread between
    # exp(-ax * run_time) (how far x actually decays) and ~1.
    x_min = np.exp(-self.cs.ax * self.cs.run_time)
    x_max = 1.05 - x_min
    desired = np.linspace(x_min, x_max, self.bfs)
    # Centers expressed via the log of the desired phase values (vectorized
    # form of the per-element -log loop).
    self.c = -np.log(desired)
    # set variance of Gaussian basis functions
    # trial and error to find this spacing
    self.h = np.ones(self.bfs) * self.bfs**1.5 / self.c
def check_offset(self):
    '''
    Nudge any goal that coincides with its start position so the forcing
    term is never identically zero.
    '''
    for dim in range(self.dmps):
        if self.y0[dim] == self.goal[dim]:
            self.goal[dim] += 1e-4
def set_goal(self, y_des):
    '''
    Goal for path imitation: the final point of the desired trajectory.

    y_des np.array: the desired trajectory, shaped [dmps, time]
    '''
    final_state = y_des[:, -1]
    return final_state.copy()
def gen_psi(self, x):
    '''
    Activation of every basis function at canonical state *x*.

    x float or np.array: current state(s) of the canonical system
    '''
    # Array inputs get a trailing axis so activations broadcast per state.
    state = x[:, None] if isinstance(x, np.ndarray) else x
    return np.exp(-self.h * (state - self.c) ** 2)
def gen_forcing_term(self, x, dmp_num, scale=False):
    """Complete forcing term f(x) for one DMP.

    x float: current value of the canonical system (s)
    dmp_num int: index of the DMP
    scale bool: if True, scale the term by (goal - y0)
    """
    psi = self.gen_psi(x)
    forcing = x * (np.dot(psi, self.w[dmp_num]) / np.sum(psi))
    if scale:
        forcing = forcing * (self.goal[dmp_num] - self.y0[dmp_num])
    return forcing
def find_force_function(self, dmp_num, y, dy, ddy, s):
    """Target forcing term for one DMP given a demonstrated trajectory
    (tau is fixed to 1.0 during learning)."""
    d = dmp_num
    return self.ts.fs(y[d], dy[d], ddy[d], self.y0[d], self.goal[d], 1.0, s)
def calculate_acceleration(self, dmp_num, tau, f, s):
    """Acceleration of one DMP from its transformation system, given the
    forcing term f and canonical state s."""
    d = dmp_num
    return self.ts.acceleration(self.y[d], self.dy[d], self.y0[d],
                                self.goal[d], tau, f, s)
def compute_weights(self, f_target):
    '''
    Fit the basis-function weights with weighted linear regression so the
    realized forcing term matches f_target.

    f_target np.array: desired forcing term, shaped [time, dmps]
    '''
    # calculate x/s and psi(s) over the whole rollout
    x_track = self.cs.rollout()
    psi_track = self.gen_psi(x_track)
    self.w = np.zeros((self.dmps, self.bfs))
    for d in range(self.dmps):
        for b in range(self.bfs):
            # Weighted least squares per basis function.
            weighted = x_track * psi_track[:, b]
            self.w[d, b] = np.sum(weighted * f_target[:, d]) / np.sum(x_track * weighted)
    return (x_track, psi_track)
def reset_state(self):
    '''
    Reset the system state
    '''
    # Fresh copies so later integration never mutates y0.
    self.y = self.y0.copy()
    self.dy = np.zeros(self.dmps)
    self.ddy = np.zeros(self.dmps)
    self.cs.reset_state()
def step(self, tau=1.0, state_fb=None, external_force=None):
    '''
    Run the DMP system for a single timestep.

    tau float: scales the timestep
        increase tau to make the system execute faster
    state_fb np.array: optional system feedback
    external_force np.array: optional extra acceleration per DMP
    '''
    # run canonical system
    cs_args = {'tau': tau, 'error_coupling': 1.0}
    if state_fb is not None:
        # take the 2 norm of the overall error
        state_fb = state_fb.reshape(1,self.dmps)
        dist = np.sqrt(np.sum((state_fb - self.y)**2))
        # Larger tracking error -> slow the canonical system down.
        cs_args['error_coupling'] = 1.0 / (1.0 + 10*dist)
    x = self.cs.step(**cs_args)
    for idx in range(self.dmps):
        # Calculate acceleration based on f(s)
        self.f[idx] = self.gen_forcing_term(x, idx)
        self.ddy[idx] = self.calculate_acceleration(idx, tau, self.f[idx], x)
        # Correct acceleration with any external perturbation
        if external_force is not None:
            self.ddy[idx] += external_force[idx]
        # Euler integration, scaled by tau and the error coupling.
        self.dy[idx] += self.ddy[idx] * tau * self.dt * cs_args['error_coupling']
        self.y[idx] += self.dy[idx] * self.dt * cs_args['error_coupling']
    return self.y, self.dy, self.ddy
def learn(self, y_des):
    '''
    Takes in a desired trajectory and generates the set of system parameters
    that best realize this path.

    y_des list/array: the desired trajectories of each DMP
                      should be shaped [dmps, run_time]
    '''
    # set initial state and goal
    # ---
    if y_des.ndim == 1:
        # Single trajectory: promote to a [1, time] array.
        y_des = y_des.reshape(1,len(y_des))
    self.y0 = y_des[:,0].copy()
    self.y_des = y_des.copy()
    self.goal = self.set_goal(y_des)
    self.check_offset()
    # generate function to interpolate the desired trajectory
    # ---
    import scipy.interpolate
    path = np.zeros((self.dmps, self.time_steps))
    x = np.linspace(0, self.cs.run_time, y_des.shape[1])
    for idx in range(self.dmps):
        # Resample the demonstration onto the simulation timestep grid.
        path_gen = scipy.interpolate.interp1d(x, y_des[idx])
        for t in range(self.time_steps):
            path[idx, t] = path_gen(t * self.dt)
    y_des = path
    # Calculate velocity and acceleration profiles
    # ---
    # calculate velocity of y_des
    dy_des = np.diff(y_des) / self.dt
    # add zero to the beginning of every row (start at rest)
    dy_des = np.hstack((np.zeros((self.dmps, 1)), dy_des))
    # calculate acceleration of y_des
    ddy_des = np.diff(dy_des) / self.dt
    # add zero to the beginning of every row
    ddy_des = np.hstack((np.zeros((self.dmps, 1)), ddy_des))
    # Compute F and weights
    # ---
    # run canonical system
    x = self.cs.rollout()
    # find the force required to move along this trajectory
    f_desired = np.zeros((y_des.shape[1], self.dmps))
    for idx in range(self.dmps):
        f_desired[:,idx] = self.find_force_function(idx, y_des, dy_des, ddy_des, x)
    # efficiently generate weights to realize f_target
    self.x_track, self.psi_track = self.compute_weights(f_desired)
    self.f_desired = f_desired
    self.reset_state()
    return y_des, dy_des, ddy_des
def plan(self, time_steps=None, **kwargs):
'''
Generate a system trial, no feedback is incorporated.
'''
self.reset_state()
if time_steps is None:
if kwargs.has_key('tau'):
time_steps = int(self.time_steps / kwargs['tau'])
else:
time_steps = self.time_steps
# set up tracking vectors
y_track = np.zeros((time_steps, self.dmps))
dy_track = np.zeros((time_steps, self.dmps))
ddy_track = np.zeros((time_steps, self.dmps))
f_predicted = np.zeros((time_steps, self.dmps))
for t in range(time_steps):
y, dy, ddy = self.step(**kwargs)
f_predicted[t] = self.f
# record timestep
y_track[t] = y
dy_track[t] = dy
ddy_track[t] = ddy
self.f_predicted = f_predicted
return y_track, | |
# batchgenerators/transforms/color_transforms.py
# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
import numpy as np
from scipy.ndimage import gaussian_filter
from batchgenerators.augmentations.color_augmentations import augment_contrast, augment_brightness_additive, \
augment_brightness_multiplicative, augment_gamma, augment_illumination, augment_PCA_shift
from batchgenerators.transforms.abstract_transforms import AbstractTransform
from typing import Union, Tuple, Callable, List
import scipy.stats as st
class ContrastAugmentationTransform(AbstractTransform):
    """Randomly alters the contrast of each sample in the batch."""

    def __init__(self,
                 contrast_range: Union[Tuple[float, float], Callable[[], float]] = (0.75, 1.25),
                 preserve_range: bool = True,
                 per_channel: bool = True,
                 data_key: str = "data",
                 p_per_sample: float = 1,
                 p_per_channel: float = 1):
        """
        :param contrast_range:
            (float, float): range a random contrast modifier is drawn from; if
            one bound is < 1 and the other > 1, half of the modifiers are > 1
            and the other half < 1 (within the specified interval)
            callable: must be contrast_range() -> float
        :param preserve_range: if True, clip augmented intensities to the
            min/max of the data before augmentation
        :param per_channel: use one modifier for all channels or one per channel
        :param data_key: key of the image batch inside data_dict
        :param p_per_sample: probability of augmenting each sample
        :param p_per_channel: probability of augmenting each channel
        """
        self.contrast_range = contrast_range
        self.preserve_range = preserve_range
        self.per_channel = per_channel
        self.data_key = data_key
        self.p_per_sample = p_per_sample
        self.p_per_channel = p_per_channel

    def __call__(self, **data_dict):
        batch = data_dict[self.data_key]
        for sample_idx in range(len(batch)):
            if np.random.uniform() < self.p_per_sample:
                batch[sample_idx] = augment_contrast(
                    batch[sample_idx],
                    contrast_range=self.contrast_range,
                    preserve_range=self.preserve_range,
                    per_channel=self.per_channel,
                    p_per_channel=self.p_per_channel)
        return data_dict
class NormalizeTransform(AbstractTransform):
    """Channel-wise normalization: subtract the mean and divide by the std,
    in place, using per-channel statistics supplied by the caller."""

    def __init__(self, means, stds, data_key='data'):
        self.data_key = data_key
        self.means = means
        self.stds = stds

    def __call__(self, **data_dict):
        batch = data_dict[self.data_key]
        for channel in range(batch.shape[1]):
            batch[:, channel] -= self.means[channel]
            batch[:, channel] /= self.stds[channel]
        return data_dict
class BrightnessTransform(AbstractTransform):
    """Additive brightness augmentation with offsets drawn from N(mu, sigma)."""

    def __init__(self, mu, sigma, per_channel=True, data_key="data", p_per_sample=1, p_per_channel=1):
        """
        :param mu: mean of the Gaussian the additive brightness is drawn from
        :param sigma: standard deviation of that Gaussian
        :param per_channel: use one offset for all channels or one per channel
        :param data_key: key of the image batch inside data_dict
        :param p_per_sample: probability of augmenting each sample
        :param p_per_channel: probability of augmenting each channel
        """
        self.mu = mu
        self.sigma = sigma
        self.per_channel = per_channel
        self.data_key = data_key
        self.p_per_sample = p_per_sample
        self.p_per_channel = p_per_channel

    def __call__(self, **data_dict):
        batch = data_dict[self.data_key]
        for sample_idx in range(batch.shape[0]):
            if np.random.uniform() < self.p_per_sample:
                batch[sample_idx] = augment_brightness_additive(
                    batch[sample_idx], self.mu, self.sigma, self.per_channel,
                    p_per_channel=self.p_per_channel)
        data_dict[self.data_key] = batch
        return data_dict
class BrightnessMultiplicativeTransform(AbstractTransform):
    """Multiplicative brightness augmentation with a factor drawn uniformly
    from multiplier_range."""

    def __init__(self, multiplier_range=(0.5, 2), per_channel=True, data_key="data", p_per_sample=1):
        """
        :param multiplier_range: range the brightness factor is sampled from
        :param per_channel: use one factor for all channels or one per channel
        :param data_key: key of the image batch inside data_dict
        :param p_per_sample: probability of augmenting each sample
        """
        self.multiplier_range = multiplier_range
        self.per_channel = per_channel
        self.data_key = data_key
        self.p_per_sample = p_per_sample

    def __call__(self, **data_dict):
        batch = data_dict[self.data_key]
        for sample_idx in range(len(batch)):
            if np.random.uniform() < self.p_per_sample:
                batch[sample_idx] = augment_brightness_multiplicative(
                    batch[sample_idx], self.multiplier_range, self.per_channel)
        return data_dict
class GammaTransform(AbstractTransform):
    """Gamma augmentation (like gamma correction on photos/monitors)."""

    def __init__(self, gamma_range=(0.5, 2), invert_image=False, per_channel=False, data_key="data", retain_stats=False,
                 p_per_sample=1):
        """
        :param gamma_range: range gamma is sampled from; if one bound is < 1
            and the other > 1, half the samples get gamma < 1 and half > 1
            (within the specified interval)
        :param invert_image: invert the image before applying gamma
        :param per_channel: sample a separate gamma per channel
        :param data_key: key of the image batch inside data_dict
        :param retain_stats: if True, restore the patch's mean and std after
            the gamma transformation (which would otherwise alter them)
        :param p_per_sample: probability of augmenting each sample
        """
        self.gamma_range = gamma_range
        self.invert_image = invert_image
        self.per_channel = per_channel
        self.data_key = data_key
        self.retain_stats = retain_stats
        self.p_per_sample = p_per_sample

    def __call__(self, **data_dict):
        batch = data_dict[self.data_key]
        for sample_idx in range(len(batch)):
            if np.random.uniform() < self.p_per_sample:
                batch[sample_idx] = augment_gamma(
                    batch[sample_idx], self.gamma_range,
                    self.invert_image,
                    per_channel=self.per_channel,
                    retain_stats=self.retain_stats)
        return data_dict
class IlluminationTransform(AbstractTransform):
    """Do not use this for now"""

    def __init__(self, white_rgb, data_key="data"):
        # Reference white point consumed by augment_illumination.
        self.data_key = data_key
        self.white_rgb = white_rgb

    def __call__(self, **data_dict):
        data_dict[self.data_key] = augment_illumination(data_dict[self.data_key], self.white_rgb)
        return data_dict
class FancyColorTransform(AbstractTransform):
    """Do not use this for now"""

    def __init__(self, U, s, sigma=0.2, data_key="data"):
        # PCA basis (U), component magnitudes (s) and noise scale (sigma)
        # forwarded to augment_PCA_shift.
        self.data_key = data_key
        self.s = s
        self.U = U
        self.sigma = sigma

    def __call__(self, **data_dict):
        data_dict[self.data_key] = augment_PCA_shift(data_dict[self.data_key], self.U, self.s, self.sigma)
        return data_dict
class ClipValueRange(AbstractTransform):
    """Clips the batch's intensity values to the interval [min, max]."""

    def __init__(self, min=None, max=None, data_key="data"):
        """
        :param min: lower clip bound (None leaves it unbounded)
        :param max: upper clip bound (None leaves it unbounded)
        :param data_key: key of the image batch inside data_dict
        """
        self.data_key = data_key
        self.min = min
        self.max = max

    def __call__(self, **data_dict):
        clipped = np.clip(data_dict[self.data_key], self.min, self.max)
        data_dict[self.data_key] = clipped
        return data_dict
class LocalGaussianSomethingTransform(ABC):
    """Shared machinery for transforms that modulate an image with a localized
    Gaussian field. Subclasses call _generate_kernel to obtain a separable
    Gaussian kernel whose center (loc, in % of the image size) and width
    (scale) are sampled independently per image dimension."""

    def __init__(self,
                 scale: Union[Tuple[float, float], float, Callable[[Union[Tuple[int, ...], List[int]], int], float]],
                 loc: Union[Tuple[float, float], Callable[[Union[Tuple[int, ...], List[int]], int], float]] = (-1, 2),
                 ):
        self.loc = loc
        self.scale = scale

    def _get_scale(self, image_shape, dimension):
        """Resolve the kernel scale for one image dimension.

        BUG FIX: a plain int scale used to fall through every branch and
        silently return None; real numbers are now accepted and unsupported
        types raise, mirroring _get_loc.
        """
        if isinstance(self.scale, (int, float)):
            return self.scale
        elif isinstance(self.scale, (list, tuple)):
            assert len(self.scale) == 2
            return np.random.uniform(*self.scale)
        elif callable(self.scale):
            return self.scale(image_shape, dimension)
        else:
            raise RuntimeError()

    def _get_loc(self, image_shape, dimension):
        """Resolve the kernel center (relative position) for one dimension."""
        if isinstance(self.loc, (int, float)):
            return self.loc
        elif isinstance(self.loc, (list, tuple)):
            assert len(self.loc) == 2
            return np.random.uniform(*self.loc)
        elif callable(self.loc):
            return self.loc(image_shape, dimension)
        else:
            raise RuntimeError()

    def _generate_kernel(self, img_shp: Tuple[int, ...]) -> np.ndarray:
        """Build a separable Gaussian kernel matching img_shp.

        NOTE(review): the outer-product below assumes at least two axes, so
        only 2D and 3D shapes actually work despite the <= 3 assertion.
        """
        assert len(img_shp) <= 3
        kernels = []
        for d in range(len(img_shp)):
            image_size_here = img_shp[d]
            loc = self._get_loc(img_shp, d)
            scale = self._get_scale(img_shp, d)
            loc_rescaled = loc * image_size_here
            # Integrate the normal pdf over unit-width bins centered on pixels.
            x = np.arange(-0.5, image_size_here + 0.5)
            kernels.append(np.diff(st.norm.cdf(x, loc=loc_rescaled, scale=scale)))
        kernel_2d = kernels[0][:, None].dot(kernels[1][None])
        if len(kernels) > 2:
            # trial and error got me here lol
            kernel = kernel_2d[:, :, None].dot(kernels[2][None])
        else:
            kernel = kernel_2d
        return kernel
class BrightnessGradientAdditiveTransform(LocalGaussianSomethingTransform):
def __init__(self,
scale: Union[Tuple[float, float], float, Callable[[Union[Tuple[int, ...], List[int]], int], float]],
loc: Union[Tuple[float, float], Callable[[Union[Tuple[int, ...], List[int]], int], float]] = (-1, 2),
max_strength: Union[float, Tuple[float, float], Callable[[np.ndarray, np.ndarray], float]] = 1.,
same_for_all_channels: bool = True,
mean_centered: bool = True,
p_per_sample: float = 1.,
p_per_channel: float = 1.,
clip_intensities: bool = False,
data_key: str = "data"):
"""
Applies an additive intensity gradient to the image. The intensity gradient is zero-centered (sum(add) = 0;
will not shift the global mean of the image. Some pixels will be brighter, some darker after application)
The gradient is implemented by placing a Gaussian distribution with sigma=scale somewhere in the image. The
location of the kernel is selected independently for each image dimension. The location is encoded in % of the
image size. The default value of (-1, 2) means that the location will be sampled uniformly from
(-image.shape[i], 2* image.shape[i]). It is important to allow the center of the kernel to be outside of the image.
IMPORTANT: Try this with different parametrizations and visualize the outcome to get a better feeling for how
to use this!
:param scale: scale of the gradient. Large values recommended!
float: fixed value
(float, float): will be sampled independently for each dimension from the interval [scale[0], scale[1]]
callable: you get all the freedom you want. Will be called as scale(image.shape, dimension) where dimension
is the index in image.shape we are requesting the scale for. Must return scalar (float).
:param loc:
(float, float): sample location uniformly from interval [scale[0], scale[1]] (see main description)
callable: you get all the freedom you want. Will be called as loc(image.shape, dimension) where dimension
is the index in image.shape we are requesting the location for. Must return a scalar value denoting a relative
position along axis dimension (0 for index 0, 1 for | |
<reponame>demirdogukan/InsiderPycraft
# Announce the module load when it is imported by the launcher rather than
# executed directly as a script.
if __name__ != "__main__":
    print("Started <Pycraft_Settings>")
class GenerateSettings:
    def __init__(self):
        # Intentionally empty: this class is used as a namespace for the
        # settings screen, and all state it reads (Display, fonts, colours,
        # mod_* modules) is carried on the shared app object passed in as
        # ``self`` by the caller.
        pass
def settings(self):
try:
self.Display.fill(self.BackgroundCol)
self.mod_Pygame__.display.flip()
self.mod_CaptionUtils__.GenerateCaptions.GetNormalCaption(self, "Settings")
VersionFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
MainTitleFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 60)
InfoTitleFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 35)
LOWFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
MEDIUMFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
HIGHFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
ADAPTIVEFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
LightThemeFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
DarkThemeFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
DataFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
TempMx = 0
Mx, My = 0, 0
mousebuttondown = False
SettingsInformationFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 15)
scroll = 50
while True:
realWidth, realHeight = self.mod_Pygame__.display.get_window_size()
if realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
xScaleFact = realWidth/1280
TempMx = Mx
tempFPS = self.mod_DisplayUtils__.DisplayUtils.GetPlayStatus(self)
self.Iteration += 1
Mx, My = self.mod_Pygame__.mouse.get_pos()
self.eFPS = self.clock.get_fps()
self.aFPS += self.eFPS
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT or (event.type == self.mod_Pygame__.KEYDOWN and event.key == self.mod_Pygame__.K_ESCAPE):
if self.sound == True:
self.mod_SoundUtils__.PlaySound.PlayClickSound(self)
return None
elif event.type == self.mod_Pygame__.KEYDOWN:
if event.key == self.mod_Pygame__.K_SPACE and self.Devmode < 10:
self.Devmode += 1
if event.key == self.mod_Pygame__.K_x:
self.Devmode = 1
if event.key == self.mod_Pygame__.K_q:
self.mod_TkinterUtils__.TkinterInfo.CreateTkinterWindow(self)
if event.key == self.mod_Pygame__.K_F11:
self.mod_DisplayUtils__.DisplayUtils.UpdateDisplay(self)
if event.key == self.mod_Pygame__.K_x:
self.Devmode = 1
elif event.type == self.mod_Pygame__.MOUSEBUTTONDOWN:
mousebuttondown = True
elif event.type == self.mod_Pygame__.MOUSEBUTTONUP:
mousebuttondown = False
if event.type == self.mod_Pygame__.MOUSEWHEEL and realHeight <= 760:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_SIZENS)
if str(event.y)[0] == "-":
scroll -= 5
else:
scroll += 5
else:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_ARROW)
self.mod_CaptionUtils__.GenerateCaptions.GetNormalCaption(self, "Settings")
if scroll > 35:
scroll = 35
elif scroll < 0:
scroll = 0
titletFont = MainTitleFont.render("Pycraft", self.aa, self.FontCol)
TitleWidth = titletFont.get_width()
InfoFont = InfoTitleFont.render("Settings", self.aa, self.SecondFontCol)
FPSFont = VersionFont.render(f"FPS: Actual: {int(self.eFPS)} Max: {int(self.FPS)} Average: {int((self.aFPS/self.Iteration))}", self.aa, self.FontCol)
FOVFont = VersionFont.render(f"FOV: {self.FOV}", self.aa, self.FontCol)
CamRotFont = VersionFont.render(f"Camera Rotation Speed: {round(self.cameraANGspeed, 1)}", self.aa, self.FontCol)
ModeFont = VersionFont.render("Mode; , , , .", self.aa, self.FontCol)
AAFont = VersionFont.render(f"Anti-Aliasing: {self.aa}", self.aa, self.FontCol)
RenderFogFont = VersionFont.render(f"Render Fog: {self.RenderFOG}", self.aa, self.FontCol)
FancySkyFont = VersionFont.render(f"Fancy Skies: {self.FanSky}", self.aa, self.FontCol)
FancyParticleFont = VersionFont.render(f"Fancy Partices: {self.FanPart}", self.aa, self.FontCol)
SoundFont = VersionFont.render(f"Sound: {self.sound}", self.aa, self.FontCol)
if self.sound == True:
SoundVoltFont = VersionFont.render(f"Sound Volume: {self.soundVOL}%", self.aa, self.FontCol)
else:
SoundVoltFont = VersionFont.render(f"Sound Volume: {self.soundVOL}%", self.aa, self.ShapeCol)
MusicFont = VersionFont.render(f"Music: {self.music}", self.aa, self.FontCol)
if self.music == True:
MusicVoltFont = VersionFont.render(f"Music Volume: {self.musicVOL}%", self.aa, self.FontCol)
else:
MusicVoltFont = VersionFont.render(f"Music Volume: {self.musicVOL}%", self.aa, self.ShapeCol)
ThemeFont = VersionFont.render(f"Theme: , | Current Theme: {self.theme}", self.aa, self.FontCol)
ThemeInformationFont = SettingsInformationFont.render("Gives you control over which theme you can use", self.aa, self.AccentCol)
ModeInformationFont = SettingsInformationFont.render("Gives you 4 separate per-sets for settings, Adaptive mode will automatically adjust your settings", self.aa, self.AccentCol)
FPSInformationFont = SettingsInformationFont.render("Controls the maximum frame rate the game will limit to, does not guarantee that FPS unfortunately", self.aa, self.AccentCol)
FOVInformationFont = SettingsInformationFont.render("Controls the FOV of the camera in-game", self.aa, self.AccentCol)
CameraRotationSpeedInformationFont = SettingsInformationFont.render("Controls the rotation speed of the camera in-game (1 is low, 5 is high)", self.aa, self.AccentCol)
AAInformationFont = SettingsInformationFont.render("Enables/Disables anti-aliasing in game and in the GUI, will give you a minor performance improvement, mainly for low powered devices", self.aa, self.AccentCol)
self.RenderFogInformationFont = SettingsInformationFont.render("Enables/Disables fog effects in game, for a small performance benefit", self.aa, self.AccentCol)
FancySkiesInformationFont = SettingsInformationFont.render("Enables/Disables a fancy sky box for better visuals in game, does not control anti aliasing for the sky box", self.aa, self.AccentCol)
FancyParticlesInformationFont = SettingsInformationFont.render("Enables/Disables particles in game as particles can have a significant performance decrease", self.aa, self.AccentCol)
SoundInformationFont = SettingsInformationFont.render("Enables/Disables sound effects in game, like for example the click sound and footsteps in game", self.aa, self.AccentCol)
SoundVolInformationFont = SettingsInformationFont.render("Controls the volume of the sound effects, where 100% is maximum and 0% is minimum volume", self.aa, self.AccentCol)
MusicInformationFont = SettingsInformationFont.render("Enables/Disables music in game, like for example the GUI music", self.aa, self.AccentCol)
MusicVolInformationFont = SettingsInformationFont.render("Controls the volume of the music, some effects may not apply until the game reloads", self.aa, self.AccentCol)
self.Display.fill(self.BackgroundCol)
FPS_rect = self.mod_Pygame__.Rect(50, 180+scroll, 450*xScaleFact, 10)
FOV_rect = self.mod_Pygame__.Rect(50, 230+scroll, 450*xScaleFact, 10)
CAM_rect = self.mod_Pygame__.Rect(50, 280+scroll, 450*xScaleFact, 10)
sound_rect = self.mod_Pygame__.Rect(50, 580+scroll, 450*xScaleFact, 10)
music_rect = self.mod_Pygame__.Rect(50, 680+scroll, 450*xScaleFact, 10)
aa_rect = self.mod_Pygame__.Rect(50, 330+scroll, 50, 10)
RenderFOG_Rect = self.mod_Pygame__.Rect(50, 380+scroll, 50, 10)
Fansky_Rect = self.mod_Pygame__.Rect(50, 430+scroll, 50, 10)
FanPart_Rect = self.mod_Pygame__.Rect(50, 480+scroll, 50, 10)
sound_Rect = self.mod_Pygame__.Rect(50, 530+scroll, 50, 10)
music_Rect = self.mod_Pygame__.Rect(50, 630+scroll, 50, 10)
slider_Rect = self.mod_Pygame__.Rect(realWidth-15, scroll, 10, 665)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, FPS_rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, FOV_rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, CAM_rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, sound_rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, music_rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, aa_rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, RenderFOG_Rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, Fansky_Rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, FanPart_Rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, sound_Rect, 0)
self.mod_Pygame__.draw.rect(self.Display, self.ShapeCol, music_Rect, 0)
if mousebuttondown == True:
if My > 180+scroll and My < 190+scroll:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if Mx > TempMx and self.FPS < 445:
self.FPS += 1
elif Mx < TempMx and self.FPS > 15:
self.FPS -= 1
if self.FPS < 15:
self.FPS = 16
elif self.FPS > 445:
self.FPS = 444
self.mod_Pygame__.draw.circle(self.Display, self.AccentCol, (int(self.FPS+45)*xScaleFact, 185+scroll), 9)
else:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), (int(self.FPS+45)*xScaleFact, 185+scroll), 9)
if My > 230+scroll and My < 240+scroll:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if Mx > TempMx and self.FOV < 98:
self.FOV += 1
elif Mx < TempMx and self.FOV > 12:
self.FOV -= 1
if self.FOV < 12:
self.FOV = 13
elif self.FOV > 98:
self.FOV = 97
self.mod_Pygame__.draw.circle(self.Display, self.AccentCol, (int(self.FOV*5)*xScaleFact, 235+scroll), 9)
else:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), (int(self.FOV*5)*xScaleFact, 235+scroll), 9)
if My > 280+scroll and My < 290+scroll:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if Mx > TempMx and self.cameraANGspeed < 5.0:
self.cameraANGspeed += 0.1
elif Mx < TempMx and self.cameraANGspeed > 0.0:
self.cameraANGspeed -= 0.1
if self.cameraANGspeed > 5.0:
self.cameraANGspeed = 4.9
elif self.cameraANGspeed <= 0:
self.cameraANGspeed = 0.1
self.mod_Pygame__.draw.circle(self.Display, self.AccentCol, ((int(self.cameraANGspeed*89)+45)*xScaleFact, 285+scroll), 9)
else:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), ((int(self.cameraANGspeed*89)+45)*xScaleFact, 285+scroll), 9)
if My > 580+scroll and My < 590+scroll and self.sound == True:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if Mx > TempMx and self.soundVOL < 100:
self.soundVOL += 1
elif Mx < TempMx and self.soundVOL > 0:
self.soundVOL -= 1
if self.soundVOL > 100:
self.soundVOL = 100
elif self.soundVOL < 0:
self.soundVOL = 0
self.mod_Pygame__.draw.circle(self.Display, self.AccentCol, ((int(self.soundVOL*4.4)+50)*xScaleFact, 585+scroll), 9)
else:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), ((int(self.soundVOL*4.4)+50)*xScaleFact, 585+scroll), 9)
if My > 680+scroll and My < 690+scroll and self.music == True:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if Mx > TempMx and self.musicVOL < 100:
self.musicVOL += 1
elif Mx < TempMx and self.musicVOL > 0:
self.musicVOL -= 1
if self.musicVOL > 100:
self.musicVOL = 100
elif self.musicVOL < 0:
self.musicVOL = 0
self.mod_Pygame__.draw.circle(self.Display, self.AccentCol, ((int(self.musicVOL*4.4)+50)*xScaleFact, 685+scroll), 9)
else:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), ((int(self.musicVOL*4.4)+50)*xScaleFact, 685+scroll), 9)
if My > 330+scroll and My < 340+scroll:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if self.aa == True:
self.aa = False
if self.sound == True:
self.mod_SoundUtils__.PlaySound.PlayClickSound(self)
mousebuttondown = False
elif self.aa == False:
self.aa = True
if self.sound == True:
self.mod_SoundUtils__.PlaySound.PlayClickSound(self)
mousebuttondown = False
if self.aa == True:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), (90, 335+scroll), 9)
self.mod_Pygame__.draw.circle(self.Display, self.ShapeCol, (90, 335+scroll), 6)
elif self.aa == False:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), (60, 335+scroll), 9)
self.mod_Pygame__.draw.circle(self.Display, self.ShapeCol, (60, 335+scroll), 6)
if My > 380+scroll and My < 390+scroll:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if self.RenderFOG == True:
self.RenderFOG = False
if self.sound == True:
self.mod_SoundUtils__.PlaySound.PlayClickSound(self)
mousebuttondown = False
elif self.RenderFOG == False:
self.RenderFOG = True
if self.sound == True:
self.mod_SoundUtils__.PlaySound.PlayClickSound(self)
mousebuttondown = False
if self.RenderFOG == True:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), (90, 385+scroll), 9)
self.mod_Pygame__.draw.circle(self.Display, self.ShapeCol, (90, 385+scroll), 6)
elif self.RenderFOG == False:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), (60, 385+scroll), 9)
self.mod_Pygame__.draw.circle(self.Display, self.ShapeCol, (60, 385+scroll), 6)
if My > 430+scroll and My < 440+scroll:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if self.FanSky == True:
self.FanSky = False
if self.sound == True:
self.mod_SoundUtils__.PlaySound.PlayClickSound(self)
mousebuttondown = False
elif self.FanSky == False:
self.FanSky = True
if self.sound == True:
self.mod_SoundUtils__.PlaySound.PlayClickSound(self)
mousebuttondown = False
if self.FanSky == True:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), (90, 435+scroll), 9)
self.mod_Pygame__.draw.circle(self.Display, self.ShapeCol, (90, 435+scroll), 6)
elif self.FanSky == False:
self.mod_Pygame__.draw.circle(self.Display, (255, 255, 255), (60, 435+scroll), 9)
self.mod_Pygame__.draw.circle(self.Display, self.ShapeCol, (60, 435+scroll), 6)
if My > 480+scroll and My < 490+scroll:
self.mod_Pygame__.mouse.set_cursor(self.mod_Pygame__.SYSTEM_CURSOR_HAND)
if self.FanPart == | |
ForegroundLayer = None
ItemIndexMethod = None
ItemLayer = None
NoIndex = None
SceneLayer = None
SceneLayers = None
__new__ = None
changed = None
focusItemChanged = None
sceneRectChanged = None
selectionChanged = None
staticMetaObject = None
class QFileIconProvider(_Object):
    """Introspection stub for Qt's QFileIconProvider.

    All method bodies are placeholders (``pass``) and the class-level names
    are ``None``; NOTE(review): presumably emitted by a stub generator for
    IDE/autocomplete support, with the real implementation supplied by the
    Qt binding at runtime — confirm before hand-editing.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def icon(*args, **kwargs):
        pass
    def options(*args, **kwargs):
        pass
    def setOptions(*args, **kwargs):
        pass
    def type(*args, **kwargs):
        pass
    # Placeholder class attributes; presumably enum members (IconType/Option)
    # provided by the real binding.
    Computer = None
    Desktop = None
    DontUseCustomDirectoryIcons = None
    Drive = None
    File = None
    Folder = None
    IconType = None
    Network = None
    Option = None
    Options = None
    Trashcan = None
    __new__ = None
class QLayoutItem(_Object):
    """Introspection stub for Qt's QLayoutItem.

    All method bodies are placeholders (``pass``) and the class-level names
    are ``None``; NOTE(review): presumably emitted by a stub generator for
    IDE/autocomplete support, with the real implementation supplied by the
    Qt binding at runtime — confirm before hand-editing.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def alignment(*args, **kwargs):
        pass
    def controlTypes(*args, **kwargs):
        pass
    def expandingDirections(*args, **kwargs):
        pass
    def geometry(*args, **kwargs):
        pass
    def hasHeightForWidth(*args, **kwargs):
        pass
    def heightForWidth(*args, **kwargs):
        pass
    def invalidate(*args, **kwargs):
        pass
    def isEmpty(*args, **kwargs):
        pass
    def layout(*args, **kwargs):
        pass
    def maximumSize(*args, **kwargs):
        pass
    def minimumHeightForWidth(*args, **kwargs):
        pass
    def minimumSize(*args, **kwargs):
        pass
    def setAlignment(*args, **kwargs):
        pass
    def setGeometry(*args, **kwargs):
        pass
    def sizeHint(*args, **kwargs):
        pass
    def spacerItem(*args, **kwargs):
        pass
    def widget(*args, **kwargs):
        pass
    # Placeholder class attributes populated by the real binding.
    align = None
    __new__ = None
class QUndoGroup(_QObject):
    """Introspection stub for Qt's QUndoGroup.

    All method bodies are placeholders (``pass``) and the class-level names
    are ``None``; NOTE(review): presumably emitted by a stub generator for
    IDE/autocomplete support, with the real implementation supplied by the
    Qt binding at runtime — confirm before hand-editing.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def activeStack(*args, **kwargs):
        pass
    def addStack(*args, **kwargs):
        pass
    def canRedo(*args, **kwargs):
        pass
    def canUndo(*args, **kwargs):
        pass
    def createRedoAction(*args, **kwargs):
        pass
    def createUndoAction(*args, **kwargs):
        pass
    def isClean(*args, **kwargs):
        pass
    def redo(*args, **kwargs):
        pass
    def redoText(*args, **kwargs):
        pass
    def removeStack(*args, **kwargs):
        pass
    def setActiveStack(*args, **kwargs):
        pass
    def stacks(*args, **kwargs):
        pass
    def undo(*args, **kwargs):
        pass
    def undoText(*args, **kwargs):
        pass
    # Placeholder class attributes; presumably Qt signals plus metaobject
    # data supplied by the real binding.
    __new__ = None
    activeStackChanged = None
    canRedoChanged = None
    canUndoChanged = None
    cleanChanged = None
    indexChanged = None
    redoTextChanged = None
    staticMetaObject = None
    undoTextChanged = None
class QAction(_QObject):
    """Introspection stub for Qt's QAction.

    All method bodies are placeholders (``pass``) and the class-level names
    are ``None``; NOTE(review): presumably emitted by a stub generator for
    IDE/autocomplete support, with the real implementation supplied by the
    Qt binding at runtime — confirm before hand-editing.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def actionGroup(*args, **kwargs):
        pass
    def activate(*args, **kwargs):
        pass
    def associatedGraphicsWidgets(*args, **kwargs):
        pass
    def associatedWidgets(*args, **kwargs):
        pass
    def autoRepeat(*args, **kwargs):
        pass
    def data(*args, **kwargs):
        pass
    def event(*args, **kwargs):
        pass
    def font(*args, **kwargs):
        pass
    def hover(*args, **kwargs):
        pass
    def icon(*args, **kwargs):
        pass
    def iconText(*args, **kwargs):
        pass
    def isCheckable(*args, **kwargs):
        pass
    def isChecked(*args, **kwargs):
        pass
    def isEnabled(*args, **kwargs):
        pass
    def isIconVisibleInMenu(*args, **kwargs):
        pass
    def isSeparator(*args, **kwargs):
        pass
    def isVisible(*args, **kwargs):
        pass
    def menu(*args, **kwargs):
        pass
    def menuRole(*args, **kwargs):
        pass
    def parentWidget(*args, **kwargs):
        pass
    def priority(*args, **kwargs):
        pass
    def setActionGroup(*args, **kwargs):
        pass
    def setAutoRepeat(*args, **kwargs):
        pass
    def setCheckable(*args, **kwargs):
        pass
    def setChecked(*args, **kwargs):
        pass
    def setData(*args, **kwargs):
        pass
    def setDisabled(*args, **kwargs):
        pass
    def setEnabled(*args, **kwargs):
        pass
    def setFont(*args, **kwargs):
        pass
    def setIcon(*args, **kwargs):
        pass
    def setIconText(*args, **kwargs):
        pass
    def setIconVisibleInMenu(*args, **kwargs):
        pass
    def setMenu(*args, **kwargs):
        pass
    def setMenuRole(*args, **kwargs):
        pass
    def setPriority(*args, **kwargs):
        pass
    def setSeparator(*args, **kwargs):
        pass
    def setShortcut(*args, **kwargs):
        pass
    def setShortcutContext(*args, **kwargs):
        pass
    def setShortcuts(*args, **kwargs):
        pass
    def setStatusTip(*args, **kwargs):
        pass
    def setText(*args, **kwargs):
        pass
    def setToolTip(*args, **kwargs):
        pass
    def setVisible(*args, **kwargs):
        pass
    def setWhatsThis(*args, **kwargs):
        pass
    def shortcut(*args, **kwargs):
        pass
    def shortcutContext(*args, **kwargs):
        pass
    def shortcuts(*args, **kwargs):
        pass
    def showStatusText(*args, **kwargs):
        pass
    def statusTip(*args, **kwargs):
        pass
    def text(*args, **kwargs):
        pass
    def toggle(*args, **kwargs):
        pass
    def toolTip(*args, **kwargs):
        pass
    def trigger(*args, **kwargs):
        pass
    def whatsThis(*args, **kwargs):
        pass
    # Placeholder class attributes; presumably enum members (MenuRole,
    # Priority, ActionEvent) and signals supplied by the real binding.
    AboutQtRole = None
    AboutRole = None
    ActionEvent = None
    ApplicationSpecificRole = None
    HighPriority = None
    Hover = None
    LowPriority = None
    MenuRole = None
    NoRole = None
    NormalPriority = None
    PreferencesRole = None
    Priority = None
    QuitRole = None
    TextHeuristicRole = None
    Trigger = None
    __new__ = None
    changed = None
    hovered = None
    staticMetaObject = None
    toggled = None
    triggered = None
class QGraphicsItemAnimation(_QObject):
    """Introspection stub for Qt's QGraphicsItemAnimation.

    All method bodies are placeholders (``pass``) and the class-level names
    are ``None``; NOTE(review): presumably emitted by a stub generator for
    IDE/autocomplete support, with the real implementation supplied by the
    Qt binding at runtime — confirm before hand-editing.
    """
    def __init__(*args, **kwargs):
        """
        x.__init__(...) initializes x; see help(type(x)) for signature
        """
        pass
    def afterAnimationStep(*args, **kwargs):
        pass
    def beforeAnimationStep(*args, **kwargs):
        pass
    def clear(*args, **kwargs):
        pass
    def horizontalScaleAt(*args, **kwargs):
        pass
    def horizontalShearAt(*args, **kwargs):
        pass
    def item(*args, **kwargs):
        pass
    def matrixAt(*args, **kwargs):
        pass
    def posAt(*args, **kwargs):
        pass
    def posList(*args, **kwargs):
        pass
    def reset(*args, **kwargs):
        pass
    def rotationAt(*args, **kwargs):
        pass
    def rotationList(*args, **kwargs):
        pass
    def scaleList(*args, **kwargs):
        pass
    def setItem(*args, **kwargs):
        pass
    def setPosAt(*args, **kwargs):
        pass
    def setRotationAt(*args, **kwargs):
        pass
    def setScaleAt(*args, **kwargs):
        pass
    def setShearAt(*args, **kwargs):
        pass
    def setStep(*args, **kwargs):
        pass
    def setTimeLine(*args, **kwargs):
        pass
    def setTranslationAt(*args, **kwargs):
        pass
    def shearList(*args, **kwargs):
        pass
    def timeLine(*args, **kwargs):
        pass
    def translationList(*args, **kwargs):
        pass
    def verticalScaleAt(*args, **kwargs):
        pass
    def verticalShearAt(*args, **kwargs):
        pass
    def xTranslationAt(*args, **kwargs):
        pass
    def yTranslationAt(*args, **kwargs):
        pass
    # Placeholder class attributes populated by the real binding.
    __new__ = None
    staticMetaObject = None
class QCompleter(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def caseSensitivity(*args, **kwargs):
pass
def complete(*args, **kwargs):
pass
def completionColumn(*args, **kwargs):
pass
def completionCount(*args, **kwargs):
pass
def completionMode(*args, **kwargs):
pass
def completionModel(*args, **kwargs):
pass
def completionPrefix(*args, **kwargs):
pass
def completionRole(*args, **kwargs):
pass
def currentCompletion(*args, **kwargs):
pass
def currentIndex(*args, **kwargs):
pass
def currentRow(*args, **kwargs):
pass
def event(*args, **kwargs):
pass
def eventFilter(*args, **kwargs):
pass
def filterMode(*args, **kwargs):
pass
def maxVisibleItems(*args, **kwargs):
pass
def model(*args, **kwargs):
pass
def modelSorting(*args, **kwargs):
pass
def pathFromIndex(*args, **kwargs):
pass
def popup(*args, **kwargs):
pass
def setCaseSensitivity(*args, **kwargs):
pass
def setCompletionColumn(*args, **kwargs):
pass
def setCompletionMode(*args, **kwargs):
pass
def setCompletionPrefix(*args, **kwargs):
pass
def setCompletionRole(*args, **kwargs):
pass
def setCurrentRow(*args, **kwargs):
pass
def setFilterMode(*args, **kwargs):
pass
def setMaxVisibleItems(*args, **kwargs):
pass
def setModel(*args, **kwargs):
pass
def setModelSorting(*args, **kwargs):
pass
def setPopup(*args, **kwargs):
pass
def setWidget(*args, **kwargs):
pass
def setWrapAround(*args, **kwargs):
pass
def splitPath(*args, **kwargs):
pass
| |
these to a non-empty value.
request = library.ListBooksRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_books),
'__call__') as call:
call.return_value = library.ListBooksResponse()
client.list_books(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_books_field_headers_async():
    """Verify the async client mirrors URI-bound fields into routing metadata."""
    client = LibraryServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI must also be sent as a
    # field header, so give ``parent`` a non-empty value.
    request = library.ListBooksRequest()
    request.parent = 'parent/value'
    # Patch the gRPC stub and fake the RPC result.
    patcher = mock.patch.object(type(client.transport.list_books), '__call__')
    with patcher as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            library.ListBooksResponse())
        await client.list_books(request)
        # The faked request object must reach the transport layer unchanged.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
        # The routing header must carry the resource name.
        _, _, call_kwargs = rpc.mock_calls[0]
        expected_header = (
            'x-goog-request-params',
            'parent=parent/value',
        )
        assert expected_header in call_kwargs['metadata']
def test_list_books_flattened():
    """The flattened ``parent=`` keyword must populate the request message."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the gRPC stub and fake the RPC result.
    with mock.patch.object(type(client.transport.list_books), '__call__') as rpc:
        rpc.return_value = library.ListBooksResponse()
        # Invoke via keyword arguments instead of a request object.
        client.list_books(
            parent='parent_value',
        )
        # A single transport call carrying the flattened value is expected.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].parent == 'parent_value'
def test_list_books_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.list_books(
            library.ListBooksRequest(),
            parent='parent_value',
        )
@pytest.mark.asyncio
async def test_list_books_flattened_async():
    """Async variant: flattened ``parent=`` must populate the request message."""
    client = LibraryServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_books),
            '__call__') as call:
        # Designate an appropriate return value for the call.  (A previous
        # plain ListBooksResponse assignment here was dead code: it was
        # overwritten on the very next line, since the async surface needs
        # an awaitable result.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(library.ListBooksResponse())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_books(
            parent='parent_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_books_flattened_error_async():
    """Async variant: request object plus flattened fields must raise."""
    client = LibraryServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.list_books(
            library.ListBooksRequest(),
            parent='parent_value',
        )
def test_list_books_pager():
    """The sync pager must flatten all pages into a single iterable."""
    client = LibraryServiceClient(
        # Instantiate the credentials object; previously the bare class was
        # passed, inconsistent with every non-pager test in this module.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_books),
            '__call__') as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 books).
        call.side_effect = (
            library.ListBooksResponse(
                books=[
                    library.Book(),
                    library.Book(),
                    library.Book(),
                ],
                next_page_token='abc',
            ),
            library.ListBooksResponse(
                books=[],
                next_page_token='def',
            ),
            library.ListBooksResponse(
                books=[
                    library.Book(),
                ],
                next_page_token='ghi',
            ),
            library.ListBooksResponse(
                books=[
                    library.Book(),
                    library.Book(),
                ],
            ),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
        pager = client.list_books(request={})

        # The pager must carry the routing metadata for the (empty) parent.
        assert pager._metadata == metadata

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, library.Book)
                   for i in results)
def test_list_books_pages():
    """Page-wise iteration must expose each raw page token in order."""
    client = LibraryServiceClient(
        # Instantiate the credentials object; previously the bare class was
        # passed, inconsistent with every non-pager test in this module.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_books),
            '__call__') as call:
        # Set the response to a series of pages.
        call.side_effect = (
            library.ListBooksResponse(
                books=[
                    library.Book(),
                    library.Book(),
                    library.Book(),
                ],
                next_page_token='abc',
            ),
            library.ListBooksResponse(
                books=[],
                next_page_token='def',
            ),
            library.ListBooksResponse(
                books=[
                    library.Book(),
                ],
                next_page_token='ghi',
            ),
            library.ListBooksResponse(
                books=[
                    library.Book(),
                    library.Book(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_books(request={}).pages)
        # The final page carries an empty token.
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_books_async_pager():
    """The async pager must flatten all pages into one async iterable."""
    client = LibraryServiceAsyncClient(
        # Instantiate the credentials object; previously the bare class was
        # passed, inconsistent with every non-pager test in this module.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_books),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 books).
        call.side_effect = (
            library.ListBooksResponse(
                books=[
                    library.Book(),
                    library.Book(),
                    library.Book(),
                ],
                next_page_token='abc',
            ),
            library.ListBooksResponse(
                books=[],
                next_page_token='def',
            ),
            library.ListBooksResponse(
                books=[
                    library.Book(),
                ],
                next_page_token='ghi',
            ),
            library.ListBooksResponse(
                books=[
                    library.Book(),
                    library.Book(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_books(request={},)
        assert async_pager.next_page_token == 'abc'
        responses = []
        async for response in async_pager:
            responses.append(response)

        assert len(responses) == 6
        assert all(isinstance(i, library.Book)
                   for i in responses)
@pytest.mark.asyncio
async def test_list_books_async_pages():
    """Async page-wise iteration must expose each raw page token in order."""
    client = LibraryServiceAsyncClient(
        # Instantiate the credentials object; previously the bare class was
        # passed, inconsistent with every non-pager test in this module.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.list_books),
            '__call__', new_callable=mock.AsyncMock) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            library.ListBooksResponse(
                books=[
                    library.Book(),
                    library.Book(),
                    library.Book(),
                ],
                next_page_token='abc',
            ),
            library.ListBooksResponse(
                books=[],
                next_page_token='def',
            ),
            library.ListBooksResponse(
                books=[
                    library.Book(),
                ],
                next_page_token='ghi',
            ),
            library.ListBooksResponse(
                books=[
                    library.Book(),
                    library.Book(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_books(request={})).pages:
            pages.append(page_)
        # The final page carries an empty token.
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
            assert page_.raw_page.next_page_token == token
def test_delete_book(transport: str = 'grpc', request_type=library.DeleteBookRequest):
    """A DeleteBook RPC forwards the request and yields an empty response."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional at runtime and the API is mocked, so an
    # empty request message suffices.
    request = request_type()
    # Patch the gRPC stub and fake the RPC result.
    with mock.patch.object(type(client.transport.delete_book), '__call__') as rpc:
        rpc.return_value = None
        response = client.delete_book(request)
        # Exactly one transport invocation with the canonical request type.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == library.DeleteBookRequest()
    # Delete has an empty (None) response.
    assert response is None
def test_delete_book_from_dict():
    # Re-run the happy-path test with the request supplied as a plain dict.
    test_delete_book(request_type=dict)
def test_delete_book_empty_call():
    """Coverage failsafe: a call with request == None and no flattened
    fields must still work and synthesize a default request."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    # Patch the gRPC stub method, then invoke the client with no arguments.
    with mock.patch.object(
            type(client.transport.delete_book), '__call__') as call:
        client.delete_book()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # With nothing supplied, a default request object is sent.
        assert args[0] == library.DeleteBookRequest()
@pytest.mark.asyncio
async def test_delete_book_async(transport: str = 'grpc_asyncio', request_type=library.DeleteBookRequest):
    """Async variant of the delete_book happy-path test."""
    client = LibraryServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional and the API is mocked out, so an
    # empty request is sufficient.
    request = request_type()
    # Patch the gRPC stub method with an awaitable fake call.
    with mock.patch.object(
            type(client.transport.delete_book), '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_book(request)
        # The stub must have been invoked with our request.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == library.DeleteBookRequest()
    # delete_book resolves to None.
    assert response is None
@pytest.mark.asyncio
async def test_delete_book_async_from_dict():
    # Re-run the async happy-path test with the request supplied as a dict.
    await test_delete_book_async(request_type=dict)
def test_delete_book_field_headers():
    """The request's resource name must be routed via x-goog-request-params."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so populate the name field with a non-empty value.
    request = library.DeleteBookRequest()
    request.name = 'name/value'
    # Patch the gRPC stub method and invoke the client.
    with mock.patch.object(
            type(client.transport.delete_book), '__call__') as call:
        call.return_value = None
        client.delete_book(request)
        # The stub received the very request object we built.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # The routing header must carry the resource name.
        _, _, kw = call.mock_calls[0]
        assert (
            'x-goog-request-params',
            'name=name/value',
        ) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_book_field_headers_async():
    """Async variant: resource name must be routed via x-goog-request-params."""
    client = LibraryServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so populate the name field with a non-empty value.
    request = library.DeleteBookRequest()
    request.name = 'name/value'
    # Patch the gRPC stub method with an awaitable fake call.
    with mock.patch.object(
            type(client.transport.delete_book), '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_book(request)
        # The stub received the very request object we built.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # The routing header must carry the resource name.
        _, _, kw = call.mock_calls[0]
        assert (
            'x-goog-request-params',
            'name=name/value',
        ) in kw['metadata']
def test_delete_book_flattened():
| |
baseclass.
"""
def __init__(self):
    # Next ID to hand out when a peak is added (auto-increment counter).
    self._id = 1
    # Storage: peak ID -> peak dictionary.
    self._store = {}
    # PEAKS_DICT is a module-level constant defined elsewhere in this
    # file — presumably the template/default peak-field layout; confirm
    # against its definition.
    self._base = PEAKS_DICT
def iter_assigned(self):
    """
    Iterate over all assigned peaks.

    :return: (peak_id, peak) pairs for every fully assigned peak
    :rtype: generator of tuples
    """
    return ((pid, pk) for pid, pk in self._store.items()
            if self.is_assigned(pid))
def iter_nonassigned(self):
    """
    Iterate over all non-assigned peaks.

    :return: (peak_id, peak) pairs for every peak that is not fully assigned
    :rtype: generator of tuples
    """
    return ((pid, pk) for pid, pk in self._store.items()
            if not self.is_assigned(pid))
def is_assigned(self, peak_id):
    """
    Return True if the peak is assigned, i.e. all three dimensions
    ('ass1', 'ass2', 'ass3') have a non-zero assignment associated.

    :param peak_id: peak identifier
    :type peak_id: int
    :return: whether every dimension carries an assignment
    :rtype: bool
    :raises AssertionError: if no peak with the given ID is stored
    """
    # Fixed: the original used ``assert cond, logging.error(...)`` — the
    # message expression evaluates to None (logging.error's return value),
    # and the whole check disappears under ``python -O``. Log explicitly
    # and raise the same exception type with a real message.
    if peak_id not in self._store:
        logging.error('No peak with ID: {0}'.format(peak_id))
        raise AssertionError('No peak with ID: {0}'.format(peak_id))
    peak = self._store[peak_id]
    return all(peak[ass] != 0 for ass in ('ass1', 'ass2', 'ass3'))
def calculatedistance(vector1, vector2):
    """
    Calculate the squared distance between two vectors (atoms).

    No square root is taken: callers compare the result against squared
    cutoffs, which avoids the sqrt cost.

    :param vector1: first atom coordinate (x, y, z)
    :type vector1: list
    :param vector2: second atom coordinate (x, y, z)
    :type vector2: list
    :return: squared distance between the two atoms, or None when the
             vectors have different lengths (not aligned)
    :rtype: float or None
    """
    if len(vector1) != len(vector2):
        return None
    # Sum of squared component differences, paired via zip.
    return float(sum((a - b) ** 2 for a, b in zip(vector1, vector2)))
def parse_sparky_proj(proj_file):
    """
    Parse a Sparky project file (.proj) into an Assignments object.

    Only the block between '<resonances>' and '<end resonances>' is read.
    Each resonance line is expected to hold three whitespace-separated
    fields: a combined residue/atom identifier, a chemical shift and a
    nucleus type.

    :param proj_file: Sparky project file path
    :type proj_file: string
    :return: parsed assignments
    :rtype: Assignments
    """
    assert os.path.exists(proj_file), 'Sparky project file {0} does not exist.'.format(proj_file)
    logging.debug('Parsing Sparky project file {0}'.format(proj_file))

    read = False
    assignments = Assignments()
    with open(proj_file) as sparkyproject:
        for line in sparkyproject.readlines():
            if line.startswith('<resonances>'):
                read = True
                continue
            elif line.startswith('<end resonances>'):
                read = False
            if read:
                line = line.split()
                if not len(line) == 3:
                    logging.warning("Malformed assignment in %s, line: %s" % (proj_file, line))
                    continue
                atomid = [a for a in line[0].split('|') if len(a)]
                # Split the identifier into residue-name (non-digit runs)
                # and residue-number (digit runs) parts. Raw strings fix
                # the invalid '\d'/'\D' escape sequences that raise
                # DeprecationWarning/SyntaxWarning on modern Python.
                resi = [n for n in re.split(r'\d', atomid[0]) if len(n)]
                resn = [int(n) for n in re.split(r'\D', atomid[0]) if len(n)]
                resn = resn[0] if len(resn) else None
                # Translate residue names
                resi = translate_residue_names("".join(resi))
                if not resi:
                    # Explicit continue: an empty result previously fell
                    # through to a no-op loop; skipping is clearer.
                    logging.warning('No valid residue name in line: {0}'.format(line))
                    continue
                for r in resi:
                    # Translate atom names, returns multiple atoms in case of ambiguity
                    atom = translate_atom_names(atomid[1], residue=r)
                    if not len(atom):
                        logging.warning('No valid atom name in line: {0}'.format(line))
                    for a in atom:
                        assignment = {
                            'resi': r,
                            'resn': resn,
                            'atom': a,
                            'shift': float(line[1]),
                            'nuc': line[2],
                        }
                        assignments.add(assignment)
    return assignments
def parse_xeasy_peaks(peak_file):
    """
    Parse an Xeasy3D peak file into a Peaks object.

    The Xeasy file format stores a column labeled 'unused' to indicate
    whether the peak has been used in a structure calculation procedure
    (0 or 1). This column is, however, not assigned automatically and
    may not be set at all by the user.

    :param peak_file: Xeasy3D peak file path
    :type peak_file: string
    :return: parsed peaks
    :rtype: Peaks
    """
    assert os.path.exists(peak_file), 'Xeasy3D peakfile {0} does not exist.'.format(peak_file)
    logging.debug('Parsing Xeasy3D peakfile {0}'.format(peak_file))

    peaks = Peaks()
    with open(peak_file) as peakfile:
        for line in peakfile.readlines():
            if not line.startswith('#') and len(line):
                line = line.strip().split()
                # Require all 13 columns up to 'ass3': the previous
                # threshold (len(line) > 10) admitted 11- or 12-column
                # lines, which crashed below on line[11]/line[12].
                if len(line) > 12:
                    peak = {
                        'id': int(line[0]),
                        'w1': float(line[1]),
                        'w2': float(line[2]),
                        'w3': float(line[3]),
                        'spec_type': line[5],
                        'vol': float(line[6]),
                        'vol_err': float(line[7]),
                        'intm': line[8],
                        'unused': int(line[9]),
                        'ass1': int(line[10]),
                        'ass2': int(line[11]),
                        'ass3': int(line[12])
                    }
                    peaks.add(peak)
    return peaks
def parse_pdb(pdb_file):
    """
    Parse RCSB PDB file to Structure object

    Reads ATOM records up to the first END record; everything else is
    ignored.

    :param pdb_file: PDB structure file path
    :type pdb_file: str
    :return: parsed structure
    :rtype: Structure
    """
    assert os.path.exists(pdb_file), 'RCSB PDB file {0} does not exist.'.format(pdb_file)
    logging.debug('Parsing RCSB PDB file {0}'.format(pdb_file))

    structure = Structure()
    with open(pdb_file) as pdb:
        for line in pdb.readlines():
            if line.startswith('ATOM') and len(line):
                # NOTE(review): PDB is a fixed-width format; whitespace
                # splitting assumes there is no chain-identifier column
                # (standard files place the chain ID between resName and
                # resSeq, which would shift every index below) — confirm
                # against the files this is actually used on.
                line = line.strip().split()
                atom = {
                    'id': int(line[1]),
                    'atom': line[2],
                    # presumably returns the translated residue name(s),
                    # as in parse_sparky_proj — verify return type.
                    'resi': translate_residue_names(line[3]),
                    'resn': int(line[4]),
                    'coor': (float(line[5]), float(line[6]), float(line[7]))
                }
                structure.add(atom)
            elif line.startswith('END'):
                # Stop at the first END record (later models are ignored).
                break
    return structure
def get_chh_couple(peaks, assignments, chem_shift_ctol=0.5, chem_shift_ptol=0.05):
    """
    Find carbon-proton assignment pairs for unassigned peaks matching carbon
    chemical shift +/- tolerance and proton chemical shift +/- tolerance in
    the w2 and w3 dimensions respectively. In addition look for proton
    assignments in the w1 dimension for every previously identified C-H
    couple.

    :param peaks: peaks
    :type peaks: Peaks object
    :param assignments: assignments
    :type assignments: Assignments object
    :param chem_shift_ctol: carbon chemical shift tolerance range in ppm
    :type chem_shift_ctol: float
    :param chem_shift_ptol: proton chemical shift tolerance range in ppm
    :type chem_shift_ptol: float
    :return: enumerated dictionary with tuples containing, in sequence,
             the peak ID and assignment IDs for C (w2), H (w3) and,
             when found, H (w1)
    :rtype: dict
    """
    logging.info('Looking for Carbon-Proton-Proton pairs. C chemical shift tolerance: {0:.3f}, H chemical shift tolerance: {1:.3f}'.format(chem_shift_ctol, chem_shift_ptol))

    # For all unassigned peaks, search assignments for residues
    # that match the carbon shift +/- chem_shift_ctol
    chh_couple = {}
    enum = 1
    for pid, peak in peaks.iter_nonassigned():
        for id2, ass2 in assignments.iter_items():
            if not (peak['w2'] > (ass2['shift'] - chem_shift_ctol) and
                    peak['w2'] < (ass2['shift'] + chem_shift_ctol) and
                    ass2['nuc'] == '13C'):
                continue
            # Carbon found, continue to look for a proton in the same residue.
            for id3, ass3 in assignments.iter_items():
                if (peak['w3'] > (ass3['shift'] - chem_shift_ptol) and
                        peak['w3'] < (ass3['shift'] + chem_shift_ptol) and
                        ass2['resn'] == ass3['resn'] and
                        ass3['nuc'] == '1H'):
                    chh_couple[enum] = (peak['id'], id2, id3)
                    logging.debug('Peak {0}: carbon-proton assignment pair found {1}-{2}'.format(peak['id'], ass2['id'], ass3['id']))
                    # Continue to look for unassigned peaks in the w1 dimension.
                    w1_found = False
                    for id1, ass1 in assignments.iter_items():
                        if (peak['w1'] > (ass1['shift'] - chem_shift_ptol) and
                                peak['w1'] < (ass1['shift'] + chem_shift_ptol) and
                                ass1['nuc'] == '1H'):
                            chh_couple[enum] = (peak['id'], id2, id3, id1)
                            logging.debug('Peak {0}: found proton assignment in w1: {1} for C-H couple: {2}-{3}'.format(peak['id'], ass1['id'], ass2['id'], ass3['id']))
                            enum += 1
                            w1_found = True
                    if not w1_found:
                        enum += 1
        # Bug fix: chh_couple is keyed by the enumeration counter, not by
        # peak ID, so the original membership test (peak['id'] in
        # chh_couple) inspected the wrong values and could mis-report
        # peaks without a couple. Compare against the stored peak IDs
        # (first tuple element) instead.
        if not any(couple[0] == peak['id'] for couple in chh_couple.values()):
            logging.info('No C-H couple found for peak {0}'.format(peak['id']))

    logging.info('Identified {0} C-H-(H) assignment couples for unassigned peaks'.format(len(chh_couple)))
    return chh_couple
def find_unused(chh_couple, peaks, assignments, structure=None, dist_cutoff=6.0, chem_shift_ctol=0.5, chem_shift_ptol=0.05):
"""
For each previously identified Carbon-Proton-(Proton) (w2-w3-(w1)) pair do:
- Check if the carbon coordinates can be retrieved from the structure if
available.
- If carbon coordinates, check if the attached proton (w3) can be found in
structure and validate the covalent bond length.
- If w3 proton coordinates, check if the assignment in the w1 dimension
(the unassigned peaks) can be found in the structure.
- If coordinates where found, check if the distance is below dist_cutoff
then store the distance.
- Catagorize the carbon-proton-proton pair using the 'catagorize_assignment'
and print the results to standard out.
:param chh_couple: C-H-(H) (W2-W3-(W1)) couples as identified by
get_chh_couple function.
:type chh_couple: dict
:param peaks: peaks object
:param assignments: assignments object
:param structure: structure object
:param dist_cutoff: distance cutoff in Angstrom
:type dist_cutoff: float
:param chem_shift_ctol: carbon chemical shift tolerance range in ppm
:type chem_shift_ctol: float
:param chem_shift_ctol: Proton chemical shift tolerance range in ppm
:type chem_shift_ctol: float
"""
logging.info('Looking for Carbon-Proton-Proton pairs. C chemical shift tolerance: {0:.3f}, H chemical shift tolerance: {1:.3f}, Distance cutoff: {2:.3f}'.format(chem_shift_ctol, chem_shift_ptol, dist_cutoff))
dist_cutoff = dist_cutoff**2
print(" <Carbon dimension (w2)> <Proton dimension (w3) <Proton dimension (w1)>")
print("Peak ID resi resn atom shift ID resi resn atom shift ID resi resn atom shift Cat. Distance (A)")
# Using peaks for which CH(H) couple was identified, validate distances
for pid in sorted(chh_couple.keys()):
peak = peaks[chh_couple[pid][0]]
assign_cw2 = assignments[chh_couple[pid][1]]
assign_hw3 = assignments[chh_couple[pid][2]]
assign_hw1 = assignments[chh_couple[pid][3]] or None
# Look for carbon atom in structure and store coordinates.
dist = 999
if structure:
carbon = None
for aid_cw2,atom_cw2 in structure.iter_items():
if atom_cw2['resn'] == assign_cw2['resn'] and atom_cw2['atom'] == assign_cw2['atom']:
carbon = atom_cw2['coor']
break
# If carbon found, continue
if carbon and assign_hw1:
# Find proton W3 and calculate distance to carbon to ensure it is
# covalently attached.
w3proton = None
for aid_w3,atom_w3 in structure.iter_items():
if atom_w3['resn'] == assign_hw3['resn'] and atom_w3['atom'] == assign_hw3['atom']:
cw2_w3_dist = calculatedistance(carbon, atom_w3['coor'])
if 1.16 < cw2_w3_dist < 1.21:
w3proton = atom_w3['coor']
if not w3proton:
logging.debug('Proton {0} {1}-{2} of carbon-proton pair {3}-{4} (peak {5}) not found in structure'.format(assign_hw3['atom'],
assign_hw3['resi'], assign_hw3['resn'], assign_hw3['id'], assign_hw3['id'], pid))
continue
# Calculate distance between proton W3 and proton W1.
# There could be ambiquity in atom numbering/naming resulting in multiple
| |
#!/usr/bin/env python
# Copyright (C) 2007 <NAME>' <<EMAIL>>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
import contextlib
import errno
import select
import socket
import sys
import time
from pyftpdlib._compat import PY3
from pyftpdlib.ioloop import Acceptor
from pyftpdlib.ioloop import AsyncChat
from pyftpdlib.ioloop import IOLoop
from pyftpdlib.ioloop import RetryError
from pyftpdlib.test import mock
from pyftpdlib.test import POSIX
from pyftpdlib.test import unittest
from pyftpdlib.test import VERBOSITY
import pyftpdlib.ioloop
if hasattr(socket, 'socketpair'):
    socketpair = socket.socketpair
else:
    # Fallback for platforms where socket.socketpair() is unavailable
    # (e.g. Windows on older Pythons): emulate it with a loopback listener.
    def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
        # Returns (c, a): the connecting client socket and the matching
        # accepted server-side socket.
        with contextlib.closing(socket.socket(family, type, proto)) as l:
            l.bind(("localhost", 0))
            l.listen(5)
            c = socket.socket(family, type, proto)
            try:
                c.connect(l.getsockname())
                caddr = c.getsockname()
                while True:
                    a, addr = l.accept()
                    # check that we've got the correct client: another
                    # local process could connect to our port in the
                    # window between listen() and accept().
                    if addr == caddr:
                        return c, a
                    a.close()
            except OSError:
                # don't leak the client socket on failure
                c.close()
                raise
# TODO: write more tests.
class BaseIOLoopTestCase(object):
    """Shared test mix-in exercising one concrete IOLoop implementation.

    Concrete subclasses also inherit from unittest.TestCase and set
    ``ioloop_class`` to the poller class under test.
    """

    ioloop_class = None  # set by concrete subclasses

    def make_socketpair(self):
        # Connected socket pair; both ends are closed on test teardown.
        rd, wr = socketpair()
        self.addCleanup(rd.close)
        self.addCleanup(wr.close)
        return rd, wr

    def test_register(self):
        s = self.ioloop_class()
        self.addCleanup(s.close)
        rd, wr = self.make_socketpair()
        handler = AsyncChat(rd)
        s.register(rd, handler, s.READ)
        s.register(wr, handler, s.WRITE)
        self.assertIn(rd, s.socket_map)
        self.assertIn(wr, s.socket_map)
        # Returned so other tests can reuse a freshly populated loop.
        return (s, rd, wr)

    def test_unregister(self):
        s, rd, wr = self.test_register()
        s.unregister(rd)
        s.unregister(wr)
        self.assertNotIn(rd, s.socket_map)
        self.assertNotIn(wr, s.socket_map)

    def test_unregister_twice(self):
        # Unregistering an already-unregistered fd must be a no-op.
        s, rd, wr = self.test_register()
        s.unregister(rd)
        s.unregister(rd)
        s.unregister(wr)
        s.unregister(wr)

    def test_modify(self):
        # Switching the event mask of a registered fd must not raise.
        s, rd, wr = self.test_register()
        s.modify(rd, s.WRITE)
        s.modify(wr, s.READ)

    def test_loop(self):
        # no timeout
        s, rd, wr = self.test_register()
        s.call_later(0, s.close)
        s.loop()
        # with timeout
        s, rd, wr = self.test_register()
        s.call_later(0, s.close)
        s.loop(timeout=0.001)

    def test_close(self):
        # Closing the loop must empty its socket map.
        s, rd, wr = self.test_register()
        s.close()
        self.assertEqual(s.socket_map, {})

    def test_close_w_handler_exc(self):
        # Simulate an exception when close()ing a socket handler.
        # Exception should be logged and ignored.
        class Handler(AsyncChat):
            def close(self):
                1 / 0

        s = self.ioloop_class()
        self.addCleanup(s.close)
        rd, wr = self.make_socketpair()
        handler = Handler(rd)
        s.register(rd, handler, s.READ)
        with mock.patch("pyftpdlib.ioloop.logger.error") as m:
            s.close()
            assert m.called
            self.assertIn('ZeroDivisionError', m.call_args[0][0])

    def test_close_w_handler_ebadf_exc(self):
        # Simulate an exception when close()ing a socket handler.
        # Exception should be ignored (and not logged).
        class Handler(AsyncChat):
            def close(self):
                raise OSError(errno.EBADF, "")

        s = self.ioloop_class()
        self.addCleanup(s.close)
        rd, wr = self.make_socketpair()
        handler = Handler(rd)
        s.register(rd, handler, s.READ)
        with mock.patch("pyftpdlib.ioloop.logger.error") as m:
            s.close()
            assert not m.called

    def test_close_w_callback_exc(self):
        # Simulate an exception when close()ing the IO loop and a
        # scheduled callback raises an exception on cancel().
        with mock.patch("pyftpdlib.ioloop.logger.error") as logerr:
            with mock.patch("pyftpdlib.ioloop._CallLater.cancel",
                            side_effect=lambda: 1 / 0) as cancel:
                s = self.ioloop_class()
                self.addCleanup(s.close)
                s.call_later(1, lambda: 0)
                s.close()
                assert cancel.called
                assert logerr.called
                self.assertIn('ZeroDivisionError', logerr.call_args[0][0])
class DefaultIOLoopTestCase(unittest.TestCase, BaseIOLoopTestCase):
    # Exercise whatever poller IOLoop resolves to on this platform.
    ioloop_class = pyftpdlib.ioloop.IOLoop
# ===================================================================
# select()
# ===================================================================
class SelectIOLoopTestCase(unittest.TestCase, BaseIOLoopTestCase):
    """Tests for the select()-based IO loop."""

    ioloop_class = pyftpdlib.ioloop.Select

    def test_select_eintr(self):
        # EINTR is supposed to be ignored (poll() simply retries)...
        with mock.patch('pyftpdlib.ioloop.select.select',
                        side_effect=select.error()) as m:
            m.side_effect.errno = errno.EINTR
            s, rd, wr = self.test_register()
            s.poll(0)
        # ...but just that: any other errno must still propagate.
        with mock.patch('pyftpdlib.ioloop.select.select',
                        side_effect=select.error()) as m:
            m.side_effect.errno = errno.EBADF
            s, rd, wr = self.test_register()
            self.assertRaises(select.error, s.poll, 0)
# ===================================================================
# poll()
# ===================================================================
@unittest.skipUnless(hasattr(pyftpdlib.ioloop, 'Poll'),
                     "poll() not available on this platform")
class PollIOLoopTestCase(unittest.TestCase, BaseIOLoopTestCase):
    """Tests for the poll()-based IO loop (reused for epoll via subclassing).

    Fixed a repeated typo in every mock.patch call: ``return_vaue=`` is
    now ``return_value=``. The typo only worked by accident, because
    mock.patch forwards unknown keyword arguments as attributes of the
    replacement mock.
    """

    ioloop_class = getattr(pyftpdlib.ioloop, "Poll", None)
    # Dotted path of the poller object to patch; overridden by subclasses.
    poller_mock = "pyftpdlib.ioloop.Poll._poller"

    @unittest.skipIf(sys.version_info[:2] == (3, 2), "")
    def test_eintr_on_poll(self):
        # EINTR is supposed to be ignored
        with mock.patch(self.poller_mock, return_value=mock.Mock()) as m:
            if not PY3:
                m.return_value.poll.side_effect = select.error
                m.return_value.poll.side_effect.errno = errno.EINTR
            else:
                m.return_value.poll.side_effect = OSError(errno.EINTR, "")
            s, rd, wr = self.test_register()
            s.poll(0)
            assert m.called
        # ...but just that
        with mock.patch(self.poller_mock, return_value=mock.Mock()) as m:
            if not PY3:
                m.return_value.poll.side_effect = select.error
                m.return_value.poll.side_effect.errno = errno.EBADF
            else:
                m.return_value.poll.side_effect = OSError(errno.EBADF, "")
            s, rd, wr = self.test_register()
            self.assertRaises(select.error, s.poll, 0)
            assert m.called

    def test_eexist_on_register(self):
        # EEXIST is supposed to be ignored
        with mock.patch(self.poller_mock, return_value=mock.Mock()) as m:
            m.return_value.register.side_effect = \
                EnvironmentError(errno.EEXIST, "")
            s, rd, wr = self.test_register()
        # ...but just that
        with mock.patch(self.poller_mock, return_value=mock.Mock()) as m:
            m.return_value.register.side_effect = \
                EnvironmentError(errno.EBADF, "")
            self.assertRaises(EnvironmentError, self.test_register)

    def test_enoent_ebadf_on_unregister(self):
        # ENOENT and EBADF are supposed to be ignored
        for errnum in (errno.EBADF, errno.ENOENT):
            with mock.patch(self.poller_mock, return_value=mock.Mock()) as m:
                m.return_value.unregister.side_effect = \
                    EnvironmentError(errnum, "")
                s, rd, wr = self.test_register()
                s.unregister(rd)
        # ...but just those
        with mock.patch(self.poller_mock, return_value=mock.Mock()) as m:
            m.return_value.unregister.side_effect = \
                EnvironmentError(errno.EEXIST, "")
            s, rd, wr = self.test_register()
            self.assertRaises(EnvironmentError, s.unregister, rd)

    def test_enoent_on_modify(self):
        # ENOENT is supposed to be ignored
        with mock.patch(self.poller_mock, return_value=mock.Mock()) as m:
            m.return_value.modify.side_effect = \
                OSError(errno.ENOENT, "")
            s, rd, wr = self.test_register()
            s.modify(rd, s.READ)
# ===================================================================
# epoll()
# ===================================================================
@unittest.skipUnless(hasattr(pyftpdlib.ioloop, 'Epoll'),
                     "epoll() not available on this platform (Linux only)")
class EpollIOLoopTestCase(PollIOLoopTestCase):
    # Reuses every test from PollIOLoopTestCase against the epoll backend.
    ioloop_class = getattr(pyftpdlib.ioloop, "Epoll", None)
    poller_mock = "pyftpdlib.ioloop.Epoll._poller"
# ===================================================================
# /dev/poll
# ===================================================================
@unittest.skipUnless(hasattr(pyftpdlib.ioloop, 'DevPoll'),
                     "/dev/poll not available on this platform (Solaris only)")
class DevPollIOLoopTestCase(unittest.TestCase, BaseIOLoopTestCase):
    # Runs the shared base tests against the Solaris /dev/poll backend.
    ioloop_class = getattr(pyftpdlib.ioloop, "DevPoll", None)
# ===================================================================
# kqueue
# ===================================================================
# Fixed skip message: it previously said "/dev/poll" (copy-paste from the
# DevPoll test case above) although this class tests the kqueue backend.
@unittest.skipUnless(hasattr(pyftpdlib.ioloop, 'Kqueue'),
                     "kqueue() not available on this platform (BSD only)")
class KqueueIOLoopTestCase(unittest.TestCase, BaseIOLoopTestCase):
    # Runs the shared base tests against the BSD/macOS kqueue backend.
    ioloop_class = getattr(pyftpdlib.ioloop, "Kqueue", None)
class TestCallLater(unittest.TestCase):
    """Tests for CallLater class."""

    def setUp(self):
        # Start each test from an empty scheduler: cancel and drop any
        # task left over by previous tests.
        self.ioloop = IOLoop.instance()
        for task in self.ioloop.sched._tasks:
            if not task.cancelled:
                task.cancel()
        del self.ioloop.sched._tasks[:]

    def scheduler(self, timeout=0.01, count=100):
        # Drive the scheduler until all tasks ran or `count` polls elapsed.
        while self.ioloop.sched._tasks and count > 0:
            self.ioloop.sched.poll()
            count -= 1
            time.sleep(timeout)

    def test_interface(self):
        def fun():
            return 0

        # Negative delays are rejected.
        self.assertRaises(AssertionError, self.ioloop.call_later, -1, fun)
        x = self.ioloop.call_later(3, fun)
        self.assertEqual(x.cancelled, False)
        x.cancel()
        self.assertEqual(x.cancelled, True)
        # A cancelled task can be neither called nor reset...
        self.assertRaises(AssertionError, x.call)
        self.assertRaises(AssertionError, x.reset)
        # ...but cancelling again is a harmless no-op.
        x.cancel()

    def test_order(self):
        # Callbacks fire in timeout order, not registration order.
        def fun(x):
            l.append(x)

        l = []
        for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
            self.ioloop.call_later(x, fun, x)
        self.scheduler()
        self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])

    # The test is reliable only on those systems where time.time()
    # provides time with a better precision than 1 second.
    if not str(time.time()).endswith('.0'):
        def test_reset(self):
            # reset() re-schedules relative to "now", pushing the task last.
            def fun(x):
                l.append(x)

            l = []
            self.ioloop.call_later(0.01, fun, 0.01)
            self.ioloop.call_later(0.02, fun, 0.02)
            self.ioloop.call_later(0.03, fun, 0.03)
            x = self.ioloop.call_later(0.04, fun, 0.04)
            self.ioloop.call_later(0.05, fun, 0.05)
            time.sleep(0.1)
            x.reset()
            self.scheduler()
            self.assertEqual(l, [0.01, 0.02, 0.03, 0.05, 0.04])

    def test_cancel(self):
        # Cancelled tasks never fire.
        def fun(x):
            l.append(x)

        l = []
        self.ioloop.call_later(0.01, fun, 0.01).cancel()
        self.ioloop.call_later(0.02, fun, 0.02)
        self.ioloop.call_later(0.03, fun, 0.03)
        self.ioloop.call_later(0.04, fun, 0.04)
        self.ioloop.call_later(0.05, fun, 0.05).cancel()
        self.scheduler()
        self.assertEqual(l, [0.02, 0.03, 0.04])

    def test_errback(self):
        # _errback runs when the scheduled callback raises.
        l = []
        self.ioloop.call_later(
            0.0, lambda: 1 // 0, _errback=lambda: l.append(True))
        self.scheduler()
        self.assertEqual(l, [True])

    def test__repr__(self):
        # repr() of a scheduled task must not raise.
        repr(self.ioloop.call_later(0.01, lambda: 0, 0.01))

    def test__lt__(self):
        a = self.ioloop.call_later(0.01, lambda: 0, 0.01)
        b = self.ioloop.call_later(0.02, lambda: 0, 0.02)
        self.assertTrue(a < b)

    def test__le__(self):
        a = self.ioloop.call_later(0.01, lambda: 0, 0.01)
        b = self.ioloop.call_later(0.02, lambda: 0, 0.02)
        self.assertTrue(a <= b)
class TestCallEvery(unittest.TestCase):
    """Tests for CallEvery class."""

    def setUp(self):
        # Start each test from an empty scheduler: cancel and drop any
        # task left over by previous tests.
        self.ioloop = IOLoop.instance()
        for task in self.ioloop.sched._tasks:
            if not task.cancelled:
                task.cancel()
        del self.ioloop.sched._tasks[:]

    def scheduler(self, timeout=0.003):
        # Poll the scheduler repeatedly for `timeout` seconds of wall time.
        stop_at = time.time() + timeout
        while time.time() < stop_at:
            self.ioloop.sched.poll()

    def test_interface(self):
        def fun():
            return 0

        # Negative intervals are rejected.
        self.assertRaises(AssertionError, self.ioloop.call_every, -1, fun)
        x = self.ioloop.call_every(3, fun)
        self.assertEqual(x.cancelled, False)
        x.cancel()
        self.assertEqual(x.cancelled, True)
        # A cancelled task can be neither called nor reset...
        self.assertRaises(AssertionError, x.call)
        self.assertRaises(AssertionError, x.reset)
        # ...but cancelling again is a harmless no-op.
        x.cancel()

    def test_only_once(self):
        # make sure that callback is called only once per-loop
        def fun():
            l1.append(None)

        l1 = []
        self.ioloop.call_every(0, fun)
        self.ioloop.sched.poll()
        self.assertEqual(l1, [None])

    def test_multi_0_timeout(self):
        # make sure a 0 timeout callback is called as many times
        # as the number of loops
        def fun():
            l.append(None)

        l = []
        self.ioloop.call_every(0, fun)
        for x in range(100):
            self.ioloop.sched.poll()
        self.assertEqual(len(l), 100)

    # run it on systems where time.time() has a higher precision
    if POSIX:
        def test_low_and_high_timeouts(self):
            # make sure a callback with a lower timeout is called more
            # frequently than another with a greater timeout
            def fun():
                l1.append(None)

            l1 = []
            self.ioloop.call_every(0.001, fun)
            self.scheduler()

            def fun():
                l2.append(None)

            l2 = []
            self.ioloop.call_every(0.005, fun)
            self.scheduler(timeout=0.01)
            self.assertTrue(len(l1) > len(l2))

    def test_cancel(self):
        # make sure a cancelled callback doesn't get called anymore
        def fun():
            l.append(None)

        l = []
        call = self.ioloop.call_every(0.001, fun)
        self.scheduler()
        len_l = len(l)
        call.cancel()
        self.scheduler()
        self.assertEqual(len_l, len(l))

    def test_errback(self):
        # _errback runs when the periodic callback raises.
        l = []
        self.ioloop.call_every(
            0.0, lambda: 1 // 0, _errback=lambda: l.append(True))
        self.scheduler()
        self.assertTrue(l)
class TestAsyncChat(unittest.TestCase):
    """Tests for AsyncChat's send/recv error handling."""

    def get_connected_handler(self):
        # Handler wrapping a plain socket; both are closed on teardown.
        s = socket.socket()
        self.addCleanup(s.close)
        ac = AsyncChat(sock=s)
        self.addCleanup(ac.close)
        return ac

    def test_send_retry(self):
        # Transient errnos must make send() report 0 bytes written.
        ac = self.get_connected_handler()
        for errnum in pyftpdlib.ioloop._ERRNOS_RETRY:
            with mock.patch("pyftpdlib.ioloop.socket.socket.send",
                            side_effect=socket.error(errnum, "")) as m:
                self.assertEqual(ac.send(b"x"), 0)
                assert m.called

    def test_send_disconnect(self):
        # Disconnection errnos must trigger handle_close().
        ac = self.get_connected_handler()
        for errnum in pyftpdlib.ioloop._ERRNOS_DISCONNECTED:
            with mock.patch("pyftpdlib.ioloop.socket.socket.send",
                            side_effect=socket.error(errnum, "")) as send:
                with mock.patch.object(ac, "handle_close") as handle_close:
                    self.assertEqual(ac.send(b"x"), 0)
                    assert send.called
                    assert handle_close.called

    def test_recv_retry(self):
        # Transient errnos must surface as RetryError.
        ac = self.get_connected_handler()
        for errnum in pyftpdlib.ioloop._ERRNOS_RETRY:
            with mock.patch("pyftpdlib.ioloop.socket.socket.recv",
                            side_effect=socket.error(errnum, "")) as m:
                self.assertRaises(RetryError, ac.recv, 1024)
                assert m.called

    def test_recv_disconnect(self):
        # Disconnection errnos must return b'' and trigger handle_close().
        ac = self.get_connected_handler()
        for errnum in pyftpdlib.ioloop._ERRNOS_DISCONNECTED:
            with mock.patch("pyftpdlib.ioloop.socket.socket.recv",
                            side_effect=socket.error(errnum, "")) as send:
                with mock.patch.object(ac, "handle_close") as handle_close:
                    # Fixed: recv() takes a buffer size, not a bytes
                    # payload — the original passed b"x" (compare
                    # test_recv_retry above). The mocked socket.recv
                    # ignores the argument, so behavior is unchanged.
                    self.assertEqual(ac.recv(1024), b'')
                    assert send.called
                    assert handle_close.called

    def test_connect_af_unspecified_err(self):
        # If connect() fails, connect_af_unspecified() must propagate the
        # error and leave the handler without a socket.
        ac = AsyncChat()
        with mock.patch.object(
                ac, "connect",
                side_effect=socket.error(errno.EBADF, "")) as m:
            self.assertRaises(socket.error,
                              ac.connect_af_unspecified, ("localhost", 0))
            assert m.called
            self.assertIsNone(ac.socket)
class TestAcceptor(unittest.TestCase):
def test_bind_af_unspecified_err(self):
ac = Acceptor()
with mock.patch.object(
ac, "bind",
side_effect=socket.error(errno.EBADF, "")) as | |
left and right names.
Just to make it easier for people, we generate a name
which is easy to display of the form "LEFT - RIGHT".
The individual properties are available if the user
wants to generate a more complex name.
>>> x = {}
>>> _make_joined_name(x)
>>> x
{}
>>> x = {'name:left':'Left'}
>>> _make_joined_name(x)
>>> x
{'name': 'Left', 'name:left': 'Left'}
>>> x = {'name:right':'Right'}
>>> _make_joined_name(x)
>>> x
{'name': 'Right', 'name:right': 'Right'}
>>> x = {'name:left':'Left', 'name:right':'Right'}
>>> _make_joined_name(x)
>>> x
{'name:right': 'Right', 'name': 'Left - Right', 'name:left': 'Left'}
>>> x = {'name:left':'Left', 'name:right':'Right', 'name': 'Already Exists'}
>>> _make_joined_name(x)
>>> x
{'name:right': 'Right', 'name': 'Already Exists', 'name:left': 'Left'}
"""
# don't overwrite an existing name
if 'name' in props:
return
lname = props.get('name:left')
rname = props.get('name:right')
if lname is not None:
if rname is not None:
props['name'] = '%s - %s' % (lname, rname)
else:
props['name'] = lname
elif rname is not None:
props['name'] = rname
def _linemerge(geom):
    """
    Try to extract all the linear features from the geometry argument
    and merge them all together into the smallest set of linestrings
    possible.

    This is almost identical to Shapely's linemerge, and uses it,
    except that Shapely's throws exceptions when passed a single
    linestring, or a geometry collection with lines and points in it.
    So this can be thought of as a "safer" wrapper around Shapely's
    function.
    """
    geom_type = geom.type
    result_geom = None

    if geom_type == 'GeometryCollection':
        # collect together everything line-like from the geometry
        # collection and filter out anything that's empty
        lines = []
        for line in geom.geoms:
            # recurse: members may themselves be collections/multi-lines
            line = _linemerge(line)
            if not line.is_empty:
                lines.append(line)
        result_geom = linemerge(lines) if lines else None
    elif geom_type == 'LineString':
        result_geom = geom
    elif geom_type == 'MultiLineString':
        result_geom = linemerge(geom)
    else:
        # points, polygons, etc. carry no linear features to merge
        result_geom = None

    if result_geom is not None:
        # simplify with very small tolerance to remove duplicate points.
        # almost duplicate or nearly colinear points can occur due to
        # numerical round-off or precision in the intersection algorithm, and
        # this should help get rid of those. see also:
        # http://lists.gispython.org/pipermail/community/2014-January/003236.html
        #
        # the tolerance here is hard-coded to a fraction of the
        # coordinate magnitude. there isn't a perfect way to figure
        # out what this tolerance should be, so this may require some
        # tweaking.
        epsilon = max(map(abs, result_geom.bounds)) * float_info.epsilon * 1000
        result_geom = result_geom.simplify(epsilon, True)

        result_geom_type = result_geom.type
        # the geometry may still have invalid or repeated points if it has zero
        # length segments, so remove anything where the length is less than
        # epsilon.
        if result_geom_type == 'LineString':
            if result_geom.length < epsilon:
                result_geom = None
        elif result_geom_type == 'MultiLineString':
            parts = []
            for line in result_geom.geoms:
                if line.length >= epsilon:
                    parts.append(line)
            result_geom = MultiLineString(parts)

    # NOTE(review): relies on empty Shapely geometries being falsy, so an
    # empty merge result is normalized to an empty MultiLineString here.
    return result_geom if result_geom else MultiLineString([])
def _orient(geom):
    """
    Return the counter-clockwise oriented version of a shape; points and
    lines are returned unchanged.

    This wrapper exists because Shapely's orient() is only defined for
    single polygons, and we want something that works generically, so
    multi-polygons and geometry collections are rebuilt from their
    individually oriented members.

    In the example below, note the change in order of the
    coordinates in `p2`, which is initially not oriented
    CCW.

    >>> p1 = Polygon([[0, 0], [1, 0], [0, 1], [0, 0]])
    >>> p2 = Polygon([[0, 1], [1, 1], [1, 0], [0, 1]])
    >>> orient(p1).wkt
    'POLYGON ((0 0, 1 0, 0 1, 0 0))'
    >>> orient(p2).wkt
    'POLYGON ((0 1, 1 0, 1 1, 0 1))'
    >>> _orient(MultiPolygon([p1, p2])).wkt
    'MULTIPOLYGON (((0 0, 1 0, 0 1, 0 0)), ((0 1, 1 0, 1 1, 0 1)))'
    """
    def _rebuild_oriented(ctor, multi):
        # Reconstruct a multi-geometry from its oriented members.
        return ctor([_orient(part) for part in multi.geoms])

    kind = geom.type
    if kind == 'Polygon':
        return orient(geom)
    if kind == 'MultiPolygon':
        return _rebuild_oriented(MultiPolygon, geom)
    if kind == 'GeometryCollection':
        return _rebuild_oriented(GeometryCollection, geom)
    return geom
def _fix_disputed_left_right_kinds(props):
"""
After merging left/right props, we might find that any kind:XX for disputed
borders are mixed up as kind:left:XX or kind:right:XX and we want to merge
them back together again.
"""
keys = []
for k in props.keys():
if k.startswith('kind:left:') or k.startswith('kind:right:'):
keys.append(k)
for k in keys:
prefix = 'kind:left:' if k.startswith('kind:left:') else 'kind:right:'
new_key = 'kind:' + k[len(prefix):]
value = props.pop(k)
props[new_key] = value
def admin_boundaries(ctx):
"""
Given a layer with admin boundaries and inclusion polygons for
land-based boundaries, attempts to output a set of oriented
boundaries with properties from both the left and right admin
boundary, and also cut with the maritime information to provide
a `maritime_boundary: True` value where there's overlap between
the maritime lines and the admin boundaries.
    Note that admin boundaries must already be correctly oriented.
In other words, it must have a positive area and run counter-
clockwise around the polygon for which it is an outer (or
clockwise if it was an inner).
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
base_layer = ctx.params.get('base_layer')
assert base_layer, 'Parameter base_layer missing.'
start_zoom = ctx.params.get('start_zoom', 0)
layer = None
# don't start processing until the start zoom
if zoom < start_zoom:
return layer
layer = _find_layer(feature_layers, base_layer)
if layer is None:
return None
# layer will have polygonal features for the admin
# polygons and also linear features for the maritime
# boundaries. further, we want to group the admin
# polygons by their kind, as this will reduce the
# working set.
admin_features = defaultdict(list)
maritime_features = list()
new_features = list()
# Sorting here so that we have consistent ordering of left/right side
# on boundaries.
sorted_layer = sorted(layer['features'], key=lambda f: f[1]['id'])
for shape, props, fid in sorted_layer:
dims = _geom_dimensions(shape)
kind = props.get('kind')
maritime_boundary = props.get('maritime_boundary')
# the reason to use this rather than compare the
# string of types is to catch the "multi-" types
# as well.
if dims == _LINE_DIMENSION and kind is not None:
admin_features[kind].append((shape, props, fid))
elif dims == _POLYGON_DIMENSION and maritime_boundary:
maritime_features.append((shape, {'maritime_boundary': False}, 0))
# there are separate polygons for each admin level, and
# we only want to intersect like with like because it
# makes more sense to have Country-Country and
# State-State boundaries (and labels) rather than the
# (combinatoric) set of all different levels.
for kind, features in admin_features.iteritems():
num_features = len(features)
envelopes = [g[0].envelope for g in features]
for i, feature in enumerate(features):
boundary, props, fid = feature
prop_id = props['id']
envelope = envelopes[i]
# intersect with *preceding* features to remove
# those boundary parts. this ensures that there
# are no duplicate parts.
for j in range(0, i):
cut_shape, cut_props, cut_fid = features[j]
# don't intersect with self
if prop_id == cut_props['id']:
continue
cut_envelope = envelopes[j]
if envelope.intersects(cut_envelope):
try:
boundary = boundary.difference(cut_shape)
except shapely.errors.TopologicalError:
                        # NOTE: we have seen TopologicalError exceptions here
# that look like:
# TopologicalError: This operation could not be
# performed. Reason: unknown"
pass
if boundary.is_empty:
break
# intersect with every *later* feature. now each
# intersection represents a section of boundary
# that we want to keep.
for j in range(i+1, num_features):
cut_shape, cut_props, cut_fid = features[j]
# don't intersect with self
if prop_id == cut_props['id']:
continue
cut_envelope = envelopes[j]
if envelope.intersects(cut_envelope):
try:
inside, boundary = _intersect_cut(boundary, cut_shape)
except (StandardError, shapely.errors.ShapelyError):
# if the inside and remaining boundary can't be
# calculated, then we can't continue to intersect
# anything else with this shape. this means we might
# end up with erroneous one-sided boundaries.
# TODO: log warning!
break
inside = _linemerge(inside)
if not inside.is_empty:
new_props = _merge_left_right_props(props, cut_props)
new_props['id'] = props['id']
_make_joined_name(new_props)
_fix_disputed_left_right_kinds(new_props)
new_features.append((inside, new_props, fid))
if boundary.is_empty:
break
# anything left over at the end is still a boundary,
# but a one-sided boundary to international waters.
boundary = _linemerge(boundary)
if not boundary.is_empty:
new_props = props.copy()
_make_joined_name(new_props)
new_features.append((boundary, new_props, fid))
# use intracut for maritime, but it intersects in a positive
# way - it sets the tag on anything which intersects, whereas
# we want to | |
<reponame>sethsource/Draco
#-----------------------------*-python-*----------------------------------------#
# file config/application_unit_test.py
# author <NAME> <<EMAIL>>
# date Monday, August 12, 2016, 5:44 pm
# brief Provide a python class that aids in creating unit tests that run
# interactive user codes (i.e.: run a binary that reads an
# input file and diff the resulting output file).
# note Copyright (C) 2016-2019, Triad National Security, LLC.
# All rights reserved.
#------------------------------------------------------------------------------#
import platform
import os
import re
import subprocess
import sys
import time
#------------------------------------------------------------------------------#
## Example from draco/src/diagnostics/test/tDracoInfo.cmake
#------------------------------------------------------------------------------#
# Use config/ApplicationUnitTest.cmake test registration:
#
# include( ApplicationUnitTest )
# add_app_unit_test(
# DRIVER ${CMAKE_CURRENT_SOURCE_DIR}/tDracoInfo.py
# APP $<TARGET_FILE_DIR:Exe_draco_info>/$<TARGET_FILE_NAME:Exe_draco_info>
# LABELS nomemcheck )
# The above will generate a test with data similar to this:
#
# add_test(
# NAME ${ctestname_base}${argname}
# COMMAND ${PYTHON_COMMAND}
# ${aut_DRIVER}
# -D APP=${aut_APP}
# -D ARGVALUE=${argvalue}
# -D WORKDIR=${aut_WORKDIR}
# -D TESTNAME=${ctestname_base}${argname}
# -D DRACO_CONFIG_DIR=${DRACO_CONFIG_DIR}
# -D DRACO_INFO=$<TARGET_FILE_DIR:Exe_draco_info>/$<TARGET_FILE_NAME:Exe_draco_info>
# -D STDINFILE=${aut_STDINFILE}
# -D GOLDFILE=${aut_GOLDFILE}
# -D RUN_CMD=${RUN_CMD}
# -D numPE=${numPE}
# -D PROJECT_BINARY_DIR=${PROJECT_BINARY_DIR}
# -D PROJECT_SOURCE_DIR=${PROJECT_SOURCE_DIR}
# ${BUILDENV}
# )
#------------------------------------------------------------------------------#
# function that returns the path of the input string, if found
def which(program):
    """Return the full path of *program* if it is an executable file.

    When *program* has no directory component, each entry of $PATH is
    searched in order; otherwise the given path itself is checked.
    Returns None when nothing executable is found.
    """
    def _runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was supplied; accept it only if executable.
        if _runnable(program):
            return program
        return None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if _runnable(candidate):
            return candidate
    return None
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# print unit test footer fail message (this is what CMake looks for to
# indicate failure, it does not look for a non-zero return code!)
def print_final_fail_msg(testname):
    """Print the unit-test footer failure banner.

    CMake detects failure by matching this text; it does not look at
    the process return code.
    """
    banner = "*****************************************************************"
    print(banner)
    print("**** {0}: FAILED.".format(testname))
    print(banner)
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# search string with regular expression and return the first matching component
def simple_search(regex, arg_string):
    """Return the first capture of compiled *regex* in *arg_string*.

    Returns the sentinel string "not_found" when the pattern does not
    match.
    """
    hits = regex.findall(arg_string)
    return hits[0] if hits else "not_found"
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# print contents of file
def print_file(file_name):
    """Print the contents of *file_name* line by line, stripped.

    Uses a context manager so the handle is closed even when printing
    raises (the original left the file open on an exception), and
    iterates the file directly instead of materializing readlines().
    """
    with open(file_name, 'r') as f_temp:
        for line in f_temp:
            print(line.strip())
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# check to see if the variable name was set with a value in string (copies
# CMake's "if(<variable>)" logic)
def is_set(param_string):
    """Mimic CMake's if(<variable>) logic.

    True only when the value is non-empty and is not the "not_found"
    sentinel produced by simple_search.
    """
    return param_string != "" and param_string != "not_found"
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# check to see if the variable name was found in the string (copies CMake logic
# for DEFINED in CMake)
def is_defined(param_string):
    """Mimic CMake's DEFINED logic.

    True unless the "not_found" sentinel came back from simple_search;
    an empty (but present) value still counts as defined.
    """
    return param_string != "not_found"
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Class encapsulating all functions needed for unit testing
class UnitTest:
re_app = re.compile("APP=([^\s]*)")
#re_build_env = re.compile("BUILDENV=([^\s]*)")
re_gold_in_file = re.compile("GOLDFILE=([^\s]*)")
re_std_in_file = re.compile("STDINFILE=([^\s]*)")
#re_test_args = re.compile("TEST_ARGS=([^\s]*)")
#re_pass_regex = re.compile("PASS_REGEX=([^\s]*)")
#re_fail_regex = re.compile("FAIL_REGEX=([^\s]*)")
#re_pe_list = re.compile("PE_LIST=([^\s]*)")
re_project_binary_dir = re.compile("PROJECT_BINARY_DIR=([^\s]*)")
re_project_source_dir = re.compile("PROJECT_SOURCE_DIR=([^\s]*)")
re_test_name = re.compile("TESTNAME=([^\s]*)")
re_numPE = re.compile("numPE=([^\s]*)")
re_mpiexec = re.compile("MPIEXEC_EXECUTABLE=([^\s]*)")
re_mpi_cores_per_cpu = re.compile("MPI_CORES_PER_CPU=([^\s]*)")
re_draco_info = re.compile("DRACO_INFO=([^\s]*)")
re_gdiff = re.compile("GDIFF=([^\s]*)")
re_pgdiff = re.compile("PGDIFF=([^\s]*)")
# run command can include spaces, modfiy regular expression for that
re_run_cmd = re.compile("RUN_CMD=(.*?)-D")
re_arg_value = re.compile("ARGVALUE=([^\s]*)")
re_workdir = re.compile("WORKDIR=([^\s]*)")
re_host_system_processor = re.compile("CMAKE_HOST_SYSTEM_PROCESSOR=([^\s]*)")
# Win32 applications use '.exe' suffix. Empty string for Linux.
exe_ext = ""
def __init__(self):
try:
# get all command line arguments into a parseable string
self.full_arg_string = ""
for arg in sys.argv:
self.full_arg_string = "{0} {1}".format(self.full_arg_string,arg)
# setup and sanity check
self.app = simple_search(self.re_app, self.full_arg_string)
if not is_set(self.app):
self.fatal_error("You must provide a value for APP")
self.app = os.path.abspath(self.app)
if any(platform.win32_ver()):
self.exe_ext = ".exe"
# set paths of input, binary directory and gold
self.input = simple_search(self.re_std_in_file, self.full_arg_string)
if is_set(self.input):
self.input = os.path.abspath(self.input)
if (not os.path.exists(self.input)):
self.fatal_error("File not found, STDINFILE={0}.".format(self.input))
self.bindir = os.path.abspath(self.app)
self.gold = simple_search(self.re_gold_in_file, self.full_arg_string)
if is_set(self.gold):
if (not os.path.exists(self.gold)):
self.fatal_error("File not found, GOLDFILE={0}.".format(self.gold))
# Base the output file name off the name of the gold, if set.
if is_set(self.gold):
self.outfile = os.path.basename(self.gold)
else:
self.outfile = os.path.basename(self.app)
# Default filenames for output and error streams, add process IDs
# to filenames to avoid errors when multiple processors run jobs
self.outfile = "{0}_{1}".format(self.outfile, os.getpid())
self.project_binary_dir = simple_search(self.re_project_binary_dir, self.full_arg_string)
self.project_source_dir = simple_search(self.re_project_source_dir, self.full_arg_string)
self.errfile = "{0}/{1}.err".format(self.project_binary_dir, self.outfile)
self.outfile = "{0}/{1}.out".format(self.project_binary_dir, self.outfile)
if (not os.path.exists(self.app)):
self.fatal_error("Cannot find {0}".format(self.app))
else:
print("Testing {0}".format(self.app))
# Initialize number of passes and fails to zero.
self.numpasses = 0
self.numfails = 0
# get the needed variables from the argument string using regex
self.testname = simple_search(self.re_test_name, self.full_arg_string)
self.numPE = simple_search(self.re_numPE, self.full_arg_string)
self.mpi_cores_per_cpu = simple_search(self.re_mpi_cores_per_cpu, \
self.full_arg_string)
self.mpiexec = simple_search(self.re_mpiexec, self.full_arg_string)
self.draco_info = simple_search(self.re_draco_info, self.full_arg_string)
self.run_cmd = simple_search(self.re_run_cmd, self.full_arg_string)
self.arg_value = simple_search(self.re_arg_value, self.full_arg_string)
self.workdir = simple_search(self.re_workdir, self.full_arg_string)
self.host_system_processor = simple_search(self.re_host_system_processor, \
self.full_arg_string)
self.gdiff = simple_search(self.re_gdiff, self.full_arg_string)
self.pgdiff = simple_search(self.re_pgdiff, self.full_arg_string)
debug = False
if (debug):
print("Running with the following parameters")
print(" APP = {0}".format(self.app))
print(" BINDIR = {0}".format(self.bindir))
print(" ARGVALUE = {0}".format(self.arg_value))
print(" PROJECT_BINARY_DIR = {0}".format(self.project_binary_dir))
print(" OUTFILE = {0}".format(self.outfile))
print(" ERRFILE = {0}".format(self.errfile))
if (self.input):
print(" STDINFILE = {0}".format(self.input))
if (self.gold):
print(" GOLDFILE = {0}".format(self.gold))
# make dictionary of argument values for simple mapping between
# cmake commands and python functions
self.cmake_args = {"APP":self.app, "TESTNAME":self.testname, \
"STDINFILE":self.input, "GOLDFILE":self.gold, \
"PROJECT_BINARY_DIR":self.project_binary_dir, \
"PROJECT_SOURCE_DIR":self.project_source_dir, "TESTNAME":self.testname, \
"numPE":self.numPE, "MPI_CORES_PER_CPU":self.mpi_cores_per_cpu, \
"MPIEXEC_EXECUTABLE":self.mpiexec, "DRACO_INFO":self.draco_info, \
"RUN_CMD":self.run_cmd, "ARGVALUE":self.arg_value, \
"WORKDIR":self.workdir, \
"CMAKE_HOST_SYSTEM_PROCESSOR":self.host_system_processor}
# set endinaness for this test
self.little_endian = True
if (self.host_system_processor =="powerpc64") or \
(self.host_system_processor=="ppc64"):
self.litle_endian = False
if is_set(self.mpi_cores_per_cpu):
if self.mpi_cores_per_cpu == 0:
self.fatal_error("Must set a nonzero number for MPI_CORES_PER_CPU")
else:
self.mpi_cores_per_cpu = int(self.mpi_cores_per_cpu)
# Look for numdiff in $PATH
self.numdiff_exe = which("numdiff"+self.exe_ext)
if (not self.numdiff_exe):
self.fatal_error("Numdiff not found in PATH")
if (debug):
print(" exenumdiff = {0}".format(self.numdiff_exe))
except Exception:
print("Caught exception: {0} {1}".format( sys.exc_info()[0], \
sys.exc_info()[1]))
self.fatal_error("Ending test execution after catching exception")
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Run the application and capture the output.
def aut_runTests(self, continue_on_error=False ):
try:
print("\n=======================================================")
print("=== {0}".format(self.testname))
print("=======================================================")
# run draco --version with correct run command
draco_info_numPE = ""
if is_set(self.numPE):
# Use 1 proc to run draco_info
draco_info_numPE = "1"
if (os.path.exists(self.draco_info)):
# make a list of clean argument (no spaces, no empty strings)
clean_draco_run_args = []
for arg in self.run_cmd.split():
clean_draco_run_args.append(arg)
if draco_info_numPE.strip():
clean_draco_run_args.append(draco_info_numPE.strip())
if self.draco_info.strip():
clean_draco_run_args.append(self.draco_info.strip())
clean_draco_run_args.append("--version")
print("About to run \'{0}\'".format(' '.join(clean_draco_run_args)))
draco_process = \
subprocess.Popen(clean_draco_run_args, stdout=subprocess.PIPE, \
stderr=subprocess.STDOUT )
draco_out, draco_err = draco_process.communicate()
if (draco_process.returncode != 0):
print("Unable to run \'{0}\'".format(' '.join(clean_draco_run_args)))
print(draco_err)
else:
print(draco_out)
# add numPE to the output file
safe_arg_value = ""
if is_set(self.numPE):
self.outfile = self.outfile.replace(".out", "-{0}.out".format(self.numPE))
self.errfile = self.errfile.replace(".err", "-{0}.err".format(self.numPE))
# print run command
if is_defined(self.run_cmd):
print(">>> Running: {0} {1}".format(self.run_cmd, self.numPE))
print(">>> {0}".format(self.app))
if (self.arg_value):
print(">>> {0}".format(self.arg_value))
else:
print(">>> Running: {0} {1}".format(self.app, self.arg_value))
# Run the application capturing all output.
stdin_file = is_set(self.input)
if stdin_file:
f_in = open(self.input, 'r')
# make a list of clean argument (no spaces, no empty strings)
clean_run_args = []
for arg in self.run_cmd.split():
clean_run_args.append(arg)
if self.numPE.strip():
clean_run_args.append(self.numPE.strip())
if self.app.strip():
clean_run_args.append(self.app.strip())
for arg in (self.arg_value.split()):
if arg.strip():
clean_run_args.append(arg.strip())
# if test requires standard input, use the subprocess call to set the file
if (stdin_file):
print("About to run \'{0}\'".format(' '.join(clean_run_args)))
test_process = subprocess.Popen(clean_run_args, stdout=subprocess.PIPE, \
stderr=subprocess.PIPE, stdin=f_in, universal_newlines=True)
else:
test_process = subprocess.Popen(clean_run_args, stdout=subprocess.PIPE, \
stderr=subprocess.PIPE, universal_newlines=True)
test_out, test_err = test_process.communicate()
if (stdin_file): f_in.close();
# Test the return code. Normally, if the return code is non-zero print an
# error message and return control to ctest (don't run the remaining
# checks). If continue_on_error=True, print a message and continue running
# checks.
if (test_process.returncode):
# we have a non-zero return code.
if(continue_on_error):
print("Non-zero return code detected, but continue_on_error=True.")
else:
if test_err:
print("Test FAILED: stderr is:")
print(test_err)
else:
print("Test FAILED: stderr is empty")
print("Test FAILED: stdout is:")
print(test_out)
self.fatal_error("See {0} for full details.".format(self.outfile))
else:
# The return code was zero. Record this success and continue running the
# checks.
print(test_out)
self.passmsg("Application ran to completion")
# make output files
f_out = open(self.outfile,'w')
f_out.write(test_out)
f_out.close()
f_err = open(self.errfile, 'w')
if (test_err):
f_err.write(test_err)
f_err.close()
except Exception:
print("Caught exception: {0} {1}".format( sys.exc_info()[0], \
sys.exc_info()[1]))
self.fatal_error("Ending test execution after catching exception")
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# check to see if the error file contains a given string
def error_contains(self, search_string):
# search file for string
return_bool = False
with open(self.errfile) as f:
for line in f:
if (search_string in line):
return_bool = True
return return_bool
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# check to see if the output file contains a given string
def output_contains(self, search_string):
# search file for string
return_bool = False
with open(self.outfile) as f:
for line in f:
if (search_string in line):
return_bool = True
return return_bool
#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# get a value with REGEX, see if it matches reference values
def output_contains_value(self, search_regex, reference_value):
# search file for string
return_bool = | |
<gh_stars>1-10
"""
Modules to Set default parameters:
<NAME> FMP Berlin
"""
import de.bruker.nmr.mfw.root as root
import sys
sys.path.append(root.UtilPath.getTopspinHome()+ '/exp/stan/nmr/py/BioPY/modules/')
import math
import TopCmds
import IntShape
import CPDtools
import TS_Version as Ver
#Parameter handling Bru vs FMP vs some other definition
if Ver.get()[0].find("3.2") >=0:
import bruTS3p2 as pul
def dBtoW(dB):
    """Convert an attenuation in dB to power in watts: 10**(-dB/10)."""
    return 10.0 ** (-dB / 10.0)
def WtodB(watts):
    """Convert power in watts to attenuation in dB: -10*log10(watts)."""
    return -10.0 * math.log10(watts)
def PutPars(p90H,ampH,p90C,ampC,p90N,ampN,MAS,units):
    """Write the calibrated hard-pulse parameters into the current data set.

    Stores the 90-degree pulse widths (us) and amplitudes for 1H, 13C and
    15N, the derived 180-degree widths, and the MAS rate through the
    ``pul`` parameter map.  *units* selects the amplitude unit ("dB" or
    "W") used for the amplitude entries.

    NOTE: an earlier implementation wrote the parameters directly with
    TopCmds.PUTPAR ("P 3", "PL<units> 1", "CNST 31", "MASR", ...); it was
    replaced by the pul.setPar mapping below, which resolves the concrete
    TopSpin parameter names per TopSpin version.
    """
    pul.setPar('pH90',p90H,"")
    pul.setPar('pC90',p90C,"")
    pul.setPar('pN90',p90N,"")
    pul.setPar('pH180',2*p90H,"")
    pul.setPar('pC180',2*p90C,"")
    pul.setPar('pN180',2*p90N,"")
    pul.setPar('aH90',ampH,units)
    pul.setPar('aC90',ampC,units)
    pul.setPar('aN90',ampN,units)
    pul.setPar('MAS',MAS,"")
def LoadFromData(units):
    """Read the current hard-pulse calibration from the data set, present
    it for editing in a dialog, and store the accepted values back via
    PutPars.

    *units* selects the amplitude unit ("dB" or "W") used in the PL
    parameter names.
    """
    p90H=float(TopCmds.GETPAR("P 3"))
    ampH=float(TopCmds.GETPAR("PL"+units+" 2"))
    # NOTE(review): this one read goes through the pul parameter map while
    # the neighbours use literal names -- presumably equivalent; confirm
    # against bruTS3p2.
    p90C=float(TopCmds.GETPAR(pul.pC90))
    ampC=float(TopCmds.GETPAR("PL"+units+" 1"))
    p90N=float(TopCmds.GETPAR("P 21"))
    ampN=float(TopCmds.GETPAR("PL"+units+" 3"))
    MAS =float(TopCmds.GETPAR("CNST 31"))
    index = TopCmds.INPUT_DIALOG("Mr Setup Input", "Pulse Widths and Power", \
    ["1H 90 pw","1H ampl","13C 90 pw","13C ampl","15N 90 pw","15N ampl","MAS"],\
    [str('%.2f' %p90H),str('%.2f' %ampH),str('%.2f' %p90C),str('%.2f' %ampC),str('%.2f' %p90N),str('%.2f' %ampN),str('%.2f' %MAS)],\
    ["us",units,"us",units,"us",units," Hz"],\
    ["1","1","1","1","1","1","1"],\
    ["Accept","Close"], ['a','c'], 10)
    if index is None:
        # Dialog dismissed -- leave the data set untouched instead of
        # crashing on the missing result list.
        return
    p90H=float(index[0])
    ampH=float(index[1])
    p90C=float(index[2])
    ampC=float(index[3])
    p90N=float(index[4])
    ampN=float(index[5])
    MAS =float(index[6])
    # (Dropped the unused "TauR" rotor-period calculation.)
    PutPars(p90H,ampH,p90C,ampC,p90N,ampN,MAS,units)
def LoadDefault(units):
    """Prompt for the hard-pulse calibration starting from canned default
    values and store the accepted numbers in the data set via PutPars.

    *units* selects the amplitude unit ("dB" or "W").

    TODO: read the defaults from an external file instead of the
    hard-coded list below.
    """
    index = TopCmds.INPUT_DIALOG("Mr Setup Input", "Pulse Widths and Power", \
    ["1H 90 pw","1H ampl","13C 90 pw","13C ampl","15N 90 pw","15N ampl","MAS"],\
    ["2.5","100","3.0","200.0","5.0","500.0","10000"],\
    ["us",units,"us",units,"us",units," Hz"],\
    ["1","1","1","1","1","1","1"],\
    ["Accept","Close"], ['a','c'], 10)
    if index is None:
        # Dialog dismissed -- leave the data set untouched instead of
        # crashing on the missing result list.
        return
    p90H=float(index[0])
    ampH=float(index[1])
    p90C=float(index[2])
    ampC=float(index[3])
    p90N=float(index[4])
    ampN=float(index[5])
    MAS =float(index[6])
    # (Dropped the unused "TauR" rotor-period calculation.)
    PutPars(p90H,ampH,p90C,ampC,p90N,ampN,MAS,units)
def HC(units):
    """Interactively calibrate the 1H->13C cross-polarization contact.

    Reads the current hard-pulse calibration, proposes B1 fields for the
    two channels offset by +/- MAS/2 around an integer multiple of the
    spinning rate (stepping down when a channel cannot reach the match),
    corrects the amplitudes for the mean of the selected ramp shapes,
    and on acceptance writes SP/PL 40-41, P 15 and the shape names back.

    *units* selects the amplitude unit ("dB" or "W").
    """
    p90H=float(TopCmds.GETPAR("P 3"))
    ampH=float(TopCmds.GETPAR("PLdB 2"))
    p90C=float(TopCmds.GETPAR("P 1"))
    ampC=float(TopCmds.GETPAR("PLdB 1"))
    MAS =float(TopCmds.GETPAR("CNST 31"))
    CNCT=float(TopCmds.GETPAR("P 15"))
    SPH=TopCmds.GETPAR2("SPNAM 40")
    SPX=TopCmds.GETPAR2("SPNAM 41")
    # fall back to a 1 ms contact when the parameter is unset
    if CNCT <= 1.: CNCT = 1000.
    # replace placeholder shapes with sensible CP defaults
    if SPH == "gauss" or SPH == "None" or SPH == "":
        #TopCmds.MSG("Please set spnam40")
        TopCmds.PUTPAR("SPNAM 40","ramp.100")
        TopCmds.XCMD("spnam40")
        SPH=(TopCmds.GETPAR2("SPNAM 40"))
        # NOTE(review): join() on the GETPAR2 result looks like waiting on
        # an async TopSpin command object -- confirm GETPAR2's return type.
        SPH.join()
    if SPX == "gauss" or SPX == "None" or SPX == "":
        #TopCmds.MSG("Please set spnam10")
        TopCmds.PUTPAR("SPNAM 41","square.100")
        TopCmds.XCMD("spnam41")
        SPX=(TopCmds.GETPAR2("SPNAM 41"))
        SPX.join()
    # maximum attainable B1 (Hz) from the calibrated 90-degree pulses (us)
    MaxB1H = 1000000./4./p90H
    MaxB1C = 1000000./4./p90C
    #find the channel with the lowest B1
    if MaxB1C < MaxB1H :
        Ratio=int(math.floor(MaxB1C/MAS))
        #TopCmds.MSG(str(Ratio))
        CCond=(Ratio-0.5)*MAS
        HCond=(Ratio+0.5)*MAS
        if HCond > MaxB1H:
            Ratio=Ratio-1
            CCond=(Ratio-0.5)*MAS
            HCond=(Ratio+0.5)*MAS
        # If spinning very fast or using weak B1s
        if Ratio <= 1:
            CCond= .25*MAS
            HCond= .75*MAS
    if MaxB1C >= MaxB1H :
        Ratio=int(math.floor(MaxB1H/MAS))
        HCond=(Ratio-0.5)*MAS
        CCond=(Ratio+0.5)*MAS
        if CCond > MaxB1C:
            Ratio=Ratio-1
            HCond=(Ratio-0.5)*MAS
            CCond=(Ratio+0.5)*MAS
        # If spinning very fast or using weak B1s
        if Ratio <= 1:
            CCond= .75*MAS
            HCond= .25*MAS
    # initial amplitude guesses scaled from the hard-pulse calibration
    adjust=20*(math.log10(CCond/MaxB1C))
    Camp = ampC-adjust
    adjust=20*(math.log10(HCond/MaxB1H))
    Hamp = ampH-adjust
    # NOTE(review): the dialog labels say kHz but the preset values are in
    # Hz; the later amplitude math only uses ratios, so it stays
    # consistent either way -- confirm the intended display unit.
    index = TopCmds.INPUT_DIALOG("HC CP Input", "Proton Carbon Cross Polarization", \
    ["Proton B1 Field","H Ramp","Carbon B1 Field","C Ramp","Contact Time(P15)"],\
    [str(HCond),SPH,str(CCond),SPX,str(CNCT)],\
    ["kHz","","kHz","","us"],\
    ["1","1","1","1","1"],\
    ["Accept","Close"], ['a','c'], 10)
    # recompute the amplitudes from the user-approved B1 fields
    adjust=20*(math.log10(float(index[0])/MaxB1H))
    Hamp1 = ampH-adjust
    AvgAmp=IntShape.Integrate(index[1])/100.
    adjust=20*(math.log10(1./AvgAmp))
    Hamp = Hamp1-adjust
    adjust=20*(math.log10(float(index[2])/MaxB1C))
    Camp1 = ampC-adjust
    #Ramp integration adjustment
    AvgAmp=IntShape.Integrate(index[3])/100.
    adjust=20*(math.log10(1./AvgAmp))
    Camp = Camp1-adjust
    CNCT = float(index[4])
    if units == "W":
        Hamp=dBtoW(Hamp)
        Camp=dBtoW(Camp)
    value = TopCmds.SELECT("Adjusting the HC CP parameters:",\
    "This will set\n 1H power to: " + str('%3.2f' %Hamp)+" "+ units+"\n \
    13C power to: " +str('%3.2f' %Camp) + units,["Update", "Keep Previous"])
    # SELECT returns the chosen button index; 1 means "Keep Previous"
    if value != 1:
        TopCmds.PUTPAR("SP"+units+" 40",str('%3.2f' %Hamp))
        TopCmds.PUTPAR("SP"+units+" 41",str('%3.2f' %Camp))
        TopCmds.PUTPAR("PL"+units+" 40",str('%3.2f' %Hamp))
        TopCmds.PUTPAR("PL"+units+" 41",str('%3.2f' %Camp))
        TopCmds.PUTPAR("P 15" ,str('%3.2f' %CNCT))
        TopCmds.PUTPAR("SPNAM 40",index[1])
        TopCmds.PUTPAR("SPNAM 41",index[3])
def HN(units):
    """Interactively calibrate the 1H->15N cross-polarization contact.

    Mirrors HC: proposes B1 fields offset by +/- MAS/2 around an integer
    multiple of the spinning rate, corrects the amplitudes for the mean
    of the selected ramp shapes, and on acceptance writes SP/PL 42-43,
    P 25 and the shape names back.

    *units* selects the amplitude unit ("dB" or "W").
    """
    p90H=float(TopCmds.GETPAR("P 3"))
    ampH=float(TopCmds.GETPAR("PLdB 2"))
    p90N=float(TopCmds.GETPAR("P 21"))
    ampN=float(TopCmds.GETPAR("PLdB 3"))
    MAS =float(TopCmds.GETPAR("CNST 31"))
    CNCT=float(TopCmds.GETPAR("P 25"))
    SPH=TopCmds.GETPAR2("SPNAM 42")
    SPX=TopCmds.GETPAR2("SPNAM 43")
    # fall back to a 1 ms contact when the parameter is unset
    if CNCT <= 1.: CNCT = 1000.
    # replace placeholder shapes with sensible CP defaults
    if SPH == "gauss" or SPH == "None" or SPH == "":
        #TopCmds.MSG("Please set spnam1")
        TopCmds.PUTPAR("SPNAM 42","ramp.100")
        TopCmds.XCMD("spnam42")
        SPH=(TopCmds.GETPAR2("SPNAM 42"))
        SPH.join()
    if SPX == "gauss" or SPX == "None" or SPX == "":
        #TopCmds.MSG("Please set spnam11")
        TopCmds.PUTPAR("SPNAM 43","square.100")
        TopCmds.XCMD("spnam43")
        SPX=(TopCmds.GETPAR2("SPNAM 43"))
        SPX.join()
    # maximum attainable B1 (Hz) from the calibrated 90-degree pulses (us)
    MaxB1H = 1000000./4./p90H
    MaxB1N = 1000000./4./p90N
    #find the channel with the lowest B1
    if MaxB1N < MaxB1H :
        Ratio=int(math.floor(MaxB1N/MAS))
        NCond=(Ratio-0.5)*MAS
        HCond=(Ratio+0.5)*MAS
        if HCond > MaxB1H:
            Ratio=Ratio-1
            NCond=(Ratio-0.5)*MAS
            HCond=(Ratio+0.5)*MAS
        # If spinning very fast or using weak B1s
        if Ratio <= 1:
            NCond= .25*MAS
            HCond= .75*MAS
    if MaxB1N >= MaxB1H :
        Ratio=int(math.floor(MaxB1H/MAS))
        HCond=(Ratio-0.5)*MAS
        NCond=(Ratio+0.5)*MAS
        if NCond > MaxB1N:
            Ratio=Ratio-1
            HCond=(Ratio-0.5)*MAS
            NCond=(Ratio+0.5)*MAS
        # If spinning very fast or using weak B1s
        # NOTE(review): HC's analogous branch swaps which channel gets the
        # low (.25*MAS) field; here both branches give 1H the high field --
        # confirm this asymmetry with HC is intended.
        if Ratio <= 1:
            NCond= .25*MAS
            HCond= .75*MAS
    # initial amplitude guesses scaled from the hard-pulse calibration
    adjust=20*(math.log10(NCond/MaxB1N))
    Namp = ampN-adjust
    adjust=20*(math.log10(HCond/MaxB1H))
    Hamp = ampH-adjust
    # NOTE(review): as in HC, the dialog labels say kHz but the preset
    # values are in Hz; only ratios are used afterwards.
    index = TopCmds.INPUT_DIALOG("HN CP Input", "Proton Nitrogen Cross Polarization", \
    ["Proton B1 Field","H Ramp","Nitrogen B1 Field","N Ramp","Contact Time(P25)"],\
    [str(HCond),SPH,str(NCond),SPX,str(CNCT)],\
    ["kHz","","kHz","","us"],\
    ["1","1","1","1","1"],\
    ["Accept","Close"], ['a','c'], 10)
    # recompute the amplitudes from the user-approved B1 fields
    adjust=20*(math.log10(float(index[0])/MaxB1H))
    Hamp1 = ampH-adjust
    AvgAmp=IntShape.Integrate(index[1])/100.
    adjust=20*(math.log10(1./AvgAmp))
    Hamp = Hamp1-adjust
    adjust=20*(math.log10(float(index[2])/MaxB1N))
    Namp = ampN-adjust
    #Ramp integration adjustment
    AvgAmp=IntShape.Integrate(index[3])/100.
    adjust=20*(math.log10(1./AvgAmp))
    Namp = Namp-adjust
    CNCT = float(index[4])
    #TopCmds.MSG("Adjusting the HC CP parameters:\n Your Proton Amplitude is set to " + str('%3.2f' %Hamp)+ "dB\n Your Nitrogen Ammplitude is set to " +str('%3.2f' %Namp))
    if units == "W":
        Hamp=dBtoW(Hamp)
        Namp=dBtoW(Namp)
    value = TopCmds.SELECT("Adjusting the HN CP parameters:",\
    "This will set\n 1H power to: " + str('%3.2f' %Hamp)+" "+ units+"\n \
    15N power to: " +str('%3.2f' %Namp) + units,["Update", "Keep Previous"])
    # SELECT returns the chosen button index; 1 means "Keep Previous"
    if value != 1:
        TopCmds.PUTPAR("SP"+units+" 42",str('%3.2f' %Hamp))
        TopCmds.PUTPAR("SP"+units+" 43",str('%3.2f' %Namp))
        TopCmds.PUTPAR("PL"+units+" 42",str('%3.2f' %Hamp))
        TopCmds.PUTPAR("PL"+units+" 43",str('%3.2f' %Namp))
        TopCmds.PUTPAR("P 25" ,str('%.2f' %CNCT))
        TopCmds.PUTPAR("SPNAM 42",index[1])
        TopCmds.PUTPAR("SPNAM 43",index[3])
def NCA(units):
    """Interactive setup of the band-selective 15N->13CA SPECIFIC-CP step.

    Proposes matched B1 fields near the 3/2 (13C) and 5/2 (15N) multiples
    of the MAS rate, steps them down until both channels can reach the
    match, corrects the 13C amplitude for the mean of the contact ramp,
    and on acceptance writes PL 5, SP 50, P 16 and the shape name back.

    *units* selects the amplitude unit ("dB" or "W").
    """
    p90C=float(TopCmds.GETPAR("P 1"))
    ampC=float(TopCmds.GETPAR("PLdB 1"))
    p90N=float(TopCmds.GETPAR("P 21"))
    ampN=float(TopCmds.GETPAR("PLdB 3"))
    MAS =float(TopCmds.GETPAR("CNST 31"))
    SP=TopCmds.GETPAR2("SPNAM 50")
    CNCT=float(TopCmds.GETPAR("P 16"))
    # fall back to a 3.5 ms contact when the parameter is unset
    if CNCT <= 1.: CNCT = 3500.
    if SP == "gauss" or SP == "None" or SP == "" :
        # BUG FIX: this branch previously wrote "SPNAM 42" (the HN CP
        # shape) although everything else here manages shaped pulse 50.
        TopCmds.PUTPAR("SPNAM 50","tancn")
        TopCmds.XCMD("spnam50")
        SP=(TopCmds.GETPAR2("SPNAM 50"))
        SP.join()
    # maximum attainable B1 (Hz) from the calibrated 90-degree pulses (us)
    MaxB1N = 1000000./4./p90N
    MaxB1C = 1000000./4./p90C
    # step the match down one rotor harmonic at a time until both fields
    # are reachable
    NCond=(5./2.)*MAS
    CCond=(3./2.)*MAS
    while NCond > MaxB1N :
        NCond=NCond - MAS
        # NOTE(review): this places 13C one rotor period ABOVE 15N whereas
        # the starting point has it one below -- confirm intended.
        CCond=NCond + MAS
    while CCond > MaxB1C :
        NCond=NCond - MAS
        CCond=NCond + MAS
    # if spinning very fast or using weak B1s
    if NCond < MAS :
        NCond= 0.25*MAS
        CCond= 0.75*MAS
    index = TopCmds.INPUT_DIALOG("NCA CP Input", "N-CA SPECIFIC-CP 3/2*C; 5/2*N", \
    ["Carbon B1 field","Nitrogen B1 field","Contact Time(P16)","Ramp Name"],\
    [str(CCond),str(NCond),str(CNCT),SP],\
    ["kHz","kHz","us",""],\
    ["1","1","1","1"],\
    ["Accept","Close"], ['a','c'], 10)
    # scale the amplitudes from the user-approved B1 fields
    adjust=20*(math.log10(float(index[0])/MaxB1C))
    Camp = ampC-adjust
    adjust=20*(math.log10(float(index[1])/MaxB1N))
    Namp = ampN-adjust
    # correct the 13C amplitude for the mean amplitude of the ramp shape
    AvgAmp=IntShape.Integrate(SP)/100.
    adjust=20*(math.log10(1./AvgAmp))
    Camp = Camp-adjust
    CNCT = float(index[2])
    if units == "W":
        Namp=dBtoW(Namp)
        Camp=dBtoW(Camp)
    value = TopCmds.SELECT("Adjusting the NC CP parameters:",\
    "This will set\n 13C power to: " + str('%3.2f' %Camp)+" "+ units+"\n \
    15N power to: " +str('%3.2f' %Namp) + units,["Update", "Keep Previous"])
    # SELECT returns the chosen button index; 1 means "Keep Previous"
    if value != 1:
        TopCmds.PUTPAR("PL"+units+" 5",str('%3.2f' %Namp))
        TopCmds.PUTPAR("SP"+units+" 50",str('%3.2f' %Camp))
        TopCmds.PUTPAR("P 16",str('%3.2f' %CNCT))
        TopCmds.PUTPAR("SPNAM 50",SP)
def NCO(units):
    """Interactive setup of the band-selective 15N->13CO SPECIFIC-CP step.

    Same scheme as NCA but matching near the 7/2 (13C) and 5/2 (15N)
    multiples of the MAS rate; on acceptance writes PL 6, PL/SP 51, P 17
    and the shape name back.

    *units* selects the amplitude unit ("dB" or "W").
    """
    p90C=float(TopCmds.GETPAR("P 1"))
    ampC=float(TopCmds.GETPAR("PLdB 1"))
    p90N=float(TopCmds.GETPAR("P 21"))
    ampN=float(TopCmds.GETPAR("PLdB 3"))
    MAS =float(TopCmds.GETPAR("CNST 31"))
    SP=TopCmds.GETPAR2("SPNAM 51")
    CNCT=float(TopCmds.GETPAR("P 17"))
    # fall back to a 3.5 ms contact when the parameter is unset
    if CNCT <= 1.: CNCT = 3500.
    if SP == "gauss" or SP == "None" or SP == "":
        # prompt the user to pick a shape for shaped pulse 51
        TopCmds.XCMD("spnam51")
        SP=(TopCmds.GETPAR2("SPNAM 51"))
        SP.join()
    # maximum attainable B1 (Hz) from the calibrated 90-degree pulses (us)
    MaxB1N = 1000000./4./p90N
    MaxB1C = 1000000./4./p90C
    # step the match down one rotor harmonic at a time until both fields
    # are reachable
    NCond=(5./2.)*MAS
    CCond=(7./2.)*MAS
    while NCond > MaxB1N :
        NCond=NCond - MAS
        CCond=NCond + MAS
    while CCond > MaxB1C :
        NCond=NCond - MAS
        CCond=NCond + MAS
    # if spinning very fast or using weak B1s
    if NCond < MAS :
        NCond= 0.25*MAS
        CCond= 0.75*MAS
    # BUG FIX: the contact-time label previously said "(P15)" although
    # this function reads and writes P 17.
    index = TopCmds.INPUT_DIALOG("NCO CP Input", "N-CO SPECIFIC 7/2*C; 5/2*N", \
    ["Carbon B1 field","Nitrogen B1 field","Contact Time(P17)","Ramp Name"],\
    [str(CCond),str(NCond),str(CNCT),SP],\
    ["kHz","kHz","us",""],\
    ["1","1","1","1"],\
    ["Accept","Close"], ['a','c'], 10)
    # scale the amplitudes from the user-approved B1 fields
    adjust=20*(math.log10(float(index[0])/MaxB1C))
    Camp = ampC-adjust
    adjust=20*(math.log10(float(index[1])/MaxB1N))
    Namp = ampN-adjust
    # correct the 13C amplitude for the mean amplitude of the ramp shape
    AvgAmp=IntShape.Integrate(SP)/100.
    adjust=20*(math.log10(1./AvgAmp))
    Camp = Camp-adjust
    CNCT = float(index[2])
    if units == "W":
        Namp=dBtoW(Namp)
        Camp=dBtoW(Camp)
    value = TopCmds.SELECT("Adjusting the NC CP parameters:",\
    "This will set\n 13C power to: " + str('%3.2f' %Camp)+" "+ units+"\n \
    15N power to: " +str('%3.2f' %Namp) + units,["Update", "Keep Previous"])
    # SELECT returns the chosen button index; 1 means "Keep Previous"
    if value != 1:
        # NOTE(review): both PL 51 and SP 51 receive the 13C amplitude
        # (NCA writes only SP 50) -- confirm both are intended.
        TopCmds.PUTPAR("PL"+units+" 6",str('%3.2f' %Namp))
        TopCmds.PUTPAR("PL"+units+" 51",str('%3.2f' %Camp))
        TopCmds.PUTPAR("SP"+units+" 51",str('%3.2f' %Camp))
        TopCmds.PUTPAR("P 17",str('%3.2f' %CNCT))
        TopCmds.PUTPAR("SPNAM 51",SP)
def Find_PWdec(Text):
    """Scan the lines of a CPD program for the pulse-width parameter it
    uses and return the corresponding TopSpin parameter name.

    Recognizes pcpd, p31, p63 and p62 (in that priority order per line);
    pops a message and exits the script when none of them occurs in any
    line of *Text*.

    BUG FIX: the original loop never initialized or incremented ``i``
    (NameError / infinite loop), errored out on the first line without a
    keyword, and discarded the detected type.
    """
    for line in Text:
        if line.find("pcpd") >= 0:
            return "PCPD"
        elif line.find("p31") >= 0:
            return "P 31"
        elif line.find("p63") >= 0:
            return "P 63"
        elif line.find("p62") >= 0:
            return "P 62"
    TopCmds.MSG("File for Decoupling not found; Exiting")
    TopCmds.EXIT()
def HDec(units):
Stuff = []
p90H=float(TopCmds.GETPAR("P 3"))
ampH=float(TopCmds.GETPAR("PLdB 2"))
MaxB1H = 1000000./4./p90H
CPD=TopCmds.GETPAR2("CPDPRG 2")
if CPD == "mlev" or CPD == "None" :
TopCmds.XCMD("cpdprg2")
CPD=(TopCmds.GETPAR2("CPDPRG 2"))
Stuff=CPDtools.CPDparse(CPD,"1H")
#TopCmds.MSG(str(Stuff))
amp0=CPDtools.Find_old_pl(Stuff[0])
decpw0=CPDtools.Find_old_pw(Stuff[1],"1H")
B1_0=MaxB1H*(math.pow(10,(ampH-amp0)/20.))/1000.
if B1_0 > 1.: B1out='% .1f' % B1_0
if B1_0 <= 1.: B1out='75.0'
index = TopCmds.INPUT_DIALOG("Mr Setup Input", "Decoupling Window", \
["Desired 1H Decoupling Amplitude","File"],\
[B1out,CPD],["kHz",""],["1","1",],\
["Accept","Close"], ['a','c'], 10)
TopCmds.PUTPAR("CPDPRG 2",index[1])
#print(index[0], MaxB1H)
adjust=20*(math.log10(1000.*float(index[0])/MaxB1H))
Hamp = ampH-adjust
decpwH= (MaxB1H/1000./float(index[0]))*(170./180.)*2*p90H
if units =="W":
Hamp=dBtoW(Hamp)
value = TopCmds.SELECT("Adjusting the H decoupling parameters:",\
"This will set\n 1H power ("+ | |
ae.associate('localhost', 11112)
assert not assoc.is_established
assert assoc.is_rejected
scp.shutdown()
    def test_unknown_abort_source(self):
        """Test an unknown abort source handled correctly #561"""
        def handle_req(event):
            # Hand-crafted abort PDU (regression for #561) — presumably
            # carries a source value outside the standard-defined set;
            # verify against the A-ABORT PDU definition if changed.
            pdu = b"\x07\x00\x00\x00\x00\x04\x00\x00\x01\x00"
            event.assoc.dul.socket.send(pdu)
            # Give the requestor time to process the message before killing
            # the connection
            time.sleep(0.1)

        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(VerificationSOPClass)
        hh = [(evt.EVT_REQUESTED, handle_req)]
        scp = ae.start_server(('', 11112), block=False, evt_handlers=hh)
        ae.add_requested_context(VerificationSOPClass)
        assoc = ae.associate('localhost', 11112)
        # Even with the non-standard source value the association must end
        # up aborted, not established.
        assert not assoc.is_established
        assert assoc.is_aborted
        scp.shutdown()
class TestCStoreSCP(object):
    """Tests for Association._c_store_scp().

    Exercises the C-STORE SCP code path for missing presentation contexts
    and misbehaving user handlers, asserting on the status code captured
    by the DummyDIMSE stub instead of a real DIMSE exchange.
    """
    # Used with C-GET (always) and C-MOVE (over the same association)
    def setup(self):
        # Reset so teardown only shuts down an AE created by the test.
        self.ae = None

    def teardown(self):
        if self.ae:
            self.ae.shutdown()

    def test_no_context(self):
        """Test correct response if no valid presentation context."""
        def handle(event):
            return 0x0000
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(CTImageStorage)
        ae.add_supported_context(RTImageStorage)
        # Storage SCP
        scp = ae.start_server(
            ('', 11112), block=False, evt_handlers=[(evt.EVT_C_STORE, handle)]
        )
        # Only RTImageStorage is requested, so the C-STORE request's
        # CTImageStorage dataset has no usable accepted context.
        ae.add_requested_context(RTImageStorage)
        role = build_role(CTImageStorage, scu_role=False, scp_role=True)
        assoc = ae.associate('localhost', 11112, ext_neg=[role])
        assert assoc.is_established
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        req._context_id = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc.dimse = DummyDIMSE()
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0x0122
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_handler_exception(self):
        """Test correct response if exception raised by handler."""
        def handle(event):
            raise ValueError()
            return 0x0000
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(CTImageStorage, scp_role=True, scu_role=True)
        # Storage SCP
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(CTImageStorage)
        role = build_role(CTImageStorage, scu_role=False, scp_role=True)
        assoc = ae.associate(
            'localhost', 11112, ext_neg=[role],
            evt_handlers=[(evt.EVT_C_STORE, handle)]
        )
        assert assoc.is_established
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        req._context_id = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc.dimse = DummyDIMSE()
        assoc._c_store_scp(req)
        # Failure status expected when the user handler raises.
        assert assoc.dimse.status == 0xC211
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_handler_status_ds_no_status(self):
        """Test handler with status dataset with no Status element."""
        def handle(event):
            # Dataset without the mandatory Status element.
            return Dataset()
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(CTImageStorage, scp_role=True, scu_role=True)
        # Storage SCP
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(CTImageStorage)
        role = build_role(CTImageStorage, scu_role=False, scp_role=True)
        assoc = ae.associate(
            'localhost', 11112, ext_neg=[role],
            evt_handlers=[(evt.EVT_C_STORE, handle)]
        )
        assert assoc.is_established
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        req._context_id = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc.dimse = DummyDIMSE()
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0xC001
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_handler_status_ds_unknown_elems(self):
        """Test handler with status dataset with an unknown element."""
        def handle(event):
            ds = Dataset()
            ds.Status = 0x0000
            # Non-status element that must be stripped from the response.
            ds.PatientName = 'ABCD'
            return ds
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(CTImageStorage, scp_role=True, scu_role=True)
        # Storage SCP
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(CTImageStorage)
        role = build_role(CTImageStorage, scu_role=False, scp_role=True)
        assoc = ae.associate(
            'localhost', 11112, ext_neg=[role],
            evt_handlers=[(evt.EVT_C_STORE, handle)]
        )
        assert assoc.is_established
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        req._context_id = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc.dimse = DummyDIMSE()
        assoc._c_store_scp(req)
        rsp = assoc.dimse.rsp
        assert rsp.Status == 0x0000
        assert not hasattr(rsp, 'PatientName')
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_handler_invalid_status(self):
        """Test handler with invalid status."""
        def handle(event):
            # Neither an int status nor a Dataset.
            return 'abcd'
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(CTImageStorage, scp_role=True, scu_role=True)
        # Storage SCP
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(CTImageStorage)
        role = build_role(CTImageStorage, scu_role=False, scp_role=True)
        assoc = ae.associate(
            'localhost', 11112, ext_neg=[role],
            evt_handlers=[(evt.EVT_C_STORE, handle)]
        )
        assert assoc.is_established
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        req._context_id = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc.dimse = DummyDIMSE()
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0xC002
        assoc.release()
        assert assoc.is_released
        scp.shutdown()

    def test_handler_unknown_status(self):
        """Test handler with invalid status."""
        def handle(event):
            # Status value with no defined meaning — must be passed through.
            return 0xDEFA
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(CTImageStorage, scp_role=True, scu_role=True)
        # Storage SCP
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(CTImageStorage)
        role = build_role(CTImageStorage, scu_role=False, scp_role=True)
        assoc = ae.associate(
            'localhost', 11112, ext_neg=[role],
            evt_handlers=[(evt.EVT_C_STORE, handle)]
        )
        assert assoc.is_established
        req = C_STORE()
        req.MessageID = 1
        req.AffectedSOPClassUID = DATASET.SOPClassUID
        req.AffectedSOPInstanceUID = DATASET.SOPInstanceUID
        req.Priority = 1
        req._context_id = 1
        bytestream = encode(DATASET, True, True)
        req.DataSet = BytesIO(bytestream)
        assoc.dimse = DummyDIMSE()
        assoc._c_store_scp(req)
        assert assoc.dimse.status == 0xDEFA
        assoc.release()
        assert assoc.is_released
        scp.shutdown()
class TestAssociationSendCEcho(object):
"""Run tests on Assocation evt.EVT_C_ECHO handler."""
    def setup(self):
        """Run prior to each test"""
        # Reset so teardown only shuts down an AE created by the test.
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            # Stops any servers/associations the test left running.
            self.ae.shutdown()
    def test_must_be_associated(self):
        """Test can't send without association."""
        # Test raise if assoc not established
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(VerificationSOPClass)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        # Release first, then sending on the dead association must raise.
        assoc.release()
        assert assoc.is_released
        assert not assoc.is_established
        with pytest.raises(RuntimeError):
            assoc.send_c_echo()
        scp.shutdown()
    def test_no_abstract_syntax_match(self):
        """Test SCU when no accepted abstract syntax"""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        # Only CTImageStorage is negotiated, so there is no accepted
        # context for the Verification service C-ECHO needs.
        ae.add_supported_context(CTImageStorage)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(CTImageStorage)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        with pytest.raises(ValueError):
            assoc.send_c_echo()
        assoc.release()
        assert assoc.is_released
        scp.shutdown()
    def test_rsp_none(self):
        """Test no response from peer"""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(VerificationSOPClass)
        assoc = ae.associate('localhost', 11112)

        # Stub DIMSE provider that never returns a response message.
        class DummyDIMSE():
            msg_queue = queue.Queue()
            def send_msg(*args, **kwargs): return
            def get_msg(*args, **kwargs): return None, None

        # Pause the reactor before swapping in the stub provider.
        assoc._reactor_checkpoint.clear()
        while not assoc._is_paused:
            time.sleep(0.01)
        assoc.dimse = DummyDIMSE()
        if assoc.is_established:
            assoc.send_c_echo()
            # A missing response aborts the association.
            assert assoc.is_aborted
        scp.shutdown()
    def test_rsp_invalid(self):
        """Test invalid response received from peer"""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(VerificationSOPClass)
        assoc = ae.associate('localhost', 11112)

        # Response object flagged invalid by the message layer.
        class DummyResponse():
            is_valid_response = False

        class DummyDIMSE():
            msg_queue = queue.Queue()
            def send_msg(*args, **kwargs): return
            def get_msg(*args, **kwargs): return None, DummyResponse()

        # Pause the reactor before swapping in the stub provider.
        assoc._reactor_checkpoint.clear()
        while not assoc._is_paused:
            time.sleep(0.01)
        assoc.dimse = DummyDIMSE()
        if assoc.is_established:
            assoc.send_c_echo()
            # An invalid response aborts the association.
            assert assoc.is_aborted
        scp.shutdown()
    def test_rsp_success(self):
        """Test receiving a success response from the peer"""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(VerificationSOPClass)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(VerificationSOPClass)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        result = assoc.send_c_echo()
        # 0x0000 is the success status.
        assert result.Status == 0x0000
        assoc.release()
        assert assoc.is_released
        scp.shutdown()
    def test_rsp_failure(self):
        """Test receiving a failure response from the peer"""
        def handler(event):
            # Handler-supplied failure status, echoed back to the SCU.
            return 0x0210
        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        handlers = [(evt.EVT_C_ECHO, handler)]
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        ae.add_requested_context(VerificationSOPClass)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        result = assoc.send_c_echo()
        assert result.Status == 0x0210
        assoc.release()
        assert assoc.is_released
        scp.shutdown()
    def test_rsp_unknown_status(self):
        """Test unknown status value returned by peer"""
        def handler(event):
            # Status with no defined meaning — must be passed through as-is.
            return 0xFFF0
        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        handlers = [(evt.EVT_C_ECHO, handler)]
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        ae.add_requested_context(VerificationSOPClass)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        result = assoc.send_c_echo()
        assert result.Status == 0xFFF0
        assoc.release()
        assert assoc.is_released
        scp.shutdown()
    def test_rsp_multi_status(self):
        """Test receiving a status with extra elements"""
        def handler(event):
            # Status dataset carrying an extra ErrorComment element.
            ds = Dataset()
            ds.Status = 0x0122
            ds.ErrorComment = 'Some comment'
            return ds
        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        handlers = [(evt.EVT_C_ECHO, handler)]
        scp = ae.start_server(('', 11112), block=False, evt_handlers=handlers)
        ae.add_requested_context(VerificationSOPClass)
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        result = assoc.send_c_echo()
        # Both the status and the extra element survive the round trip.
        assert result.Status == 0x0122
        assert result.ErrorComment == 'Some comment'
        assoc.release()
        assert assoc.is_released
        scp.shutdown()
    def test_abort_during(self):
        """Test aborting the association during message exchange"""
        def handle(event):
            # Abort from inside the handler, mid C-ECHO exchange.
            event.assoc.abort()
            return 0x0000
        self.ae = ae = AE()
        ae.acse_timeout = 1
        ae.dimse_timeout = 1
        ae.network_timeout = 1
        ae.add_supported_context(VerificationSOPClass)
        scp = ae.start_server(
            ('', 11112), block=False, evt_handlers=[(evt.EVT_C_ECHO, handle)]
        )
        ae.add_requested_context(VerificationSOPClass)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        result = assoc.send_c_echo()
        # No usable response — an empty Dataset is returned.
        assert result == Dataset()
        time.sleep(0.1)
        assert assoc.is_aborted
        scp.shutdown()
    def test_run_accept_scp_not_implemented(self):
        """Test association is aborted if non-implemented SCP requested."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(VerificationSOPClass)
        # NOTE(review): looks like an anonymized/placeholder SOP Class UID
        # with no implemented service — confirm against upstream source.
        ae.add_supported_context('192.168.127.12')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(VerificationSOPClass)
        ae.add_requested_context('192.168.127.12')
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        status = assoc.send_n_delete('1.2.3.4', '1.2.3')
        # Unimplemented service: empty status and an aborted association.
        assert status == Dataset()
        time.sleep(0.1)
        assert assoc.is_aborted
        scp.shutdown()
def test_rejected_contexts(self):
"""Test receiving a success response from the peer"""
self.ae = ae = AE()
ae.acse_timeout = 5
ae.dimse_timeout | |
'''
Functions for use
'''
import torch
from torch.autograd import Variable
import numpy as np
import itertools
import os
import random
import pickle
import torch as t
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import ShuffleSplit
from sklearn.feature_extraction.text import TfidfTransformer
# from tsne import tsne
# Device configuration: use CUDA tensor types when a GPU is available.
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
    torch.cuda.set_device(0)
FloatTensor = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if USE_CUDA else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if USE_CUDA else torch.ByteTensor
# NOTE(review): set_device(0) selects GPU 0 but Device names 'cuda:1' —
# these disagree; confirm which device is actually intended.
Device = 'cuda:1' if USE_CUDA else 'cpu'
"""process data input to bag-of-words representation"""
def dataset(data_url, monitor=False):
    """Load token-id sequences and (optional) multi-label targets.

    Parameters
    ----------
    data_url : str
        Path prefix; reads '<data_url>.multi.npy' and, when present,
        '<data_url>.labels.multi.npy'.
    monitor : bool
        Print progress messages.

    Returns
    -------
    (seq, labels, class_names)
        seq is a list of token-id lists. When no label file exists,
        labels is None and class_names is ['None'].
    """
    data_withorder = np.load(data_url + '.multi.npy', allow_pickle=True)
    seq = [list(x) for x in data_withorder]
    if monitor:
        print('Converting data to sequence')
    try:
        labels_with_names = np.load(data_url + '.labels.multi.npy',
                                    allow_pickle=True)
        labels = labels_with_names[0]
        class_names = labels_with_names[1]
    except OSError:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and genuine bugs; only a missing/unreadable label file is expected.
        if monitor:
            print("No labels.")
        labels = None
        class_names = ['None']
    return seq, labels, class_names
def onehot(data, min_length):
    """Bag-of-words count vector of token ids `data`, length >= min_length."""
    return np.bincount(data, minlength=min_length)
def bow_dataset(data_url, vocab_size, additional_text=False, monitor=False):
    """Load documents as bag-of-words count vectors.

    Parameters
    ----------
    data_url : str
        Path prefix; reads '<data_url>.multi.npy' and, when present,
        '<data_url>.labels.multi.npy'.
    vocab_size : int
        Length of each count vector.
    additional_text : bool
        Also return the raw ordered token arrays as a fifth item.
    monitor : bool
        Print progress messages.

    Returns
    -------
    (data_multihot, labels, word_count, class_names[, data_withorder])
        labels is None and class_names ['None'] when no label file exists.
    """
    data_withorder = np.load(data_url + '.multi.npy', allow_pickle=True)
    if monitor:
        print('Converting data to BoW representation')
    data_multihot = np.array([onehot(doc.astype('int'), vocab_size)
                              for doc in data_withorder])
    word_count = [np.sum(doc) for doc in data_multihot]
    try:
        labels_with_names = np.load(data_url + '.labels.multi.npy',
                                    allow_pickle=True)
        labels = labels_with_names[0]
        class_names = labels_with_names[1]
    except OSError:
        # Fix: was a bare `except:`; only a missing label file is expected.
        if monitor:
            print("No labels.")
        labels = None
        class_names = ['None']
    if additional_text:
        return data_multihot, labels, word_count, class_names, data_withorder
    return data_multihot, labels, word_count, class_names
'''Create batches'''
def pad_to_batch(x, max_len=80):
    """Pad (with 0) or truncate each token-id list to max_len and stack
    the results into a single (len(x), max_len) LongTensor."""
    padded = []
    for seq in x:
        if len(seq) < max_len:
            seq = seq + [0] * (max_len - len(seq))
        else:
            seq = seq[:max_len]
        padded.append(Variable(LongTensor(seq)))
    return torch.stack(padded, 0)
# TODO: Populate the docstring.
def pad_to_train(batch, max_len=80):
    """Split a batch of (sequence, label) pairs into a padded sequence
    tensor and the untouched label tuple."""
    seqs, labels = zip(*batch)
    return pad_to_batch(seqs, max_len=max_len), labels
def getBatch(batch_size, train_data, shuffle=False):
    """Yield consecutive slices of train_data of size batch_size; the
    final batch may be shorter. When shuffle is True the input list is
    shuffled in place first."""
    if shuffle:
        random.shuffle(train_data)
    start = 0
    while start + batch_size < len(train_data):
        yield train_data[start:start + batch_size]
        start += batch_size
    # Remainder (always yielded, possibly shorter than batch_size).
    yield train_data[start:]
def getBatch_iter(batch_size, train_data, shuffle=False):
    """Yield batches of exactly batch_size items (the last batch may be
    shorter) in a single pass over train_data.

    Fixes relative to the original: `i % batch_size == 0` fired at i == 0,
    so the first batch always had a single element; and when the data size
    was an exact multiple of batch_size the trailing `break` (guarded by a
    non-empty remainder) never executed, leaving `while True` spinning
    forever.
    """
    if shuffle:
        random.shuffle(train_data)
    batch = []
    for item in train_data:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch
'''Build attributes'''
def build_A(data_url, vocab_size, n_attribute):
    """Build an (n_label, n_attribute) attribute matrix from per-label
    word counts, tf-idf weighted and reduced with t-SNE.

    NOTE(review): `tsne` is NOT in scope — the `from tsne import tsne`
    import at the top of this module is commented out, so calling this
    function raises NameError. Re-enable that import before use.
    """
    data, labels, _, _ = bow_dataset(data_url, vocab_size)
    n_label = labels.shape[1]
    # data[1] is one document's bag-of-words vector, so its length is the
    # vocabulary size.
    n_vocab = len(data[1])
    A_large = np.zeros([n_label, n_vocab])
    for i, doc in enumerate(data):
        # Accumulate word counts into every label assigned to document i.
        A_large[labels[i]==1] += doc
    transformer = TfidfTransformer(smooth_idf=False)
    A_tfidf = transformer.fit_transform(A_large).toarray()
    A = tsne.tsne(A_tfidf, n_attribute, n_vocab)
    return FloatTensor(A)
'''Extract labels based on probability'''
def multi_label_extract(label_dist, threshold=0.5):
    """Binarize a label-probability tensor: 1 where the probability is
    strictly above threshold, 0 elsewhere."""
    mask = label_dist > threshold
    hot = torch.zeros(label_dist.size())
    hot[mask] = 1
    return hot
def plot_threshold(thrs, var, threshold, title, savedir=None):
    """Plot metric curves over candidate thresholds and mark the chosen one.

    Parameters
    ----------
    thrs : sequence of float
        Candidate threshold values (x axis).
    var : sequence of (values, label) pairs
        Each pair is drawn as one labelled curve.
    threshold : float
        Chosen threshold, drawn as a vertical line.
    title : str
        Figure title (also used in the saved file name).
    savedir : str, optional
        When given, the figure is saved as '<savedir>_<title>.png'.
    """
    for v in var:
        plt.plot(thrs, v[0], label=v[1])
    plt.axvline(x=threshold)
    plt.legend()
    plt.title(title)
    if savedir is not None:
        plt.savefig('%s_%s.png'%(savedir, title))
    plt.show()
    # Clear the figure so later plots don't accumulate on these axes.
    plt.clf()
'''Evaluation'''
# Sorower, <NAME>. "A literature survey on algorithms for multi-label learning." Oregon State University, Corvallis (2010)
def multilabel_eval(true, pred, sample_weight=None, monitor=False, full=False):
    """Evaluate multi-label predictions.

    Parameters
    ----------
    true, pred : (n_samples, n_labels) binary arrays
    sample_weight : unused (kept for interface compatibility)
    monitor : bool
        Print the score dict.
    full : bool
        Also compute the extended metric set.

    Returns
    -------
    dict with 'match' (at least one true label predicted), 'HS' (Hamming
    score / Jaccard), 'f1', and with full=True also HL, exact_acc,
    min_acc, density(_chosen), precision, recal, no_pred.

    Fixes relative to the original: `pred[i][true[i]]` fancy-indexed with
    integer labels instead of masking (now `true[i] == 1`), and
    'precision'/'recal' were swapped (precision must divide hits by the
    number of *predicted* positives, recall by the number of *true*
    positives); both denominators are now guarded against zero.
    """
    n, p = true.shape
    score = {}
    # Fraction of samples where at least one truly-positive label is hit.
    score['match'] = np.mean([(pred[i][true[i] == 1] == 1).any()
                              for i in range(len(pred))])
    hit = ((pred == 1) * (true == 1)).sum(1)
    # Jaccard overlap between predicted and true label sets.
    score['HS'] = (hit / (((pred == 1) + (true == 1)) > 0).sum(1)).mean()
    score['f1'] = (2 * hit / ((pred == 1).sum(1) + (true == 1).sum(1))).mean()
    if full:
        match = (pred == true)
        score['HL'] = (pred != true).mean(1).mean()
        score['exact_acc'] = match.all(1).mean()
        score['min_acc'] = match.mean(0).min()
        score['density_chosen'] = pred.sum(1).mean() / p
        score['density'] = true.sum(1).mean() / p
        score['precision'] = (hit / ((pred == 1).sum(1) + 1e-12)).mean()
        score['recal'] = (hit / ((true == 1).sum(1) + 1e-12)).mean()
        score['no_pred'] = (pred != 1).all(1).mean()
    if monitor:
        print(score)
    return score
def singlelabel_eval(true, pred, sample_weight=None, monitor=False):
    """Compute accuracy / precision / recall / F1 / confusion matrix for
    single-label (binary) predictions.

    Fix: the sklearn metric functions were referenced without being
    imported anywhere in this module, so every call raised NameError;
    they are now imported locally (sklearn is already a module dependency
    via sklearn.model_selection / feature_extraction).
    """
    from sklearn.metrics import (accuracy_score, precision_score,
                                 recall_score, f1_score, confusion_matrix)
    score = {}
    score['acc'] = accuracy_score(true, pred)
    score['precision'] = precision_score(true, pred)
    score['recal'] = recall_score(true, pred)
    score['f1'] = f1_score(true, pred)
    score['cfm'] = confusion_matrix(true, pred)
    if monitor:
        print('Acc: %5f, F1: %5f, precision: %5f, recall: %5f'
              % (score['acc'], score['f1'], score['precision'], score['recal']))
    return score
def inference_analysis(class_word, vocab_url, class_names):
    """Print the 20 highest-weight vocabulary words for each class.

    Parameters
    ----------
    class_word : (n_class, n_vocab) ndarray or torch tensor
        Per-class word weights; tensors are converted to numpy first.
    vocab_url : str
        Vocabulary file: a pickled word->id dict when the path contains
        'pkl', otherwise one 'word ...' entry per line.
    class_names : sequence of str
        Printed before each word list when its length matches class_word.

    Fixes relative to the original: the body referenced undefined names
    (`topic_word`, `topic`, `names`) instead of its own parameters.
    """
    if type(class_word) is not np.ndarray:
        class_word = class_word.data.cpu().numpy()
    if 'pkl' in vocab_url:
        vocab = pickle.load(open(vocab_url, 'rb'))
        # Order words by their integer id.
        vocab = list(zip(*sorted(vocab.items(), key=lambda x: x[1])))[0]
    else:
        vocab = []
        with open(vocab_url, 'r') as fin:
            for line in fin:
                vocab.append(line.split(' ')[0])
    for i, weights in enumerate(class_word):
        # Top-20 word indices, highest weight first.
        ind = np.argsort(weights)[-1:-21:-1]
        if len(class_names) == len(class_word):
            print(class_names[i])
        print(np.array(vocab)[ind])
def save_res_multi(tests, vals, trains, class_names, vocab_url):
    """Dump per-document predictions for the test/validation/train splits
    to 'res.csv' (tab-separated).

    tests items are (pred, sent, recon); vals and trains items are
    (pred, true, sent, recon), where `sent` is the ordered token-id
    sequence and `recon` the reconstructed word distribution.
    """
    if 'pkl' in vocab_url:
        vocab = pickle.load(open(vocab_url, 'rb'))
        # Order words by their integer id.
        vocab = list(zip(*sorted(vocab.items(), key=lambda x: x[1])))[0]
    else:
        vocab = []
        with open(vocab_url, 'r') as fin:
            for line in fin:
                vocab.append(line.split(' ')[0])
    vocab_size = len(vocab)
    import csv
    with open('res.csv', 'w') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        spamwriter.writerow(['sentence', 'true_label', 'prediction', 'word_by_importance', 'dataset'])
        # Test split has no ground-truth labels.
        for pred, sent, recon in tests:
            sent_order = ' '.join([vocab[word] for word in sent])
            true_label = ' '
            pred_labels = '+'.join(class_names[pred==1])
            # Sentence tokens re-ordered by reconstructed importance.
            recon_sent = np.argsort(recon)[::-1]
            sent_importance = ' '.join([vocab[word] for word in recon_sent if word in sent])
            group = 'test'
            spamwriter.writerow([sent_order, true_label, pred_labels, sent_importance, group])
        for pred, true, sent, recon in vals:
            pred_labels = '+'.join(class_names[pred==1])
            true_label = '+'.join(class_names[true==1])
            sent_order = ' '.join([vocab[word] for word in sent])
            recon_sent = np.argsort(recon)[::-1]
            sent_importance = ' '.join([vocab[word] for word in recon_sent if word in sent])
            group = 'validation'
            spamwriter.writerow([sent_order, true_label, pred_labels, sent_importance, group])
        for pred, true, sent, recon in trains:
            pred_labels = '+'.join(class_names[pred==1])
            true_label = '+'.join(class_names[true==1])
            sent_order = ' '.join([vocab[word] for word in sent])
            recon_sent = np.argsort(recon)[::-1]
            sent_importance = ' '.join([vocab[word] for word in recon_sent if word in sent])
            group = 'train'
            spamwriter.writerow([sent_order, true_label, pred_labels, sent_importance, group])
    print('Result saved in csv.')
def _write_topic_words(f, word_matrix, class_names, vocab):
    """Write the 20 highest-weight vocabulary words for each matrix row."""
    for i, topic in enumerate(word_matrix):
        ind = np.argsort(topic)[-1:-21:-1]
        f.write('<p> {} </p>'.format(class_names[i]))
        for word in ind:
            f.write('{} '.format(vocab[word]))
        f.write('</p>')


def _write_doc_words(f, sent, recon, vocab, vocab_size):
    """Write one document: tokens in order, by importance, and reconstruction."""
    f.write('<p>')
    f.write('<p> In Order:')
    for word in sent:
        f.write('{} '.format(vocab[word]))
    f.write('</p>')
    f.write('<p> By Importance:')
    recon_sent = np.argsort(recon)[::-1]
    for word in recon_sent:
        if word in sent:
            f.write('{} '.format(vocab[word]))
    f.write('</p>')
    f.write('<p> Reconstruct:')
    for word in recon_sent:
        # Stop at the first word whose reconstructed mass drops below
        # 10x the uniform probability; highlight words from the document.
        if recon[word]>=1/vocab_size*10:
            if word in sent:
                f.write('<mark class="red">{}</mark> '.format(vocab[word]))
            else:
                f.write('{} '.format(vocab[word]))
        else:
            break
    f.write('</p>')
    f.write('<HR SIZE=5>')


def _write_labeled_doc(f, pred, true, sent, recon, class_names, vocab, vocab_size):
    """Write one labeled document, flagging all/partially wrong predictions."""
    if (pred[true==1] != 1).all():
        f.write('<p style="background-color:red;">All Wrong</p>')
    elif (pred != true).any():
        f.write('<p style="background-color:blue;">Partial Wrong</p>')
    f.write('<p>prediction: {}, true: {}</p>'.format(
        class_names[pred==1], class_names[true==1]))
    _write_doc_words(f, sent, recon, vocab, vocab_size)


def print_res_multi(tests, vals, trains, class_names, topic_word, class_word, vocab_url):
    """Dump model-inspection output (top words per topic/class plus
    per-document analyses for each split) to 'res.html'.

    Fixes relative to the original:
    - `vocab` was never initialized before appending in the plain-text
      vocabulary branch (NameError);
    - the test loop formatted an undefined `pred_train`;
    - the "Validation" section iterated `trains` instead of `vals`.
    The triply-duplicated per-document HTML generation is factored into
    the `_write_*` helpers above.
    """
    if 'pkl' in vocab_url:
        vocab = pickle.load(open(vocab_url, 'rb'))
        # Order words by their integer id.
        vocab = list(zip(*sorted(vocab.items(), key=lambda x: x[1])))[0]
    else:
        vocab = []
        with open(vocab_url, 'r') as fin:
            for line in fin:
                vocab.append(line.split(' ')[0])
    vocab_size = len(vocab)
    with open('res.html', 'w', encoding='gbk') as f:
        if topic_word is not None:
            f.write('<p style="background-color:green;">Topic word (beta)</p>')
            _write_topic_words(f, topic_word, class_names, vocab)
        if class_word is not None:
            f.write('<p style="background-color:green;">Class word (sum_theta*beta)</p>')
            _write_topic_words(f, class_word, class_names, vocab)
        f.write('<p style="background-color:green;">Test</p>')
        for pred_val, sent, recon in tests:
            f.write('<p>validation threshold: {}</p>'.format(class_names[pred_val==1]))
            _write_doc_words(f, sent, recon, vocab, vocab_size)
        if vals is not None:
            f.write('<p style="background-color:green;">Validation</p>')
            for pred, true, sent, recon in vals:
                _write_labeled_doc(f, pred, true, sent, recon,
                                   class_names, vocab, vocab_size)
        f.write('<p style="background-color:green;">Train</p>')
        for pred, true, sent, recon in trains:
            _write_labeled_doc(f, pred, true, sent, recon,
                               class_names, vocab, vocab_size)
    print('Result saved in html.')
'''Visualization for development'''
def plot_training(caches, labels, rec, names, save=False):
    """Plot one subplot per tracked quantity over training steps.

    Parameters
    ----------
    caches : per-quantity list of curves (one list of values per label)
    labels : per-quantity list of curve labels
    rec : per-quantity x-axis values (training steps)
    names : subplot titles / y-axis labels, one per quantity
    save : bool
        Also save the figure to 'fig/log.png'.
    """
    n = len(names)
    plt.figure(figsize=(5*n, n))
    plt.clf()
    gs = gridspec.GridSpec(1, n)
    gs.update(wspace=0.1, hspace=0.1)
    for i in range(n):
        plt.subplot(gs[i])
        title = '%s_Plot' %(names[i])
        plt.title(title)
        plt.xlabel('Training Steps')
        plt.ylabel(names[i])
        for j, values in enumerate(caches[i]):
            plt.plot(rec[i], values, label=labels[i][j])
    if save:
        plt.savefig('fig/log.png')
    plt.show()
def multilabel_confusion_matrix(true, pred, labels, normalize=False, cmap=plt.cm.Blues):
    """Plot one 2x2 confusion matrix per label for multi-label predictions.

    Parameters
    ----------
    true, pred : (n_samples, n_labels) binary arrays
    labels : sequence of label names, one subplot per label
    normalize : bool
        Normalize each confusion-matrix row to sum to 1.
    cmap : matplotlib colormap

    Fixes relative to the original: the normalize branch divided by
    `cm.sum(...)` before `cm` was assigned (NameError on the first
    subplot), and the cell-annotation loop reused `i`, shadowing the
    subplot index.
    """
    from sklearn.metrics import confusion_matrix
    conf_mats=[]
    for label_col in range(len(labels)):
        true_label = true[:, label_col]
        pred_label = pred[:, label_col]
        conf_mats.append(confusion_matrix(pred_label, true_label))
    plt.figure(figsize=(5*len(labels), len(labels)))
    plt.clf()
    gs = gridspec.GridSpec(1, len(labels))
    gs.update(wspace=1./len(labels), hspace=1./len(labels))
    for i, label in enumerate(labels):
        if normalize:
            # Row-normalize this label's matrix (was: undefined `cm`).
            cm = conf_mats[i].astype('float') / conf_mats[i].sum(axis=1)[:, np.newaxis]
        else:
            cm = conf_mats[i]
        plt.subplot(gs[i])
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(label)
        plt.colorbar()
        tick_marks = np.arange(2)
        plt.xticks(tick_marks, tick_marks)
        plt.yticks(tick_marks, tick_marks)
        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        # Distinct loop names so the subplot index `i` is not clobbered.
        for r, c in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(c, r, format(cm[r, c], fmt),
                     horizontalalignment="center",
                     color="white" if cm[r, c] > thresh else "black")
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis].astype('float')
| |
import numpy as np
import scipy.sparse as spsp
from sklearn.preprocessing import StandardScaler
from scedar import utils
from scedar.eda.plot import regression_scatter
from scedar.eda.plot import hist_dens_plot
from scedar.eda import mtype
from scedar.eda import stats
class SampleFeatureMatrix(object):
"""
SampleFeatureMatrix is a (n_samples, n_features) matrix.
In this package, we are only interested in float features as measured
expression levels.
Parameters
----------
x : {array-like, sparse matrix}
data matrix (n_samples, n_features)
sids : homogenous list of int or string
sample ids. Should not contain duplicated elements.
fids : homogenous list of int or string
feature ids. Should not contain duplicated elements.
Attributes
----------
_x : {array-like, sparse matrix}
data matrix (n_samples, n_features)
_is_sparse: boolean
whether the data matrix is sparse matrix or not
_sids : ndarray
sample ids.
_fids : ndarray
sample ids.
"""
def __init__(self, x, sids=None, fids=None):
super(SampleFeatureMatrix, self).__init__()
if x is None:
raise ValueError("x cannot be None")
else:
if spsp.issparse(x):
x = spsp.csr_matrix(x, dtype="float64")
else:
try:
x = np.array(x, copy=False, dtype="float64")
except ValueError as e:
raise ValueError("Features must be float. {}".format(e))
if x.ndim != 2:
raise ValueError("x has shape (n_samples, n_features)")
if sids is None:
sids = list(range(x.shape[0]))
else:
mtype.check_is_valid_sfids(sids)
if len(sids) != x.shape[0]:
raise ValueError("x has shape (n_samples, n_features)")
if fids is None:
fids = list(range(x.shape[1]))
else:
mtype.check_is_valid_sfids(fids)
if len(fids) != x.shape[1]:
raise ValueError("x has shape (n_samples, n_features)")
self._x = x
self._sids = np.array(sids)
self._fids = np.array(fids)
def s_id_to_ind(self, selected_sids):
"""
Convert a list of sample IDs into sample indices.
"""
sid_list = self.sids
return [sid_list.index(i) for i in selected_sids]
def f_id_to_ind(self, selected_fids):
"""
Convert a list of feature IDs into feature indices.
"""
fid_list = self.fids
return [fid_list.index(i) for i in selected_fids]
def ind_x(self, selected_s_inds=None, selected_f_inds=None):
"""
Subset samples by (sample IDs, feature IDs).
Parameters
----------
selected_s_inds: int array
Index array of selected samples. If is None, select all.
selected_f_inds: int array
Index array of selected features. If is None, select all.
Returns
-------
subset: SampleFeatureMatrix
"""
if selected_s_inds is None:
selected_s_inds = slice(None, None)
if selected_f_inds is None:
selected_f_inds = slice(None, None)
return SampleFeatureMatrix(
x=self._x[selected_s_inds, :][:, selected_f_inds].copy(),
sids=self._sids[selected_s_inds].tolist(),
fids=self._fids[selected_f_inds].tolist())
def id_x(self, selected_sids=None, selected_fids=None):
"""
Subset samples by (sample IDs, feature IDs).
Parameters
----------
selected_sids: id array
ID array of selected samples. If is None, select all.
selected_fids: id array
ID array of selected features. If is None, select all.
Returns
-------
subset: SampleFeatureMatrix
"""
if selected_sids is None:
selected_s_inds = None
else:
selected_s_inds = self.s_id_to_ind(selected_sids)
if selected_fids is None:
selected_f_inds = None
else:
selected_f_inds = self.f_id_to_ind(selected_fids)
return self.ind_x(selected_s_inds, selected_f_inds)
@staticmethod
def filter_1d_inds(f, x):
# f_inds filtered index
if f is None:
f_inds = slice(None, None)
else:
if callable(f):
f_inds = [f(ix) for ix in x]
else:
f_inds = f
return f_inds
def s_ind_x_pair(self, xs_ind, ys_ind, feature_filter=None):
x = self._x[xs_ind, :]
y = self._x[ys_ind, :]
if self._is_sparse:
x = x.todense().A1
y = y.todense().A1
if callable(feature_filter):
f_inds = self.filter_1d_inds(
lambda pair: feature_filter(pair[0], pair[1]), zip(x, y))
else:
f_inds = self.filter_1d_inds(feature_filter, zip(x, y))
xf = x[f_inds]
yf = y[f_inds]
return xf, yf
    def s_ind_regression_scatter(self, xs_ind, ys_ind, feature_filter=None,
                                 xlab=None, ylab=None, title=None,
                                 **kwargs):
        """
        Regression plot on two samples with xs_ind and ys_ind.

        Parameters
        ----------
        xs_ind: int
            Sample index of x.
        ys_ind: int
            Sample index of y.
        feature_filter: bool array, or int array, or callable(x, y)
            If feature_filter is bool / int array, directly select features
            with it. If feature_filter is callable, it will be applied on each
            (x, y) value tuple.
        xlab: str
        ylab: str
        title: str
        """
        xf, yf = self.s_ind_x_pair(xs_ind, ys_ind, feature_filter)
        # Default axis labels: the corresponding sample IDs.
        if xlab is None:
            xlab = self._sids[xs_ind]
        if ylab is None:
            ylab = self._sids[ys_ind]
        return regression_scatter(x=xf, y=yf, xlab=xlab, ylab=ylab,
                                  title=title, **kwargs)
    def s_id_regression_scatter(self, xs_id, ys_id, feature_filter=None,
                                xlab=None, ylab=None, title=None, **kwargs):
        """
        Regression plot on two samples with xs_id and ys_id.

        Parameters
        ----------
        xs_id:
            Sample ID of x.
        ys_id:
            Sample ID of y.
        feature_filter: bool array, or int array, or callable(x, y)
            If feature_filter is bool / int array, directly select features
            with it. If feature_filter is callable, it will be applied on each
            (x, y) value tuple.
        xlab: str
        ylab: str
        title: str
        """
        xs_ind, ys_ind = self.s_id_to_ind([xs_id, ys_id])
        return self.s_ind_regression_scatter(xs_ind, ys_ind,
                                             feature_filter=feature_filter,
                                             xlab=xlab, ylab=ylab, title=title,
                                             **kwargs)
def f_ind_x_pair(self, xf_ind, yf_ind, sample_filter=None):
x = self._x[:, xf_ind]
y = self._x[:, yf_ind]
if self._is_sparse:
x = x.todense().A1
y = y.todense().A1
if callable(sample_filter):
s_inds = self.filter_1d_inds(
lambda pair: sample_filter(pair[0], pair[1]), zip(x, y))
else:
s_inds = self.filter_1d_inds(sample_filter, zip(x, y))
xf = x[s_inds]
yf = y[s_inds]
return (xf, yf)
    def f_ind_regression_scatter(self, xf_ind, yf_ind, sample_filter=None,
                                 xlab=None, ylab=None, title=None,
                                 **kwargs):
        """
        Regression plot on two features with xf_ind and yf_ind.

        Parameters
        ----------
        xf_ind: int
            Feature index of x.
        yf_ind: int
            Feature index of y.
        sample_filter: bool array, or int array, or callable(x, y)
            If sample_filter is bool / int array, directly select samples
            with it. If sample_filter is callable, it will be applied on each
            (x, y) value tuple.
        xlab: str
        ylab: str
        title: str
        """
        xf, yf = self.f_ind_x_pair(xf_ind, yf_ind, sample_filter)
        # Default the axis labels to the feature IDs being plotted.
        if xlab is None:
            xlab = self._fids[xf_ind]
        if ylab is None:
            ylab = self._fids[yf_ind]
        return regression_scatter(x=xf, y=yf, xlab=xlab, ylab=ylab,
                                  title=title, **kwargs)
    def f_id_regression_scatter(self, xf_id, yf_id, sample_filter=None,
                                xlab=None, ylab=None, title=None, **kwargs):
        """
        Regression plot on two features with xf_id and yf_id.

        Parameters
        ----------
        xf_id:
            Feature ID of x.
        yf_id:
            Feature ID of y.
        sample_filter: bool array, or int array, or callable(x, y)
            If sample_filter is bool / int array, directly select samples
            with it. If sample_filter is callable, it will be applied on each
            (x, y) value tuple.
        xlab: str
        ylab: str
        title: str
        """
        xf_ind, yf_ind = self.f_id_to_ind([xf_id, yf_id])
        return self.f_ind_regression_scatter(xf_ind, yf_ind,
                                             sample_filter=sample_filter,
                                             xlab=xlab, ylab=ylab, title=title,
                                             **kwargs)
def s_ind_x_vec(self, s_ind, feature_filter=None):
"""
Access a single vector of a sample.
"""
x = self._x[s_ind, :]
if self._is_sparse:
x = x.todense().A1
f_inds = self.filter_1d_inds(feature_filter, x)
xf = x[f_inds]
return xf
def s_ind_dist(self, s_ind, feature_filter=None, xlab=None, ylab=None,
title=None, figsize=(5, 5), ax=None, **kwargs):
xf = self.s_ind_x_vec(s_ind, feature_filter)
return hist_dens_plot(xf, title=title, xlab=xlab, ylab=ylab,
figsize=figsize, ax=ax, **kwargs)
def s_id_dist(self, s_id, feature_filter=None, xlab=None, ylab=None,
title=None, figsize=(5, 5), ax=None, **kwargs):
s_ind = self.s_id_to_ind([s_id])[0]
return self.s_ind_dist(s_ind, feature_filter=feature_filter,
title=title, xlab=xlab, ylab=ylab,
figsize=figsize, ax=ax, **kwargs)
    def f_ind_x_vec(self, f_ind, sample_filter=None, transform=None):
        """
        Access a single feature's value vector across samples.

        Parameters
        ----------
        f_ind: int
            Feature index.
        sample_filter: bool array, or int array, or callable(x)
            Selector applied to the feature's value vector.
        transform: callable
            Optional elementwise transform applied after filtering.

        Raises
        ------
        ValueError
            If transform is given but not callable.
        """
        x = self._x[:, f_ind]
        if self._is_sparse:
            # Flatten the np.matrix column into a 1-D array.
            x = x.todense().A1
        s_inds = self.filter_1d_inds(sample_filter, x)
        xf = x[s_inds]
        if transform is not None:
            if callable(transform):
                xf = np.array(list(map(transform, xf)))
            else:
                raise ValueError("transform must be a callable")
        return xf
def f_id_x_vec(self, f_id, sample_filter=None):
f_ind = self.f_id_to_ind([f_id])[0]
return self.f_ind_x_vec(f_ind, sample_filter=sample_filter)
def f_ind_dist(self, f_ind, sample_filter=None, xlab=None, ylab=None,
title=None, figsize=(5, 5), ax=None, **kwargs):
xf = self.f_ind_x_vec(f_ind, sample_filter)
return hist_dens_plot(xf, title=title, xlab=xlab, ylab=ylab,
figsize=figsize, ax=ax, **kwargs)
def f_id_dist(self, f_id, sample_filter=None, xlab=None, ylab=None,
title=None, figsize=(5, 5), ax=None, **kwargs):
f_ind = self.f_id_to_ind([f_id])[0]
return self.f_ind_dist(f_ind, sample_filter=sample_filter,
title=title, xlab=xlab, ylab=ylab,
figsize=figsize, ax=ax, **kwargs)
def f_sum(self, f_sum_filter=None):
"""
For each sample, compute the sum of all features.
Returns
-------
rowsum: float array
(filtered_n_samples,)
"""
rowsum = self._x.sum(axis=1)
if self._is_sparse:
rowsum = rowsum.A1
s_inds = self.filter_1d_inds(f_sum_filter, rowsum)
rowsumf = rowsum[s_inds]
return rowsumf
def f_sum_dist(self, f_sum_filter=None, xlab=None, ylab=None,
title=None, figsize=(5, 5), ax=None, **kwargs):
"""
Plot the distribution of the feature sum of each sample, (n_samples,).
"""
xf = self.f_sum(f_sum_filter)
return hist_dens_plot(xf, title=title, xlab=xlab, ylab=ylab,
figsize=figsize, ax=ax, **kwargs)
def s_sum(self, s_sum_filter=None):
"""
For each feature, computer the sum of all samples.
Returns
-------
xf: float array
(filtered_n_features,)
"""
colsum = self._x.sum(axis=0)
if self._is_sparse:
colsum = colsum.A1
f_inds = self.filter_1d_inds(s_sum_filter, colsum)
colsumf = colsum[f_inds]
return colsumf
def s_sum_dist(self, s_sum_filter=None, xlab=None, ylab=None,
title=None, figsize=(5, 5), ax=None, **kwargs):
"""
Plot the distribution of the sample sum of each feature, (n_features,).
"""
xf = self.s_sum(s_sum_filter)
return hist_dens_plot(xf, title=title, xlab=xlab, ylab=ylab,
figsize=figsize, ax=ax, **kwargs)
def f_cv(self, f_cv_filter=None):
"""
For each sample, compute the coefficient of variation of all features.
Returns
-------
xf: float array
(filtered_n_samples,)
"""
if self._x.shape[1] == 0:
return np.repeat(np.nan, self._x.shape[0])
ss = StandardScaler(with_mean=False).fit(self._x.T)
n_fts = self._x.shape[1]
rowsd = np.sqrt(ss.var_ * (n_fts / (n_fts - 1)))
rowmean = ss.mean_
| |
# list of lists to set of tuples
cycles_copy = copy.deepcopy(cycles) # store cycles found before fixing
# fix cycles in current subset of edges
if len(cycles_copy)>0:
if verbose:
print '>>%d cycles found!'%len(cycles)
print '>>Finding edges to remove to break cycles in solution'
if fix_cycles_method == 'mip':
edges_to_remove = fix_cycles_mip_heuristic(selected_items.union(kmip_elem), cycles, weights, values, obj=fix_cycles_obj)
elif fix_cycles_method == 'greedy':
edges_to_remove = fix_cycles_greedy(selected_items.union(kmip_elem), cycles, weights, values, obj=fix_cycles_obj, verbose=verbose)
selected_items = selected_items.union(kmip_elem)
for a in range(len(edges_to_remove)):
selected_items.remove(edges_to_remove[a])
if verbose:
print('>>%d edges removed to break cycles'%len(edges_to_remove))
print('>>Decrease in current MLB objective value: %d'%(sum([values[k] for k in edges_to_remove])))
else:
if verbose:
print '>>No cycles found!'
selected_items = selected_items.union(kmip_elem)
# end pass and assess whether another pass is possible
if verbose:
print('------End of Knapsack-Repair Pass #%d-------'%npasses)
print('>>Number of edges added so far: %d'%len(selected_items))
# compute remaining budget
available_budget = capacity - sum([weights[e] for e in selected_items])
if verbose:
print('>>Budget remaining: %0.6f'%available_budget)
print('>>Current MLB objective value: %d'%(sum([values[k] for k in selected_items])))
# find remaining edge candidates
if len(cycles_copy)>0:
E_candidates = set(E_candidates).difference(selected_items).difference(set(edges_to_remove))
E_candidates = list(E_candidates)
elif len(cycles_copy)==0:
E_candidates = set(E_candidates).difference(selected_items)
E_candidates = list(E_candidates)
if verbose:
print('>>Candidates remaining: %d'%len(E_candidates))
if (len(E_candidates)==0):
can_add_more_edges = False
if verbose:
print('>>All candidate edges eliminated')
else:
min_item_cost = min([weights[e] for e in E_candidates])
if (available_budget < min_item_cost):
if verbose:
print('>>Not enough budget left to purchase cheapest remaining candidate edge')
print('Final remaining budget: %0.4f'%available_budget)
print('Final min cost candidate: %0.4f'%min_item_cost)
can_add_more_edges = False
if (appendlog == '') or verbose:
print('Knapsack-repair took %0.2f seconds'%(time.time()-timer))
selected_items_weight = sum(weights[i] for i in selected_items)
selected_items_mlb_value = sum(values[i] for i in selected_items)
G = nx.Graph(directed=False)
G.add_nodes_from(vertices)
for (i,j) in selected_items:
G.add_edge(i,j)
cycles = nx.cycle_basis(G) # list of cycles
assert selected_items_weight <= capacity + 0.0000000001, 'Selected items exceed weight capacity'
assert len(nx.cycle_basis(G)) == 0, 'Cycles present in selected items solution'
if (appendlog == '') or verbose:
print('Final solution cost: %0.4f'%(selected_items_weight))
print('Final solution MLB value: %0.4f'%(selected_items_mlb_value))
f_selected_items = evaluate_solution(selected_items, vertices, demand)[0]
print('Final solution true value: %0.4f'%(f_selected_items))
# Save results if this is a standalone run
if appendlog == '':
resultsfilename = resultfiledir+instancestring+'_'+str(solverunid)+'.csv'
with open(resultsfilename, 'wb') as results:
writer = csv.writer(results, delimiter=' ')
writer.writerow(['e, x_e'])
for (i,j) in items:
if (i,j) in selected_items:
writer.writerow([(i,j), np.abs(1)])
else:
writer.writerow([(i,j), np.abs(0)])
return selected_items, selected_items_weight, selected_items_mlb_value
def budget_pcsf_mlb_greedy(items, values, weights, capacity, vertices, demand, ratio=True, instancestring = '', logfiledir = '', appendlog = '', resultfiledir = '', verbose = True):
    '''Greedily maximizes a modular lower bound (MLB) for a supermodular function subject to a combination of a knapsack and a graph matroid constraint.
    Inputs:
        items - list of edges that are candidates for selection
        values - dict containing the modular value of each edge
        weights - dict containing the cost of each edge
        capacity - budget for selecting edges
        vertices - list of vertices that make up the graph
        demand - pairwise node profits, used to report the true objective value
        ratio - whether to add edges by value/cost ratio or by value
    Returns:
        selected_items - set of selected edges
        selected_items_weight - total cost of selected edges
        selected_items_mlb_value - total modular value of selected edges
    '''
    if appendlog == '': # standalone run
        # Use '!=' rather than 'is not': string-literal identity is an
        # implementation detail (and a SyntaxWarning on Python 3.8+).
        # The assertion messages previously named the wrong function.
        assert instancestring != '', 'budget_pcsf_mlb_greedy: need to provide an instance string for standalone run'
        assert logfiledir != '', 'budget_pcsf_mlb_greedy: need to specify a log file directory for standalone run'
        assert resultfiledir != '', 'budget_pcsf_mlb_greedy: need to specify a result file directory for standalone run'
        solverunid = np.random.randint(10000)
        logfilename = logfiledir+instancestring+'_'+str(solverunid)+'.log'
        # NOTE(review): stdout is redirected and the handle is never closed;
        # kept as-is because all subsequent prints rely on the redirection.
        sys.stdout = open(logfilename, 'w')
    else: # subroutine run
        logfilename = appendlog
        sys.stdout = open(logfilename, 'a')
    selected_items = set()
    available_budget = capacity
    G = nx.Graph(directed=False)
    G.add_nodes_from(vertices)
    timer = time.time()
    # When greedily maximizing the MLB, inspect edges in descending order
    # of value/cost ratio (default) or of raw value.
    if ratio:
        value_cost_ratio = dict.fromkeys(values.keys())
        for i in values.keys():
            value_cost_ratio[i] = float(values[i])/float(weights[i])
        sorted_items = sorted(value_cost_ratio.items(), key=operator.itemgetter(1), reverse=True)
    else:
        sorted_items = sorted(values.items(), key=operator.itemgetter(1), reverse=True)
    for edge, _score in sorted_items:
        can_afford = weights[edge] <= available_budget
        # Tentatively add the edge; keep it only if it is affordable and
        # does not close a cycle (graphic matroid constraint).
        G.add_edge(edge[0], edge[1])
        does_not_create_cycle = len(nx.cycle_basis(G)) == 0
        if can_afford and does_not_create_cycle:
            selected_items.add(edge)
            available_budget -= weights[edge]
        else:
            G.remove_edge(edge[0], edge[1])
    if (appendlog == '') or verbose:
        print('Greedy MLB maximization took %0.2f seconds'%(time.time()-timer))
    selected_items_weight = sum(weights[i] for i in selected_items)
    selected_items_mlb_value = sum(values[i] for i in selected_items)
    assert selected_items_weight <= capacity + 0.0000000001, 'Selected items exceed weight capacity'
    assert len(nx.cycle_basis(G)) == 0, 'Cycles present in selected items solution'
    if (appendlog == '') or verbose:
        print('Final solution cost: %0.4f'%(selected_items_weight))
        print('Final solution MLB value: %0.4f'%(selected_items_mlb_value))
        f_selected_items = evaluate_solution(selected_items, vertices, demand)[0]
        print('Final solution true value: %0.4f'%(f_selected_items))
    # Save results if this is a standalone run
    if appendlog == '':
        resultsfilename = resultfiledir+instancestring+'_'+str(solverunid)+'.csv'
        with open(resultsfilename, 'wb') as results:
            writer = csv.writer(results, delimiter=' ')
            writer.writerow(['e, x_e'])
            for (i,j) in items:
                if (i,j) in selected_items:
                    writer.writerow([(i,j), np.abs(1)])
                else:
                    writer.writerow([(i,j), np.abs(0)])
    return selected_items, selected_items_weight, selected_items_mlb_value
def parallel_eval(e, G, V, D):
    '''
    Evaluates the effect of removing edge from a spanning tree.
    Inputs:
        e - edge to be removed
        G - spanning tree graph (nx)
        V - vertices in G
        D - demand between pair of nodes in G
    Returns:
        f_G_minus_e - demand satisfied when e is removed from G
    '''
    # Work on a deep copy so the caller's tree is left untouched.
    pruned = copy.deepcopy(G)
    pruned.remove_edge(e[0], e[1])
    return evaluate_solution_from_graph(pruned, V, D)
def evaluate_removing_edges_from_spanning_tree(G_tree, vertices, edges, demand, max_values_current, num_workers=10):
    '''For each edge of spanning tree G_tree, evaluate the loss in objective
    caused by removing that edge, and record it in max_values_current when it
    exceeds the current maximum for that edge.
    Inputs:
        G_tree - spanning tree graph (nx)
        vertices - vertices of G_tree
        demand - pairwise node profits
        max_values_current - dict edge -> best loss seen so far (updated in place)
        num_workers - size of the multiprocessing pool (default 10, as before)
    Returns:
        max_values_current - the updated dict
    '''
    timer = time.time()
    f_G_tree = evaluate_solution_from_graph(G_tree, vertices, demand)
    pool = multiprocessing.Pool(num_workers)
    try:
        parallel_eval_e = partial(parallel_eval, G=G_tree, V=vertices, D=demand)
        result_list = pool.map(parallel_eval_e, G_tree.edges)
    finally:
        # Always release worker processes, even if map() raises.
        pool.close()
        pool.join()
    # pool.map preserves input order, so zip pairs each edge with its result.
    for edge, f_without_edge in zip(G_tree.edges, result_list):
        loss = f_G_tree - f_without_edge
        if loss > max_values_current[edge]:
            max_values_current[edge] = loss
    print('Parallelly re-evaluating f for each edge in the tree and comparing to current max values took %0.2f seconds'%(time.time() - timer))
    return max_values_current
def find_max_values(edges, vertices, demand, nruns = 20, method = 'connected'):
'''Estimates the maximum possible value each edge adds to a graph with pairwise node profits
(restricted supermodular on edges), when the edge is added last. Generates random spanning
trees on the graph, and finds the loss in objective caused by removing e.
Inputs:
edges - list of all edges in the graph
vertices - list of all vertices in the graph
demand - pairwise node profits
nruns - number of times to generate random spanning tree
method - 'cycles' if checking cycle_basis, else 'connected' if checking is_connected
Returns:
e_last_value - the value of e when it is added last to a spanning tree (forest) on the graph
'''
max_values = dict.fromkeys(edges)
for e in edges:
max_values[e] = 0
master_timer = time.time()
if method == 'connected':
# Construct the full graph
G_full_orig = nx.Graph(directed=False)
G_full_orig.add_nodes_from(vertices)
for (i,j) in edges:
G_full_orig.add_edge(i,j)
print('graph is connected initially: ', nx.is_connected(G_full_orig))
# Get edges participating in cycles
edges_participating_in_cycles = set()
G_full_orig_cycles = nx.cycle_basis(G_full_orig)
for c in G_full_orig_cycles:
cp = tuple(c) + (c[0],)
c_edges = set([(cp[k], cp[k+1]) if (cp[k], cp[k+1]) in edges else (cp[k+1], cp[k]) for k in range(len(cp)-1)])
edges_participating_in_cycles = edges_participating_in_cycles.union(c_edges)
edges_participating_in_cycles = list(edges_participating_in_cycles)
for run in range(nruns):
print(run)
# Copy the full graph
G_full = copy.deepcopy(G_full_orig)
G_full_is_tree = nx.is_tree(G_full)
# Randomly order edges participating in cycles
timer = time.time()
edge_order = np.random.permutation(len(edges_participating_in_cycles))
ordered_edges = [edges_participating_in_cycles[edge_order[j]] for j in range(len(edge_order))]
# print('order of edges participating in cycles to remove:')
# print(ordered_edges)
oe = 0
while not G_full_is_tree:
# Try to remove the next ordered edge
G_full.remove_edge(ordered_edges[oe][0], ordered_edges[oe][1])
# Check if graph is still connected
connected = nx.is_connected(G_full)
if connected:
print('removed')
G_full_is_tree = nx.is_tree(G_full)
oe += 1
else:
print('disconnected')
# Put the edge back
G_full.add_edge(ordered_edges[oe][0], ordered_edges[oe][1])
oe += 1
# For any edge still present in G_full, evaluate G_full with and without e
max_values = evaluate_removing_edges_from_spanning_tree(G_full, vertices, edges, demand, max_values)
elif method == 'cycles':
# Construct the full graph
G_full_orig = nx.Graph(directed=False)
G_full_orig.add_nodes_from(vertices)
for (i,j) in edges:
G_full_orig.add_edge(i,j)
for | |
# read complete
fsm.If(rdata.rvalid, rdata.rready,
read_count == 0).goto_init()
def connect(self, index, ports, name):
if not self.noio:
raise ValueError('I/O ports can not be connected to others.')
ports = defaultdict(lambda: None, ports)
if '_'.join([name, 'awid']) in ports:
awid = ports['_'.join([name, 'awid'])]
else:
awid = None
awaddr = ports['_'.join([name, 'awaddr'])]
awlen = ports['_'.join([name, 'awlen'])]
awsize = ports['_'.join([name, 'awsize'])]
awburst = ports['_'.join([name, 'awburst'])]
awlock = ports['_'.join([name, 'awlock'])]
awcache = ports['_'.join([name, 'awcache'])]
awprot = ports['_'.join([name, 'awprot'])]
awqos = ports['_'.join([name, 'awqos'])]
if '_'.join([name, 'awuser']) in ports:
awuser = ports['_'.join([name, 'awuser'])]
else:
awuser = None
awvalid = ports['_'.join([name, 'awvalid'])]
awready = ports['_'.join([name, 'awready'])]
if self.waddrs[index].awid is not None:
self.waddrs[index].awid.connect(awid if awid is not None else 0)
self.waddrs[index].awaddr.connect(awaddr)
self.waddrs[index].awlen.connect(awlen if awlen is not None else 0)
self.waddrs[index].awsize.connect(awsize if awsize is not None else
int(math.log(self.datawidth // 8)))
self.waddrs[index].awburst.connect(awburst if awburst is not None else BURST_INCR)
self.waddrs[index].awlock.connect(awlock if awlock is not None else 0)
self.waddrs[index].awcache.connect(awcache)
self.waddrs[index].awprot.connect(awprot)
self.waddrs[index].awqos.connect(awqos if awqos is not None else 0)
if self.waddrs[index].awuser is not None:
self.waddrs[index].awuser.connect(awuser if awuser is not None else 0)
self.waddrs[index].awvalid.connect(awvalid)
awready.connect(self.waddrs[index].awready)
wdata = ports['_'.join([name, 'wdata'])]
wstrb = ports['_'.join([name, 'wstrb'])]
wlast = ports['_'.join([name, 'wlast'])]
if '_'.join([name, 'wuser']) in ports:
wuser = ports['_'.join([name, 'wuser'])]
else:
wuser = None
wvalid = ports['_'.join([name, 'wvalid'])]
wready = ports['_'.join([name, 'wready'])]
self.wdatas[index].wdata.connect(wdata)
self.wdatas[index].wstrb.connect(wstrb)
self.wdatas[index].wlast.connect(wlast if wlast is not None else 1)
if self.wdatas[index].wuser is not None:
self.wdatas[index].wuser.connect(wuser if wuser is not None else 0)
self.wdatas[index].wvalid.connect(wvalid)
wready.connect(self.wdatas[index].wready)
if '_'.join([name, 'bid']) in ports:
bid = ports['_'.join([name, 'bid'])]
else:
bid = None
bresp = ports['_'.join([name, 'bresp'])]
if '_'.join([name, 'buser']) in ports:
buser = ports['_'.join([name, 'buser'])]
else:
buser = None
bvalid = ports['_'.join([name, 'bvalid'])]
bready = ports['_'.join([name, 'bready'])]
if bid is not None:
bid.connect(self.wresps[index].bid if self.wresps[index].bid is not None else 0)
bresp.connect(self.wresps[index].bresp)
if buser is not None:
buser.connect(self.wresps[index].buser if self.wresps[index].buser is not None else 0)
bvalid.connect(self.wresps[index].bvalid)
self.wresps[index].bready.connect(bready)
if '_'.join([name, 'arid']) in ports:
arid = ports['_'.join([name, 'arid'])]
else:
arid = None
araddr = ports['_'.join([name, 'araddr'])]
arlen = ports['_'.join([name, 'arlen'])]
arsize = ports['_'.join([name, 'arsize'])]
arburst = ports['_'.join([name, 'arburst'])]
arlock = ports['_'.join([name, 'arlock'])]
arcache = ports['_'.join([name, 'arcache'])]
arprot = ports['_'.join([name, 'arprot'])]
arqos = ports['_'.join([name, 'arqos'])]
if '_'.join([name, 'aruser']) in ports:
aruser = ports['_'.join([name, 'aruser'])]
else:
aruser = None
arvalid = ports['_'.join([name, 'arvalid'])]
arready = ports['_'.join([name, 'arready'])]
if self.raddrs[index].arid is not None:
self.raddrs[index].arid.connect(arid if arid is not None else 0)
self.raddrs[index].araddr.connect(araddr)
self.raddrs[index].arlen.connect(arlen if arlen is not None else 0)
self.raddrs[index].arsize.connect(arsize if arsize is not None else
int(math.log(self.datawidth // 8)))
self.raddrs[index].arburst.connect(arburst if arburst is not None else BURST_INCR)
self.raddrs[index].arlock.connect(arlock if arlock is not None else 0)
self.raddrs[index].arcache.connect(arcache)
self.raddrs[index].arprot.connect(arprot)
self.raddrs[index].arqos.connect(arqos if arqos is not None else 0)
if self.raddrs[index].aruser is not None:
self.raddrs[index].aruser.connect(aruser if aruser is not None else 0)
self.raddrs[index].arvalid.connect(arvalid)
arready.connect(self.raddrs[index].arready)
if '_'.join([name, 'rid']) in ports:
rid = ports['_'.join([name, 'rid'])]
else:
rid = None
rdata = ports['_'.join([name, 'rdata'])]
rresp = ports['_'.join([name, 'rresp'])]
rlast = ports['_'.join([name, 'rlast'])]
if '_'.join([name, 'ruser']) in ports:
ruser = ports['_'.join([name, 'ruser'])]
else:
ruser = None
rvalid = ports['_'.join([name, 'rvalid'])]
rready = ports['_'.join([name, 'rready'])]
if rid is not None:
rid.connect(self.rdatas[index].rid if self.rdatas[index].rid is not None else 0)
rdata.connect(self.rdatas[index].rdata)
rresp.connect(self.rdatas[index].rresp)
if rlast is not None:
rlast.connect(self.rdatas[index].rlast)
if ruser is not None:
ruser.connect(self.rdatas[index].ruser if self.rdatas[index].ruser is not None else 0)
rvalid.connect(self.rdatas[index].rvalid)
self.rdatas[index].rready.connect(rready)
def make_memory_image(filename, length, pattern='inc', dtype=None,
                      datawidth=32, wordwidth=8, endian='little'):
    """Create a hex-text memory image of 'length' words.

    pattern 'inc' fills with 0, 1, 2, ...; any other value fills with zeros.
    """
    import numpy as np
    if dtype is None:
        dtype = np.int64
    if pattern == 'inc':
        array = np.arange(length, dtype=dtype)
    else:
        array = np.zeros([length], dtype=dtype)
    to_memory_image(filename, array,
                    datawidth=datawidth, wordwidth=wordwidth,
                    endian=endian)
def to_memory_image(filename, array, length=None,
                    datawidth=32, wordwidth=8, endian='little', blksize=4096):
    """Write 'array' to 'filename' as a hex-text memory image.

    Each line holds one memory word of 'wordwidth' bits; data words of
    'datawidth' bits are split or packed as needed. Returns the number of
    memory words written.
    """
    import numpy as np
    if not isinstance(array, np.ndarray):
        array = np.array(array)
    array = np.reshape(array, [-1])
    if not isinstance(array[0], (int, np.int64, np.int32)):
        raise TypeError("not supported type: '%s'" %
                        str(type(array[0])))
    if length is not None:
        if len(array) > length:
            array = array[:length]
        elif len(array) < length:
            # np.append returns a new array; the previous code discarded the
            # result, so the zero padding was silently lost.
            array = np.append(array, np.zeros([length - len(array)],
                                              dtype=array.dtype))
    num_hex = int(math.ceil(wordwidth / 4))
    fmt = ''.join(['%0', str(num_hex), 'x\n'])
    if datawidth >= wordwidth:
        # Split each data word into several narrower memory words.
        num = int(math.ceil(datawidth / wordwidth))
        zero = np.zeros(list(array.shape) + [num], dtype=np.int64)
        base = array.reshape([-1, 1])
        shamt = np.arange(num, dtype=np.int64) * [wordwidth]
        if endian == 'big':
            # ndarray has no .reverse(); use a reversed view instead.
            shamt = shamt[::-1]
        mask = np.full([1], 2 ** wordwidth - 1, dtype=np.int64)
        data = (((zero + base) >> shamt) & mask).reshape([-1])
        with open(filename, 'w') as f:
            for i in range(0, len(data), blksize):
                blk = data[i:i + blksize]
                f.write(''.join([fmt % d for d in blk]))
        return len(data)
    else:
        # Pack several data words into one wider memory word.
        num = int(math.ceil(wordwidth / datawidth))
        base = array.reshape([-1, num])
        shamt = np.arange(num, dtype=np.int64) * [datawidth]
        if endian == 'big':
            shamt = shamt[::-1]
        mask = np.full([1], 2 ** datawidth - 1, dtype=np.int64)
        data = (base & mask) << shamt
        data = np.bitwise_or.reduce(data, -1).reshape([-1])
        with open(filename, 'w') as f:
            for i in range(0, len(data), blksize):
                blk = data[i:i + blksize]
                f.write(''.join([fmt % d for d in blk]))
        return len(data)
def aligned_shape(shape, datawidth, mem_datawidth):
    """Round the last dimension up so it fills whole mem_datawidth words."""
    result = list(shape[:])
    # Nothing to align when a data word is at least one memory word wide.
    if datawidth >= mem_datawidth:
        return result
    words_per_mem = mem_datawidth // datawidth
    result[-1] = int(math.ceil(result[-1] / words_per_mem)) * words_per_mem
    return result
def shape_to_length(shape):
    """Total number of elements in a tensor of the given shape (1 if empty)."""
    total = 1
    for dim in shape:
        total *= dim
    return total
def shape_to_memory_size(shape, datawidth, mem_datawidth=None, block_size=4096):
    """Memory footprint in words, rounded up to a whole number of blocks."""
    if mem_datawidth is not None:
        shape = aligned_shape(shape, datawidth, mem_datawidth)
    word_bytes = int(math.ceil(datawidth / 8))
    words_per_block = block_size // word_bytes
    num_words = shape_to_length(shape)
    num_blocks = int(math.ceil(num_words / words_per_block))
    return words_per_block * num_blocks
def set_memory(mem, src, mem_datawidth, src_datawidth, mem_offset,
               num_align_words=None):
    """Copy 'src' into 'mem', re-packing between the two word widths."""
    narrowing = mem_datawidth < src_datawidth
    handler = _set_memory_narrow if narrowing else _set_memory_wide
    return handler(mem, src, mem_datawidth, src_datawidth, mem_offset,
                   num_align_words)
def _set_memory_wide(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words=None):
if mem_datawidth > 64:
raise ValueError('not supported')
import numpy as np
if num_align_words is not None:
src = align(src, num_align_words)
num_pack = int(math.ceil(mem_datawidth / src_datawidth))
src_mask = np.full([1], 2 ** src_datawidth - 1, dtype=np.int64)
mem_mask = np.full([1], 2 ** mem_datawidth - 1, dtype=np.int64)
offset = mem_offset // int(math.ceil(mem_datawidth / 8))
if src.shape[-1] % num_pack != 0:
pads = []
for s in src.shape[:-1]:
pads.append((0, 0))
pads.append((0, num_pack - src.shape[-1]))
src = np.pad(src, pads, 'constant')
masked_data = src.astype(np.int64) & src_mask
pack = np.arange(src.shape[-1], dtype=np.int64) % [num_pack]
shift = [src_datawidth] * pack
v = (masked_data << shift) & mem_mask
v = np.reshape(v, [-1, num_pack])
v = np.bitwise_or.reduce(v, -1)
dst_size = mem[offset:offset + v.shape[-1]].size
if v.size > dst_size:
raise ValueError("""too large source data: """
"""destination size (%d) < source size (%d)""" %
(dst_size, v.size))
mem[offset:offset + v.shape[-1]] = v
def _set_memory_narrow(mem, src, mem_datawidth, src_datawidth, mem_offset,
num_align_words=None):
if mem_datawidth > 64:
raise ValueError('not supported')
import numpy as np
if num_align_words is not None:
src = align(src, num_align_words)
num_pack = int(math.ceil(src_datawidth / mem_datawidth))
src_mask = np.full([1], 2 ** src_datawidth - 1, dtype=np.int64)
mem_mask = np.full([1], 2 ** mem_datawidth - 1, dtype=np.int64)
offset = mem_offset // int(math.ceil(mem_datawidth / 8))
pack = np.arange(num_pack, dtype=np.int64)
shift = [mem_datawidth] * pack
dup_src_based = np.zeros(list(src.shape) + [num_pack], dtype=np.int64)
dup_src = dup_src_based + np.reshape(src, list(src.shape) + [1])
v = dup_src >> shift
v = np.reshape(v, [-1])
v = v & mem_mask
dst_size = mem[offset:offset + v.shape[-1]].size
if v.size > dst_size:
raise ValueError("""too large source data: """
"""destination size (%d) < source size (%d)""" %
(dst_size, v.size))
mem[offset:offset + v.shape[-1]] = v
def align(src, num_align_words):
    """Zero-pad each row of 'src' so its last axis becomes a multiple of
    num_align_words; returns a flattened int64 array."""
    if num_align_words == 1:
        return src
    import numpy as np
    # Padded overall shape: last dimension rounded up to the alignment.
    padded_shape = list(src.shape[:])
    padded_shape[-1] = int(math.ceil(padded_shape[-1] / num_align_words)) * num_align_words
    out = np.zeros(padded_shape, dtype=np.int64).reshape([-1])
    gap = num_align_words - src.shape[-1] % num_align_words
    pos = 0
    col = 0
    for value in src.reshape([-1]):
        out[pos] = value
        pos += 1
        col += 1
        if col == src.shape[-1]:
            # End of a row: skip the padding slots (none if already aligned).
            col = 0
            if gap < num_align_words:
                pos += gap
    return out
def split_read_write(m, ports, prefix,
read_prefix='r_', write_prefix='w_'):
# Read (AR, R)
r_ports = {}
for name, port in ports.items():
r_name = read_prefix + port.name
if name.startswith(prefix + '_ar') or name.startswith(prefix + '_r'):
if isinstance(port, vtypes.Reg):
r_port = m.RegLike(port, name=r_name)
port.connect(r_port)
else:
r_port = m.WireLike(port, name=r_name)
r_port.connect(port)
else:
r_port = m.WireLike(port, name=r_name)
if isinstance(port, vtypes.Wire):
r_port.assign(0)
r_ports[r_name] = r_port
# Write (AW, W, B)
w_ports = {}
for name, port in ports.items():
w_name = write_prefix + port.name
if (name.startswith(prefix + '_aw') or
name.startswith(prefix + '_w') or name.startswith(prefix + '_b')):
if | |
elif block_name == 'p':
if record_data:
element_type = ArticleElementType.PARAGRAPH
target_txt = format_text(child.text)
element_list.append(ArticleElement(type=element_type, content=target_txt))
elif block_name == 'div' and 'tables' in child_class:
if record_data:
element_type = ArticleElementType.TABLE
tbl = html_table_extract_elsevier(child)
element_list.append(ArticleElement(type=element_type, content=tbl))
elif 'figure' in block_name:
if record_data:
element_type = ArticleElementType.FIGURE
fig = html_figure_extract_elsevier(child)
element_list.append(ArticleElement(type=element_type, content=fig))
else:
html_section_extract_elsevier(
section_root=child,
element_list=element_list,
record_data=record_data
)
except TypeError:
pass
return element_list
def html_section_extract_acs(section_root,
                             element_list: Optional[List] = None):
    """
    Depth-first search of the text in the sections (ACS journal HTML).

    Collects section titles, paragraphs, figures and tables into
    element_list (created if None) and returns it.
    """
    if element_list is None:
        element_list = list()
    for child in section_root.children:
        block_name = child.name
        try:
            # if the child is a section title
            if re.match(r"h[0-9]", block_name):
                hid = child.get('id', '')
                # Genuine section headers carry ids of the form "_i<n>".
                if not re.match(r"_i[0-9]+", hid):
                    continue
                element_type = ArticleElementType.SECTION_TITLE
                target_txt = format_text(child.text)
                element_list.append(ArticleElement(type=element_type, content=target_txt))
            # if the child is a section block
            elif block_name == 'div':
                div_class = child.get('class', [''])
                # BUG FIX: normalize an empty class list in a separate
                # statement. The previous 'elif' chain made the branches
                # below unreachable for empty-class divs, so their content
                # was silently dropped instead of being recursed into.
                if len(div_class) == 0:
                    div_class = ['']
                if div_class[0] == "NLM_p":
                    # Pull figures and tables out of the paragraph first so
                    # child.text below contains only the running text.
                    for s in child.find_all('figure'):
                        fig_element = s.extract()
                        element_type = ArticleElementType.FIGURE
                        fig = html_figure_extract_acs(fig_element)
                        element_list.append(ArticleElement(type=element_type, content=fig))
                    html_section_extract_acs(section_root=child, element_list=element_list)
                    for s in child.find_all('div', {"class": "NLM_table-wrap"}):
                        table_element = s.extract()
                        element_type = ArticleElementType.TABLE
                        tbl = html_table_extract_acs(table_element)
                        element_list.append(ArticleElement(type=element_type, content=tbl))
                    element_type = ArticleElementType.PARAGRAPH
                    target_txt = format_text(child.text)
                    element_list.append(ArticleElement(type=element_type, content=target_txt))
                elif div_class[0] == "NLM_table-wrap":
                    tbl = html_table_extract_acs(child)
                    element_type = ArticleElementType.TABLE
                    element_list.append(ArticleElement(type=element_type, content=tbl))
                else:
                    html_section_extract_acs(section_root=child, element_list=element_list)
            elif 'figure' in block_name:
                continue
            else:
                html_section_extract_acs(section_root=child, element_list=element_list)
        except TypeError:
            # NavigableString children have name None; re.match / 'in' raise
            # TypeError for them and they are intentionally skipped.
            pass
    return element_list
def html_section_extract_aaas(section_root,
                              element_list: Optional[List] = None):
    """
    Depth-first traversal collecting section titles and paragraphs from
    AAAS (Science) article HTML into element_list (created if None).
    """
    if element_list is None:
        element_list = []
    for node in section_root.children:
        tag = node.name
        try:
            if re.match(r"h[2-9]", tag):
                # Only class-less headers are genuine section titles.
                if len(node.get('class', [])) > 0:
                    continue
                element_list.append(ArticleElement(
                    type=ArticleElementType.SECTION_TITLE,
                    content=format_text(node.text)))
            elif tag == 'p':
                # Real paragraphs carry ids of the form "p-<n>".
                if not re.match(r"p-[1-9]+", node.get('id', '')):
                    continue
                element_list.append(ArticleElement(
                    type=ArticleElementType.PARAGRAPH,
                    content=format_text(node.text)))
            elif 'figure' in tag or 'table' in tag:
                continue
            else:
                html_section_extract_aaas(section_root=node, element_list=element_list)
        except TypeError:
            # NavigableString children have name None; skip them.
            pass
    return element_list
def xml_table_extract_elsevier(xml_table):
    """Parse an Elsevier CALS XML table element into a formatted Table.

    Collects label, caption, footnotes/legend and the cell grid, then
    delegates rendering to ``Table.format_rows()``.
    """
    def _col_index(name):
        # Column names are either 'colN' or a bare number.  np.int8 (used
        # previously) silently overflows past 127 columns; plain int is exact.
        return int(name[3:]) if name.startswith('col') else int(name)

    table = Table()
    footnotes = list()
    rows = list()
    table.id = xml_table.attrib.get('id', None)
    for child in xml_table:
        if 'label' in child.tag:
            table.label = child.text
        elif 'caption' in child.tag:
            table.caption = get_xml_text_iter(child)
        elif 'table-footnote' in child.tag or 'legend' in child.tag:
            footnotes.append(get_xml_text_iter(child))
        elif 'tgroup' in child.tag:
            for xml_row in child.iter(r'{http://www.elsevier.com/xml/common/cals/dtd}row'):
                cells = list()
                for xml_entry in xml_row:
                    if 'entry' not in xml_entry.tag:
                        continue
                    attrs = xml_entry.attrib
                    if 'namest' in attrs and 'nameend' in attrs:
                        # Horizontal span given by start/end column names.
                        width = _col_index(attrs['nameend']) - _col_index(attrs['namest']) + 1
                    else:
                        width = 1
                    # 'morerows' counts the extra rows spanned below this cell.
                    height = int(attrs['morerows']) + 1 if 'morerows' in attrs else 1
                    cells.append(TableCell(get_xml_text_iter(xml_entry), width, height))
                rows.append(TableRow(cells))
    table.footnotes = footnotes
    table.rows = rows
    return table.format_rows()
def xml_table_extract_acs(xml_table):
    """Parse an ACS (OASIS exchange) XML table element into a formatted Table."""
    def _col_index(name):
        # np.int8 (used previously) silently overflows past 127 columns.
        return int(name[3:]) if name.startswith('col') else int(name)

    table = Table()
    footnotes = list()
    rows = list()
    table.id = xml_table.attrib.get('id', None)
    for child in xml_table:
        # NOTE: 'table-wrap-foot' must be tested before 'table' since the
        # former contains the latter as a substring.
        if 'label' in child.tag:
            table.label = child.text
        elif 'caption' in child.tag:
            table.caption = get_xml_text_iter(child)
        elif 'table-wrap-foot' in child.tag:
            for fn_element in child:
                if 'fn' in fn_element.tag:
                    footnotes.append(get_xml_text_iter(fn_element))
            # sometimes the footnotes are not wrapped in "fn" tags
            if not footnotes:
                footnotes.append(get_xml_text_iter(child))
        elif 'table' in child.tag:
            for tb_element in child:
                if 'tgroup' not in tb_element.tag:
                    continue
                for xml_row in tb_element.iter(
                        r'{http://www.niso.org/standards/z39-96/ns/oasis-exchange/table}row'):
                    cells = list()
                    for xml_entry in xml_row:
                        if 'entry' not in xml_entry.tag:
                            continue
                        attrs = xml_entry.attrib
                        if 'namest' in attrs and 'nameend' in attrs:
                            width = _col_index(attrs['nameend']) - _col_index(attrs['namest']) + 1
                        else:
                            width = 1
                        height = int(attrs['morerows']) + 1 if 'morerows' in attrs else 1
                        cells.append(TableCell(get_xml_text_iter(xml_entry), width, height))
                    rows.append(TableRow(cells))
    table.footnotes = footnotes
    table.rows = rows
    return table.format_rows()
def xml_figure_extract(xml_figure):
    """Pull id, label and caption out of an XML figure element."""
    figure = Figure()
    figure.id = xml_figure.attrib.get('id', None)
    for node in xml_figure:
        tag = node.tag
        if 'label' in tag:
            figure.label = node.text
        elif 'caption' in tag:
            figure.caption = get_xml_text_iter(node)
    return figure
def get_html_table_row(tr):
    """
    Build a TableRow from an html table row.

    Parameters
    ----------
    tr: table row (html tag)
    """
    cells = list()
    for child in tr:
        if child.name in ['th', 'td']:
            # int() instead of np.uint8: uint8 silently wraps row/col spans
            # larger than 255 coming from the HTML attributes.
            height = int(child.get('rowspan', 1))
            width = int(child.get('colspan', 1))
            text = format_text(child.text)
            cells.append(TableCell(text, width, height))
    return TableRow(cells)
def get_html_table_rows(root, rows: Optional[List] = None, include_foot: Optional[bool] = True):
    """
    Collect TableRow objects from an html table element.

    Recurses through thead/tbody/tfoot wrappers; ``include_foot=False``
    skips the footer section entirely.
    """
    if rows is None:
        rows = list()
    if isinstance(root, bs4.element.NavigableString):
        return None
    for child in root.children:
        block_name = child.name
        tb_elements = ['thead', 'tbody', 'tfoot'] if include_foot else ['thead', 'tbody']
        if block_name in tb_elements:
            # Bug fix: include_foot was previously dropped on recursion, so
            # footer rows nested below this level were always included.
            get_html_table_rows(child, rows, include_foot=include_foot)
        elif not include_foot and block_name == 'tfoot':
            continue
        elif block_name == 'tr':
            rows.append(get_html_table_row(child))
    return rows
def html_table_extract_wiley(table_div):
    """Extract a formatted Table from a Wiley html table wrapper div."""
    # Caption: concatenation of all <header> texts.
    caption = format_text(' '.join(h.text for h in table_div.find_all('header')))
    table_id = table_div.get('id', '<EMPTY>')
    # Footnotes live in the last div whose class mentions 'footnotes'.
    footnote_div = None
    for div in table_div.find_all('div'):
        cls = div.get('class', '')
        cls = ' '.join(cls) if isinstance(cls, list) else cls
        if 'footnotes' in cls:
            footnote_div = div
    footnotes = list()
    if footnote_div:
        footnotes = [format_text(li.text) for li in footnote_div.find_all('li')]
    tables = table_div.find_all('table')
    rows = get_html_table_rows(tables[0]) if tables else list()
    return Table(
        idx=table_id,
        caption=caption,
        rows=rows,
        footnotes=footnotes).format_rows()
def html_figure_extract_wiley(html_figure):
    """Extract id, label and caption from a Wiley html figure block."""
    idx = html_figure.get('id', '<EMPTY>')
    titles = html_figure.find_all("strong", class_="figure__title")
    caps = html_figure.find_all("div", class_="figure__caption-text")
    label = ' '.join(t.text for t in titles)
    caption = format_text(' '.join(c.text for c in caps))
    return Figure(idx=idx, label=label, caption=caption)
def html_figure_extract_springer(html_figure):
    """Extract id, label and caption from a Springer html figure block.

    Returns a Figure with missing pieces left at their defaults instead of
    crashing on figures without the expected caption markup.
    """
    label = None
    figure_id = '<EMPTY>'
    try:
        # Bug fix: an absent <figcaption> (or missing <b> inside it) used to
        # raise an unhandled AttributeError before the original try block.
        fig_idx_block = html_figure.figcaption.b
        label = fig_idx_block.text
        figure_id = fig_idx_block.get('id', '<EMPTY>')
    except AttributeError:
        pass
    try:
        figure_content = html_figure.find_all('div', class_="c-article-section__figure-content")[0]
        caption = format_text(figure_content.find_all('p')[0].text)
    except IndexError:
        caption = None
    return Figure(idx=figure_id, label=label, caption=caption)
def get_element_text_recursive(root, text=None):
    """Concatenate the text beneath `root`, skipping <a> and <span> subtrees."""
    if text is None:
        text = list()
    # Links and inline spans (e.g. footnote markers) are excluded wholesale.
    if root.name in ('a', 'span'):
        return None
    for node in root.children:
        if isinstance(node, bs4.element.NavigableString):
            text.append(format_text(str(node)))
        else:
            get_element_text_recursive(node, text=text)
    return format_text(''.join(text))
def html_table_extract_rsc(table_div):
    """Extract a formatted Table from an RSC html table div.

    The caption sits in a preceding sibling div; footnotes live in the
    table's <tfoot> as an unnumbered chunk plus <a>-marker/<span> pairs.
    """
    tables = table_div.find_all('table')
    if not tables:
        return Table()
    table = tables[0]
    caption_divs = table_div.findPreviousSiblings('div', {"class": "table_caption"})
    if caption_divs:
        caption_span = caption_divs[0].span
        caption = format_text(caption_span.text)
        table_id = caption_span.get('id', '<EMPTY>')
    else:
        caption = ''
        table_id = None
    footnotes = list()
    footnote_ele = table.find_all('tfoot')
    if footnote_ele:
        footnote_content = footnote_ele[0].tr.th
        unnumbered_footnote = get_element_text_recursive(footnote_content)
        if unnumbered_footnote:
            footnotes.append(unnumbered_footnote)
        for a in footnote_content.find_all('a'):
            # Anchors with an href are citation links, not footnote markers.
            if a.get('href', ''):
                continue
            a_span = a.find_next_sibling('span')
            # Bug fix: a marker <a> without a following <span> used to raise
            # AttributeError on a_span.text.
            if a_span is None:
                continue
            footnotes.append(f'{a.text} {a_span.text}')
    rows = get_html_table_rows(table, include_foot=False)
    return Table(
        idx=table_id,
        caption=caption,
        rows=rows,
        footnotes=footnotes).format_rows()
def html_figure_extract_rsc(html_figure):
    """Extract label and caption from an RSC html figure block."""
    try:
        title = html_figure.find_all('td', class_="image_title")[0]
        # Bug fix: title.b may be absent, which previously raised an
        # unhandled AttributeError; treat it like a missing title cell.
        label = title.b.text.strip()
        caption = format_text(
            ' '.join(cap.text for cap in title.find_all('span', class_="graphic_title")))
        return Figure(label=label, caption=caption)
    except (IndexError, AttributeError):
        return Figure()
def html_table_extract_springer(table_div):
    """Extract a formatted Table from a Springer html table wrapper div."""
    caption_divs = table_div.find_all('div', {"class": "Caption"})
    caption = format_text(caption_divs[0].text) if caption_divs else ''
    table_id = table_div.get('id', '<EMPTY>')
    footnotes = list()
    footer_divs = table_div.find_all('div', {"class": "TableFooter"})
    if footer_divs:
        footnotes = [format_text(p.text) for p in footer_divs[0].find_all('p')]
    tables = table_div.find_all('table')
    rows = get_html_table_rows(tables[0]) if tables else list()
    return Table(
        idx=table_id,
        caption=caption,
        rows=rows,
        footnotes=footnotes).format_rows()
def get_acs_footnote(footnote_div):
    """Split an ACS footnote div into a list of footnote strings.

    <i> tags mark the start of a new footnote; any other content is
    appended to the footnote currently being built.
    """
    footnotes = list()
    for p in footnote_div.find_all('p'):
        for child in p.children:
            if isinstance(child, bs4.element.NavigableString):
                if not footnotes:
                    footnotes.append(format_text(str(child)))
                else:
                    footnotes[-1] += str(child)
            elif child.name == 'i':
                footnotes.append(format_text(child.text))
            elif footnotes:
                footnotes[-1] += child.text
            else:
                # Bug fix: a leading non-<i> tag used to raise IndexError on
                # footnotes[-1]; start the first footnote from it instead.
                footnotes.append(format_text(child.text))
    return footnotes
def html_table_extract_acs(table_div):
    """Extract a formatted Table from an ACS html table wrapper div."""
    caption = ''
    table_id = table_div.get('id', '<EMPTY>')
    footnotes = list()
    for child in table_div.children:
        # Bug fix: whitespace text nodes between tags are NavigableStrings
        # without a .get method and used to raise AttributeError here.
        if isinstance(child, bs4.element.NavigableString):
            continue
        child_class = child.get('class', '')
        child_class = ' '.join(child_class) if isinstance(child_class, list) else child_class
        if 'caption' in child_class.lower():
            caption = format_text(child.text)
        if 'table-wrap-foot' in child_class:
            footnotes = get_acs_footnote(child)
    tables = table_div.find_all('table')
    if tables:
        rows = get_html_table_rows(tables[0])
    else:
        rows = list()
    return Table(
        idx=table_id,
        caption=caption,
        rows=rows,
        footnotes=footnotes).format_rows()
def html_figure_extract_acs(html_figure):
    """Extract id and caption from an ACS html figure block."""
    idx = html_figure.get('id', '<EMPTY>')
    caption_text = html_figure.figcaption.text
    return Figure(idx=idx, caption=format_text(caption_text))
def html_table_extract_elsevier(table_div):
caption = ''
table_id = table_div.get('id', '<EMPTY>')
footnotes = list()
for child in table_div.children:
child_class = child.get('class', '')
child_class = ' '.join(child_class) if isinstance(child_class, list) else child_class
if 'captions' in child_class.lower():
caption = | |
handling #############################################
# see func_block_disable_msg.py test case for expected behaviour
def process_tokens(self, tokens):
    """Process tokens from the current module to search for module/block level
    options."""
    # Pragmas that change message state from a given line onwards.
    control_pragmas = {"disable", "enable"}
    prev_line = None
    # saw_newline: whether a NL/NEWLINE token separated the previous physical
    # line from the current one (False means a backslash continuation).
    saw_newline = True
    seen_newline = True
    for (tok_type, content, start, _, _) in tokens:
        if prev_line and prev_line != start[0]:
            # Line advanced: remember whether the previous line ended with a
            # real newline token, then start tracking the new line.
            saw_newline = seen_newline
            seen_newline = False
        prev_line = start[0]
        if tok_type in (tokenize.NL, tokenize.NEWLINE):
            seen_newline = True
        if tok_type != tokenize.COMMENT:
            continue
        match = OPTION_PO.search(content)
        if match is None:
            continue
        try:
            for pragma_repr in parse_pragma(match.group(2)):
                if pragma_repr.action in ("disable-all", "skip-file"):
                    # 'disable-all' is a deprecated spelling of 'skip-file'.
                    if pragma_repr.action == "disable-all":
                        self.add_message(
                            "deprecated-pragma",
                            line=start[0],
                            args=("disable-all", "skip-file"),
                        )
                    self.add_message("file-ignored", line=start[0])
                    self._ignore_file = True
                    return
                try:
                    meth = self._options_methods[pragma_repr.action]
                except KeyError:
                    meth = self._bw_options_methods[pragma_repr.action]
                    # found a "(dis|en)able-msg" pragma deprecated suppression
                    self.add_message(
                        "deprecated-pragma",
                        line=start[0],
                        args=(
                            pragma_repr.action,
                            pragma_repr.action.replace("-msg", ""),
                        ),
                    )
                for msgid in pragma_repr.messages:
                    # Add the line where a control pragma was encountered.
                    if pragma_repr.action in control_pragmas:
                        self._pragma_lineno[msgid] = start[0]
                    if (pragma_repr.action, msgid) == ("disable", "all"):
                        # 'disable=all' is a deprecated spelling of 'skip-file'.
                        self.add_message(
                            "deprecated-pragma",
                            line=start[0],
                            args=("disable=all", "skip-file"),
                        )
                        self.add_message("file-ignored", line=start[0])
                        self._ignore_file = True
                        return
                    # If we did not see a newline between the previous line and now,
                    # we saw a backslash so treat the two lines as one.
                    l_start = start[0]
                    if not saw_newline:
                        l_start -= 1
                    try:
                        meth(msgid, "module", l_start)
                    except exceptions.UnknownMessageError:
                        self.add_message(
                            "bad-option-value", args=msgid, line=start[0]
                        )
        except UnRecognizedOptionError as err:
            self.add_message(
                "unrecognized-inline-option", args=err.token, line=start[0]
            )
            continue
        except InvalidPragmaError as err:
            self.add_message("bad-inline-option", args=err.token, line=start[0])
            continue
# code checking methods ###################################################
def get_checkers(self):
    """return all available checkers as a list"""
    # The linter itself comes first, followed by every registered checker
    # (deduplicating the linter if it registered itself).
    checkers = [self]
    for registered in self._checkers.values():
        checkers.extend(c for c in registered if c is not self)
    return checkers
def get_checker_names(self):
    """Get all the checker names that this linter knows about."""
    # The main checker represents the linter itself and is excluded.
    names = {
        checker.name
        for checker in self.get_checkers()
        if checker.name != MAIN_CHECKER_NAME
    }
    return sorted(names)
def prepare_checkers(self):
    """return checkers needed for activated messages and reports"""
    if not self.config.reports:
        self.disable_reporters()
    # Keep the linter itself plus every checker that has at least one
    # enabled message or enabled report.
    needed_checkers = [self]
    for checker in self.get_checkers()[1:]:
        enabled_msgs = {msg for msg in checker.msgs if self.is_message_enabled(msg)}
        if enabled_msgs or any(self.report_is_enabled(rep[0]) for rep in checker.reports):
            needed_checkers.append(checker)
    # Highest priority checkers run first.
    return sorted(needed_checkers, key=operator.attrgetter("priority"), reverse=True)
# pylint: disable=unused-argument
@staticmethod
def should_analyze_file(modname, path, is_argument=False):
    """Returns whether or not a module should be checked.

    This implementation returns True for all python source file, indicating
    that all files should be linted.

    Subclasses may override this method to indicate that modules satisfying
    certain conditions should not be linted.

    :param str modname: The name of the module to be checked.
    :param str path: The full path to the source code of the module.
    :param bool is_argument: Whether the file is an argument to pylint or not.
                             Files which respect this property are always
                             checked, since the user requested it explicitly.
    :returns: True if the module should be checked.
    :rtype: bool
    """
    # Explicit command-line arguments are always analyzed.
    return True if is_argument else path.endswith(".py")
# pylint: enable=unused-argument
def initialize(self):
    """Initialize linter for linting

    This method is called before any linting is done.
    """
    # Pre-disable every registered message that can never be emitted so the
    # per-message state does not have to be recomputed during the run.
    never_emitted = (m for m in self.msgs_store.messages if not m.may_be_emitted())
    for message in never_emitted:
        self._msgs_state[message.msgid] = False
def check(self, files_or_modules):
    """main checking entry: check a list of files or modules from their name.

    files_or_modules is either a string or list of strings presenting modules to check.
    """
    self.initialize()
    if not isinstance(files_or_modules, (list, tuple)):
        files_or_modules = (files_or_modules,)
    if self.config.from_stdin:
        # stdin mode: exactly one (fake) filename must be supplied so module
        # name and reporting have something to attach to.
        if len(files_or_modules) != 1:
            raise exceptions.InvalidArgsError(
                "Missing filename required for --from-stdin"
            )
        filepath = files_or_modules[0]
        with fix_import_path(files_or_modules):
            # get_ast is pre-bound to the stdin contents.
            self._check_files(
                functools.partial(self.get_ast, data=_read_stdin()),
                [self._get_file_descr_from_stdin(filepath)],
            )
    elif self.config.jobs == 1:
        # Single-process checking.
        with fix_import_path(files_or_modules):
            self._check_files(
                self.get_ast, self._iterate_file_descrs(files_or_modules)
            )
    else:
        # Multi-process checking; parallel workers handle import-path setup.
        check_parallel(
            self,
            self.config.jobs,
            self._iterate_file_descrs(files_or_modules),
            files_or_modules,
        )
def check_single_file(self, name, filepath, modname):
    """Check single file

    The arguments are the same that are documented in _check_files

    The initialize() method should be called before calling this method
    """
    with self._astroid_module_checker() as check_module:
        self._check_file(self.get_ast, check_module, name, filepath, modname)
def _check_files(self, get_ast, file_descrs):
    """Check all files from file_descrs

    The file_descrs should be iterable of tuple (name, filepath, modname)
    where
    - name: full name of the module
    - filepath: path of the file
    - modname: module name
    """
    # One checker context is shared by every file in the batch.
    with self._astroid_module_checker() as check_astroid_module:
        for descr in file_descrs:
            name, filepath, modname = descr
            self._check_file(get_ast, check_astroid_module, name, filepath, modname)
def _check_file(self, get_ast, check_astroid_module, name, filepath, modname):
    """Check a file using the passed utility functions (get_ast and check_astroid_module)

    :param callable get_ast: callable returning AST from defined file taking the following arguments
    - filepath: path to the file to check
    - name: Python module name
    :param callable check_astroid_module: callable checking an AST taking the following arguments
    - ast: AST of the module
    :param str name: full name of the module
    :param str filepath: path to checked file
    :param str modname: name of the checked Python module
    """
    self.set_current_module(name, filepath)
    # get the module representation
    ast_node = get_ast(filepath, name)
    if ast_node is None:
        # Parsing failed; get_ast already reported the problem.
        return
    self._ignore_file = False
    # Fresh per-file message/suppression state.
    self.file_state = FileState(modname)
    # fix the current file (if the source file was not available or
    # if it's actually a c extension)
    self.current_file = ast_node.file  # pylint: disable=maybe-no-member
    check_astroid_module(ast_node)
    # warn about spurious inline messages handling
    spurious_messages = self.file_state.iter_spurious_suppression_messages(
        self.msgs_store
    )
    for msgid, line, args in spurious_messages:
        self.add_message(msgid, line, None, args)
@staticmethod
def _get_file_descr_from_stdin(filepath):
    """Return file description (tuple of module name, file path, base name) from given file path

    This method is used for creating suitable file description for _check_files when the
    source is standard input.
    """
    # Fallback module name: the bare file name without its extension.
    fallback = os.path.splitext(os.path.basename(filepath))[0]
    try:
        # Note that this function does not really perform an
        # __import__ but may raise an ImportError exception, which
        # we want to catch here.
        modname = ".".join(astroid.modutils.modpath_from_file(filepath))
    except ImportError:
        modname = fallback
    return (modname, filepath, filepath)
def _iterate_file_descrs(self, files_or_modules):
    """Return generator yielding file descriptions (tuples of module name, file path, base name)

    The returned generator yield one item for each Python module that should be linted.
    """
    for descr in self._expand_files(files_or_modules):
        name = descr["name"]
        filepath = descr["path"]
        if self.should_analyze_file(name, filepath, is_argument=descr["isarg"]):
            yield (name, filepath, descr["basename"])
def _expand_files(self, modules):
    """get modules and errors from a list of modules and handle errors"""
    result, errors = expand_modules(
        modules,
        self.config.black_list,
        self.config.black_list_re,
        self.config.ignore_list_paths_re,
    )
    for error in errors:
        modname = error["mod"]
        key = error["key"]
        self.set_current_module(modname)
        if key == "fatal":
            # Strip the working directory from the embedded exception text.
            message = str(error["ex"]).replace(os.getcwd() + os.sep, "")
        else:
            message = modname
        self.add_message(key, args=message)
    return result
def set_current_module(self, modname, filepath=None):
    """set the name of the currently analyzed module and
    init statistics for it
    """
    if not modname and filepath is None:
        return
    self.reporter.on_set_current_module(modname, filepath)
    self.current_name = modname
    self.current_file = filepath or modname
    # Fresh statistics bucket for this module: statement count plus one
    # counter per message category.
    module_stats = {"statement": 0}
    for msg_cat in MSG_TYPES.values():
        module_stats[msg_cat] = 0
    self.stats["by_module"][modname] = module_stats
@contextlib.contextmanager
def _astroid_module_checker(self):
    """Context manager for checking ASTs

    The value in the context is callable accepting AST as its only argument.
    """
    walker = ASTWalker(self)
    _checkers = self.prepare_checkers()
    # Token and raw checkers are dispatched separately from AST checkers;
    # the linter itself is excluded from the token checkers.
    tokencheckers = [
        c
        for c in _checkers
        if interfaces.implements(c, interfaces.ITokenChecker) and c is not self
    ]
    rawcheckers = [
        c for c in _checkers if interfaces.implements(c, interfaces.IRawChecker)
    ]
    # notify global begin
    for checker in _checkers:
        checker.open()
        if interfaces.implements(checker, interfaces.IAstroidChecker):
            walker.add_checker(checker)
    yield functools.partial(
        self.check_astroid_module,
        walker=walker,
        tokencheckers=tokencheckers,
        rawcheckers=rawcheckers,
    )
    # notify global end
    self.stats["statement"] = walker.nbstatements
    # Close in reverse of the open order.
    for checker in reversed(_checkers):
        checker.close()
def get_ast(self, filepath, modname, data=None):
    """Return an ast(roid) representation of a module or a string.

    :param str filepath: path to checked file.
    :param str modname: The name of the module to be checked.
    :param str data: optional contents of the checked file.
    :returns: the AST
    :rtype: astroid.nodes.Module
    """
    try:
        if data is None:
            # Build from the file on disk (source=True keeps the text).
            return MANAGER.ast_from_file(filepath, modname, source=True)
        # Build from the provided source string (stdin mode).
        return astroid.builder.AstroidBuilder(MANAGER).string_build(
            data, modname, filepath
        )
    except astroid.AstroidSyntaxError as ex:
        # pylint: disable=no-member
        self.add_message(
            "syntax-error",
            line=getattr(ex.error, "lineno", 0),
            col_offset=getattr(ex.error, "offset", None),
            args=str(ex.error),
        )
    except astroid.AstroidBuildingException as ex:
        self.add_message("parse-error", args=ex)
    except Exception as ex:  # pylint: disable=broad-except
        # Unexpected failure inside astroid: report it as a linter bug.
        traceback.print_exc()
        self.add_message("astroid-error", args=(ex.__class__, ex))
    # All failure paths fall through to None (caller skips the file).
    return None
def check_astroid_module(self, ast_node, walker, rawcheckers, tokencheckers):
"""Check a module from its astroid representation.
For return value see _check_astroid_module
"""
before_check_statements = walker.nbstatements
retval | |
ids = torch.LongTensor(ids)
sids = torch.LongTensor(sids)
tids = torch.LongTensor(tids)
lids = torch.LongTensor(lids)
return ids, sids, tids, lids, cls_label
else:
# test
return bundle['context'], bundle['responses'], bundle['label']
def save(self):
    """Serialize the preprocessed dataset to self.pp_path.

    Training mode also persists the response pool.  torch.save returns
    None, so its result is no longer bound to a variable (dead store
    removed).
    """
    if self.args['mode'] == 'train':
        torch.save((self.data, self.responses), self.pp_path)
    else:
        torch.save(self.data, self.pp_path)
    print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
    """Pad and batch training samples, or unwrap a single eval sample."""
    if self.args['mode'] != 'train':
        # test or valid set: the loader supplies exactly one sample.
        assert len(batch) == 1
        context, responses, label = batch[0]
        return {
            'context': context,
            'responses': responses,
            'label': label,
        }
    ids, sids, tids, lids, label = [], [], [], [], []
    for s_ids, s_sids, s_tids, s_lids, s_label in batch:
        ids.append(s_ids)
        sids.append(s_sids)
        tids.append(s_tids)
        lids.append(s_lids)
        label.append(s_label)
    ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
    sids = pad_sequence(sids, batch_first=True, padding_value=self.pad)
    tids = pad_sequence(tids, batch_first=True, padding_value=self.pad)
    # -100 padding is ignored by the token-level loss.
    lids = pad_sequence(lids, batch_first=True, padding_value=-100)
    label = torch.LongTensor(label)
    mask = generate_mask(ids)
    ids, sids, tids, lids, mask, label = to_cuda(ids, sids, tids, lids, mask, label)
    return {
        'ids': ids,
        'sids': sids,
        'tids': tids,
        'lids': lids,
        'mask': mask,
        'label': label,
    }
class BERTFTCompMultiCLSDataset(Dataset):
    # Fine-tuning dataset that packs one context together with
    # `compare_set_size` shuffled candidate responses into a single
    # sequence; the training label is the shuffled position of the
    # ground-truth response.

    def __init__(self, vocab, path, **args):
        self.args = args
        self.vocab = vocab
        self.vocab.add_tokens(['[EOS]'])
        # Special-token ids used when packing sequences.
        self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
        self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
        self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
        self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
        self.topk = args['gray_cand_num']
        self.compare_set_size = args['compare_set_size']
        suffix = args['tokenizer'].replace('/', '_')
        self.pp_path = f'{os.path.splitext(path)[0]}_ft_comp_multi_{suffix}.pt'
        # Reuse the cached preprocessed dataset when available.
        if os.path.exists(self.pp_path):
            if self.args['mode'] == 'train':
                self.data, self.responses = torch.load(self.pp_path)
            else:
                self.data = torch.load(self.pp_path)
            print(f'[!] load preprocessed file from {self.pp_path}')
            return None
        self.data = []
        if self.args['mode'] == 'train':
            # Training reads the BM25 hard-negative file next to `path`.
            path = f'{os.path.split(path)[0]}/train_bm25_gray.txt'
            data = read_bm25_hard_negative(path)
            responses, response_overlap = [], set()
            for item in tqdm(data):
                context, response, candidates = item['q'], item['r'], item['nr']
                ids = self.vocab.batch_encode_plus(context + [response], add_special_tokens=False)['input_ids']
                cids = []
                # sids alternate 0/1 per utterance to mark the speaker.
                sids, cache = [], 0
                for u in ids[:-1]:
                    cids.extend(u + [self.eos])
                    sids.extend([cache] * (len(u) + 1))
                    cache = 1 if cache == 0 else 0
                # Drop the trailing [EOS] and its speaker id.
                sids.pop()
                cids.pop()
                if self.args['no_inner_session_negative'] is False:
                    # Context utterances double as extra in-session negatives.
                    candidates += context
                if len(cids) == 0:
                    continue
                rids = ids[-1]
                # NOTE(review): rids is appended unconditionally and appended
                # again for unseen responses, so fresh responses appear twice
                # in the pool — looks intentional for sampling weight, confirm.
                responses.append(rids)
                if response not in response_overlap:
                    responses.append(rids)
                    response_overlap.add(response)
                self.data.append({
                    'context': cids,
                    'sids': sids,
                    'response': rids,
                    'candidates': candidates,
                })
            self.responses = responses
        else:
            # Evaluation data comes in blocks of 10 labelled candidates that
            # share the same context.
            data = read_text_data_utterances(path, lang=self.args['lang'])
            for i in tqdm(range(0, len(data), 10)):
                batch = data[i:i+10]
                responses = [b[1][-1] for b in batch]
                context = batch[0][1][:-1]
                self.data.append({
                    'label': [b[0] for b in batch],
                    'context': context,
                    'responses': responses,
                })

    def __len__(self):
        # Number of preprocessed samples.
        return len(self.data)

    def _packup(self, cids, sids, rids):
        # Pack [CLS] ctx [SEP] [unusedK] r_k [SEP] ... with speaker ids (sids)
        # and alternating segment ids (tids).
        ctx_max_length, res_max_length = self.args['ctx_max_length'], self.args['res_max_length']
        num = len(rids)  # NOTE(review): unused, kept as-is
        # length limitation
        rids = [i[:(res_max_length-2)] for i in rids]
        cids = cids[-(ctx_max_length-2):]
        sids = sids[-(ctx_max_length-2):]
        cids_ = [self.cls] + cids + [self.sep]
        sids_ = [sids[0]] + sids + [sids[-1]]
        tids_ = [0] * (len(cids) + 2)
        # Every candidate is attributed to the other speaker.
        other_speaker = 0 if sids[-1] == 1 else 1
        tcache = 1
        # concatenation
        for idx, r in enumerate(rids):
            # [unused1] ~ [unused10]
            cids_ += [idx + 1] + r + [self.sep]
            sids_ += [other_speaker] * (len(r) + 2)
            tids_ += [tcache] * (len(r) + 2)
            tcache = 0 if tcache == 1 else 1
        assert len(cids_) == len(sids_) == len(tids_)
        return cids_, sids_, tids_

    def __getitem__(self, i):
        bundle = self.data[i]
        if self.args['mode'] == 'train':
            cids, rids, sids = deepcopy(bundle['context']), deepcopy(bundle['response']), deepcopy(bundle['sids'])
            if self.args['no_hard_negative']:
                # Easy negatives only, drawn from the global response pool.
                hrids = random.sample(self.responses, self.topk)
            else:
                candidates = random.sample(
                    bundle['candidates'], self.topk
                )
                hrids = self.vocab.batch_encode_plus(candidates, add_special_tokens=False)['input_ids']
            # Ground truth + topk hard negatives + easy-negative fillers.
            rids = [rids] + random.sample(hrids, self.topk) + random.sample(self.responses, self.compare_set_size - self.topk - 1)
            random_idx = list(range(self.compare_set_size))
            random.shuffle(random_idx)
            # Label is the shuffled position of the ground-truth response.
            label = random_idx.index(0)
            rids = [rids[i] for i in random_idx]
            ids, sids, tids = self._packup(cids, sids, rids)
            ids = torch.LongTensor(ids)
            sids = torch.LongTensor(sids)
            tids = torch.LongTensor(tids)
            return ids, sids, tids, label
        else:
            # test
            return bundle['context'], bundle['responses'], bundle['label']

    def save(self):
        if self.args['mode'] == 'train':
            # NOTE(review): torch.save returns None; the assignment is a
            # dead store, kept byte-identical here.
            data = torch.save((self.data, self.responses), self.pp_path)
        else:
            data = torch.save(self.data, self.pp_path)
        print(f'[!] save preprocessed dataset into {self.pp_path}')

    def collate(self, batch):
        if self.args['mode'] == 'train':
            ids, sids, tids, label = [], [], [], []
            for a, b, c, d in batch:
                ids.append(a)
                sids.append(b)
                tids.append(c)
                label.append(d)
            ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
            sids = pad_sequence(sids, batch_first=True, padding_value=self.pad)
            tids = pad_sequence(tids, batch_first=True, padding_value=self.pad)
            label = torch.LongTensor(label)
            mask = generate_mask(ids)
            ids, sids, tids, label, mask = to_cuda(ids, sids, tids, label, mask)
            return {
                'ids': ids,
                'sids': sids,
                'tids': tids,
                'label': label,
                'mask': mask,
            }
        else:
            # test or valid set: the loader supplies exactly one sample.
            assert len(batch) == 1
            return {
                'context': batch[0][0],
                'responses': batch[0][1],
                'label': batch[0][2],
            }
class BERTFTCompTokenDataset(Dataset):
def __init__(self, vocab, path, **args):
    self.args = args
    self.vocab = vocab
    self.vocab.add_tokens(['[EOS]'])
    # Special-token ids used when packing sequences.
    self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
    self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
    self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
    self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
    self.topk = args['gray_cand_num']
    suffix = args['tokenizer'].replace('/', '_')
    self.pp_path = f'{os.path.splitext(path)[0]}_ft_comp_token_{suffix}.pt'
    # Reuse the cached preprocessed dataset when available.
    if os.path.exists(self.pp_path):
        if self.args['mode'] == 'train':
            self.data, self.responses = torch.load(self.pp_path)
        else:
            self.data = torch.load(self.pp_path)
        print(f'[!] load preprocessed file from {self.pp_path}')
        return None
    self.data = []
    if self.args['mode'] == 'train':
        # Training reads the BM25 hard-negative file next to `path`.
        path = f'{os.path.split(path)[0]}/train_bm25_gray.txt'
        data = read_bm25_hard_negative(path)
        responses, response_overlap = [], set()
        for item in tqdm(data):
            context, response, candidates = item['q'], item['r'], item['nr']
            ids = self.vocab.batch_encode_plus(context + [response], add_special_tokens=False)['input_ids']
            cids = []
            # sids alternate 0/1 per utterance to mark the speaker.
            sids, cache = [], 0
            for u in ids[:-1]:
                cids.extend(u + [self.eos])
                sids.extend([cache] * (len(u) + 1))
                cache = 1 if cache == 0 else 0
            # Drop the trailing [EOS] and its speaker id.
            sids.pop()
            cids.pop()
            if len(cids) == 0:
                continue
            rids = ids[-1]
            # NOTE(review): rids is appended unconditionally and again for
            # unseen responses, so fresh responses appear twice in the pool.
            responses.append(rids)
            if response not in response_overlap:
                responses.append(rids)
                response_overlap.add(response)
            self.data.append({
                'context': cids,
                'sids': sids,
                'response': rids,
                'candidates': candidates,
            })
        self.responses = responses
    else:
        # Evaluation data: blocks of 10 labelled candidates per context.
        data = read_text_data_utterances(path, lang=self.args['lang'])
        for i in tqdm(range(0, len(data), 10)):
            batch = data[i:i+10]
            responses = [b[1][-1] for b in batch]
            context = batch[0][1][:-1]
            self.data.append({
                'label': [b[0] for b in batch],
                'context': context,
                'responses': responses,
            })
def __len__(self):
    # Number of preprocessed samples.
    return len(self.data)
def _packup(self, cids, sids, rids1, rids2, label1, label2):
    # Build one pairwise comparison sample:
    #   [CLS] ctx [SEP] [unused1] r1 [SEP] [unused2] r2 [SEP]
    # with speaker ids, segment ids and token-level labels (label1/label2 sit
    # on the [unusedN] marker of each candidate, everything else is -100).
    cids_, sids_, rids1_, rids2_ = deepcopy(cids), deepcopy(sids), deepcopy(rids1), deepcopy(rids2)
    # Truncate context and both candidates jointly to fit max_len.
    truncate_pair_two_candidates(
        cids_, rids1_, rids2_,
        self.args['max_len'],
        sids=sids_,
    )
    # Both candidates are attributed to the other speaker.
    other_speaker = 0 if sids_[-1] == 1 else 1
    cids__ = [self.cls] + cids_ + [self.sep] + [1] + rids1_ + [self.sep] + [2] + rids2_ + [self.sep]
    sids__ = [sids_[0]] + sids_ + [sids_[-1]] + [other_speaker] * (len(rids1_) + len(rids2_) + 4)
    tids__ = [0] * (len(cids_) + 2) + [1] * (len(rids1_) + 2) + [0] * (len(rids2_) + 2)
    tlids__ = [-100] * (len(cids_) + 2) + [label1] + [-100] * (len(rids1_) + 1) + [label2] + [-100] * (len(rids2_) + 1)
    assert len(tids__) == len(sids__) == len(cids__) == len(tlids__)
    return cids__, tids__, sids__, tlids__
def __getitem__(self, i):
    bundle = self.data[i]
    if self.args['mode'] == 'train':
        cids, rids = bundle['context'], bundle['response']
        speaker_ids = bundle['sids']
        if self.args['no_hard_negative']:
            # Easy negatives only, drawn from the global response pool.
            hrids = random.sample(self.responses, self.topk)
        else:
            if self.topk > len(bundle['candidates']):
                # Not enough hard negatives: take them all and fill up with
                # random easy negatives from the response pool.
                candidates = bundle['candidates']
                if candidates:
                    hrids = self.vocab.batch_encode_plus(candidates, add_special_tokens=False)['input_ids']
                else:
                    hrids = []
                hrids += random.sample(self.responses, self.topk - len(candidates))
            else:
                candidates = random.sample(bundle['candidates'], self.topk)
                hrids = self.vocab.batch_encode_plus(candidates, add_special_tokens=False)['input_ids']
        ids, sids, tids, tlids = [], [], [], []
        # positive vs. easy negative (candidate order randomized per pair)
        for _ in range(self.topk):
            e = random.choice(self.responses)
            if random.random() > 0.5:
                ids_, tids_, sids_, tlids_ = self._packup(cids, speaker_ids, rids, e, 1, 0)
            else:
                ids_, tids_, sids_, tlids_ = self._packup(cids, speaker_ids, e, rids, 0, 1)
            ids.append(ids_)
            sids.append(sids_)
            tids.append(tids_)
            tlids.append(tlids_)
        # positive negatives vs. bm25 hard negative
        for _ in range(self.topk):
            h = random.choice(hrids)
            if random.random() > 0.5:
                ids_, tids_, sids_, tlids_ = self._packup(cids, speaker_ids, rids, h, 1, 0)
            else:
                ids_, tids_, sids_, tlids_ = self._packup(cids, speaker_ids, h, rids, 0, 1)
            ids.append(ids_)
            sids.append(sids_)
            tids.append(tids_)
            tlids.append(tlids_)
        # easy neg vs. easy neg.
        for _ in range(self.topk):
            e1, e2 = random.sample(self.responses, 2)
            ids_, tids_, sids_, tlids_ = self._packup(cids, speaker_ids, e1, e2, 0, 0)
            ids.append(ids_)
            sids.append(sids_)
            tids.append(tids_)
            tlids.append(tlids_)
        # whole samples
        ids = [torch.LongTensor(i) for i in ids]
        sids = [torch.LongTensor(i) for i in sids]
        tids = [torch.LongTensor(i) for i in tids]
        tlids = [torch.LongTensor(i) for i in tlids]
        return ids, sids, tids, tlids
    else:
        # test
        return bundle['context'], bundle['responses'], bundle['label']
def save(self):
    """Serialize the preprocessed dataset to self.pp_path.

    Training mode also persists the response pool.  torch.save returns
    None, so its result is no longer bound to a variable (dead store
    removed).
    """
    if self.args['mode'] == 'train':
        torch.save((self.data, self.responses), self.pp_path)
    else:
        torch.save(self.data, self.pp_path)
    print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
if self.args['mode'] == 'train':
ids, sids, tids, tlids = [], [], [], []
for b in batch:
ids.extend(b[0])
sids.extend(b[1])
tids.extend(b[2])
tlids.extend(b[3])
return {
'ids': ids,
'sids': sids,
'tids': | |
from django.test import TestCase
from tests.models import Invoice
from django_logic import Process, Transition
from django_logic.exceptions import TransitionNotAllowed
class User:
    """Minimal stand-in for a Django user used by the permission tests."""
    def __init__(self, is_allowed=True, is_staff=False):
        # Plain attributes; permission helpers below read them directly.
        self.is_allowed, self.is_staff = is_allowed, is_staff
def allowed(instance, user):
    """Permission: grants access to allowed, non-staff users."""
    return (not user.is_staff) and user.is_allowed
def is_staff(instance, user):
    """Permission: grants access to staff users only."""
    staff_flag = user.is_staff
    return staff_flag
def disallow(instance, user):
    """Permission that always denies access, regardless of user."""
    return False
def is_editable(instance):
    """Condition: an invoice stays editable until the customer received it."""
    received = instance.customer_received
    return not received
def is_available(instance):
    """Condition: mirrors the instance's own availability flag."""
    availability = instance.is_available
    return availability
def not_available(instance):
    """Condition that always fails, used to block transitions in tests."""
    return False
def disable_invoice(invoice: Invoice, *args, **kwargs):
    """Transition side effect: mark the invoice unavailable and unreceived, then persist it."""
    invoice.is_available = invoice.customer_received = False
    invoice.save()
def update_invoice(invoice, is_available, customer_received, *args, **kwargs):
    """Transition side effect: copy both flags onto the invoice and persist it."""
    invoice.customer_received = customer_received
    invoice.is_available = is_available
    invoice.save()
def enable_invoice(invoice: Invoice, *args, **kwargs):
    """Transition side effect: flag the invoice as available again and persist it."""
    invoice.is_available = True
    invoice.save()
def fail_invoice(invoice: Invoice, *args, **kwargs):
    """Transition side effect that always blows up, for failure-path tests."""
    raise Exception
class ValidateProcessTestCase(TestCase):
    """Tests for ``Process.is_valid()``.

    Each test declares a Process subclass with some combination of
    ``permissions`` (callables taking (instance, user)) and ``conditions``
    (callables taking (instance,)) and checks validity both without a user
    (permissions are skipped) and with ``self.user``.
    """
    def setUp(self) -> None:
        self.user = User()
    def test_pure_process(self):
        # No permissions or conditions declared: always valid.
        class MyProcess(Process):
            pass
        process = MyProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertTrue(process.is_valid())
        self.assertTrue(process.is_valid(self.user))
    def test_empty_permissions(self):
        # An explicitly empty permissions list behaves like no permissions.
        class MyProcess(Process):
            permissions = []
        self.assertTrue(MyProcess('state', instance=Invoice(status='draft')).is_valid())
        self.assertTrue(MyProcess('state', instance=Invoice(status='draft')).is_valid(self.user))
    def test_permissions_successfully(self):
        class MyProcess(Process):
            permissions = [allowed]
        self.assertTrue(MyProcess('state', instance=Invoice(status='draft')).is_valid())
        self.assertTrue(MyProcess('state', instance=Invoice(status='draft')).is_valid(self.user))
    def test_permission_fail(self):
        # Permissions only apply when a user is passed; without one the
        # process still validates.
        self.user.is_allowed = False
        class MyProcess(Process):
            permissions = [allowed]
        process = MyProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertTrue(process.is_valid())
        self.assertFalse(process.is_valid(self.user))
        # One failing permission in the list is enough to deny.
        class AnotherProcess(Process):
            permissions = [allowed, disallow]
        process = AnotherProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertTrue(process.is_valid())
        self.assertFalse(process.is_valid(self.user))
    def test_empty_conditions(self):
        class MyProcess(Process):
            conditions = []
        process = MyProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertTrue(process.is_valid(self.user))
    def test_conditions_successfully(self):
        class MyProcess(Process):
            conditions = [is_editable]
        process = MyProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertTrue(process.is_valid())
        self.assertTrue(process.is_valid(self.user))
    def test_conditions_fail(self):
        # Conditions are user-independent: they fail with and without a user.
        class MyProcess(Process):
            conditions = [not_available]
        process = MyProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertFalse(process.is_valid())
        self.assertFalse(process.is_valid(self.user))
        # A condition can also fail through instance state.
        class AnotherProcess(Process):
            conditions = [is_editable]
        instance = Invoice(status='draft')
        instance.customer_received = True
        process = AnotherProcess(field_name='status', instance=instance)
        self.assertFalse(process.is_valid())
        self.assertFalse(process.is_valid(self.user))
    def test_permissions_and_conditions_successfully(self):
        class MyProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
        process = MyProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertTrue(process.is_valid())
        self.assertTrue(process.is_valid(self.user))
    def test_permissions_and_conditions_fail(self):
        # Failing permissions only affect the with-user check ...
        class MyProcess(Process):
            permissions = [allowed, disallow]
            conditions = [is_editable]
        process = MyProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertTrue(process.is_valid())
        self.assertFalse(process.is_valid(self.user))
        # ... while failing conditions affect both checks.
        class AnotherProcess(Process):
            permissions = [allowed]
            conditions = [is_editable, not_available]
        process = AnotherProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertFalse(process.is_valid())
        self.assertFalse(process.is_valid(self.user))
        class FinalProcess(Process):
            permissions = [allowed, disallow]
            conditions = [is_editable, not_available]
        process = FinalProcess(field_name='status', instance=Invoice(status='draft'))
        self.assertFalse(process.is_valid())
        self.assertFalse(process.is_valid(self.user))
    def test_getattr_is_valid_name_and_transition(self):
        # A transition named 'is_valid' must not shadow the method of the
        # same name nor be executed by calling it.
        class MyProcess(Process):
            transitions = [Transition('is_valid', sources=['draft'], target='valid')]
        invoice = Invoice.objects.create(status='draft')
        process = MyProcess(instance=invoice, field_name='status')
        # transition shouldn't be executed
        self.assertTrue(process.is_valid())
        invoice.refresh_from_db()
        self.assertEqual(invoice.status, 'draft')
class GetAvailableActionsTestCase(TestCase):
    """Tests for ``Process.get_available_actions()``.

    Checks that action names are de-duplicated and returned sorted.
    """
    def setUp(self) -> None:
        self.user = User()
    def test_get_actions_with_the_same_name(self):
        # Two transitions sharing an action name yield a single entry.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['draft'], target='closed')
        class ChildProcess(Process):
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(process.get_available_actions(), ['action'])
    def test_get_sorted_list(self):
        # Action names come back alphabetically sorted.
        transition1 = Transition('cancel', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['draft'], target='closed')
        transition3 = Transition('bulk_action', sources=['draft'], target='closed')
        class ChildProcess(Process):
            transitions = [transition1, transition2, transition3]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(process.get_available_actions(), ['action', 'bulk_action', 'cancel'])
class GetAvailableTransitionsTestCase(TestCase):
    """Tests for ``Process.get_available_transitions()``.

    A transition is available when its source matches the instance's current
    state AND all permissions/conditions pass, at the process level, at the
    transition level, and on every level of ``nested_processes``.
    """
    def setUp(self) -> None:
        self.user = User()
    def test_pure_process(self):
        # No transitions declared: nothing is available.
        class ChildProcess(Process):
            pass
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions()), [])
    def test_process(self):
        # Availability is filtered by the instance's current state.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions()), [transition1])
        process = ChildProcess(instance=Invoice.objects.create(status='done'), field_name='status')
        self.assertEqual(list(process.get_available_transitions()), [transition2])
        process = ChildProcess(instance=Invoice.objects.create(status='closed'), field_name='status')
        self.assertEqual(list(process.get_available_transitions()), [])
    def test_process_fail(self):
        # A failing process-level condition hides every transition.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            conditions = [not_available]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions()), [])
        process = ChildProcess(instance=Invoice.objects.create(status='done'), field_name='status')
        self.assertEqual(list(process.get_available_transitions()), [])
        process = ChildProcess(instance=Invoice.objects.create(status='closed'), field_name='status')
        self.assertEqual(list(process.get_available_transitions()), [])
    def test_conditions_and_permissions_successfully(self):
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            conditions = [is_editable]
            permissions = [allowed]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        process = ChildProcess(instance=Invoice.objects.create(status='done'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition2])
        process = ChildProcess(instance=Invoice.objects.create(status='closed'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
    def test_conditions_and_permissions_fail(self):
        # A failing process-level permission hides every transition for the user.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            conditions = [is_editable]
            permissions = [disallow]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
        process = ChildProcess(instance=Invoice.objects.create(status='done'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
        process = ChildProcess(instance=Invoice.objects.create(status='closed'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
    def test_nested_process_permissions_successfully(self):
        # Permissions must pass on every nesting level to expose transitions.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            permissions = [allowed]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class ParentProcess(Process):
            permissions = [allowed]
            nested_processes = (ChildProcess,)
        process = ParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class GrandParentProcess(Process):
            permissions = [allowed]
            nested_processes = (ParentProcess,)
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
    def test_nested_process_permissions_fail(self):
        # A denied child hides its transitions even behind permitted parents.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            permissions = [disallow]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
        class ParentProcess(Process):
            permissions = [allowed]
            nested_processes = (ChildProcess,)
        process = ParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
        class GrandParentProcess(Process):
            permissions = [allowed]
            nested_processes = (ParentProcess,)
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
    def test_nested_process_conditions_successfully(self):
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            conditions = [is_editable]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class ParentProcess(Process):
            conditions = [is_editable]
            nested_processes = (ChildProcess,)
        process = ParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class GrandParentProcess(Process):
            conditions = [is_editable]
            nested_processes = (ParentProcess,)
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
    def test_nested_process_conditions_fail(self):
        # A failing condition in the middle of the chain blocks the leaves.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            conditions = [is_editable]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class ParentProcess(Process):
            conditions = [not_available]
            nested_processes = (ChildProcess,)
        process = ParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
        class GrandParentProcess(Process):
            conditions = [is_editable]
            nested_processes = (ParentProcess,)
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
    def test_nested_process_successfully(self):
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class ParentProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            nested_processes = (ChildProcess,)
        process = ParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class GrandParentProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            nested_processes = (ParentProcess,)
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
    def test_nested_process_fail(self):
        # A failing condition at the outermost level blocks everything below.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        class ChildProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            transitions = [transition1, transition2]
        process = ChildProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class ParentProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            nested_processes = (ChildProcess,)
        process = ParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [transition1])
        class GrandParentProcess(Process):
            permissions = [allowed]
            conditions = [not_available]
            nested_processes = (ParentProcess,)
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        self.assertEqual(list(process.get_available_transitions(self.user)), [])
    def test_nested_process_with_nested_transitions_successfully(self):
        # Every level may contribute its own transitions; all matching the
        # current state are returned (order unspecified, hence assertIn).
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        transition3 = Transition('action', sources=['draft'], target='approved')
        transition4 = Transition('action', sources=['done'], target='closed')
        transition5 = Transition('action', sources=['draft'], target='declined')
        class ChildProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            transitions = [transition1, transition2]
        class ParentProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            nested_processes = (ChildProcess,)
            transitions = [transition3, transition4]
        class GrandParentProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            nested_processes = (ParentProcess,)
            transitions = [transition5]
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        for transition in process.get_available_transitions(self.user):
            self.assertIn(transition, [transition1, transition3, transition5])
        process = GrandParentProcess(instance=Invoice.objects.create(status='done'), field_name='status')
        for transition in process.get_available_transitions(self.user):
            self.assertIn(transition, [transition2, transition4])
    def test_nested_process_with_nested_transitions_fail(self):
        # The denied child's transitions are excluded; the parents' remain.
        transition1 = Transition('action', sources=['draft'], target='done')
        transition2 = Transition('action', sources=['done'], target='closed')
        transition3 = Transition('action', sources=['draft'], target='approved')
        transition4 = Transition('action', sources=['done'], target='closed')
        transition5 = Transition('action', sources=['draft'], target='declined')
        class ChildProcess(Process):
            permissions = [disallow]
            conditions = [is_editable]
            transitions = [transition1, transition2]
        class ParentProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            nested_processes = (ChildProcess,)
            transitions = [transition3, transition4]
        class GrandParentProcess(Process):
            permissions = [allowed]
            conditions = [is_editable]
            nested_processes = (ParentProcess,)
            transitions = [transition5]
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        for transition in process.get_available_transitions(self.user):
            self.assertIn(transition, [transition3, transition5])
        process = GrandParentProcess(instance=Invoice.objects.create(status='done'), field_name='status')
        for transition in process.get_available_transitions(self.user):
            self.assertIn(transition, [transition4])
    def test_nested_process_with_nested_transitions_conditions_and_permissions_successfully(self):
        # Permissions/conditions attached to the transitions themselves.
        transition1 = Transition('action', permissions=[allowed], conditions=[is_editable],
                                 sources=['draft'],
                                 target='done')
        transition2 = Transition('action', permissions=[allowed], conditions=[is_editable],
                                 sources=['done'],
                                 target='closed')
        transition3 = Transition('action',
                                 permissions=[allowed],
                                 conditions=[is_editable],
                                 sources=['draft'],
                                 target='approved')
        transition4 = Transition('action',
                                 permissions=[allowed],
                                 conditions=[is_editable],
                                 sources=['done'],
                                 target='closed')
        transition5 = Transition('action',
                                 permissions=[allowed],
                                 conditions=[is_editable],
                                 sources=['draft'],
                                 target='declined')
        class ChildProcess(Process):
            transitions = [transition1, transition2]
        class ParentProcess(Process):
            nested_processes = (ChildProcess,)
            transitions = [transition3, transition4]
        class GrandParentProcess(Process):
            nested_processes = (ParentProcess,)
            transitions = [transition5]
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        for transition in process.get_available_transitions(self.user):
            self.assertIn(transition, [transition1, transition3, transition5])
        process = GrandParentProcess(instance=Invoice.objects.create(status='done'), field_name='status')
        for transition in process.get_available_transitions(self.user):
            self.assertIn(transition, [transition2, transition4])
    def test_nested_process_with_nested_transitions_conditions_and_permissions_fail(self):
        # Transition-level failures filter individual transitions only.
        transition1 = Transition('action',
                                 permissions=[allowed],
                                 conditions=[is_editable],
                                 sources=['draft'],
                                 target='done')
        transition2 = Transition('action',
                                 permissions=[disallow],
                                 conditions=[is_editable],
                                 sources=['done'],
                                 target='closed')
        transition3 = Transition('action',
                                 permissions=[allowed],
                                 conditions=[not_available],
                                 sources=['draft'],
                                 target='approved')
        transition4 = Transition('action',
                                 permissions=[allowed],
                                 conditions=[is_editable],
                                 sources=['done'],
                                 target='closed')
        transition5 = Transition('action',
                                 permissions=[disallow],
                                 conditions=[not_available],
                                 sources=['draft'],
                                 target='declined')
        class ChildProcess(Process):
            transitions = [transition1, transition2]
        class ParentProcess(Process):
            nested_processes = (ChildProcess,)
            transitions = [transition3, transition4]
        class GrandParentProcess(Process):
            nested_processes = (ParentProcess,)
            transitions = [transition5]
        process = GrandParentProcess(instance=Invoice.objects.create(status='draft'), field_name='status')
        for transition in process.get_available_transitions(self.user):
            self.assertIn(transition, [transition1])
        process = GrandParentProcess(instance=Invoice.objects.create(status='done'), field_name='status')
        for transition in process.get_available_transitions(self.user):
            self.assertIn(transition, [transition4])
    def test_getattr_get_available_transition_name_and_transition(self):
        # A transition named like the method must not shadow or trigger it.
        class MyProcess(Process):
            transitions = [Transition('get_available_transition', sources=['draft'], target='valid')]
        invoice = Invoice.objects.create(status='draft')
        process = MyProcess(instance=invoice, field_name='status')
        # transition shouldn't be executed
        self.assertEqual(list(process.get_available_transitions()), MyProcess.transitions)
        invoice.refresh_from_db()
        self.assertEqual(invoice.status, 'draft')
    def test_get_non_existing_transition(self):
        # Accessing an undeclared action raises TransitionNotAllowed.
        class MyProcess(Process):
            transitions = [Transition('validate', sources=['draft'], target='valid')]
        invoice = Invoice.objects.create(status='draft')
        process = MyProcess(instance=invoice, field_name='status')
        with self.assertRaises(TransitionNotAllowed):
            process.test()
class ApplyTransitionTestCase(TestCase):
def | |
from the extension of the given filename.
>>> getFileType('test.cpp') == FILETYPE_CPP
True
>>> getFileType('path/file.h') == FILETYPE_CPP
True
>>> getFileType('test.dddoc') == FILETYPE_DDDOC
True
Args:
filename Filename to parse.
Returns:
One of {FILETYPE_CPP, FILETYPE_DDDOC, FILETYPE_OTHER}, depending
on the extension of filename.
"""
# Get file extension.
base, ext = os.path.splitext(filename)
if ext[1:] in CPP_EXTS:
return FILETYPE_CPP
elif ext[1:] in DDDOC_EXTS:
return FILETYPE_DDDOC
else:
return FILETYPE_OTHER
def _loadFile(self, filename):
"""Load the file with the given filename.
The line is then split into DDDoc entries, unwrapping entries that span
more than one line. Finally, the keys are expanded, and surrounding
whitespace is stripped.
"""
## print filename
# Load file contents, through a cache.
file_type = self._getFileType(filename)
if file_type == FILETYPE_CPP:
text = self._loadCPPFile(filename, self.cache)
elif file_type == FILETYPE_DDDOC:
text = self._loadDDDOCFile(filename, self.cache)
else:
raise Error("Unknown file type of file %s." % path)
text.append('.')
## print 'LOADING', filename
## print '\n'.join(text)
# Process all lines in the input, join lines that do not begin with a
# dot with the previous ones. This allows the wrapping of lines.
str = False
dddoc_entries = [] # [(path, filename, begin line no, end line no)]
line_no_begin, line_no_end = 1, 1
for line in text:
## if line and line != '.':
## print 'LINE', line
line_no_end += 1
if not line:
continue
if line[0] == '.':
if str is not False and str[0] == '.' and str != '.' and str.strip(): # Skip empty dummy lines.
dddoc_entries.append([str, filename, line_no_begin, line_no_end])
## print dddoc_entries[-1]
line_no_begin = line_no_end
str = line
if str == '.':
str = False
elif str:
if str[-1] != '\n':
str += '\n'
str += line
# Now, expand the keys of dddoc_entries, e.g. dddoc_entries[i][0].
# TODO(holtgrew): Consider escaping of keys here.
stack = []
stack_len_sum = 0
for entry in dddoc_entries:
## print 'ENTRY', entry
## print 'stack=%s' % (stack)
# Split out $key:$value of the entry and $the.$path.$elements from $key.
maybe_pair = splitKeys(entry[0].strip(), ':', 1)
if len(maybe_pair) == 2:
key, value = splitKeys(entry[0].strip(), ':', 1)
else:
key, value = entry[0].strip(), ''
path = splitKeys(key, '.')[1:]
# Count empty entries in the path.
## print ' ', path
empty_count = reduce(operator.add, [1 for x in path if not x], 0)
## print ' empty_count', empty_count
if empty_count <= len(stack):
stack = stack[:empty_count]
stack_len_sum = reduce(operator.add, list(map(len, stack)), 0)
stack.append(path[empty_count:])
stack_len_sum += len(stack[-1])
path = reduce(operator.add, stack, [])
# Remove any leading and trailing whitespace from value and compute
# updated begin and end line no.
line_count = len(value.splitlines())
value_no_leading = value.lstrip()
line_count2 = len(value_no_leading.splitlines())
line_no_begin = entry[2] + line_count - line_count2
value_no_trailing = value_no_leading.rstrip()
line_count3 = len(value_no_trailing.splitlines())
line_no_end = entry[3] - line_count2 + line_count3
# Store the DDDoc entry.
if path:
self.entries.append(DddocEntry(cleanPath(path), value_no_trailing, filename, line_no_begin, line_no_end))
new_entries = transformDddocEntry(self.entries[-1])
## if new_entries:
## print 'NEW ENTRIES', new_entries
self.entries += new_entries
## print self.entries[-1]
def run(self, search_path):
"""Call parseFile() on files.
All files below search_path will be searched that have file type
FILETYPE_CPP or FILETYPE_DOC as determined by getFileType().
Directories with names of IGNORED_DIRS are skipped.
Args:
search_path String, path to search files under.
"""
for root, dirs, files in os.walk(search_path):
# Parse all files.
for file in files:
if os.path.basename(file).startswith('.'):
continue # Skipp hidden files.
path = os.path.join(root, file)
if self._getFileType(path) in [FILETYPE_CPP, FILETYPE_DDDOC]:
self._loadFile(path)
# Exclude ignored diretories.
for ignored in IGNORED_DIRS:
if ignored in dirs:
dirs.remove(ignored)
class DddocTreeNode(object):
    """Represents one entry in the DddocTree.

    Attrs:
      tree      The DddocTree that the node belongs to.
      key       The key of this child, last element of path.
      path      The full path to the child.
      entry     Range [beg, end) of DddocEntry that this node represents.
      children  dict with the children as key/value pairs.
      texts     Array of strings with the texts.
    """
    def __init__(self, tree, key, path, entry, children=None):
        self.tree = tree
        self.key = key
        self.path = path
        self.entry = entry
        # Bug fix: the default used to be a literal {}, which Python shares
        # between all calls (mutable default argument).  Create a fresh dict
        # per node instead; passing an explicit dict still works as before.
        self.children = {} if children is None else children
        self.texts = []
    def text(self, spacer=' '):
        """Return all collected text fragments joined by *spacer*."""
        return spacer.join(self.texts)
    def __str__(self):
        """Returns dump for the whole tree in a user-readable manner."""
        def _str(node, level=0, prefix=''):
            space = ' ' * level
            if prefix:
                prefix = prefix + ' --> '
            res = '%s %sDddocTreeNode(key=%s, texts=%s)' % (space, prefix, repr(node.key), repr(node.texts))
            for k, child in node.children.items():
                res += '\n' + _str(child, level + 1, k)
            return res
        return _str(self)
    def dump(self, stream=sys.stdout):
        """Debug recursive dumping of a tree node."""
        print(self, file=stream)
class DddocTree(object):
    """Tree with the information from the DDDoc contents.

    Attrs:
      entries         The raw DddocEntry list (assumed sorted by path so that
                      entries of one subtree are contiguous).
      root            The root DddocTreeNode.
      cache           Optional dict mapping dotted path strings to nodes,
                      enabled by _enableFindCache()/finalize().
      glossary_nodes  List of nodes that contain glossary entries.  Built
                      in finalize().
    """
    def __init__(self, entries):
        self.entries = entries
        #for e in self.entries:
        #    print e
        # Build the whole tree eagerly from the flat, sorted entry list.
        self.root = DddocTreeNode(self, 'ROOT', [], (0, 0), self._buildSubtree([], 0, len(entries), 0))
        self.cache = None
        self.glossary_nodes = []
        ## self.root.dump()
        ## for entry in self.entries:
        ##     print entry.path, entry.content
    def _enableFindCache(self):
        # Lazily create the find() memoization dict.
        if self.cache is None:
            self.cache = {}
    def finalize(self):
        """Called after tree will not be modified any more.
        Enables caching and builds some indices.
        """
        self._enableFindCache()
        print('Indexing Glossary Pages', file=sys.stderr)
        # Collect every Page.*.glossary node for later term lookup.
        if 'Page' in self.root.children:
            for key, node in self.root.children['Page'].children.items():
                if 'glossary' in node.children:
                    self.glossary_nodes.append(node.children['glossary'])
                    print(' Found Page.%s' % node.key, file=sys.stderr)
    def _buildSubtree(self, path, begin_index, end_index, level):
        # Recursively build the children dict for entries[begin_index:end_index],
        # grouping by path component at position `level`.
        # First, identify the entries belonging to each node (entry.path[i] are
        # equal for i = level, inductively, also i <= level).
        prev_key = None
        prev_beg = None
        subseqs = []
        for i in range(begin_index, end_index):
            if prev_key != self.entries[i].path[level]:
                if prev_key != None:
                    subseqs.append((prev_beg, i))
                prev_key = self.entries[i].path[level]
                prev_beg = i
        if prev_key != None and prev_beg != end_index: # Handle last.
            subseqs.append((prev_beg, end_index))
        # Now, subseqs contains a sequence of contiguous half-open intervals.
        # Each contains the data for one tree node. There is a possibly empty
        # sequence of leading entries with paths of length level + 1 containing
        # the data for the current level node. The rest is for the level below.
        result = {}
        for (b, c) in subseqs:
            assert b != c
            # Split into entries for this and for next level: [a, b); [b, c).
            a = b # [a, b) will be for this vertex.
            while b < c and len(self.entries[b].path) == level + 1:
                b += 1
            # Compute node.
            path = self.entries[a].path[:(level + 1)]
            key = path[level]
            node = DddocTreeNode(self, key, path, (a, b))
            ## print 'new node', key
            for i in range(a, b):
                if self.entries[i].content:
                    node.texts.append(self.entries[i].content)
            # Compute subtree.
            node.children = self._buildSubtree(path, b, c, level + 1)
            result[key] = node
        return result
    def find(self, path):
        """Query tree for a DddocTreeNode.
        The argument path can either be a dot-separated string or a list with
        this information. If path is a string then one optional leading dot is
        optional. Returns None if nothing could be found.
        tree.find(['path', 'to', 'node'])
        tree.find('path.to.node')
        tree.find('.path.to.node')
        """
        # Try to retrieve from cache if there is a cache.
        # NOTE(review): a string path with a leading dot is looked up verbatim
        # here but stored under the stripped key below, so '.a.b' lookups
        # never hit the cache -- confirm whether that is intended.
        if not self.cache is None:
            if not type(path) is str:
                key = '.'.join(path)
            else:
                key = path
            if key in self.cache:
                return self.cache[key]
        # Split path if is string, ignore leading dot if any.
        if type(path) is str:
            path = splitKeys(path, '.')
            if path and path[0] == '':
                path = path[1:]
        # Now, query the tree.
        def findRecurse(node, path):
            """Helper function that searches for the node with given path."""
            if not path:
                return node
            if path[0] not in node.children:
                return None
            return findRecurse(node.children[path[0]], path[1:])
        res = findRecurse(self.root, path)
        if not self.cache is None:
            self.cache['.'.join(path)] = res
        return res
# Paths where the inline summary is moved into a .summary child. See
# documentation of processInlineSummaries() for details.
# NOTE(review): '*' appears to act as a wildcard for a single dot-separated
# path component -- confirm against processInlineSummaries().
SUMMARY_PATHS = [
    '*.*.param.*',
    '*.*.returns',
    '*.*.tag.*',
    '*.*.value.*',
    '*.*.returns.param.*',  # TODO(holtgrew): Used for metafunctions, could be improved.
    'Adaption.*',
    'Class.*',
    'Concept.*',
    'Demo.*',
    'Enum.*',
    'Function.*',
    'Macro.*',
    'Memfunc.*',
    'Metafunction.*',
    'Shortcut.*',
    'Spec.*',
    'Tag.*',
]
# TODO(holtgrew): Also use for generateAutomaticReferences()
def _matchTreesInNode(tree, node, path, func, block_paths=[['globals']], level=0):
"""Calls func on | |
"_i" in first_layer or "_o" in first_layer:
mp_cost["Message Size"] += 0
else:
mp_cost["Message Size"] += first_node[first_layer]["ifmap"]
if "resnet" in spl["net_name"]:
end_name = None
if "resnet50" in spl["net_name"]:
start_name = 'conv2_1_a'
end_name = "conv5_3_c"
mp_cost["is_resnet"] = True
residual = []
for node, node_data in spl["mapping"].items():
for layer_name, layer_data in node_data.items():
if "conv1" in layer_name or "_c" in layer_name:
layer_full_name = layer_name
if "part" in layer_name:
layer_full_name = layer_name[:layer_name.index("part")-1]
if len(residual) == 0 or residual[-1][0] != layer_full_name:
residual += [[layer_full_name, layer_data["ofmap"], None, 0],] # [output_layer_name, output_Size, matching_input_layer_name, nodes_passed]
else:
residual[-1][1] += layer_data["ofmap"]
if ("_a" in layer_name or "fc1" in layer_name) and start_name not in layer_name:
layer_full_name = layer_name
if "part" in layer_name:
layer_full_name = layer_name[:layer_name.index("part")-1]
if residual[0][2] is not None and residual[0][2] != layer_full_name:
residual = residual[1:]
residual[0][2] = layer_full_name
mp_cost["Non-bypassing Residual Message Size"] += residual[0][3] * residual[0][1]
mp_cost["Bypassing Residual Message Size"] += residual[0][1] if residual[0][3] != 0 else 0
print(residual[0][0] + " passed to " + layer_name + " with res size of " + str(residual[0][1]) + " with " + str(residual[0][3]) + " nodes apart.")
for i in range(len(residual)):
if residual[i][2] is None or residual[i][3] < 1:
residual[i][3] += 1
# loop through all the nodes
for node, node_data in spl["mapping"].items():
if "dummy" in node:
continue
layers = spl["mapping"][node]
last_layer = max(layers, key=lambda k: layers[k]["Layer Number"])
first_layer = min(layers, key=lambda k: layers[k]["Layer Number"])
if first_layer[-2:] == '_i' or first_layer[-2:] == '_o':
mp_cost["Message Size"] += layers[first_layer]["ifmap"]
if "embed" in first_layer:
mp_cost["Message Size"] += 64
if "embed" in first_layer and "embed" not in last_layer and "part" in first_layer:
mp_cost["Message Size"] += 131072
if "embed" not in last_layer:
mp_cost["Message Size"] += layers[last_layer]["ofmap"]
mp_cost["Total Output Size"] += layers[last_layer]["ofmap"]
mp_cost["Outputs"] += [layers[last_layer]["ofmap"],]
mp_cost["Total Input Size"] += layers[first_layer]["ifmap"]
mp_cost["Inputs"] += [layers[first_layer]["ifmap"],]
if "embed" in first_layer and "embed" not in last_layer and "part" in first_layer:
mp_cost["Total Input Size"] += 131072
mp_cost["Inputs"][-1] += 131072
mp_cost["Total Message Size"] = mp_cost["Message Size"] + mp_cost["Bypassing Residual Message Size"]
mp_cost["Time Cost"] = (LATENCY * 1 + mp_cost["Total Message Size"] / float((BANDWIDTH * 1024**3) / 1e9)) / 1e9
mp_cost["Energy Cost"] = mp_cost["Total Message Size"] * E_PER_BYTE / 1e12
mp_cost["Extra Shutdown Energy Cost"] = mp_cost["Time Cost"] * LKG_PWR
return mp_cost
def preprocess_pipe_parallel_info(data):
    """Group consecutive nodes that run the same layer combination.

    Scans nodes "node_1" .. "node_<num_nodes>" in ``data["node_data"]`` and
    assigns a group id to each node (and its optional "_dummy" companion).
    A new group starts whenever a node's list of layer names (with any
    "part..." suffix stripped) differs from the previous node's.

    Returns a dict with:
      * "group_to_nodes": group id -> list of node (and dummy) names
      * "node_to_group":  node name -> group id
      * "group_info":     group id -> {"max_time", "sum_time"}, where
        max_time is the largest per-node total time in the group and
        sum_time is the summed total time over the group's nodes.

    NOTE(review): a node absent from node_data is treated as a repeat of
    the previous node (its time is re-charged via last_total_time) --
    confirm against the producer of this data structure.
    """
    info = {}
    info["group_to_nodes"] = {}
    info["node_to_group"] = {}
    info["group_info"] = {}
    num_nodes = data["num_nodes"]
    node_data = data["node_data"]
    last_layer_combination = []  # layer names seen on the previous node
    max_node_time = 0            # running max node time within current group
    total_node_time = 0          # running sum of node time within current group
    last_total_time = 0          # previous node's total time (reused for repeats)
    has_dummy = False
    group_id = 0                 # 0 is a sentinel; real groups start at 1
    for node_idx in range(1, num_nodes+1):
        node_name = "node_" + str(node_idx)
        dummy_name = "node_" + str(node_idx) + "_dummy"
        if node_name not in node_data:
            # Absent node: treat as a repeat of the previous node -- charge
            # the previous node's time again and keep it in the same group.
            total_node_time += last_total_time
            info["node_to_group"][node_name] = group_id
            info["group_to_nodes"][group_id] += [node_name,]
            if has_dummy:
                info["node_to_group"][dummy_name] = group_id
                info["group_to_nodes"][group_id] += [dummy_name,]
            continue
        last_total_time = 0
        current_layer_combination = []
        # Collect this node's layer names, dropping "_partN"-style suffixes
        # so split layers compare equal to their base layer.
        for layer, layer_data in node_data[node_name].items():
            if layer == "Total":
                continue
            layer_name = layer
            if "part" in layer:
                index = layer.find("part")
                layer_name = layer[:index-1]
            current_layer_combination += [layer_name,]
        if dummy_name in node_data:
            has_dummy = True
            # The dummy companion's layers also count toward the combination.
            for layer, layer_data in node_data[dummy_name].items():
                if layer == "Total":
                    continue
                layer_name = layer
                if "part" in layer:
                    index = layer.find("part")
                    layer_name = layer[:index-1]
                current_layer_combination += [layer_name,]
        if current_layer_combination != last_layer_combination:
            # Layer combination changed: close out the previous group's
            # statistics (the sentinel group 0 is never materialized).
            if group_id != 0:
                info["group_info"][group_id] = {}
                info["group_info"][group_id]["max_time"] = max_node_time
                max_node_time = 0
                info["group_info"][group_id]["sum_time"] = total_node_time
                total_node_time = 0
            last_layer_combination = current_layer_combination
            group_id += 1
        if group_id not in info["group_to_nodes"] and group_id != 0:
            info["group_to_nodes"][group_id] = []
        info["group_to_nodes"][group_id] += [node_name,]
        if dummy_name in node_data:
            info["group_to_nodes"][group_id] += [dummy_name,]
        max_node_time = max(node_data[node_name]["Total"]["Total time"], max_node_time)
        if dummy_name in node_data:
            # A node and its dummy run back to back, so their combined time
            # competes for the group's max.
            max_node_time = max(node_data[dummy_name]["Total"]["Total time"] + node_data[node_name]["Total"]["Total time"], max_node_time)
        total_node_time += node_data[node_name]["Total"]["Total time"]
        last_total_time += node_data[node_name]["Total"]["Total time"]
        if dummy_name in node_data:
            total_node_time += node_data[dummy_name]["Total"]["Total time"]
            last_total_time += node_data[dummy_name]["Total"]["Total time"]
        info["node_to_group"][node_name] = group_id
        if dummy_name in node_data:
            info["node_to_group"][dummy_name] = group_id
    # Close out the final group.
    info["group_info"][group_id] = {}
    info["group_info"][group_id]["max_time"] = max_node_time
    info["group_info"][group_id]["sum_time"] = total_node_time
    return info
def process_net_pipe_cost(data, group_info):
    """Cost model for pipelined execution.

    The slowest group (by summed node time) sets the pipeline cycle time;
    per-cycle energy adds each node's energy plus leakage (LKG_PWR, module
    global) for the time the node sits idle within a cycle.
    """
    groups = group_info["group_info"]
    cycle_time = max(g["sum_time"] for g in groups.values())
    energy = 0.0
    current = None
    for idx in range(1, data["num_nodes"] + 1):
        candidate = "node_" + str(idx)
        if candidate in data["node_data"]:
            current = candidate
        # Missing nodes fall back to the most recent real node's data.
        totals = data["node_data"][current]["Total"]
        energy += totals["Total Energy"] + (cycle_time - totals["Total time"]) * LKG_PWR
        dummy = current + "_dummy"
        if dummy in data["node_data"]:
            energy += data["node_data"][dummy]["Total"]["Total Energy"]
    return {
        "Per Cycle Energy": energy,
        "time": cycle_time,
        "throughput": 1.0 / cycle_time,
    }
def process_net_parallel_cost(data, group_info):
    """Cost model for purely parallel (non-pipelined) execution.

    Groups execute one after another, so total latency is the sum of each
    group's max node time; total energy adds each node's energy plus
    leakage (LKG_PWR, module global) while it waits for the full run.
    """
    group_data = group_info["group_info"]
    total_time = 0.0
    for gdata in group_data.values():
        total_time += gdata["max_time"]
    total_energy = 0.0
    last_seen = None
    for idx in range(1, data["num_nodes"] + 1):
        name = "node_" + str(idx)
        if name in data["node_data"]:
            last_seen = name
        else:
            # Missing nodes fall back to the most recent real node's data.
            name = last_seen
        totals = data["node_data"][name]["Total"]
        total_energy += totals["Total Energy"] + (total_time - totals["Total time"]) * LKG_PWR
        dummy = name + "_dummy"
        if dummy in data["node_data"]:
            total_energy += data["node_data"][dummy]["Total"]["Total Energy"]
    return {
        "Total Energy": total_energy,
        "time": total_time,
        "throughput": 1.0 / total_time,
    }
def process_net_pipe_parallel_cost(data, group_info):
    """Cost model for combined pipeline + parallel execution.

    The cycle time is the largest per-group max node time; per-cycle
    energy adds each node's energy plus leakage (LKG_PWR, module global)
    for its idle slack within a cycle.
    """
    group_data = group_info["group_info"]
    cycle_time = max(g["max_time"] for g in group_data.values())
    energy = 0.0
    prev = None
    for idx in range(1, data["num_nodes"] + 1):
        name = "node_" + str(idx)
        if name not in data["node_data"]:
            # Missing nodes fall back to the most recent real node's data.
            name = prev
        else:
            prev = name
        totals = data["node_data"][name]["Total"]
        energy += totals["Total Energy"] + (cycle_time - totals["Total time"]) * LKG_PWR
        dummy = name + "_dummy"
        if dummy in data["node_data"]:
            energy += data["node_data"][dummy]["Total"]["Total Energy"]
    return {
        "Per Cycle Energy": energy,
        "time": cycle_time,
        "throughput": 1.0 / cycle_time,
    }
def analyze():
    """Run the full cost analysis over every (net, word, batch) workload
    and every memory split in SPLIT, printing per-configuration costs and
    a cross-split energy/time comparison.

    Returns (e_cmp, t_cmp, msg, num_chips, t_lstm, e_lstm): e_cmp / t_cmp
    are per-split totals aggregated over all workloads; msg collects total
    message sizes; num_chips collects chip counts; t_lstm / e_lstm
    accumulate per-entry time / energy stats across workloads.

    NOTE(review): depends on module globals (NETS, WORDS, BATCHES, SPLIT)
    and sibling helpers (extract_table, extract_split, process_net_*).
    """
    e_cmp = []       # per-split aggregated energy, indexed like SPLIT
    t_cmp = []       # per-split aggregated time, indexed like SPLIT
    t_lstm = {}      # per-entry time totals across all workloads
    e_lstm = {}      # per-entry energy totals across all workloads
    e_stats = {}
    msg = []         # total message size per analyzed configuration
    num_chips = []   # chip count per analyzed configuration
    for net, word, batch in itertools.product(NETS, WORDS, BATCHES):
        for split in SPLIT:
            #outfilename = os.path.join(OUTPUT_DIR, "_".join(net, str(word), str(batch)) + ".csv")
            #fo = open(outfilename, 'a')
            # load the table containing all node info
            print("="*30)
            print("Analyzing " + net + " with (word, batchsize, memsize) = " + str((word, batch, split)))
            # #temp
            # spl = extract_split(net, word, batch, split)
            # mp_cost = process_net_mp_cost(spl)
            # print("Message Passing Cost: ")
            # print("Total Message Size: ", mp_cost["Total Message Size"])
            # print("Total Input Size: ", mp_cost["Total Input Size"])
            # print("Total Output Size: ", mp_cost["Total Output Size"])
            # print("Time: ", mp_cost["Time Cost"])
            # print("Energy: ", mp_cost["Energy Cost"])
            #
            #
            # continue
            res = extract_table(net, word, batch, split)
            spl = extract_split(net, word, batch, split)
            #print(json.dumps(res, indent=2))
            t_stats = process_net_time_stats(res)
            print("Time Cost: ", json.dumps(t_stats, indent=2))
            # Accumulate per-entry times across workloads.
            for entry in t_stats:
                if entry not in t_lstm:
                    t_lstm[entry] = t_stats[entry]
                else:
                    t_lstm[entry] += t_stats[entry]
            e_stats = process_net_energy_stats(res, t_stats)
            print("Energy Cost: ", json.dumps(e_stats, indent=2))
            # Accumulate per-entry energies across workloads.
            for entry in e_stats:
                if entry not in e_lstm:
                    e_lstm[entry] = e_stats[entry]
                else:
                    e_lstm[entry] += e_stats[entry]
            m_stats = process_net_mem_stats(res)
            mp_cost = process_net_mp_cost(spl)
            print("Message Passing Cost: ", json.dumps(mp_cost, indent=2))
            msg += [mp_cost["Total Message Size"],]
            # The first workload initializes the per-split lists; later
            # workloads accumulate into the matching split slot.
            if len(e_cmp) != len(SPLIT):
                e_cmp += [e_stats["System-wide Total Energy"] + mp_cost["Energy Cost"] + mp_cost["Extra Shutdown Energy Cost"]]
            else:
                e_cmp[SPLIT.index(split)] += e_stats["System-wide Total Energy"] + mp_cost["Energy Cost"] + mp_cost["Extra Shutdown Energy Cost"]
            if len(t_cmp) != len(SPLIT):
                t_cmp += [t_stats["Total time"] + mp_cost["Time Cost"]]
            else:
                t_cmp[SPLIT.index(split)] += t_stats["Total time"] + mp_cost["Time Cost"]
            group_info = preprocess_pipe_parallel_info(res)
            #print("Group Info:", json.dumps(group_info, indent=2))
            num_chips += [len(group_info["node_to_group"])]
            pipe_info = process_net_pipe_cost(res, group_info)
            print("Pipe Cost: ", json.dumps(pipe_info, indent=2))
            para_info = process_net_parallel_cost(res, group_info)
            print("Parallel Cost: ", json.dumps(para_info, indent=2))
            pipepara_info = process_net_pipe_parallel_cost(res, group_info)
            print("PipeParallel Cost: ", json.dumps(pipepara_info, indent=2))
    # Cross-split summary, aggregated over all workloads.
    print("========================================================")
    print("Performance Comparison (automatically aggregated for split LSTM workloads if all parts are declared in the NETS variable):")
    print("Energy Comparison: ")
    print(e_cmp)
    print("Relative Energy Coefficient (divided by the energy used by the last entry in the SPLIT list)")
    print(np.array(e_cmp) / e_cmp[-1])
    print("Time Comparison: ")
    print(t_cmp)
    print("Relative Time Coefficient (divided by the time used by the last entry in the SPLIT list)")
    print(np.array(t_cmp) / t_cmp[-1])
    return e_cmp, t_cmp, msg, num_chips, t_lstm, e_lstm
def main():
# global BANDWIDTH
#
# res = []
#
# for i in range(11):
# BANDWIDTH = 32 / float(2 ** i)
# cmp = analyze()
# res.append(cmp)
| |
in text
)
def justify(
    self, texts: Iterable[str], max_len: int, mode: str = "right"
) -> List[str]:
    """Pad each string to ``max_len`` display columns, honoring East
    Asian Width (wide characters count as two columns via self.len)."""
    def _pad_width(s):
        # str.ljust/rjust/center count code points, not display columns,
        # so widen the target by the difference between the two counts.
        return max_len - self.len(s) + len(s)

    if mode == "left":
        return [s.ljust(_pad_width(s)) for s in texts]
    if mode == "center":
        return [s.center(_pad_width(s)) for s in texts]
    return [s.rjust(_pad_width(s)) for s in texts]
def _get_adjustment() -> TextAdjustment:
    """Return the text-width helper matching the current
    ``display.unicode.east_asian_width`` option."""
    if get_option("display.unicode.east_asian_width"):
        return EastAsianTextAdjustment()
    return TextAdjustment()
class TableFormatter:
    """Abstract base class for DataFrame serializers (string/HTML/LaTeX).

    Subclasses implement ``write_result``; this base provides buffer
    handling, per-column formatter lookup and the truncation footer flag.
    """

    # Populated by subclasses (e.g. DataFrameFormatter.__init__).
    show_dimensions: Union[bool, str]
    is_truncated: bool
    formatters: formatters_type
    columns: Index

    @property
    def should_show_dimensions(self) -> bool:
        """Whether a "[n rows x m columns]" footer should be rendered:
        either always (True) or only when output was truncated."""
        return self.show_dimensions is True or (
            self.show_dimensions == "truncate" and self.is_truncated
        )

    def _get_formatter(self, i: Union[str, int]) -> Optional[Callable]:
        """Return the user-supplied formatter for column ``i`` (label or
        position), or None when none was given."""
        if isinstance(self.formatters, (list, tuple)):
            # Positional formatters: only integer lookups make sense.
            if is_integer(i):
                i = cast(int, i)
                return self.formatters[i]
            else:
                return None
        else:
            # Label-keyed dict: map a position to its label first, unless
            # the integer itself is a column label.
            if is_integer(i) and i not in self.columns:
                i = self.columns[i]
            return self.formatters.get(i, None)

    @contextmanager
    def get_buffer(
        self, buf: Optional[FilePathOrBuffer[str]], encoding: Optional[str] = None
    ):
        """
        Context manager to open, yield and close buffer for filenames or Path-like
        objects, otherwise yield buf unchanged.
        """
        if buf is not None:
            buf = stringify_path(buf)
        else:
            buf = StringIO()
        if encoding is None:
            encoding = "utf-8"
        elif not isinstance(buf, str):
            # encoding only applies when we open a file ourselves.
            raise ValueError("buf is not a file name and encoding is specified.")
        if hasattr(buf, "write"):
            yield buf
        elif isinstance(buf, str):
            with open(buf, "w", encoding=encoding, newline="") as f:
                # GH#30034 open instead of codecs.open prevents a file leak
                # if we have an invalid encoding argument.
                # newline="" is needed to roundtrip correctly on
                # windows test_to_latex_filename
                yield f
        else:
            raise TypeError("buf is not a file name and it has no write method")

    def write_result(self, buf: IO[str]) -> None:
        """
        Write the result of serialization to buf.

        Abstract: concrete subclasses must override.
        """
        raise AbstractMethodError(self)

    def get_result(
        self,
        buf: Optional[FilePathOrBuffer[str]] = None,
        encoding: Optional[str] = None,
    ) -> Optional[str]:
        """
        Perform serialization. Write to buf or return as string if buf is None.
        """
        with self.get_buffer(buf, encoding=encoding) as f:
            self.write_result(buf=f)
            if buf is None:
                # We created the StringIO ourselves; hand back its contents.
                return f.getvalue()
            return None
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ""
__doc__ += common_docstring + return_docstring
def __init__(
    self,
    frame: "DataFrame",
    columns: Optional[Sequence[str]] = None,
    col_space: Optional[Union[str, int]] = None,
    header: Union[bool, Sequence[str]] = True,
    index: bool = True,
    na_rep: str = "NaN",
    formatters: Optional[formatters_type] = None,
    justify: Optional[str] = None,
    float_format: Optional[float_format_type] = None,
    sparsify: Optional[bool] = None,
    index_names: bool = True,
    line_width: Optional[int] = None,
    max_rows: Optional[int] = None,
    min_rows: Optional[int] = None,
    max_cols: Optional[int] = None,
    show_dimensions: Union[bool, str] = False,
    decimal: str = ".",
    table_id: Optional[str] = None,
    render_links: bool = False,
    bold_rows: bool = False,
    escape: bool = True,
):
    """Store rendering options, validate ``formatters``/``columns`` and
    pre-slice the frame for truncated display."""
    self.frame = frame
    self.show_index_names = index_names
    # Fall back to the global display option when not given explicitly.
    if sparsify is None:
        sparsify = get_option("display.multi_sparse")
    self.sparsify = sparsify
    self.float_format = float_format
    if formatters is None:
        self.formatters = {}
    elif len(frame.columns) == len(formatters) or isinstance(formatters, dict):
        # A dict may name any subset of columns; a sequence must match
        # the column count exactly.
        self.formatters = formatters
    else:
        raise ValueError(
            (
                "Formatters length({flen}) should match "
                "DataFrame number of columns({dlen})"
            ).format(flen=len(formatters), dlen=len(frame.columns))
        )
    self.na_rep = na_rep
    self.decimal = decimal
    self.col_space = col_space
    self.header = header
    self.index = index
    self.line_width = line_width
    self.max_rows = max_rows
    self.min_rows = min_rows
    self.max_cols = max_cols
    # Never report more displayed rows than the frame actually has.
    self.max_rows_displayed = min(max_rows or len(self.frame), len(self.frame))
    self.show_dimensions = show_dimensions
    self.table_id = table_id
    self.render_links = render_links
    if justify is None:
        self.justify = get_option("display.colheader_justify")
    else:
        self.justify = justify
    self.bold_rows = bold_rows
    self.escape = escape
    if columns is not None:
        # Restrict the frame to the requested columns, preserving order.
        self.columns = ensure_index(columns)
        self.frame = self.frame[self.columns]
    else:
        self.columns = frame.columns
    # Pre-compute truncation (sets tr_frame, truncate_h/v, ...).
    self._chk_truncate()
    self.adj = _get_adjustment()
def _chk_truncate(self) -> None:
    """
    Checks whether the frame should be truncated. If so, slices
    the frame up.

    Sets: self.tr_frame (possibly sliced copy), self.truncate_h/_v,
    self.tr_col_num / self.tr_row_num (split points), self.is_truncated.
    """
    from pandas.core.reshape.concat import concat

    # Cut the data to the information actually printed
    max_cols = self.max_cols
    max_rows = self.max_rows
    # Annotations only -- attribute existence is probed via hasattr below.
    self.max_rows_adj: Optional[int]
    max_rows_adj: Optional[int]

    if max_cols == 0 or max_rows == 0:  # assume we are in the terminal
        (w, h) = get_terminal_size()
        self.w = w
        self.h = h
        if self.max_rows == 0:
            # Budget terminal rows for decoration around the data.
            dot_row = 1
            prompt_row = 1
            if self.show_dimensions:
                show_dimension_rows = 3
            # assume we only get here if self.header is boolean.
            # i.e. not to_latex() where self.header may be List[str]
            self.header = cast(bool, self.header)
            n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
            # rows available to fill with actual data
            max_rows_adj = self.h - n_add_rows
            self.max_rows_adj = max_rows_adj
        # Format only rows and columns that could potentially fit the
        # screen
        if max_cols == 0 and len(self.frame.columns) > w:
            max_cols = w
        if max_rows == 0 and len(self.frame) > h:
            max_rows = h
    if not hasattr(self, "max_rows_adj"):
        if max_rows:
            if (len(self.frame) > max_rows) and self.min_rows:
                # if truncated, set max_rows showed to min_rows
                max_rows = min(self.min_rows, max_rows)
        self.max_rows_adj = max_rows
    if not hasattr(self, "max_cols_adj"):
        self.max_cols_adj = max_cols

    max_cols_adj = self.max_cols_adj
    max_rows_adj = self.max_rows_adj

    # Truthy limit smaller than the actual extent => truncate that axis.
    truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
    truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)

    frame = self.frame
    if truncate_h:
        # cast here since if truncate_h is True, max_cols_adj is not None
        max_cols_adj = cast(int, max_cols_adj)
        if max_cols_adj == 0:
            col_num = len(frame.columns)
        elif max_cols_adj == 1:
            max_cols = cast(int, max_cols)
            frame = frame.iloc[:, :max_cols]
            col_num = max_cols
        else:
            # Keep the first and last col_num columns; "..." goes between.
            col_num = max_cols_adj // 2
            frame = concat(
                (frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1
            )
            # truncate formatter
            if isinstance(self.formatters, (list, tuple)):
                truncate_fmt = self.formatters
                self.formatters = [
                    *truncate_fmt[:col_num],
                    *truncate_fmt[-col_num:],
                ]
        self.tr_col_num = col_num
    if truncate_v:
        # cast here since if truncate_v is True, max_rows_adj is not None
        max_rows_adj = cast(int, max_rows_adj)
        if max_rows_adj == 1:
            row_num = max_rows
            frame = frame.iloc[:max_rows, :]
        else:
            # Keep the first and last row_num rows; "..." goes between.
            row_num = max_rows_adj // 2
            frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
        self.tr_row_num = row_num
    else:
        self.tr_row_num = None
    self.tr_frame = frame
    self.truncate_h = truncate_h
    self.truncate_v = truncate_v
    self.is_truncated = bool(self.truncate_h or self.truncate_v)
def _to_str_columns(self) -> List[List[str]]:
    """
    Render a DataFrame to a list of columns (as lists of strings).

    Each inner list is one display column: optional header cell(s)
    followed by the formatted values, all padded to a common width.
    Inserts the index as column 0 and "..." markers for truncation.
    """
    # this method is not used by to_html where self.col_space
    # could be a string so safe to cast
    self.col_space = cast(int, self.col_space)
    frame = self.tr_frame
    # may include levels names also
    str_index = self._get_formatted_index(frame)

    if not is_list_like(self.header) and not self.header:
        # Header suppressed: format just the data cells.
        stringified = []
        for i, c in enumerate(frame):
            fmt_values = self._format_col(i)
            fmt_values = _make_fixed_width(
                fmt_values,
                self.justify,
                minimum=(self.col_space or 0),
                adj=self.adj,
            )
            stringified.append(fmt_values)
    else:
        if is_list_like(self.header):
            # cast here since can't be bool if is_list_like
            self.header = cast(List[str], self.header)
            # Header aliases must map one-to-one onto the columns.
            if len(self.header) != len(self.columns):
                raise ValueError(
                    (
                        "Writing {ncols} cols but got {nalias} "
                        "aliases".format(
                            ncols=len(self.columns), nalias=len(self.header)
                        )
                    )
                )
            str_columns = [[label] for label in self.header]
        else:
            str_columns = self._get_formatted_column_labels(frame)
        if self.show_row_idx_names:
            # Blank cell above the row-index name line.
            for x in str_columns:
                x.append("")
        stringified = []
        for i, c in enumerate(frame):
            cheader = str_columns[i]
            # Column width is the widest of col_space, header and values.
            header_colwidth = max(
                self.col_space or 0, *(self.adj.len(x) for x in cheader)
            )
            fmt_values = self._format_col(i)
            fmt_values = _make_fixed_width(
                fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
            )
            max_len = max(max(self.adj.len(x) for x in fmt_values), header_colwidth)
            cheader = self.adj.justify(cheader, max_len, mode=self.justify)
            stringified.append(cheader + fmt_values)
    strcols = stringified
    if self.index:
        strcols.insert(0, str_index)

    # Add ... to signal truncated
    truncate_h = self.truncate_h
    truncate_v = self.truncate_v

    if truncate_h:
        col_num = self.tr_col_num
        strcols.insert(self.tr_col_num + 1, [" ..."] * (len(str_index)))
    if truncate_v:
        n_header_rows = len(str_index) - len(frame)
        row_num = self.tr_row_num
        # cast here since if truncate_v is True, self.tr_row_num is not None
        row_num = cast(int, row_num)
        for ix, col in enumerate(strcols):
            # infer from above row
            cwidth = self.adj.len(strcols[ix][row_num])
            is_dot_col = False
            if truncate_h:
                is_dot_col = ix == col_num + 1
            if cwidth > 3 or is_dot_col:
                my_str = "..."
            else:
                my_str = ".."
            if ix == 0:
                # Index column: dots hug the left edge.
                dot_mode = "left"
            elif is_dot_col:
                cwidth = 4
                dot_mode = "right"
            else:
                dot_mode = "right"
            dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
            strcols[ix].insert(row_num + n_header_rows, dot_str)
    return strcols
def write_result(self, buf: IO[str]) -> None:
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = "Empty {name}\nColumns: {col}\nIndex: {idx}".format(
name=type(self.frame).__name__,
| |
# File boundary marker (extraction artifact): libtaxii/common.py from larrycameron80/libtaxii
"""
Common utility classes and functions used throughout libtaxii.
"""
from operator import attrgetter
import random
import re
import sys
from uuid import uuid4
import dateutil.parser
from lxml import etree
import six
try:
import simplejson as json
except ImportError:
import json
from libtaxii.constants import *
_XML_PARSER = None
def parse(s):
    """
    Uses the default parser to parse a string or file-like object

    :param s: The XML String or File-like object to parse
    :return: an etree._Element
    """
    parser = get_xml_parser()
    try:
        # Filename / file-like input path.
        return etree.parse(s, parser).getroot()
    except IOError:
        # Not a file: treat *s* as raw XML content instead.
        return etree.XML(s, parser)
def parse_xml_string(xmlstr):
    """Parse an XML string (binary or unicode) with the default parser.

    :param xmlstr: An XML String to parse
    :return: an etree._Element
    """
    if isinstance(xmlstr, six.binary_type):
        buf = six.BytesIO(xmlstr)
    elif isinstance(xmlstr, six.text_type):
        # LXML doesn't accept Unicode strings with an explicit encoding,
        # so sniff any declaration in the prolog and pre-encode to bytes.
        declared = re.findall(r'encoding="([0-9A-Za-z_\-]+)"', xmlstr[:50], re.I)
        # re.findall returns a list of matching strings; only the first
        # declaration matters.
        if declared:
            buf = six.BytesIO(xmlstr.encode(declared[0]))
        else:
            buf = six.StringIO(xmlstr)
    else:
        # Anything else (e.g. already file-like) passes through untouched.
        buf = xmlstr
    return parse(buf)
def get_xml_parser():
    """Return the XML parser currently in use.

    If one has not already been set (via :py:func:`set_xml_parser()`), a
    hardened default ``etree.XMLParser`` is built lazily: network access,
    DTD loading, entity resolution and huge trees are all disabled.
    """
    global _XML_PARSER
    if _XML_PARSER is None:
        safe_options = dict(
            attribute_defaults=False,
            dtd_validation=False,
            load_dtd=False,
            no_network=True,
            ns_clean=True,
            recover=False,
            remove_blank_text=False,
            remove_comments=False,
            remove_pis=False,
            strip_cdata=True,
            compact=True,
            resolve_entities=False,
            huge_tree=False,
        )
        _XML_PARSER = etree.XMLParser(**safe_options)
    return _XML_PARSER
def set_xml_parser(xml_parser=None):
    """Set the libtaxii.messages XML parser.

    Args:
        xml_parser (etree.XMLParser): The parser to use to parse TAXII XML.
            Passing None resets the module parser, so the next call to
            get_xml_parser() rebuilds the hardened default.
    """
    global _XML_PARSER
    _XML_PARSER = xml_parser
def parse_datetime_string(datetime_string):
    """Parse a string into a :py:class:`datetime.datetime`.

    Falsy input (None or an empty string) yields None.

    libtaxii users should not need to use this function directly.
    """
    return dateutil.parser.parse(datetime_string) if datetime_string else None
def generate_message_id(maxlen=5, version=VID_TAXII_SERVICES_10):
    """Generate a TAXII Message ID.

    Args:
        maxlen (int): maximum length of the ID, in characters.
            NOTE(review): not referenced in the body -- confirm whether it
            should bound the generated ID's length.
        version: TAXII Services version ID selecting the ID format.

    Example:
        .. code-block:: python

            msg_id = tm11.generate_message_id()
            message = tm11.DiscoveryRequest(msg_id)
            # Or...
            message = tm11.DiscoveryRequest(tm11.generate_message_id())
    """
    if version == VID_TAXII_SERVICES_10:
        # TAXII 1.0: numeric message ids.
        return str(uuid4().int % sys.maxsize)
    if version == VID_TAXII_SERVICES_11:
        # TAXII 1.1: arbitrary strings allowed; a UUID works.
        return str(uuid4())
    raise ValueError('Unknown TAXII Version: %s. Must be a TAXII Services Version ID!' % version)
def append_any_content_etree(etree_elt, content):
    """
    General method for adding content to an etree element. This method can handle:
    * etree._ElementTree
    * etree._Element
    * any python type that can be cast to str
    * str

    :param etree_elt: The etree to append the content to
    :param content: The content to append
    :return: The etree_elt
    """
    if isinstance(content, etree._ElementTree):
        # An element tree contributes its root element.
        etree_elt.append(content.getroot())
    elif isinstance(content, etree._Element):
        etree_elt.append(content)
    elif not isinstance(content, six.string_types):
        # Arbitrary python objects are stringified into the text node.
        etree_elt.text = str(content)
    else:
        # A string may or may not be XML: try parsing, fall back to text.
        try:
            etree_elt.append(etree.XML(content, get_xml_parser()))
        except etree.XMLSyntaxError:
            etree_elt.text = content
    return etree_elt
def gen_filename(collection_name, format_part, date_string, extension):
    """
    Creates a filename based on various properties of a Poll Request and Content Block

    :param collection_name: The collection name
    :param format_part: The format part (e.g., '_STIX_10_')
    :param date_string: A datestring
    :param extension: The file extension to use
    :return: A string containing the generated filename

    Leading dots are stripped from the collection name, non-alphanumeric
    characters in the date string become underscores, and characters that
    are unsafe in filenames (/\\:*?"<>|) are removed from the result.
    """
    filename = (collection_name.lstrip(".") +
                format_part +
                re.sub(r"[^a-zA-Z0-9]", "_", date_string) + extension)
    if sys.version_info[0] >= 3:
        # BUG FIX: on Python 3, str.translate() takes a translation table,
        # not a string of characters to delete.  The previous code passed
        # the raw string, which used each character's ordinal as an *index*
        # into it -- substituting ordinary characters instead of deleting
        # the unsafe ones.  str.maketrans('', '', chars) builds the proper
        # deletion table.
        return filename.translate(str.maketrans('', '', '/\\:*?"<>|'))
    else:
        # Python 2 two-argument form: second argument is deletechars.
        return filename.translate(None, '/\\:*?"<>|')
class TAXIIBase(object):
    """
    Base class for all TAXII Messages and Message component types.

    libtaxii users should not need to use this class directly.
    """

    @property
    def sort_key(self):
        """
        This property allows list of TAXII objects to be compared efficiently.
        The __eq__ method uses this property to sort the lists before
        comparisons are made.

        Subclasses must implement this property.
        """
        raise NotImplementedError()

    def to_etree(self):
        """Create an etree representation of this class.

        Subclasses must implement this method.
        """
        raise NotImplementedError()

    def to_dict(self):
        """Create a dictionary representation of this class.

        Subclasses must implement this method.
        """
        raise NotImplementedError()

    def to_json(self):
        """Create a JSON object of this class.

        Assumes any binary content will be UTF-8 encoded.
        """
        content_dict = self.to_dict()
        # Binary payloads must become text before json.dumps can emit them.
        _decode_binary_fields(content_dict)
        return json.dumps(content_dict)

    def to_xml(self, pretty_print=False):
        """Create an XML representation of this class.

        Subclasses should not need to implement this method.
        """
        return etree.tostring(self.to_etree(), pretty_print=pretty_print)

    def to_text(self, line_prepend=''):
        """Create a nice looking (this is a subjective term!)
        textual representation of this class. Subclasses should
        implement this method.

        Note that this is just a convenience method for making
        TAXII Messages nice to read for humans and may change
        drastically in future versions of libtaxii.
        """
        raise NotImplementedError()

    @classmethod
    def from_etree(cls, src_etree):
        """Create an instance of this class from an etree.

        Subclasses must implement this method.
        """
        raise NotImplementedError()

    @classmethod
    def from_dict(cls, d):
        """Create an instance of this class from a dictionary.

        Subclasses must implement this method.
        """
        raise NotImplementedError()

    @classmethod
    def from_xml(cls, xml):
        """Create an instance of this class from XML.

        Subclasses should not need to implement this method.
        """
        etree_xml = parse_xml_string(xml)
        return cls.from_etree(etree_xml)

    # Just noting that there is not a from_text() method. I also
    # don't think there will ever be one.

    def __str__(self):
        return self.to_xml(pretty_print=True)

    def __eq__(self, other, debug=False):
        """
        Generic method used to check equality of objects of any TAXII type.
        Also allows for ``print``-based debugging output showing differences.

        In order for subclasses to use this function, they must meet the
        following criteria:
        1. All class properties start with one underscore.
        2. The sort_key property is implemented.

        Args:
            self (object): this object
            other (object): the object to compare ``self`` against.
            debug (bool): Whether or not to print debug statements as the
                equality comparison is performed.
        """
        if other is None:
            if debug:
                print('other was None!')
            return False
        if self.__class__.__name__ != other.__class__.__name__:
            if debug:
                print('class names not equal: %s != %s' % (self.__class__.__name__, other.__class__.__name__))
            return False
        # Get all member properties that start with '_'
        members = [attr for attr in vars(self) if attr.startswith('_') and not attr.startswith('__')]
        for member in members:
            if debug:
                print('member name: %s' % member)
            self_value = getattr(self, member)
            other_value = getattr(other, member)
            if isinstance(self_value, TAXIIBase):
                # A debuggable equals comparison can be made
                eq = self_value.__eq__(other_value, debug)
            elif isinstance(self_value, list):
                # We have lists to compare
                if len(self_value) != len(other_value):
                    # Lengths not equal
                    member = member + ' lengths'
                    self_value = len(self_value)
                    other_value = len(other_value)
                    eq = False
                elif len(self_value) == 0:
                    # Both lists are of size 0, and therefore equal
                    eq = True
                else:
                    # Equal sized, non-0 length lists. The list might contain
                    # TAXIIBase objects, or it might not. Peek at the first
                    # item to see whether it is a TAXIIBase object or not.
                    if isinstance(self_value[0], TAXIIBase):
                        # All TAXIIBase objects have the 'sort_key' property implemented
                        self_value = sorted(self_value, key=attrgetter('sort_key'))
                        other_value = sorted(other_value, key=attrgetter('sort_key'))
                        # NOTE(review): eq is overwritten each iteration, so
                        # only the LAST pair's result survives -- confirm
                        # whether earlier mismatches should short-circuit.
                        for self_item, other_item in six.moves.zip(self_value, other_value):
                            # Compare the ordered lists element by element
                            eq = self_item.__eq__(other_item, debug)
                    else:
                        # Assume they don't... just do a set comparison
                        eq = set(self_value) == set(other_value)
            elif isinstance(self_value, dict):
                # Dictionary to compare
                # NOTE(review): this key check is one-directional (extra keys
                # in other_value are not detected), and the unconditional
                # ``eq = True`` below appears to overwrite any False set in
                # this branch, making dict members always compare equal --
                # confirm intended behavior before relying on dict equality.
                if len(set(self_value.keys()) - set(other_value.keys())) != 0:
                    if debug:
                        print('dict keys not equal: %s != %s' % (self_value, other_value))
                    eq = False
                for k, v in six.iteritems(self_value):
                    if other_value[k] != v:
                        if debug:
                            print('dict values not equal: %s != %s' % (v, other_value[k]))
                        eq = False
                eq = True
            elif isinstance(self_value, etree._Element):
                # Non-TAXII etree element (i.e. STIX)
                eq = (etree.tostring(self_value) == etree.tostring(other_value))
            else:
                # Do a direct comparison
                eq = (self_value == other_value)
            # TODO: is this duplicate?
            if not eq:
                if debug:
                    print('%s was not equal: %s != %s' % (member, self_value, other_value))
                return False
        return True

    def __ne__(self, other, debug=False):
        # Delegate so __eq__ remains the single source of truth.
        return not self.__eq__(other, debug)
def get_required(etree_xml, xpath, ns_map):
    """Return the first node matching *xpath* under *etree_xml*.

    :param ns_map: namespace prefix map passed to lxml's xpath()
    :raises ValueError: when the expression matches nothing
    """
    matches = etree_xml.xpath(xpath, namespaces=ns_map)
    if len(matches) == 0:
        raise ValueError('Element "%s" is required' % xpath)
    return matches[0]
def get_optional(etree_xml, xpath, ns_map):
    """Like :func:`get_required`, but return None when no node matches."""
    try:
        return get_required(etree_xml, xpath, ns_map)
    except ValueError:
        return None
def get_optional_text(etree_xml, xpath, ns_map):
    """Return the ``.text`` of the first matching node, or None when the
    expression matches nothing."""
    try:
        return get_required(etree_xml, xpath, ns_map).text
    except ValueError:
        return None
def _decode_binary_fields(dict_obj):
"""Given a dict, decode any binary values, assuming | |
"""
Contains the derived saveable search from GridSearchCV
"""
import numpy as np
import pickle
import os.path
import six
import time
import numbers
import warnings
from collections import defaultdict
from functools import partial
from joblib import Parallel, delayed, logger
from itertools import product
from scipy.stats import rankdata
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection._split import check_cv
from sklearn.base import is_classifier, clone
from sklearn.metrics.scorer import _check_multimetric_scoring
from sklearn.utils.validation import indexable, _num_samples
from sklearn.model_selection._validation import _aggregate_score_dicts, _score
from sklearn.utils.deprecation import DeprecationDict
from sklearn.utils.metaestimators import _safe_split
from sklearn.exceptions import FitFailedWarning
from sklearn.model_selection import StratifiedKFold
from sklearn.utils.fixes import MaskedArray
from ..model.basicmodel import BasicModel
def _fit_and_score_save(estimator, X, y, scorer, train, test, verbose,
                        parameters, fold, save, fit_params, return_train_score=False,
                        return_parameters=False, return_n_test_samples=False,
                        return_times=False, error_score='raise'):
    """Fit estimator and compute scores for a given dataset split.

    Behaves like sklearn's private ``_fit_and_score`` but additionally
    pickles the result of each fold to ``<save>/<fold>.ret`` so that an
    interrupted search can be resumed without refitting finished folds.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape at least 2D
        The data to fit.
    y : array-like, optional, default: None
        The target variable to try to predict in the case of
        supervised learning.
    scorer : A single callable or dict mapping scorer name to the callable
        If it is a single callable, the return value for ``train_scores`` and
        ``test_scores`` is a single float.  For a dict, it should be one
        mapping the scorer name to the scorer callable object / function with
        signature ``scorer(estimator, X, y)``.
    train : array-like, shape (n_train_samples,)
        Indices of training samples.
    test : array-like, shape (n_test_samples,)
        Indices of test samples.
    verbose : integer
        The verbosity level.
    parameters : dict or None
        Parameters to be set on the estimator.
    fold : int
        Index of this fold, used as the cache-file name under *save*.
    save : str or None
        Directory for per-fold result caching; None disables caching.
    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.
    return_train_score : boolean, optional, default: False
        Compute and return score on training set.
    return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
    return_n_test_samples : boolean, optional, default: False
        Whether to return the ``n_test_samples``.
    return_times : boolean, optional, default: False
        Whether to return the fit/score times.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.  If a numeric value is given,
        FitFailedWarning is raised.

    Returns
    -------
    list
        ``[train_scores?, test_scores, n_test_samples?, fit_time?,
        score_time?, parameters?]`` depending on the ``return_*`` flags.
    """
    path = None
    msg = ''
    if verbose > 1:
        if parameters is None:
            msg = ''
        else:
            msg = '%s' % (', '.join('%s=%s' % (k, v)
                                    for k, v in parameters.items()))
        print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
    if save is not None:
        path = "{}/{}.ret".format(save, fold)
        if os.path.isfile(path):
            # Fold already evaluated in a previous run: reuse the cached
            # result instead of refitting.
            if verbose > 1:
                print("[INFO] Already calculated Fold: %d. Load result from %s."
                      % (fold, path))
            with open(path, 'rb') as fp:
                ret = pickle.load(fp)
            # BUG FIX: removed stray debug `print(ret)` that spammed stdout
            # on every cache hit regardless of verbosity.
            return ret
    # Adjust length of sample weights
    fit_params = fit_params if fit_params is not None else {}
    fit_params = dict([(k, _index_param_value(X, v, train))
                       for k, v in fit_params.items()])
    test_scores = {}
    train_scores = {}
    if parameters is not None:
        estimator.set_params(**parameters)
    start_time = time.time()
    X_train, y_train = _safe_split(estimator, X, y, train)
    X_test, y_test = _safe_split(estimator, X, y, test, train)
    is_multimetric = not callable(scorer)
    n_scorers = len(scorer.keys()) if is_multimetric else 1
    try:
        if y_train is None:
            estimator.fit(X_train, **fit_params)
        else:
            estimator.fit(X_train, y_train, **fit_params)
    except Exception as e:
        # Note fit time as time until error
        fit_time = time.time() - start_time
        score_time = 0.0
        if error_score == 'raise':
            raise
        elif isinstance(error_score, numbers.Number):
            if is_multimetric:
                test_scores = dict(zip(scorer.keys(),
                                       [error_score, ] * n_scorers))
                if return_train_score:
                    train_scores = dict(zip(scorer.keys(),
                                            [error_score, ] * n_scorers))
            else:
                test_scores = error_score
                if return_train_score:
                    train_scores = error_score
            warnings.warn("Classifier fit failed. The score on this train-test"
                          " partition for these parameters will be set to %f. "
                          "Details: \n%r" % (error_score, e), FitFailedWarning)
        else:
            raise ValueError("error_score must be the string 'raise' or a"
                             " numeric value. (Hint: if using 'raise', please"
                             " make sure that it has been spelled correctly.)")
    else:
        fit_time = time.time() - start_time
        # _score will return dict if is_multimetric is True
        test_scores = _score(estimator, X_test, y_test, scorer, is_multimetric)
        score_time = time.time() - start_time - fit_time
        if return_train_score:
            train_scores = _score(estimator, X_train, y_train, scorer,
                                  is_multimetric)
    end_msg = ""
    if verbose > 2:
        if is_multimetric:
            for scorer_name, score in test_scores.items():
                msg += "; %s=%s" % (scorer_name, score)
        else:
            msg += "; score=%s" % test_scores
    if verbose > 1:
        total_time = score_time + fit_time
        end_msg = "%s; total=%s" % (msg, logger.short_format_time(total_time))
        print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
    ret = [train_scores, test_scores] if return_train_score else [test_scores]
    if return_n_test_samples:
        ret.append(_num_samples(X_test))
    if return_times:
        ret.extend([fit_time, score_time])
    if return_parameters:
        ret.append(parameters)
    if path is not None:
        # Persist this fold's result for future resumed runs.
        with open(path, "wb") as fp:
            pickle.dump(ret, fp)
        if verbose > 1:
            with open("%s/results.csv" % (save), "a") as fp:
                fp.write("%s \n" % (end_msg))
    return ret
class SaveGridSearchCV(GridSearchCV):
    """GridSearchCV variant that caches each evaluated fold on disk.

    Every (candidate, split) result is pickled to ``<dir>/<fold>.ret``
    inside a directory derived from the *save* constructor argument, so a
    previously interrupted search can skip folds that were already
    evaluated.
    """
def __init__(self, estimator, param_grid, save=None, scoring=None,
             fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
             verbose=0, pre_dispatch='2*n_jobs', error_score='raise',
             random_state=None, return_train_score="warn"):
    """Configure the search; see GridSearchCV for inherited arguments.

    *save*, when given, is the prefix for the on-disk result directory;
    *random_state* seeds the default StratifiedKFold splitter.
    """
    cv_obj = self._process_cv(cv, random_state)
    super(SaveGridSearchCV, self).__init__(
        estimator=estimator, param_grid=param_grid, scoring=scoring,
        fit_params=fit_params, n_jobs=n_jobs, iid=iid, refit=refit,
        cv=cv_obj, verbose=verbose, pre_dispatch=pre_dispatch,
        error_score=error_score, return_train_score=return_train_score)
    self.save = self._check_save(save, estimator)
def _process_cv(self, cv, random_state):
    """Normalize *cv*: None -> 3 folds, int -> StratifiedKFold, else as-is."""
    folds = 3 if cv is None else cv
    if isinstance(folds, int):
        return StratifiedKFold(n_splits=folds, random_state=random_state)
    return folds
def _check_save(self, save, estimator):
    """Create and return the result directory ``./<save>_<estimator name>``.

    Returns None when saving is disabled (*save* is None).
    """
    if save is None:
        return None
    if isinstance(estimator, BasicModel):
        name = estimator.get_name()
    else:
        name = estimator.__class__.__name__
    path = "./{}_{}".format(save, name)
    try:
        os.makedirs(path)
    except OSError:
        # An already-existing directory is fine; anything else propagates.
        if not os.path.isdir(path):
            raise
    return path
def fit(self, X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
if self.fit_params is not None:
warnings.warn('"fit_params" as a constructor argument was '
'deprecated in version 0.19 and will be removed '
'in version 0.21. Pass fit parameters to the '
'"fit" method instead.', DeprecationWarning)
if fit_params:
warnings.warn('Ignoring fit_params passed as a constructor '
'argument in favor of keyword arguments to '
'the "fit" method.', RuntimeWarning)
else:
fit_params = self.fit_params
estimator = self.estimator
#NEW!!!
if isinstance(self.cv, StratifiedKFold):
self.cv = self.cv.split(X,y)
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(
self.estimator, scoring=self.scoring)
if self.multimetric_:
if self.refit is not False and (
not isinstance(self.refit, six.string_types) or
# This will work for both dict / list (tuple)
self.refit not in scorers):
raise ValueError("For multi-metric scoring, the parameter "
"refit must be set to a scorer key "
"to refit an estimator with the best "
"parameter setting on the whole data and "
"make the best_* attributes "
"available for that metric. If this is not "
"needed, refit should be set to False "
"explicitly. %r was passed." % self.refit)
else:
refit_metric = self.refit
else:
refit_metric = 'score'
X, y, groups = indexable(X, y, groups)
n_splits = cv.get_n_splits(X, y, groups)
# Regenerate parameter iterable for each fit
candidate_params = list(self._get_param_iterator())
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
experiments = list(product(candidate_params,cv.split(X, y, groups)))
print("START: %s" % (self.estimator.__class__.__name__))
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score_save)(clone(base_estimator), X, y, scorers,
experiments[fold][1][0],
| |
<reponame>peigongdh/collection
#! /usr/bin/python
#======================================================================
#
# cavemake.py - cavemake routine
#
# HISTORY:
# May.8 2008 skywind - create this file with some basic interface
# May.19 2008 skywind - add file saving interface
# Apr.25 2008 skywind - add updating iterator
# Oct.8 2008 skywind - add faultage level
# Oct.9 2008 skywind - upgrade point choosing method
#
#======================================================================
import sys, random
# Module-global alias so a different RNG (e.g. a seeded random.Random
# instance's randint) can be swapped in at a single place.
randint = random.randint
def create_matrix(w = 8, h = 8, c = 0):
    """Return an h-by-w matrix (list of row lists) filled with constant c."""
    return [ [ c ] * w for _ in xrange(h) ]
def copy_matrix(m):
    """Return a row-by-row shallow copy of matrix m."""
    return [ list(row) for row in m ]
def matrix_size(m):
    """Return (width, height) of matrix m; (0, 0) for an empty matrix."""
    if not m:
        return 0, 0
    return len(m[0]), len(m)
def copy_to(dst, src):
    """Copy every cell of src into dst in place; dst must cover src's size."""
    w, h = matrix_size(src)
    for j in xrange(h):
        src_row, dst_row = src[j], dst[j]
        for i in xrange(w):
            dst_row[i] = src_row[i]
def print_matrix(m, is_all_char = False):
mlen = 0
for line in m:
for n in line:
s = len(str(n))
mlen = max(mlen, s)
for line in m:
text = ''
for n in line:
result = str(n).rjust(mlen) + ' '
if is_all_char: result = str(n)[:1]
text += result
print text
print ''
class disjointset:
    """Union-find structure with path compression and union by weight."""

    def __init__(self):
        self._parent = {}   # node -> parent node (None for a root)
        self._size = {}     # root -> number of nodes in its tree
        self._count = 0     # total number of distinct nodes registered

    def __len__(self):
        return self._count

    def find(self, x):
        """Return the root representative of x, registering x if unseen.

        find(None) is a no-op that returns None.
        """
        if x is None:
            return None
        if x not in self._parent:
            # First time we see this node: it becomes its own singleton set.
            self._parent[x] = None
            self._size[x] = 1
            self._count += 1
            return x
        # Walk up to the root, remembering the path for compression.
        trail = []
        root = x
        while self._parent[root] is not None:
            trail.append(root)
            root = self._parent[root]
        for node in trail:
            self._parent[node] = root
        return root

    def __getitem__(self, key):
        return self.find(key)

    def weight(self, x):
        """Return the size of the set containing x."""
        return self._size[self.find(x)]

    def clear(self):
        """Forget every node and start empty again."""
        self._parent = {}
        self._size = {}
        self._count = 0

    def union(self, x, y):
        """Merge the sets containing x and y (heavier root absorbs)."""
        root_a = self.find(x)
        root_b = self.find(y)
        if root_a == root_b:
            return
        if self._size[root_a] < self._size[root_b]:
            root_a, root_b = root_b, root_a
        # root_a is now the (weakly) heavier root; attach root_b below it.
        self._size[root_a] += self._size[root_b]
        del self._size[root_b]
        self._parent[root_b] = root_a

    def split(self):
        """Return a dict mapping each root to the list of its members."""
        groups = {}
        for node in self._parent:
            groups.setdefault(self.find(node), []).append(node)
        return groups
class simplebunch:
    """Tiny record type: each keyword argument becomes an instance attribute."""
    def __init__(self, **attrs):
        self.__dict__.update(attrs)
# Squared Euclidean distance between two (y, x) points; no sqrt needed
# since it is only used for comparisons.
lengthof = lambda p1, p2: ((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2)
# Offsets of the 8 neighbours, clockwise starting from due north.
INCX = (0, 1, 1, 1, 0, -1, -1, -1)
INCY = (-1, -1, 0, 1, 1, 1, 0, -1)
# Cell markers used inside the map matrix.
CPERMWALL = 100  # permanent border wall, never carved
CWALL = 110      # regular wall, may be carved into floor
CFLOOR = 200     # walkable floor
class cavemake:
    """Cellular-automaton cave generator producing connected floor areas."""

    def __init__(self, width = 32, height = 20, initial = 0.4):
        """Create a generator for a width x height cave.

        *initial* is the fraction of interior cells initially carved
        to floor when seeding the map.
        """
        self.__width = width
        self.__height = height
        self.__initial = initial
        self.__map = [ [0] * width for _ in xrange(height) ]
        self.__ds = disjointset()
        self.edges = []
        self.roomset = []
def clear(self):
    """Reset the map to all walls with a permanent-wall border."""
    m = self.__map
    w, h = self.__width, self.__height
    for row in m:
        for i in xrange(w):
            row[i] = CWALL
    for j in xrange(h):
        m[j][0] = CPERMWALL
        m[j][w - 1] = CPERMWALL
    for i in xrange(w):
        m[0][i] = CPERMWALL
        m[h - 1][i] = CPERMWALL
    self.roomset = []
    self.edges = []
    self.__ds.clear()
def print_cave(self):
m = self.__map
for j in xrange(self.__height):
row = ''
for n in m[j]:
if n == CFLOOR: row += '.'
elif n == CWALL or n == CPERMWALL: row += '#'
elif (n >= 0) and (n <= 9): row += str(n)
else: row += 'X'
print row
print
def __getitem__ (self, row):
    """Return the row'th map row; raise KeyError when out of range."""
    if 0 <= row < self.__height:
        return self.__map[row]
    raise KeyError("row out of range")
def __iter__ (self):
    """Iterate over the map's rows."""
    return iter(self.__map)
def dump(self):
    """Return the internal map matrix itself (not a copy)."""
    return self.__map
def pass_ca(self, no_new_wall = False, keep = False, use_backup = False):
    """Run one cellular-automaton smoothing pass over the interior cells.

    A wall cell with fewer than 4 wall neighbours becomes floor; a floor
    cell with more than 5 wall neighbours becomes wall (suppressed when
    *no_new_wall* is set).  With *use_backup* the neighbour counts are
    read from a snapshot so the pass does not see its own writes.  With
    *keep*, a floor cell may only turn into wall when its floor
    neighbours form one contiguous run around it.
    """
    m = self.__map
    incx = (0, 1, 1, 1, 0, -1, -1, -1)
    incy = (-1, -1, 0, 1, 1, 1, 0, -1)
    n = m
    if use_backup:
        # Frozen copy: neighbour counts come from the pre-pass state.
        n = [ [ x for x in line ] for line in m ]
    neighbor = [ CFLOOR for x in xrange(8) ]
    for y in xrange(1, self.__height - 1):
        for x in xrange(1, self.__width - 1):
            wall_count = 0
            for d in xrange(8):
                neighbor[d] = n[y + incy[d]][x + incx[d]]
                if neighbor[d] != CFLOOR:
                    wall_count += 1
            # adjacence = length of the contiguous floor run starting at
            # the first floor neighbour found; when all floors form one
            # arc, adjacence + wall_count == 8.
            adjacence = 0
            for d in xrange(8):
                if neighbor[d] == CFLOOR: break
            if d < 8:
                # NOTE(review): after a full non-breaking loop d ends at 7,
                # so this test is always true — presumably meant to detect
                # "no floor neighbour found"; harmless, since adjacence
                # then stays 0 and wall_count is 8. Confirm intent.
                for step in xrange(8):
                    if neighbor[(d + step) & 7] != CFLOOR:
                        break
                    adjacence += 1
            canwall = False
            if (adjacence + wall_count == 8) or (not keep):
                canwall = True
            if (wall_count < 4) and (m[y][x] == CWALL):
                m[y][x] = CFLOOR
            elif (wall_count > 5) and (m[y][x] == CFLOOR):
                if (not no_new_wall) and canwall:
                    m[y][x] = CWALL
def initialize(self):
    """Seed the cleared map with randomly carved floor cells.

    Carves roughly width*height*initial cells, but gives up after a
    bounded number of random attempts so the loop always terminates.
    """
    m = self.__map
    remaining = int(self.__width * self.__height * self.__initial)
    attempts = self.__width * self.__height * 2
    self.clear()
    while remaining > 0:
        x = randint(1, self.__width - 2)
        y = randint(1, self.__height - 2)
        if m[y][x] == CWALL:
            m[y][x] = CFLOOR
            remaining -= 1
        attempts -= 1
        if attempts <= 0:
            break
def __search_rooms(self):
    """Group connected floor cells into rooms via the disjoint set.

    Orthogonal neighbours always connect; a diagonal neighbour connects
    only when one of the two adjacent orthogonal cells is floor too, so
    rooms cannot merge through a wall corner.
    """
    ds = self.__ds
    m = self.__map
    ds.clear()
    for y in xrange(1, self.__height - 1):
        for x in xrange(1, self.__width - 1):
            if m[y][x] != CFLOOR:
                continue
            root = ds[(y, x)]
            if m[y][x + 1] == CFLOOR:
                ds.union(root, (y, x + 1))
            if m[y + 1][x] == CFLOOR:
                ds.union(root, (y + 1, x))
            if m[y + 1][x + 1] == CFLOOR and (
                    m[y][x + 1] == CFLOOR or m[y + 1][x] == CFLOOR):
                ds.union(root, (y + 1, x + 1))
            if m[y + 1][x - 1] == CFLOOR and (
                    m[y][x - 1] == CFLOOR or m[y + 1][x] == CFLOOR):
                ds.union(root, (y + 1, x - 1))
def __choose_dir(self, src, dest, noisy = True):
    """Return a (dy, dx) unit step from src toward dest.

    When *noisy* and the step would be diagonal, one axis is randomly
    zeroed so carved paths wander instead of moving straight diagonals.
    """
    def _sign(a, b):
        if a < b:
            return 1
        if a > b:
            return -1
        return 0
    incy = _sign(src[0], dest[0])
    incx = _sign(src[1], dest[1])
    if noisy and incx != 0 and incy != 0:
        if randint(0, 1) == 0:
            incx = 0
        else:
            incy = 0
    return incy, incx
def __join_room(self, pt, destpt):
    """Carve a noisy corridor from pt to destpt, union-ing sets on the way.

    Returns the (root_a, root_b) pair of disjoint-set roots recorded when
    the corridor terminated — either by reaching destpt or by hitting
    floor that belongs to a different room.
    """
    ds = self.__ds
    m = self.__map
    pair = None
    while True:
        # Step one cell toward the destination (randomly axis-aligned).
        incy, incx = self.__choose_dir(pt, destpt, True)
        npt = (pt[0] + incy, pt[1] + incx)
        root = ds[pt]
        need_stop = False
        if npt == destpt:
            need_stop = True
            pair = (root, ds[destpt])
        elif m[npt[0]][npt[1]] == CFLOOR:
            # Walked onto existing floor of another room: stop and
            # record the joined pair.
            if ds[npt] != root:
                pair = (root, ds[npt])
                need_stop = True
        m[npt[0]][npt[1]] = CFLOOR
        ds.union(root, ds[npt])
        if randint(0, 1) == 0:
            # Occasionally widen the corridor by also carving a random
            # neighbour of the current cell.
            r = randint(0, 7)
            noisy = pt[0] + INCY[r], pt[1] + INCX[r]
            if m[noisy[0]][noisy[1]] == CWALL:
                m[noisy[0]][noisy[1]] = CFLOOR
                ds.union(root, ds[noisy])
            pass
        pt = npt
        if need_stop: break
    return pair
def __update_rooms(self):
    """Rebuild room bookkeeping; return a {root: simplebunch} mapping.

    Each room records its member points (pts), its size, and a 'center':
    the member point closest to the middle of the room's bounding box.
    """
    self.__search_rooms()
    components = self.__ds.split()
    roomset = {}
    for root, pts in components.items():
        ys = [p[0] for p in pts]
        xs = [p[1] for p in pts]
        cx = (min(xs) + max(xs) + 1) * 0.5
        cy = (min(ys) + max(ys) + 1) * 0.5
        # First member point with minimal squared distance to the
        # bounding-box midpoint becomes the room center.
        center = min(pts, key = lambda p: (cy - p[0]) ** 2 + (cx - p[1]) ** 2)
        room = simplebunch(root = root, pts = pts, size = len(pts))
        room.center = center
        roomset[root] = room
    return roomset
def __choose_entrance(self):
    """Pick an entrance cell on the border, biased away from the rooms.

    Ranks border cells by their summed squared distance to all room
    centers and picks randomly among the farther half.
    """
    roomset = self.__update_rooms()
    border = []
    for i in xrange(1, self.__width - 1):
        border.append((1, i))
        border.append((self.__height - 2, i))
    for i in xrange(1, self.__height - 1):
        border.append((i, 1))
        border.append((i, self.__width - 2))
    scored = []
    for pt in border:
        total = 0
        for key in roomset:
            total += lengthof(roomset[key].center, pt)
        scored.append((total, pt))
    scored.sort()
    scored.reverse()
    if len(scored) < 4:
        return scored[0][1]
    choice = randint(0, int(len(scored) / 2))
    return scored[choice][1]
def open_area(self, entrance):
    """Carve the 3x3 neighbourhood around entrance to floor (map-clipped)."""
    ey, ex = entrance[0], entrance[1]
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            y, x = ey + dy, ex + dx
            if 0 <= y < self.__height and 0 <= x < self.__width:
                self.__map[y][x] = CFLOOR
def generate(self, printraw = False, FLOORLST = [], iter = 0):
    """Generate a cave: seed, smooth, then join all rooms into one.

    printraw prints the raw map after smoothing; FLOORLST is a list of
    (y, x) cells forced to floor; iter + 1 join passes are run and their
    carved floors merged into the final map.

    NOTE(review): the mutable default FLOORLST=[] is never mutated here
    and 'iter' shadows the builtin; both are part of the public signature
    and left as-is.
    """
    self.initialize()
    #self.print_cave()
    self.pass_ca(use_backup = False)
    #self.print_cave()
    entrance = self.__choose_entrance()
    # NOTE(review): the chosen entrance is immediately overwritten with a
    # fixed position; the call above matters only for its side effects.
    entrance = self.__height - 2, int(self.__width * 3 / 4)
    self.entrance = entrance
    #self.__map[entrance[0]][entrance[1]] = CFLOOR
    try:
        # Force caller-requested cells to floor; bad input is ignored.
        for y, x in FLOORLST:
            self.__map[y][x] = CFLOOR
    except: pass
    if printraw:
        self.print_cave()
    m_original = copy_matrix(self.__map)
    m_result = copy_matrix(self.__map)
    count = 4;  # NOTE(review): unused leftover variable
    for ii in xrange(iter + 1):
        # Each pass restarts from the smoothed original and accumulates
        # the corridors it carves into m_result.
        self.clear()
        copy_to(self.__map, m_original)
        self.roomset = self.__update_rooms()
        while True:
            roomset = self.__update_rooms()
            if len(roomset) <= 1: break
            roomlst = roomset.keys()
            # All room pairs, ordered by squared center distance.
            pair_list = []
            for i in xrange(len(roomlst) - 1):
                rooma = roomset[roomlst[i]]
                for j in xrange(i + 1, len(roomlst)):
                    roomb = roomset[roomlst[j]]
                    dist = lengthof(rooma.center, roomb.center)
                    pair_list.append((dist, (rooma, roomb)))
            pair_list.sort()
            limit = 500
            index = 0
            # NOTE(review): with limit starting at 500 this loop always
            # breaks on its first test and index stays 0 — looks like an
            # unfinished randomized pair selection; confirm intent.
            while True:
                if index >= len(pair_list) - 1: break
                if randint(0, 100) < limit: break
                limit += 10
            rooma, roomb = pair_list[0][1]
            #print rooma.root, roomb.root
            #pair = self.__join_room(rooma.root, roomb.root)
            # Join random member points of the two closest rooms.
            point1 = rooma.pts[random.randint(0, rooma.size - 1)]
            point2 = roomb.pts[random.randint(0, roomb.size - 1)]
            pair = self.__join_room(point1, point2)
            self.edges.append(pair)
        #self.print_cave()
        #self.__open_entrance(entrance)
        #self.pass_ca(True)
        m = self.__map
        for j in xrange(self.__height):
            for i in xrange(self.__width):
                if m[j][i] == CFLOOR:
                    m_result[j][i] = CFLOOR
    copy_to(self.__map, m_result)
    #self.pass_ca(False, True)
    self.pass_ca(True)
    return 0
def faultage(self, level = 2, ratio = 0.3):
m_original = copy_matrix(self.__map)
level = level < 8 and level or 8
for i in xrange(level):
count = 0
m = self.__map
for line in m:
for n in line:
if n == CWALL: count += 1
count = int(count * ratio)
limit = self.__width * self.__height * 3
while (limit > 0) and (count > 0):
x = randint(0, self.__width - 1)
y = randint(0, self.__height - 1)
if m[y][x] in (CWALL, CPERMWALL):
m[y][x] = CFLOOR
count -= 1
limit -= 1
self.pass_ca(use_backup = False)
#self.print_cave()
for y in xrange(self.__height):
for x in xrange(self.__width):
if m_original[y][x] in (CWALL, CPERMWALL):
m_original[y][x] = 1
if m_original[y][x] >= 1 and m_original[y][x] <= 9:
if m[y][x] in (CWALL, CPERMWALL):
m_original[y][x] += 1
elif m_original[y][x] == | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
# import rasahub_google_calendar
from time import gmtime, time, strftime
import json
import locale
import logging
import math
import mysql.connector
from mysql.connector import errorcode
import os
import string
import random
import re
import yaml
from nltk.stem.snowball import SnowballStemmer
import httplib2
# Module-wide German Snowball stemmer instance.
stemmer = SnowballStemmer("german")
logger = logging.getLogger(__name__)
# Global development switch; nothing in this excerpt toggles it.
offlinemode = False
# German locale so locale-aware date formatting uses German names.
locale.setlocale(locale.LC_ALL, "de_DE.utf8")
class NotAuthenticatedError(Exception):
    """Raised whenever a Google user is not properly authenticated."""

    def __init__(self):
        """Initialize with the fixed error message."""
        self.msg = "Not Authenticated"

    def __str__(self):
        """Return the error message.

        :return: Error message
        :rtype: str
        """
        return self.msg
def connectToDB(dbHost, dbName, dbPort, dbUser, dbPwd):
    """Open a MySQL connection to the given database.

    On failure a diagnostic is printed and None is returned (no raise).

    :param dbHost: database host address
    :param dbName: database name
    :param dbPort: database host port
    :param dbUser: database username
    :param dbPwd: database password
    :return: MySQLConnection instance, or None on error
    :rtype: MySQLConnection
    """
    try:
        return mysql.connector.connect(user=dbUser, port=int(dbPort),
                                       password=dbPwd, host=dbHost,
                                       database=dbName, autocommit=True)
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
def getBotID(cursor):
    """Return a bot user id: the highest user_id in the Humhub 'Bots' group.

    :return: Bots Humhub User ID
    :rtype: int
    """
    sql = "SELECT `user_id` FROM `group` JOIN `group_user` ON `group`.`id` = `group_user`.`group_id` WHERE `group`.`name` = 'Bots' ORDER BY user_id DESC LIMIT 1;"
    cursor.execute(sql)
    row = cursor.fetchone()
    return row[0]
def getNextID(cursor, current_id, bot_id, trigger):
    """Return the id of the next message entry to process.

    Looks for entries newer than current_id that were not written by the
    bot and that either start with *trigger* or belong to a conversation
    the bot participates in; falls back to current_id when none exist.

    :return: Next message ID to process
    :rtype: int
    """
    query = ("SELECT id FROM message_entry WHERE user_id <> %(bot_id)s AND (content LIKE %(trigger)s OR message_entry.message_id IN "
             "(SELECT DISTINCT message_entry.message_id FROM message_entry JOIN user_message "
             "ON message_entry.message_id=user_message.message_id WHERE user_message.user_id = 5 ORDER BY message_entry.message_id)) "
             "AND id > %(current_id)s ORDER BY id ASC")
    params = {
        'bot_id': bot_id,
        'trigger': trigger + '%',  # append SQL LIKE wildcard
        'current_id': current_id,
    }
    cursor.execute(query, params)
    rows = cursor.fetchall()
    if rows:
        # Rows come back as 1-tuples; take the first (lowest) id.
        return rows[0][0]
    return current_id
def getMessage(cursor, msg_id, trigger):
    """Fetch message entry *msg_id* and strip a leading trigger word.

    :returns: dict with 'message' (trigger removed, surrounding
        whitespace stripped) and 'message_id' (conversation id)
    :rtype: dict
    """
    # BUG FIX: the id was interpolated with str.format, which is an SQL
    # injection vector; use driver-side parameter binding instead.
    # NOTE(review): user_id <> 5 hard-codes the bot id — presumably this
    # should use getBotID(); confirm before changing the query.
    query = "SELECT message_id, content FROM message_entry WHERE (user_id <> 5 AND id = %s)"
    cursor.execute(query, (msg_id,))
    message_id, content = cursor.fetchone()
    if content[:len(trigger)] == trigger:
        message = content[len(trigger):].strip()
    else:
        message = content.strip()
    return {
        'message': message,
        'message_id': message_id,
    }
def create_new_conversation(cursor, title, message, user_id, bot_id):
    """Create a new Humhub conversation containing one message.

    :param cursor: MySQL Cursor for database processes
    :param str title: Title of conversation
    :param str message: First message of created conversation
    :param int user_id: Humhub User ID to create conversation with
    :param int bot_id: User ID to use for the bot
    """
    # BUG FIX: the old code interpolated title/message into the SQL
    # unquoted (broken statements, injection risk) and the third INSERT
    # listed 5 columns but only 4 placeholders, dropping updated_by.
    # All three statements now use parameter binding.
    query = "INSERT INTO message (title, created_by, updated_by) VALUES (%s, %s, %s)"
    cursor.execute(query, (title, bot_id, bot_id))
    message_id = cursor.lastrowid
    query = "INSERT INTO user_message (message_id, user_id, created_by, updated_by) VALUES (%s, %s, %s, %s)"
    cursor.execute(query, (message_id, user_id, bot_id, bot_id))
    query = "INSERT INTO message_entry (message_id, user_id, content, created_by, updated_by) VALUES (%s, %s, %s, %s, %s)"
    cursor.execute(query, (message_id, user_id, message, bot_id, bot_id))
def check_google_access(message_id, cursor, bot_id):
    """Check Google calendar access for every user in a conversation.

    Returns True when all participants are authenticated; otherwise an
    auth link is sent to the first unauthenticated user and [] (falsy)
    is returned.

    :param int message_id: ID of message
    :param cursor: MySQL Cursor for database processes
    :param bot_id: Humhub User ID of bot to exclude from calendar
    """
    users = getUsersInConversation(cursor, message_id, bot_id)
    for user_id in users:
        try:
            # Probe calendar access; the returned items are not needed.
            get_google_calendar_items(user_id)
        except Exception:
            # BUG FIX: was send_auth(...), which is not defined anywhere in
            # this module — the helper defined here is send_auth_link.
            send_auth_link(cursor, user_id, bot_id)
            return []
    return True
def getCurrentID(cursor):
    """Return the largest id currently present in message_entry.

    :return: Current max message ID
    :rtype: int
    """
    cursor.execute("SELECT MAX(id) FROM message_entry;")
    return cursor.fetchone()[0]
def send_auth_link(cursor, user_id, bot_id):
    """Open a new conversation asking *user_id* to authenticate with Google.

    :param cursor: MySQL Cursor for database processes
    :param user_id: Humhub User ID to send URL to
    :param bot_id: Humhub User ID of the bot sending the message
    """
    create_new_conversation(
        cursor,
        "Bitte authentifizieren Sie sich",
        "http://localhost:8080/" + str(user_id),
        user_id,
        bot_id)
def getUsersInConversation(cursor, sender_id, bot_id):
    """Return the Humhub user ids in a conversation, excluding the bot.

    :param cursor: Mysql Cursor
    :type cursor: mysql.connector.cursor.MySQLCursor
    :param sender_id: Humhub conversation (message) id
    :type sender_id: int
    :param bot_id: Bot Humhub User ID
    :type bot_id: int
    :return: List of user ids in the conversation
    :rtype: list
    """
    # NOTE(review): sender_id is interpolated into the SQL; fine while it
    # only ever comes from the database itself, but parameter binding
    # would be safer.
    query = ("""SELECT user_id FROM user_message WHERE message_id = {}
        """).format(sender_id)
    cursor.execute(query)
    users = []
    for row in cursor:
        # BUG FIX: rows are 1-tuples; the old code compared the tuple
        # itself against the int bot_id, so the bot was never excluded.
        if row[0] != bot_id:
            users.append(row[0])
    return users
def getCalendar(user_id, date, cursor):
    """Return *user_id*'s busy pattern for *date* from Google calendar.

    Sends an authentication link and raises NotAuthenticatedError when
    the user's Google account is not authenticated.

    :param user_id: Humhub user ID to get the calendar information from
    :type user_id: int
    :param date: Specific date to get the calendar information
    :type date: datetime
    :param cursor: Mysql Cursor
    :type cursor: mysql.connector.cursor.MySQLCursor
    :return: Calendar pattern with set busy dates of user_id
    :rtype: dict
    """
    # Removed: startdate/enddate locals and the large commented-out
    # Humhub SQL query — they fed only dead code (the query was disabled
    # when the Google-calendar lookup replaced it).
    calendarPattern = createCalendarPattern()
    try:
        dates = get_google_calendar_items(user_id)
    except Exception:
        # Not authenticated: ask the user to (re)authenticate first.
        bot_id = getBotID(cursor)
        send_auth_link(cursor, user_id, bot_id)
        raise NotAuthenticatedError
    return setBusyDates(calendarPattern, dates)
def setBusyDates(calendarPattern, dates):
    """Mark busy slots in a calendar pattern from appointment data.

    The pattern is indexed as [hour][quarter-hour] and busy slots are
    set to 1 in place.

    :param calendarPattern: Blank calendar pattern
    :type calendarPattern: array
    :param dates: appointments; each value carries 'start'/'end' ISO strings
    :type dates: dict

    NOTE(review): `strptime` is called unqualified below, but only
    `from datetime import datetime` is imported at module level, so this
    raises NameError at runtime — presumably datetime.strptime was
    intended; confirm and fix at the call sites.
    """
    # Google Edition
    for appointment in dates:
        start = dates[appointment]['start']  # format: 2018-05-24T17:00:00
        end = dates[appointment]['end']
        start_datetime = strptime(start, "%Y-%m-%dT%H:%M:%S")
        end_datetime = strptime(end, "%Y-%m-%dT%H:%M:%S")
        # convert minute to array index, round down as its starting time
        startIndex = int(float(start_datetime.minute) / 15.)
        # end minute index is round up
        endIndex = int(math.ceil(float(end_datetime.minute) / 15.))
        endAtZero = False
        if endIndex == 0:
            endAtZero = True
        else:
            endIndex -= 1  # correct index for all cases except 0
        # mark every quarter-hour slot between start and end as busy
        for i in range(start_datetime.hour, end_datetime.hour + 1):
            if start_datetime.hour == end_datetime.hour:
                for j in range(startIndex, endIndex + 1):
                    calendarPattern[i][j] = 1
                break
            # three cases: i = start.hour, i = end.hour or between
            if i == start_datetime.hour:
                # only mark from startIndex to 3
                for j in range(startIndex, 4):
                    calendarPattern[i][j] = 1
            elif i == end_datetime.hour:
                if endAtZero:
                    break
                # only mark from 0 to endIndex
                for j in range(endIndex + 1):
                    calendarPattern[i][j] = 1
            else:
                # whole hour busy
                for j in range(0, 4):
                    calendarPattern[i][j] = 1
# Humhub Edition
#for (start_datetime, end_datetime) in cursor:
# # convert minute to array index, round down as its starting time
# startIndex = int(float(start_datetime.minute) / 15.)
# # end minute index is round up
# endIndex = int(math.ceil(float(end_datetime.minute) / 15.))
# endAtZero = False
# if endIndex == 0:
# endAtZero = True
# else:
# endIndex -= 1 # correct index for all cases except 0
# # set all patterns to 0 between start and end indezes
# for i in range(start_datetime.hour, end_datetime.hour + 1):
# if start_datetime.hour == end_datetime.hour:
# for j in range(startIndex, endIndex + 1):
# calendarPattern[i][j] = 1
# break
# # three cases: i = start.hour, i = end.hour or between
# if i == start_datetime.hour:
# # only set to 0 beginning from startIndex to 3
# for j in range(startIndex, 4):
# calendarPattern[i][j] = 1
# elif i == end_datetime.hour:
# if endAtZero:
# break
# # only set to | |
if not cartopy_enabled():
raise RuntimeError("'cartopy' is not "
"installed or is disabled")
return self._cartopy()
def pyngl(self, geobounds, **kwargs):
    """Return a :class:`Ngl.Resources` object for the map projection.

    Args:
        geobounds (:class:`wrf.GeoBounds`, optional): The geobounds to
            get the extents.  If set to None and using the *var*
            parameter, the geobounds will be taken from the variable.
            If using a file, then the geobounds will be taken from the
            native grid.
        **kwargs: Additional PyNGL resources to set while creating the
            :class:`Ngl.Resources` object.

    Returns:
        :class:`Ngl.Resources`: A dict-like object that contains the
        PyNGL resources for the map projection.

    See Also:
        `PyNGL <https://www.pyngl.ucar.edu/>`_
    """
    if pyngl_enabled():
        return self._pyngl(geobounds, **kwargs)
    raise RuntimeError("'pyngl' is not installed or is disabled")
def proj4(self):
    """Return the PROJ.4 string for the map projection.

    Returns:
        :obj:`str`: A string suitable for use with the PROJ.4 library.

    See Also:
        `PROJ.4 <https://trac.osgeo.org/proj/>`_
    """
    return self._proj4()
def cf(self):
    """Return a dictionary of NetCDF CF parameters for the projection.

    Returns:
        :obj:`dict`: NetCDF CF parameter names mapped to projection
        parameter values.
    """
    params = self._cf_params()
    return params
# Used for 'missing' projection values during the 'join' method
class NullProjection(WrfProj):
    """A :class:`wrf.WrfProj` subclass for empty projections.

    Primarily used as a placeholder for missing projections when using
    the 'join' method.
    """

    def __init__(self):
        """Initialize a :class:`wrf.NullProjection` object."""
        pass

    def __repr__(self):
        return self.__class__.__name__ + "()"
class LambertConformal(WrfProj):
    """A :class:`wrf.WrfProj` subclass for Lambert Conformal Conic projections.

    See Also:
        :class:`wrf.WrfProj`, :class:`wrf.LatLon`,
        :class:`wrf.PolarStereographic`,
        :class:`Mercator`, :class:`RotatedLatLon`
    """
def __init__(self, **proj_params):
    """Initialize a :class:`wrf.LambertConformal` object.

    Args:
        **proj_params: Map projection optional keyword arguments, that
            have the same names as found in WRF output NetCDF global
            attributes:

            - 'TRUELAT1': True latitude 1.
            - 'TRUELAT2': True latitude 2.
            - 'MOAD_CEN_LAT': Mother of all domains center latitude.
            - 'STAND_LON': Standard longitude.
            - 'POLE_LAT': Pole latitude.
            - 'POLE_LON': Pole longitude.
    """
    super(LambertConformal, self).__init__(**proj_params)
    # Use one or two standard parallels depending on whether TRUELAT2
    # was provided.
    self._std_parallels = ([self.truelat1] if self.truelat2 is None
                           else [self.truelat1, self.truelat2])
def _cf_params(self):
_cf_params = {}
_cf_params["grid_mapping_name"] = "lambert_conformal_conic";
_cf_params["standard_parallel"] = self._std_parallels
_cf_params["longitude_of_central_meridian"] = self.stand_lon
_cf_params["latitude_of_projection_origin"] = self.moad_cen_lat
_cf_params["semi_major_axis"] = Constants.WRF_EARTH_RADIUS
return _cf_params
def _pyngl(self, geobounds, **kwargs):
if not pyngl_enabled():
return None
truelat2 = (self.truelat1
if _ismissing(self.truelat2)
else self.truelat2)
_pyngl = Resources()
_pyngl.mpProjection = "LambertConformal"
_pyngl.mpDataBaseVersion = "MediumRes"
_pyngl.mpLambertMeridianF = self.stand_lon
_pyngl.mpLambertParallel1F = self.truelat1
_pyngl.mpLambertParallel2F = truelat2
_pyngl.mpLimitMode = "Corners"
_pyngl.mpLeftCornerLonF = geobounds.bottom_left.lon
_pyngl.mpLeftCornerLatF = geobounds.bottom_left.lat
_pyngl.mpRightCornerLonF = geobounds.top_right.lon
_pyngl.mpRightCornerLatF = geobounds.top_right.lat
for key, val in viewitems(kwargs):
setattr(_pyngl, key, val)
return _pyngl
def _basemap(self, geobounds, **kwargs):
if not basemap_enabled():
return None
local_kwargs = dict(projection = "lcc",
lon_0 = self.stand_lon,
lat_0 = self.moad_cen_lat,
lat_1 = self.truelat1,
lat_2 = self.truelat2,
llcrnrlat = geobounds.bottom_left.lat,
urcrnrlat = geobounds.top_right.lat,
llcrnrlon = geobounds.bottom_left.lon,
urcrnrlon = geobounds.top_right.lon,
rsphere = Constants.WRF_EARTH_RADIUS,
resolution = 'l')
local_kwargs.update(kwargs)
_basemap = Basemap(**local_kwargs)
return _basemap
def _cartopy(self):
if not cartopy_enabled():
return None
# Set cutoff to -30 for NH, +30.0 for SH.
cutoff = -30.0 if self.moad_cen_lat >= 0 else 30.0
_cartopy = crs.LambertConformal(
central_longitude = self.stand_lon,
central_latitude = self.moad_cen_lat,
standard_parallels = self._std_parallels,
globe = self._globe(),
cutoff = cutoff)
return _cartopy
def _proj4(self):
truelat2 = (self.truelat1
if _ismissing(self.truelat2)
else self.truelat2)
_proj4 = ("+proj=lcc +units=meters +a={} +b={} +lat_1={} "
"+lat_2={} +lat_0={} +lon_0={}".format(
Constants.WRF_EARTH_RADIUS,
Constants.WRF_EARTH_RADIUS,
self.truelat1,
truelat2,
self.moad_cen_lat,
self.stand_lon))
return _proj4
class Mercator(WrfProj):
    """A :class:`wrf.WrfProj` subclass for Mercator projections.

    See Also:
        :class:`wrf.WrfProj`, :class:`wrf.LatLon`,
        :class:`wrf.PolarStereographic`,
        :class:`RotatedLatLon`, :class:`LambertConformal`
    """
    def __init__(self, **proj_params):
        """Initialize a :class:`wrf.Mercator` object.

        Args:
            **proj_params: Map projection optional keyword arguments, that
                have the same names as found in WRF output NetCDF global
                attributes:

                - 'TRUELAT1': True latitude 1.
                - 'TRUELAT2': True latitude 2.
                - 'MOAD_CEN_LAT': Mother of all domains center latitude.
                - 'STAND_LON': Standard longitude.
                - 'POLE_LAT': Pole latitude.
                - 'POLE_LON': Pole longitude.
        """
        super(Mercator, self).__init__(**proj_params)
        # Latitude of true scale; None when TRUELAT1 is zero or missing.
        self._lat_ts = (None
                        if self.truelat1 == 0. or _ismissing(self.truelat1)
                        else self.truelat1)

    def _cf_params(self):
        """Return the NetCDF CF parameters for this projection."""
        _cf_params = {}
        _cf_params["grid_mapping_name"] = "mercator"
        _cf_params["longitude_of_projection_origin"] = self.stand_lon
        _cf_params["standard_parallel"] = self.truelat1
        return _cf_params

    def _pyngl(self, geobounds, **kwargs):
        """Return PyNGL resources for this projection, or None when PyNGL
        is not available."""
        if not pyngl_enabled():
            return None
        _pyngl = Resources()
        _pyngl.mpProjection = "Mercator"
        _pyngl.mpDataBaseVersion = "MediumRes"
        _pyngl.mpCenterLatF = 0.0
        _pyngl.mpCenterLonF = self.stand_lon
        _pyngl.mpLimitMode = "Corners"
        _pyngl.mpLeftCornerLonF = geobounds.bottom_left.lon
        _pyngl.mpLeftCornerLatF = geobounds.bottom_left.lat
        _pyngl.mpRightCornerLonF = geobounds.top_right.lon
        _pyngl.mpRightCornerLatF = geobounds.top_right.lat
        # User-supplied resources override the defaults set above.
        for key, val in viewitems(kwargs):
            setattr(_pyngl, key, val)
        return _pyngl

    def _basemap(self, geobounds, **kwargs):
        """Return a matplotlib Basemap for this projection, or None when
        basemap is not available."""
        if not basemap_enabled():
            return None
        local_kwargs = dict(projection = "merc",
                            lon_0 = self.stand_lon,
                            lat_0 = self.moad_cen_lat,
                            lat_ts = self._lat_ts,
                            llcrnrlat = geobounds.bottom_left.lat,
                            urcrnrlat = geobounds.top_right.lat,
                            llcrnrlon = geobounds.bottom_left.lon,
                            urcrnrlon = geobounds.top_right.lon,
                            rsphere = Constants.WRF_EARTH_RADIUS,
                            resolution = "l")
        local_kwargs.update(kwargs)
        _basemap = Basemap(**local_kwargs)
        return _basemap

    def _cartopy(self):
        """Return a cartopy CRS for this projection, or None when cartopy
        is not available."""
        if not cartopy_enabled():
            return None
        # BUGFIX: __init__ stores None (never 0.0) when TRUELAT1 is zero or
        # missing, so the previous `self._lat_ts == 0.0` test could never be
        # true and MercatorWithLatTS was constructed with
        # latitude_true_scale=None.  Treat None the same as 0.0 here.
        if self._lat_ts is None or self._lat_ts == 0.0:
            _cartopy = crs.Mercator(
                central_longitude = self.stand_lon,
                globe = self._globe())
        else:
            _cartopy = MercatorWithLatTS(
                central_longitude = self.stand_lon,
                latitude_true_scale = self._lat_ts,
                globe = self._globe())
        return _cartopy

    def _proj4(self):
        """Return the PROJ.4 initialization string for this projection.

        NOTE(review): when ``self._lat_ts`` is None this emits
        ``+lat_ts=None``, which PROJ will not parse — confirm whether
        callers ever hit this path.
        """
        _proj4 = ("+proj=merc +units=meters +a={} +b={} "
                  "+lon_0={} +lat_ts={}".format(
                      Constants.WRF_EARTH_RADIUS,
                      Constants.WRF_EARTH_RADIUS,
                      self.stand_lon,
                      self._lat_ts))
        return _proj4
class PolarStereographic(WrfProj):
    """A :class:`wrf.WrfProj` subclass for Polar Stereographic projections.

    See Also:
        :class:`wrf.WrfProj`, :class:`wrf.LatLon`,
        :class:`wrf.RotatedLatLon`,
        :class:`Mercator`, :class:`LambertConformal`
    """
    def __init__(self, **proj_params):
        """Initialize a :class:`wrf.PolarStereographic` object.

        Args:
            **proj_params: Map projection optional keyword arguments, that
                have the same names as found in WRF output NetCDF global
                attributes:

                - 'TRUELAT1': True latitude 1.
                - 'TRUELAT2': True latitude 2.
                - 'MOAD_CEN_LAT': Mother of all domains center latitude.
                - 'STAND_LON': Standard longitude.
                - 'POLE_LAT': Pole latitude.
                - 'POLE_LON': Pole longitude.
        """
        super(PolarStereographic, self).__init__(**proj_params)
        # Pole of projection: south pole for negative TRUELAT1.
        self._hemi = -90. if self.truelat1 < 0 else 90.
        # Latitude of true scale; None when TRUELAT1 is missing.
        self._lat_ts = (None
                        if _ismissing(self.truelat1)
                        else self.truelat1)

    def _cf_params(self):
        """Return the NetCDF CF parameters for this projection."""
        _cf_params = {}
        _cf_params["grid_mapping_name"] = "polar_stereographic"
        _cf_params["straight_vertical_longitude_from_pole"] = (
            self.stand_lon)
        _cf_params["standard_parallel"] = self.truelat1
        _cf_params["latitude_of_projection_origin"] = self._hemi
        return _cf_params

    def _pyngl(self, geobounds, **kwargs):
        """Return PyNGL resources for this projection, or None when PyNGL
        is not available."""
        if not pyngl_enabled():
            return None
        _pyngl = Resources()
        _pyngl.mpProjection = "Stereographic"
        _pyngl.mpDataBaseVersion = "MediumRes"
        _pyngl.mpCenterLonF = self.stand_lon
        # Center on the pole of the projection's hemisphere.
        if self._hemi > 0:
            _pyngl.mpCenterLatF = 90.0
        else:
            _pyngl.mpCenterLatF = -90.0
        _pyngl.mpLimitMode = "Corners"
        _pyngl.mpLeftCornerLonF = geobounds.bottom_left.lon
        _pyngl.mpLeftCornerLatF = geobounds.bottom_left.lat
        _pyngl.mpRightCornerLonF = geobounds.top_right.lon
        _pyngl.mpRightCornerLatF = geobounds.top_right.lat
        # User-supplied resources override the defaults set above.
        for key, val in viewitems(kwargs):
            setattr(_pyngl, key, val)
        return _pyngl

    def _basemap(self, geobounds, **kwargs):
        """Return a matplotlib Basemap for this projection, or None when
        basemap is not available."""
        if not basemap_enabled():
            return None
        local_kwargs = dict(projection = "stere",
                            lon_0 = self.stand_lon,
                            lat_0 = self._hemi,
                            lat_ts = self._lat_ts,
                            llcrnrlat = geobounds.bottom_left.lat,
                            urcrnrlat = geobounds.top_right.lat,
                            llcrnrlon = geobounds.bottom_left.lon,
                            urcrnrlon = geobounds.top_right.lon,
                            rsphere = Constants.WRF_EARTH_RADIUS,
                            resolution = "l")
        local_kwargs.update(kwargs)
        _basemap = Basemap(**local_kwargs)
        return _basemap

    def _cartopy(self):
        """Return a cartopy CRS for this projection, or None when cartopy
        is not available."""
        if not cartopy_enabled():
            return None
        _cartopy = crs.Stereographic(central_latitude=self._hemi,
                                     central_longitude=self.stand_lon,
                                     true_scale_latitude=self._lat_ts,
                                     globe=self._globe())
        return _cartopy

    def _proj4(self):
        """Return the PROJ.4 initialization string for this projection."""
        # BUGFIX: PROJ's parameter is '+lat_0', not '+lat0'; the malformed
        # name made the generated string invalid for the PROJ library.
        _proj4 = ("+proj=stere +units=meters +a={} +b={} "
                  "+lat_0={} +lon_0={} +lat_ts={}".format(
                      Constants.WRF_EARTH_RADIUS,
                      Constants.WRF_EARTH_RADIUS,
                      self._hemi,
                      self.stand_lon,
                      self._lat_ts))
        return _proj4
class LatLon(WrfProj):
"""A :class:`wrf.WrfProj` subclass for Lat Lon projections.
See Also:
:class:`wrf.WrfProj`, :class:`wrf.RotatedLatLon`,
:class:`wrf.PolarStereographic`,
:class:`Mercator`, :class:`LambertConformal`
"""
    def __init__(self, **proj_params):
        """Initialize a :class:`wrf.LatLon` object.

        Args:
            **proj_params: Map projection optional keyword arguments, that
                have the same names as found in WRF output NetCDF global
                attributes:

                - 'TRUELAT1': True latitude 1.
                - 'TRUELAT2': True latitude 2.
                - 'MOAD_CEN_LAT': Mother of all domains center latitude.
                - 'STAND_LON': Standard longitude.
                - 'POLE_LAT': Pole latitude.
                - 'POLE_LON': Pole longitude.
        """
        # A lat/lon (cylindrical equidistant) grid needs no extra state
        # beyond the base-class projection parameters.
        super(LatLon, self).__init__(**proj_params)
def _cf_params(self):
_cf_params = {}
_cf_params["grid_mapping_name"] = "latitude_longitude"
return _cf_params
def _pyngl(self, geobounds, **kwargs):
if not pyngl_enabled():
return None
_pyngl = Resources()
_pyngl.mpProjection = "CylindricalEquidistant"
_pyngl.mpDataBaseVersion = "MediumRes"
_pyngl.mpCenterLonF = self.stand_lon
_pyngl.mpCenterLatF = self.moad_cen_lat
_pyngl.mpLimitMode = "Corners"
_pyngl.mpLeftCornerLonF = | |
"""
Hyperparameter Distributions
====================================
Here you'll find a few hyperparameter distributions. It's also possible to create your own by inheriting
from the base class. Each distribution must override the method ``rvs``, which returns a value sampled from
the distribution.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
..
Thanks to Umaneo Technologies Inc. for their contributions to this Machine Learning
project, visit https://www.umaneo.com/ for more information on Umaneo Technologies Inc.
"""
import copy
import random
import sys
from abc import abstractmethod, ABCMeta
from typing import List
from scipy.stats import norm
from scipy.integrate import quad
import math
import numpy as np
from scipy.stats import truncnorm
class HyperparameterDistribution(metaclass=ABCMeta):
    """Base class for other hyperparameter distributions."""

    def __init__(self, null_default_value):
        """
        Create a HyperparameterDistribution. This method should still be called with super if it gets overriden.

        :param null_default_value: value returned by :func:`nullify`.
        """
        # Identity of the first object in the narrowing chain; deep copies
        # keep this attribute, so a copy compares equal to its source.
        self.first_id = id(self)
        self.null_default_value = null_default_value

    @abstractmethod
    def rvs(self):
        """
        Sample the random variable.

        :return: The randomly sampled value.
        """
        pass

    def nullify(self):
        """Return the distribution's null (default) value."""
        return self.null_default_value

    @abstractmethod
    def pdf(self, x) -> float:
        """
        Abstract method for probability distribution function value at `x`.

        :param x: value where the probability distribution function is evaluated.
        :return: The probability distribution function value.
        """
        pass

    @abstractmethod
    def cdf(self, x) -> float:
        """
        Abstract method for cumulative distribution function value at `x`.

        :param x: value where the cumulative distribution function is evaluated.
        :return: The cumulative distribution function value.
        """
        pass

    def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.0) -> 'HyperparameterDistribution':
        """
        Takes a value that is estimated to be the best one of the space, and restrict the space near that value.
        By default, this function will completely replace the returned value by the new guess if not overriden.

        :param best_guess: the value towards which we want to narrow down the space.
        :param kept_space_ratio: what proportion of the space is kept. Should be between 0.0 and 1.0. Default is to keep only the best_guess (0.0).
        :return: a new HyperparameterDistribution object that has been narrowed down.
        """
        return FixedHyperparameter(best_guess, self.null_default_value).was_narrowed_from(kept_space_ratio, self)

    def was_narrowed_from(
            self, kept_space_ratio: float, original_hp: 'HyperparameterDistribution'
    ) -> 'HyperparameterDistribution':
        """
        Keep track of the original distribution to restore it.

        :param kept_space_ratio: the ratio which made the current object narrower than the ``original_hp``.
        :param original_hp: The original HyperparameterDistribution, which will be kept in a private variable for an eventual restore.
        :return: self.
        """
        # Accumulate the narrowing ratio across the whole chain.
        self.kept_space_ratio_trace = (
            self.get_current_narrowing_value() *
            kept_space_ratio *
            original_hp.get_current_narrowing_value()
        )
        self.original_hp: HyperparameterDistribution = original_hp.unnarrow()
        return self

    def get_current_narrowing_value(self):
        """Return the cumulative narrowing ratio, lazily initialized to 1.0."""
        if not hasattr(self, 'kept_space_ratio_trace'):
            self.kept_space_ratio_trace: float = 1.0
        return self.kept_space_ratio_trace

    def unnarrow(self) -> 'HyperparameterDistribution':
        """
        Return the original distribution before narrowing of the distribution. If the distribution was never narrowed,
        will return a copy of self.

        :return: the original HyperparameterDistribution before narrowing, or else self if the distribution is virgin.
        """
        if not hasattr(self, 'original_hp'):
            return copy.deepcopy(self)
        return copy.deepcopy(self.original_hp.unnarrow())

    def __eq__(self, other):
        # BUGFIX: comparing against an object with no `first_id` attribute
        # used to raise AttributeError; return NotImplemented so Python can
        # fall back to the other operand or default identity semantics.
        if not isinstance(other, HyperparameterDistribution):
            return NotImplemented
        return self.first_id == other.first_id
class FixedHyperparameter(HyperparameterDistribution):
    """An hyperparameter frozen to a single value, still expressed as a
    distribution so it can be used wherever one is expected."""

    def __init__(self, value, null_default_value=None):
        """
        Create a fixed ("still") hyperparameter.

        :param value: what will be returned by calling ``.rvs()``.
        :param null_default_value: default value for the distribution.
        """
        HyperparameterDistribution.__init__(self, null_default_value)
        self.value = value

    def rvs(self):
        """
        Sample the no-longer-random value.

        :return: the value given at creation.
        """
        return self.value

    def pdf(self, x) -> float:
        """
        Probability mass at `x`: 1 when ``x == value``, 0 otherwise.

        :param x: value where the probability distribution function is evaluated.
        :return: The probability distribution function value.
        """
        return 1. if x == self.value else 0.

    def cdf(self, x) -> float:
        """
        Cumulative probability at `x`: 1 when ``x >= value``, 0 otherwise.

        :param x: value where the cumulative distribution function is evaluated.
        :return: The cumulative distribution function value.
        """
        return 1. if x >= self.value else 0.
# TODO: Mixin this or something:
# class DelayedAdditionOf(MalleableDistribution):
# """A HyperparameterDistribution (MalleableDistribution mixin) that """
#
# def __init__(self, *dists):
# self.dists = dists
#
# def rvs(self):
# rvss = [d.rvs if hasattr(d, 'rvs') else d for d in self.dists]
# return sum(rvss)
#
#
# class MalleableDistribution(metaclass=ABCMeta):
# """An hyperparameter distribution to which it's possible to do additional math using defaut python operators."""
#
# def __add__(self, other):
# return DelayedAdditionOf(self, other)
#
# max min + - / * % ** // == != < > <= >=
#
class Boolean(HyperparameterDistribution):
    """Get a random boolean hyperparameter."""

    def __init__(self, null_default_value=False):
        HyperparameterDistribution.__init__(self, null_default_value)

    def rvs(self):
        """
        Draw True or False with equal probability.

        :return: True or False (random).
        """
        return random.choice([True, False])

    def pdf(self, x) -> float:
        """
        Calculate the boolean probability mass function value at position `x`.

        Both truth values (and their 0/1 numeric forms) carry mass 0.5.

        :param x: value where the probability mass function is evaluated.
        :return: value of the probability mass function.
        """
        looks_boolean = (x is True) or (x == 1) or (x is False) or (x == 0)
        return 0.5 if looks_boolean else 0.

    def cdf(self, x) -> float:
        """
        Calculate the boolean cumulative distribution function value at position `x`.

        :param x: value where the cumulative distribution function is evaluated.
        :return: value of the cumulative distribution function.
        """
        if x < 0:
            return 0.
        if (0 <= x < 1) or (x is False):
            # Only False has been "seen" at this point.
            return 0.5
        if x >= 1 or (x is True):
            return 1.
        return 0.
class Choice(HyperparameterDistribution):
"""Get a random value from a choice list of possible value for this hyperparameter.
When narrowed, the choice will only collapse to a single element when narrowed enough.
For example, if there are 4 items in the list, only at a narrowing value of 0.25 that
the first item will be kept alone.
"""
def __init__(self, choice_list: List, null_default_value=None):
"""
Create a random choice hyperparameter from the given list.
:param choice_list: a list of values to sample from.
:type choice_list: List
:param null_default_value: default value for distribution
:type null_default_value: default choice value. if None, default choice value will be the first choice
"""
if null_default_value is None:
HyperparameterDistribution.__init__(self, choice_list[0])
elif null_default_value in choice_list:
HyperparameterDistribution.__init__(self, null_default_value)
else:
raise ValueError('invalid default value {0} not in choice list : {1}'.format(null_default_value, choice_list))
self.choice_list = choice_list
    def rvs(self):
        """
        Get one of the items randomly (uniformly among the choices).

        :return: one of the items of the list.
        """
        return random.choice(self.choice_list)
def pdf(self, x) -> float:
"""
Calculate the choice probability mass function value at position `x`.
:param x: value where the probability mass function is evaluated.
:return: value of the probability mass function.
"""
try:
x_in_choice = x in self.choice_list
except (TypeError, ValueError, AttributeError):
raise ValueError(
"Item not find in list. Make sure the item is in the choice list and a correct method __eq__ is defined for all item in the list.")
else:
if x_in_choice:
return 1 / (len(self.choice_list))
return 0.
def cdf(self, x) -> float:
"""
Calculate the choice probability cumulative distribution function value at position `x`.
The index in the list is used to know how the choice is performed.
:param x: value where the cumulative distribution function is evaluated.
:return: value of the cumulative distribution function.
"""
try:
index = self.choice_list.index(x)
except ValueError:
raise ValueError(
"Item not found in list. Make sure the item is in the choice list and a correct method __eq__ is defined for all item in the list.")
except (NotImplementedError, NotImplemented):
raise ValueError("A correct method for __eq__ should be defined for all item in the list.")
except AttributeError:
raise ValueError("choice_list param should be a list.")
else:
return (index + 1) / len(self.choice_list)
def narrow_space_from_best_guess(self, best_guess, kept_space_ratio: float = 0.0) -> HyperparameterDistribution:
"""
Will narrow the space. If the cumulative kept_space_ratio gets to be under or equal to 1/len(choice_list),
then | |
Hebrew calendar."""
return mod(h_year, 7) == 0
# see lines 1605-1617 in calendrica-3.0.cl
def last_day_of_hebrew_month(h_month, h_year):
    """Return last day of month h_month in Hebrew year h_year."""
    # 29-day months: Iyyar, Tammuz, Elul, Tevet and Adar II always; Adar in
    # a common year; Marheshvan unless long; Kislev when short.
    is_29_day_month = (
        h_month in [IYYAR, TAMMUZ, ELUL, TEVET, ADARII]
        or (h_month == ADAR and not is_hebrew_leap_year(h_year))
        or (h_month == MARHESHVAN and not is_long_marheshvan(h_year))
        or (h_month == KISLEV and is_short_kislev(h_year)))
    return 29 if is_29_day_month else 30
# see lines 1619-1634 in calendrica-3.0.cl
def molad(h_month, h_year):
    """Return moment of mean conjunction of h_month in Hebrew h_year."""
    # Months before Tishri belong to the following year number.
    y = h_year if h_month >= TISHRI else h_year + 1
    months_elapsed = h_month - TISHRI + quotient(235 * y - 234, 19)
    # Each mean lunation lasts 29 days, 12 hours and 793 parts
    # (a "part" being 1/25920 of a day).
    lunation = 29 + hr(12) + 793/25920
    return HEBREW_EPOCH - 876/25920 + months_elapsed * lunation
# see lines 1636-1663 in calendrica-3.0.cl
def hebrew_calendar_elapsed_days(h_year):
    """Return number of days elapsed from the (Sunday) noon prior
    to the epoch of the Hebrew calendar to the mean
    conjunction (molad) of Tishri of Hebrew year h_year,
    or one day later."""
    months_elapsed = quotient(235 * h_year - 234, 19)
    parts_elapsed = 12084 + 13753 * months_elapsed
    days = 29 * months_elapsed + quotient(parts_elapsed, 25920)
    # Weekday-based condition on the molad: postpone by one day when it
    # holds (presumably a dehiyyah rule — see the reference text).
    if mod(3 * (days + 1), 7) < 3:
        return days + 1
    return days
# see lines 1665-1670 in calendrica-3.0.cl
def hebrew_new_year(h_year):
    """Return fixed date of Hebrew new year h_year."""
    elapsed = hebrew_calendar_elapsed_days(h_year)
    correction = hebrew_year_length_correction(h_year)
    return HEBREW_EPOCH + elapsed + correction
# see lines 1672-1684 in calendrica-3.0.cl
def hebrew_year_length_correction(h_year):
    """Return delays to start of Hebrew year h_year to keep ordinary
    year in range 353-356 and leap year in range 383-386."""
    # Elapsed days at the new years surrounding h_year (note: previous
    # year uses h_year - 1, not 1 — a historical bug in this code).
    ny0 = hebrew_calendar_elapsed_days(h_year - 1)
    ny1 = hebrew_calendar_elapsed_days(h_year)
    ny2 = hebrew_calendar_elapsed_days(h_year + 1)
    if ny2 - ny1 == 356:
        # The coming year would be too long: delay this new year two days.
        return 2
    if ny1 - ny0 == 382:
        # The previous year would be too short: delay this new year one day.
        return 1
    return 0
# see lines 1686-1690 in calendrica-3.0.cl
def days_in_hebrew_year(h_year):
    """Return number of days in Hebrew year h_year."""
    # The year length is the distance between consecutive new years.
    return hebrew_new_year(h_year + 1) - hebrew_new_year(h_year)
# see lines 1692-1695 in calendrica-3.0.cl
def is_long_marheshvan(h_year):
    """Return True if Marheshvan is long (30 days) in Hebrew year h_year."""
    # Marheshvan is long exactly in years of 355 or 385 days.
    return days_in_hebrew_year(h_year) in [355, 385]
# see lines 1697-1700 in calendrica-3.0.cl
def is_short_kislev(h_year):
    """Return True if Kislev is short (29 days) in Hebrew year h_year."""
    # Kislev is short exactly in years of 353 or 383 days.
    return days_in_hebrew_year(h_year) in [353, 383]
# see lines 1702-1721 in calendrica-3.0.cl
def fixed_from_hebrew(h_date):
    """Return fixed date of Hebrew date h_date."""
    month = standard_month(h_date)
    day = standard_day(h_date)
    year = standard_year(h_date)

    def month_len(m):
        # Length in days of month m within this year.
        return last_day_of_hebrew_month(m, year)

    if month < TISHRI:
        # Months numbered before Tishri fall at the end of the civil year:
        # add every month from Tishri through the year's last month, then
        # Nisan up to (but excluding) the target month.
        days_before = (summa(month_len, TISHRI,
                             lambda m: m <= last_month_of_hebrew_year(year)) +
                       summa(month_len, NISAN, lambda m: m < month))
    else:
        # Add the months from Tishri up to (but excluding) the target month.
        days_before = summa(month_len, TISHRI, lambda m: m < month)
    return hebrew_new_year(year) + days_before + day - 1
# see lines 1723-1751 in calendrica-3.0.cl
def hebrew_from_fixed(date):
    """Return Hebrew (year month day) corresponding to fixed date date.
    # The fraction can be approximated by 365.25."""
    # 35975351/98496 days — presumably the mean Hebrew year length.
    approx = quotient(date - HEBREW_EPOCH, 35975351/98496) + 1
    # Last year whose new year falls on or before `date`.
    year = final(approx - 1, lambda y: hebrew_new_year(y) <= date)
    # Search from Tishri unless `date` is on or after Nisan 1.
    if date < fixed_from_hebrew(hebrew_date(year, NISAN, 1)):
        start = TISHRI
    else:
        start = NISAN
    # NOTE: `next` is this library's linear-search helper, not the builtin.
    # It finds the first month whose last day is on or after `date`.
    month = next(start, lambda m: date <= fixed_from_hebrew(
        hebrew_date(year, m, last_day_of_hebrew_month(m, year))))
    day = date - fixed_from_hebrew(hebrew_date(year, month, 1)) + 1
    return hebrew_date(year, month, day)
# see lines 1753-1761 in calendrica-3.0.cl
def yom_kippur(g_year):
    """Return fixed date of Yom Kippur occurring in Gregorian year g_year."""
    # Note the +1 offset relative to the year conversion used by passover().
    hebrew_year = g_year + 1 - gregorian_year_from_fixed(HEBREW_EPOCH)
    return fixed_from_hebrew(hebrew_date(hebrew_year, TISHRI, 10))
# see lines 1763-1770 in calendrica-3.0.cl
def passover(g_year):
    """Return fixed date of Passover (Nisan 15) occurring in Gregorian
    year g_year."""
    # Convert the Gregorian year number to a Hebrew year via the epoch year.
    hebrew_year = g_year - gregorian_year_from_fixed(HEBREW_EPOCH)
    return fixed_from_hebrew(hebrew_date(hebrew_year, NISAN, 15))
# see lines 1772-1782 in calendrica-3.0.cl
def omer(date):
    """Return the number of elapsed weeks and days in the omer at date date.
    Returns BOGUS if that date does not fall during the omer."""
    days_since_passover = date - passover(gregorian_year_from_fixed(date))
    # The omer spans the 49 days following Passover.
    if 1 <= days_since_passover <= 49:
        return [quotient(days_since_passover, 7), mod(days_since_passover, 7)]
    return BOGUS
# see lines 1784-1793 in calendrica-3.0.cl
def purim(g_year):
    """Return fixed date of Purim occurring in Gregorian year g_year."""
    hebrew_year = g_year - gregorian_year_from_fixed(HEBREW_EPOCH)
    # Purim is on the 14th of the year's last month (Adar or Adar II).
    last_month = last_month_of_hebrew_year(hebrew_year)
    # BUGFIX: the original called hebrew_date(hebrew_year(last_month, 14)),
    # i.e. it tried to *call* the integer hebrew_year; hebrew_date takes
    # (year, month, day) as three separate arguments.
    return fixed_from_hebrew(hebrew_date(hebrew_year, last_month, 14))
# see lines 1795-1805 in calendrica-3.0.cl
def ta_anit_esther(g_year):
    """Return fixed date of Ta'anit Esther occurring in Gregorian
    year g_year."""
    purim_date = purim(g_year)
    # Normally the day before Purim; moved back to the preceding Thursday
    # when Purim falls on Sunday.
    if day_of_week_from_fixed(purim_date) == SUNDAY:
        return purim_date - 3
    return purim_date - 1
# see lines 1807-1821 in calendrica-3.0.cl
def tishah_be_av(g_year):
    """Return fixed date of Tishah be_Av occurring in Gregorian year g_year."""
    hebrew_year = g_year - gregorian_year_from_fixed(HEBREW_EPOCH)
    av9 = fixed_from_hebrew(hebrew_date(hebrew_year, AV, 9))
    # Postponed one day when Av 9 falls on Saturday.
    if day_of_week_from_fixed(av9) == SATURDAY:
        return av9 + 1
    return av9
# see lines 1823-1834 in calendrica-3.0.cl
def birkath_ha_hama(g_year):
    """Return the list of fixed date of Birkath ha_Hama occurring in
    Gregorian year g_year, if it occurs."""
    # Candidate dates: Coptic month 7, day 30, within g_year.
    dates = coptic_in_gregorian(7, 30, g_year)
    # The blessing only occurs when the Coptic year is ≡ 17 (mod 28).
    if dates != [] and mod(standard_year(coptic_from_fixed(dates[0])), 28) == 17:
        return dates
    return []
# see lines 1836-1840 in calendrica-3.0.cl
def sh_ela(g_year):
    """Return the list of fixed dates of Sh'ela occurring in
    Gregorian year g_year."""
    # Sh'ela is tied to a fixed Coptic date: month 3, day 26.
    return coptic_in_gregorian(3, 26, g_year)
# exercise for the reader from pag 104
def hebrew_in_gregorian(h_month, h_day, g_year):
    """Return list of the fixed dates of Hebrew month, h_month, day, h_day,
    that occur in Gregorian year g_year."""
    jan1 = gregorian_new_year(g_year)
    y = standard_year(hebrew_from_fixed(jan1))
    # The Hebrew and Gregorian calendars are aligned, but holidays near
    # Jan 1 (e.g. Tzom Tevet) can fall on either side of it, so a Gregorian
    # year can contain 0, 1 or 2 occurrences; check two Hebrew years.
    candidates = [fixed_from_hebrew(hebrew_date(y, h_month, h_day)),
                  fixed_from_hebrew(hebrew_date(y + 1, h_month, h_day))]
    return list_range(candidates, gregorian_year_range(g_year))
# see pag 104
def tzom_tevet(g_year):
    """Return the list of fixed dates for Tzom Tevet (Tevet 10) that
    occur in Gregorian year g_year. It can occur 0, 1 or 2 times per
    Gregorian year."""
    jan1 = gregorian_new_year(g_year)
    y = standard_year(hebrew_from_fixed(jan1))

    def _observed(h_year):
        # Tevet 10, pushed to Sunday when it falls on Saturday.
        d = fixed_from_hebrew(hebrew_date(h_year, TEVET, 10))
        if day_of_week_from_fixed(d) == SATURDAY:
            return d + 1
        return d

    # Candidates from both Hebrew years that may overlap g_year.
    dates = [_observed(y), _observed(y + 1)]
    return list_range(dates, gregorian_year_range(g_year))
# this is a simplified version where no check for SATURDAY
# is performed: from hebrew year 1 till 2000000
# there is no TEVET 10 falling on Saturday...
def alt_tzom_tevet(g_year):
    """Return the list of fixed dates for Tzom Tevet (Tevet 10) that
    occur in Gregorian year g_year. It can occur 0, 1 or 2 times per
    Gregorian year.

    Simplified variant of tzom_tevet() that skips the Saturday
    postponement check (see the preceding comment in the source).
    """
    return hebrew_in_gregorian(TEVET, 10, g_year)
# see lines 1842-1859 in calendrica-3.0.cl
def yom_ha_zikkaron(g_year):
    """Return fixed date of Yom ha_Zikkaron occurring in Gregorian
    year g_year."""
    hebrew_year = g_year - gregorian_year_from_fixed(HEBREW_EPOCH)
    iyyar4 = fixed_from_hebrew(hebrew_date(hebrew_year, IYYAR, 4))
    dow = day_of_week_from_fixed(iyyar4)
    if dow in [THURSDAY, FRIDAY]:
        # Advanced to the preceding Wednesday.
        return kday_before(WEDNESDAY, iyyar4)
    if dow == SUNDAY:
        # Postponed by one day.
        return iyyar4 + 1
    return iyyar4
# see lines 1861-1879 in calendrica-3.0.cl
def hebrew_birthday(birthdate, h_year):
    """Return fixed date of the anniversary of Hebrew birth date
    birthdate occurring in Hebrew h_year."""
    birth_day = standard_day(birthdate)
    birth_month = standard_month(birthdate)
    birth_year = standard_year(birthdate)
    if birth_month == last_month_of_hebrew_year(birth_year):
        # Born in the last month of the year (Adar or Adar II): celebrate
        # in whatever the last month of h_year is.
        anniversary_month = last_month_of_hebrew_year(h_year)
        return fixed_from_hebrew(
            hebrew_date(h_year, anniversary_month, birth_day))
    # Otherwise: same month, birth_day - 1 days after the first of it.
    return (fixed_from_hebrew(hebrew_date(h_year, birth_month, 1)) +
            birth_day - 1)
# see lines 1881-1893 in calendrica-3.0.cl
def hebrew_birthday_in_gregorian(birthdate, g_year):
    """Return the list of the fixed dates of Hebrew birthday
    birthday that occur in Gregorian g_year."""
    y = standard_year(hebrew_from_fixed(gregorian_new_year(g_year)))
    # Anniversaries from two consecutive Hebrew years may land in g_year.
    candidates = [hebrew_birthday(birthdate, y),
                  hebrew_birthday(birthdate, y + 1)]
    return list_range(candidates, gregorian_year_range(g_year))
# see lines 1895-1937 in calendrica-3.0.cl
def yahrzeit(death_date, h_year):
    """Return fixed date of the anniversary of Hebrew death date death_date
    occurring in Hebrew h_year."""
    death_day = standard_day(death_date)
    death_month = standard_month(death_date)
    death_year = standard_year(death_date)

    # Death on Marheshvan 30 when the first anniversary year lacked that
    # day: observe on the eve of Kislev 1.
    if (death_month == MARHESHVAN and death_day == 30
            and not is_long_marheshvan(death_year + 1)):
        return fixed_from_hebrew(hebrew_date(h_year, KISLEV, 1)) - 1
    # Same idea for Kislev 30 when the following year had a short Kislev:
    # observe on the eve of Tevet 1.
    if (death_month == KISLEV and death_day == 30
            and is_short_kislev(death_year + 1)):
        return fixed_from_hebrew(hebrew_date(h_year, TEVET, 1)) - 1
    # Adar II deaths are observed in the last month of h_year
    # (Adar or Adar II, depending on leap status).
    if death_month == ADARII:
        return fixed_from_hebrew(
            hebrew_date(h_year, last_month_of_hebrew_year(h_year), death_day))
    # Adar 30 does not exist in a common year: observe on Shevat 30.
    if (death_day == 30 and death_month == ADAR
            and not is_hebrew_leap_year(h_year)):
        return fixed_from_hebrew(hebrew_date(h_year, SHEVAT, 30))
    # Ordinary case: same month, death_day - 1 days after the first of it.
    return (fixed_from_hebrew(hebrew_date(h_year, death_month, 1)) +
            death_day - 1)
# see lines 1939-1951 in calendrica-3.0.cl
def yahrzeit_in_gregorian(death_date, g_year):
    """Return the list of the fixed dates of death date death_date (yahrzeit)
    that occur in Gregorian year g_year."""
    y = standard_year(hebrew_from_fixed(gregorian_new_year(g_year)))
    # Anniversaries from two consecutive Hebrew years may land in g_year.
    candidates = [yahrzeit(death_date, y), yahrzeit(death_date, y + 1)]
    return list_range(candidates, gregorian_year_range(g_year))
# see lines 1953-1960 in calendrica-3.0.cl
def shift_days(l, cap_Delta):
"""Shift each weekday on list l by cap_Delta days."""
return map(lambda x: day_of_week_from_fixed(x + | |
when calling `releases_availableToTester`") # noqa: E501
# verify the required parameter 'app_name' is set
if ('app_name' not in params or
params['app_name'] is None):
raise ValueError("Missing the required parameter `app_name` when calling `releases_availableToTester`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner_name' in params:
path_params['owner_name'] = params['owner_name'] # noqa: E501
if 'app_name' in params:
path_params['app_name'] = params['app_name'] # noqa: E501
query_params = []
if 'published_only' in params:
query_params.append(('published_only', params['published_only'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data', 'application/json-patch+json']) # noqa: E501
# Authentication setting
auth_settings = ['APIToken'] # noqa: E501
return self.api_client.call_api(
'/v0.1/apps/{owner_name}/{app_name}/releases/filter_by_tester', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='array', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def releases_list(self, owner_name, app_name, **kwargs): # noqa: E501
"""releases_list # noqa: E501
Return basic information about releases. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.releases_list(owner_name, app_name, async=True)
>>> result = thread.get()
:param async bool
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:param boolean published_only: When *true*, filters out releases that were uploaded but were never distributed. Releases that under deleted distribution groups will not be filtered out.(optional)
:param string scope: When the scope is 'tester', only includes releases that have been distributed to groups that the user belongs to.(optional)
:return: array
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.releases_list_with_http_info(owner_name, app_name, **kwargs) # noqa: E501
else:
(data) = self.releases_list_with_http_info(owner_name, app_name, **kwargs) # noqa: E501
return data
def releases_list_with_http_info(self, owner_name, app_name, **kwargs): # noqa: E501
"""releases_list # noqa: E501
Return basic information about releases. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.releases_list_with_http_info(owner_name, app_name, async=True)
>>> result = thread.get()
:param async bool
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:param boolean published_only: When *true*, filters out releases that were uploaded but were never distributed. Releases that under deleted distribution groups will not be filtered out.(optional)
:param string scope: When the scope is 'tester', only includes releases that have been distributed to groups that the user belongs to.(optional)
:return: array
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner_name', 'app_name', 'published_only', 'scope'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method releases_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner_name' is set
if ('owner_name' not in params or
params['owner_name'] is None):
raise ValueError("Missing the required parameter `owner_name` when calling `releases_list`") # noqa: E501
# verify the required parameter 'app_name' is set
if ('app_name' not in params or
params['app_name'] is None):
raise ValueError("Missing the required parameter `app_name` when calling `releases_list`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner_name' in params:
path_params['owner_name'] = params['owner_name'] # noqa: E501
if 'app_name' in params:
path_params['app_name'] = params['app_name'] # noqa: E501
query_params = []
if 'published_only' in params:
query_params.append(('published_only', params['published_only'])) # noqa: E501
if 'scope' in params:
query_params.append(('scope', params['scope'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'multipart/form-data', 'application/json-patch+json']) # noqa: E501
# Authentication setting
auth_settings = ['APIToken'] # noqa: E501
return self.api_client.call_api(
'/v0.1/apps/{owner_name}/{app_name}/releases', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='array', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def releaseUploads_complete(self, upload_id, owner_name, app_name, body, **kwargs): # noqa: E501
"""releaseUploads_complete # noqa: E501
Commits or aborts the upload process for a release for the specified application # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.releaseUploads_complete(upload_id, owner_name, app_name, body, async=True)
>>> result = thread.get()
:param async bool
:param string upload_id: The ID of the upload (required)
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:param object body: The release information (required)
:return: ReleaseUploadEndResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.releaseUploads_complete_with_http_info(upload_id, owner_name, app_name, body, **kwargs) # noqa: E501
else:
(data) = self.releaseUploads_complete_with_http_info(upload_id, owner_name, app_name, body, **kwargs) # noqa: E501
return data
def releaseUploads_complete_with_http_info(self, upload_id, owner_name, app_name, body, **kwargs): # noqa: E501
"""releaseUploads_complete # noqa: E501
Commits or aborts the upload process for a release for the specified application # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.releaseUploads_complete_with_http_info(upload_id, owner_name, app_name, body, async=True)
>>> result = thread.get()
:param async bool
:param string upload_id: The ID of the upload (required)
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:param object body: The release information (required)
:return: ReleaseUploadEndResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['upload_id', 'owner_name', 'app_name', 'body'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method releaseUploads_complete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'upload_id' is set
if ('upload_id' not in params or
params['upload_id'] is None):
raise ValueError("Missing the required parameter `upload_id` when calling `releaseUploads_complete`") # noqa: E501
# verify the required parameter 'owner_name' is set
if ('owner_name' not in params or
params['owner_name'] is None):
raise ValueError("Missing the required parameter `owner_name` when calling `releaseUploads_complete`") # noqa: E501
# verify the required parameter 'app_name' is set
if ('app_name' not in params or
params['app_name'] is None):
raise ValueError("Missing the required parameter `app_name` when calling `releaseUploads_complete`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `releaseUploads_complete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'upload_id' in params:
path_params['upload_id'] = params['upload_id'] # noqa: E501
if 'owner_name' in params:
path_params['owner_name'] = params['owner_name'] # noqa: E501
if 'app_name' in params:
path_params['app_name'] = params['app_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIToken'] # noqa: E501
return self.api_client.call_api(
'/v0.1/apps/{owner_name}/{app_name}/release_uploads/{upload_id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReleaseUploadEndResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def releaseUploads_create(self, owner_name, app_name, body, **kwargs): # noqa: E501
"""releaseUploads_create # noqa: E501
Begins the upload process for a new release for the specified application. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.releaseUploads_create(owner_name, app_name, body, async=True)
>>> result = thread.get()
:param async bool
:param string owner_name: The name of the owner (required)
:param string app_name: The name of the application (required)
:param object body: The release information (required)
:return: ReleaseUploadBeginResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.releaseUploads_create_with_http_info(owner_name, app_name, body, **kwargs) # noqa: E501
else:
(data) | |
the following:
'form-group has-error custom-class'
"""
classes = ['form-group']
if self.style == styles.BOOTSTRAP_4 and self.form_type == formtype.HORIZONTAL:
classes.append('row')
if self.error and self.style == styles.BOOTSTRAP_3:
classes.append('has-error')
if self.form_group_css_class:
classes.append(self.form_group_css_class)
return ' '.join(classes)
@property
def input_classes(self):
"""
Full list of classes for the class attribute of the input, returned as a string with
spaces separating each class.
"""
classes = [self.base_input_css_class]
if self.css_class:
classes.append(self.css_class)
if self.style == styles.BOOTSTRAP_4 and self.error:
classes.append('is-invalid')
return ' '.join(classes)
@property
def form_group_style(self):
"""
Style attribute for form group
"""
if self.form.form_type == formtype.INLINE:
return 'vertical-align: top'
return ''
@property
def form_group_attributes(self):
css_classes = self.form_group_classes
style = self.form_group_style
parts = []
if css_classes:
parts.append('class="{}"'.format(css_classes))
if style:
parts.append('style="{}"'.format(style))
return Markup(' '.join(parts))
class FormSection(object):
    """A named group of fields that can be rendered independently of its form."""

    def __init__(self, name, fields=()):
        """
        :param name: Unique name identifying this section within its form
        :param fields: Iterable of Field objects; copied into a new list

        The default is a tuple (not a list) to avoid the shared
        mutable-default-argument pitfall.
        """
        self.name = name
        self.fields = list(fields)

    def render(self):
        """Render the section to HTML via the form_section.html template."""
        return Markup(env.get_template('form_section.html').render(section=self))
class Form(object):
# The name of the hidden input used to detect form submission
SUBMITTED_HIDDEN_INPUT_NAME = '--form-submitted--'
def __init__(self, fields=[], action='', method='POST', css_class=None, submit_text='Submit',
read_form_data=True, form_name='', label_width=3, form_type=None,
id=None, submit_css_class='btn-primary', column_breakpoint='sm',
show_asterisks=False, max_width=None, disable_csrf=False, readonly=False,
style=styles.BOOTSTRAP_3):
"""
:param fields: List of Field objects
:param action: Action field in generated form
:param method: Method field in generated form. Must be 'POST' or 'GET'
:param css_class: CSS class of generated form
:param submit_text: Text to render in submit button. If None, no button is generated and has to be manually
added to the fields array
:param read_form_data: If True (by default) automatically parses the form input from the current request
:param form_name: If you have multiple forms on the same page, each must have a unique form name
:param label_width: The width (using the grid system) of the labels for this form
:param form_type: Form type constant (i.e. VERTICAL or HORIZONTAL)
:param id: The id to insert into the form tag
:param submit_css_class: The class of the automatically added submit button (if applicable)
:param column_breakpoint: Bootstrap column breakpoint where horizontal form degrades into
vertical form. Values: sm, md, lg. Defaults to 'sm'
:param show_asterisks: Should an asterisk be displayed next to required fields?
:param max_width: Maximum width, either an integer value representing the number of pixels
or a string containing a units i.e. '50%' or '240px'
:param disable_csrf: Set to True to remove the CSRF field (if applicable)
:param readonly: If set to True, all fields will be readonly, and it's garunteed that the
fields values will not change when the form is submitted. Allows the
form to be rendered, without accepting user input. If readonly is True,
ready and submitted will always return False
:param style: The "style" of form to render. This determines how the fields are laid out
and some of the CSS classes that are used. Bootstrap 3 or Bootstrap 4 are
the current supported values. Use a constant in styles.py
"""
if method != 'POST' and method != 'GET':
raise ValueError('Invalid method: %s. Valid options are GET and POST' % method)
if style not in styles.ALL_STYLES:
raise ValueError('Invalid style: {}. Only the following values are '
'supported: {}'.format(
', '.join(styles.ALL_STYLES)
))
# List of all fields not in a sections
self.fields = []
# Keep a dictionary with all fields
self.field_dict = OrderedDict()
# Add fields to form
self.add_fields(fields)
if form_type is None:
self.form_type = _default_form_type
else:
self.form_type = form_type
self.method = method
self.action = action
if css_class is None:
if self.form_type == formtype.HORIZONTAL:
self.css_class = 'form-horizontal'
elif self.form_type == formtype.INLINE:
self.css_class = 'form-inline'
else:
self.css_class = ''
else:
self.css_class = css_class
# Record if we have processed the form data yet
self.processed_data = False
self.form_name = form_name
self.label_width = label_width
self.id = id
self.column_breakpoint = column_breakpoint
self.show_asterisks = show_asterisks
self.max_width = max_width
if isinstance(self.max_width, int):
self.max_width = '{}px'.format(self.max_width)
self.disable_csrf = disable_csrf
self.readonly = readonly
self.style = style
# Record whether or not we have any validation errors
self.has_errors = False
# Optional form 'sections' to separate out fields and to allow sections of the form to be rendered independently
self._sections = OrderedDict()
if submit_text:
self.add_submit(submit_text, submit_css_class)
if read_form_data:
self.read_form_data()
def add_submit(self, submit_text, css_class='btn-primary'):
from .basicfields import SubmitButton
self.add_field(SubmitButton('submit', submit_text, label_width=self.label_width,
css_class=css_class))
def allow_duplicate_field(self, field, existing_fields=None):
"""Are we going to allow the duplicate field to be added?"""
if existing_fields is None:
existing_fields = self.field_dict
if not field.allow_duplicates:
return False
return existing_fields[field.name].allow_duplicates
def add_field(self, field):
if field.name in self.field_dict and not self.allow_duplicate_field(field):
raise exceptions.DuplicateField('A field named "{}" is already present in the form'.format(field.name))
self.fields.append(field)
self.field_dict[field.name] = field
field.form = self
def add_fields(self, fields):
# Dict mapping new field names onto the new field
new_fields = {}
for field in fields:
if field.name in self.field_dict and not self.allow_duplicate_field(field):
raise exceptions.DuplicateField('A field named "{}" is already present in the form'.format(field.name))
if field.name in new_fields and not self.allow_duplicate_field(field, new_fields):
raise exceptions.DuplicateField('Multiple fields named "{}" would be added to the form'.format(field.name))
new_fields[field.name] = field
for field in fields:
self.fields.append(field)
self.field_dict[field.name] = field
field.form = self
def add_section(self, name, fields=[]):
# Dict mapping new field names onto the new field
new_fields = {}
for field in fields:
if field.name in self.field_dict and not self.allow_duplicate_field(field):
raise exceptions.DuplicateField('A field named "{}" is already present in the form'.format(field.name))
if field.name in new_fields and not self.allow_duplicate_field(field, new_fields):
raise exceptions.DuplicateField('Multiple fields named "{}" would be added to the form'.format(field.name))
new_fields[field.name] = field
section = FormSection(name, fields)
for field in section.fields:
self.field_dict[field.name] = field
field.form = self
self._sections[name] = section
return section
def get_section(self, name):
return self._sections[name]
def has_section(self, name):
return name in self._sections
@property
def sections(self):
return [self._sections[key] for key in self._sections]
@property
def submitted_hidden_input_name(self):
return '%s%s' % (self.SUBMITTED_HIDDEN_INPUT_NAME, self.form_name)
def render(self):
"""Render the form and all sections to HTML"""
return Markup(env.get_template('form.html').render(form=self,
render_open_tag=True,
render_close_tag=True,
render_before=True,
render_sections=True,
render_after=True,
generate_csrf_token=None if self.disable_csrf else _csrf_generation_function))
def render_before_sections(self):
"""Render the form up to the first section. This will open the form tag but not close it."""
return Markup(env.get_template('form.html').render(form=self,
render_open_tag=True,
render_close_tag=False,
render_before=True,
render_sections=False,
render_after=False,
generate_csrf_token=None if self.action else _csrf_generation_function))
def render_after_sections(self):
"""Render the form up to the first section. This will close the form tag, but not open it."""
return Markup(env.get_template('form.html').render(form=self,
render_open_tag=False,
render_close_tag=True,
render_before=False,
render_sections=False,
render_after=True,
generate_csrf_token=None if self.action else _csrf_generation_function))
def render_sections(self):
"""
Renders all sections in the form, each inside a fieldset with the legend generated from the section name.
No form tag is included: just the inputs are rendered.
"""
return Markup(env.get_template('form.html').render(form=self,
render_open_tag=False,
render_close_tag=False,
render_before=False,
render_sections=True,
render_after=False,
generate_csrf_token=_csrf_generation_function))
def render_start(self):
"""
This will open the form, without rendering any fields at all
"""
return Markup(env.get_template('form.html').render(form=self,
render_open_tag=True,
render_close_tag=False,
render_before=False,
render_sections=False,
render_after=False,
generate_csrf_token=_csrf_generation_function))
def render_end(self):
"""
This will close the form, without rendering any fields at all
"""
return Markup(env.get_template('form.html').render(form=self,
render_open_tag=False,
render_close_tag=True,
render_before=False,
render_sections=False,
render_after=False,
generate_csrf_token=_csrf_generation_function))
def render_section(self, name):
return self.get_section(name).render()
def render_field(self, name):
return Markup(self.get_field(name).render())
def is_section_empty(self, name):
return not self.get_section(name).fields
@property
def all_fields(self):
# Create list of all fields from all sections
if not self.sections:
return self.fields
else:
all_fields = self.fields[:]
for section in self.sections:
for field in section.fields:
all_fields.append(field)
return all_fields
def read_form_data(self):
"""Attempt to read the form data from the request"""
if self.processed_data:
raise exceptions.AlreadyProcessed('The data has already been processed for this form')
if self.readonly:
return
if request.method == self.method:
if self.method == 'POST':
data = request.form
else:
data = request.args
if self.submitted_hidden_input_name in data:
# The form has been submitted
self.processed_data = True
for field in self.all_fields:
# We need to skip readonly fields
if field.readonly:
pass
else:
field.extract_value(data)
# Validate the field
if not field.validate():
log.debug('Validation error in field \'%s\': %s' % (field.name, field.error))
self.has_errors = True
def __getitem__(self, item):
if not self.processed_data:
raise exceptions.FormNotProcessed('The form data has not been processed yet')
if item not in self.field_dict:
raise exceptions.FieldNotFound('The field \'%s\' is not present in the processed form data' % item)
return self.field_dict[item].value
def __contains__(self, item):
if not self.processed_data:
raise exceptions.FormNotProcessed('The form data has not been processed yet')
return item in self.field_dict
def get_if_present(self, | |
# <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Reduces the number of points in SMLM images to a given maximum count or fraction.
This script takes a folder of SMLM files and, for each file, saves a copy of it up
to the specified maximum number of points in the output folder.
The number of points is given either as an exact value or a percentage of the
initial total points.
Images with less than the specified maximum are simply copied to the output folder.
@author: Dave
"""
import os
import numpy as np
from natsort import natsorted
import gc
import datetime
import json
# Run from the script's own directory so that sibling modules (e.g. FuncEtc
# imported below) and any relative paths resolve correctly no matter where
# the interpreter was launched from.
proc_wd = os.path.dirname(os.path.abspath(__file__))
if os.getcwd() != proc_wd:
    os.chdir(proc_wd)
    print('Changed working directory to ' + proc_wd)
import FuncEtc as fn_etc
# --- User-editable processing settings -------------------------------------

# Screen for identical xy coordinate pairs
# If found only the first pair will be retained and the other removed.
# Identical points give rise to zero-length neighbour distances which can break
# things further down the line.
doFilterDuplicates: bool = True

# Begin processing at this file in the list of files. Should normally be zero to
# start with the first file but you can jump ahead if you wish to resume processing
# an earlier set or if you are batch-processing across machines.
# NB: remember Python's zero-based indexing. To process the first file in the list
# this needs to be set to zero.
starting_index: int = 0 # normally zero (begin with the first file)

# End processing at this file index in the list of files. Should normally be zero
# to process all files in the list but you can terminate the list early, e.g.
# to only process a subset of the files or if you are batch-processing across
# multiple machines
# NB: remember Python's zero-based indexing.
finishing_index: int = 0 # normally zero (end with the last file)

# NB starting_index can be greater than finishing_index; the script will just
# process the files in the reverse order. e.g.
# Files = [0,1,2,3,4,5,6,7,8,9]
# starting_index = 0 and finishing_index = 5 >>> Processes Files 0,1,2,3,4
# starting_index = 8 and finishing_index = 3 >>> Processes Files 8,7,6,5,4
if __name__ == '__main__':
# Initial processing settings (ProcSettings) are loaded from a JSON file
good_json = False
default_json_file = ''
while not good_json:
if 'input_PrepJSON' in locals():
default_json_file = input_PrepJSON # recycle the previous input
input_PrepJSON = fn_etc.askforinput(
message = 'Full path to JSON file describing the data',
errormessage = 'The file you provided does not exist or you supplied a path only. Check that your path includes the file you want and try again.',
defaultval = default_json_file,
isvalid = lambda v : os.path.isfile(v))
with open(input_PrepJSON, 'r') as file:
ps = json.loads(file.read())
fn_etc.info_msg('Imported JSON variables:')
print(' │')
print(' ├─InputFileDelimiter:\t' + ps['InputFileDelimiter'])
print(' ├─InputFileExt:\t' + ps['InputFileExt'])
print(' │')
print(' ├─xCol:\t\t' + str(ps['xCol']))
print(' ├─yCol:\t\t' + str(ps['yCol']))
print(' ├─ClusMembershipIDCol:\t' + str(ps['ClusMembershipIDCol']))
print(' ├─ChanIDCol:\t\t' + str(ps['ChanIDCol']))
print(' ├─UIDCol:\t\t' + str(ps['UIDCol']))
print(' │')
print(' ├─AutoAxes:\t\t' + str(ps['AutoAxes']))
if ps['AutoAxes']:
print(' ├─AutoAxesNearest:\t' + str(ps['AutoAxesNearest']))
print(' ├─ImageSize:\t\tTo be determined')
print(' ├─xMin:\t\tTo be determined')
print(' ├─xMax:\t\tTo be determined')
print(' ├─yMin:\t\tTo be determined')
print(' ├─yMax:\t\tTo be determined')
else:
print(' ├─AutoAxesNearest:\tNot applicable')
print(' ├─ImageSize:\t\t' + str(ps['ImageSize']))
print(' ├─xMin:\t\t' + str(ps['xMin']))
print(' ├─xMax:\t\t' + str(ps['xMax']))
print(' ├─yMin:\t\t' + str(ps['yMin']))
print(' ├─yMax:\t\t' + str(ps['yMax']))
print(' │')
print(' ├─ClosestFriend:\t' + str(ps['ClosestFriend']))
print(' └─FurthestFriend:\t' + str(ps['FurthestFriend']))
verify_good_json = fn_etc.askforinput(
message = 'Are these settings correct? Enter \'Y\' to proceed or enter \'N\' to select another JSON file (or the same file, after you have edited it with the correct settings)',
errormessage= 'Type Y or N',
defaultval= 'y',
isvalid = lambda v : v.lower() in ['y','n','yes','no'])
if verify_good_json.lower() in ['y','yes']:
print('JSON file accepted.')
good_json = True
else:
print('JSON file rejected.')
default_input_path = os.path.dirname(input_PrepJSON)
# get the data from the folder
inputpath = fn_etc.askforinput(
message = 'Enter the path of the folder containing ' + ps['InputFileExt'] + ' data tables',
errormessage= 'The folder you provided does not exist or you have provided the path to a file.',
defaultval= default_input_path,
isvalid = lambda v : os.path.isdir(v))
reduction_method = fn_etc.askforinput(
message = 'Do you want to reduce data to [1]-Maximum total points or [2]-Fraction of original points? (Enter 1 or 2)',
errormessage= 'Type the number 1 or 2 and press enter',
defaultval= '2',
isvalid = lambda v : v in ['1','2'])
if reduction_method in ['1']:
max_points_per_set = fn_etc.askforinput(
message = 'Enter the maximum number of points to be retained from each dataset',
errormessage= 'Please enter a non-zero positive integer',
defaultval= '100000',
isvalid = lambda v: v.isdigit() and int(v) >= 1)
max_points_per_set = int(max_points_per_set)
fraction_points_per_set = 0
default_outfolder = os.path.join(inputpath, 'Reduced_' + str(max_points_per_set) + '_Pts_Max')
elif reduction_method in ['2']:
fraction_points_per_set = fn_etc.askforinput(
message = 'Enter the fraction of points to be retained from each dataset',
errormessage= 'Please enter a number which is greater than zero and less than one',
defaultval= '0.5',
isvalid = lambda v: v.replace('.','').isdigit() and float(v) > 0 and float(v) < 1)
fraction_points_per_set = float(fraction_points_per_set)
max_points_per_set = 0
default_outfolder = os.path.join(inputpath, 'Reduced_' + str(fraction_points_per_set) + 'x_Pts')
s1_prep_outputpath = fn_etc.askforinput(
message = 'Enter the name of the output folder',
errormessage= 'The output folder must be named',
defaultval= os.path.abspath(default_outfolder),
isvalid = lambda v : len(v) > 0)
# get a list of input files from the given inputfolder
files = natsorted([i for i in os.listdir(inputpath) if os.path.isfile(os.path.join(inputpath, i)) and ps['InputFileExt'] in i])
total_files = np.shape(files)[0]
# check the starting_index value in case we are restarting a run
if starting_index != 0:
reset_starting_index = fn_etc.askforinput(
message = 'Current Index is set to ' + str(starting_index) + ', i.e. begin with File ' + str(starting_index + 1) + '. Do you want to reset it to zero? (Y or N)',
errormessage= 'Type Y or N',
defaultval= 'y',
isvalid = lambda v : v.lower() in ['y','n','yes','no'])
if reset_starting_index.lower() in ['y','yes']:
starting_index = 0
print('Current index has been reset to zero. Processing will begin from the first file in the list.')
else:
print('Keeping the current index. Processing will begin with File ' + str(starting_index + 1) + ' in the list.')
current_index = starting_index
# check the finishing_index value in case we are restarting a run
if finishing_index != 0:
reset_finishing_index = fn_etc.askforinput(
message = 'Current Index is set to ' + str(finishing_index) + ', i.e. end processing after File ' + str(finishing_index - 1) + ' is done. Do you want to reset it and process all files? (Y or N)',
errormessage= 'Type Y or N',
defaultval= 'y',
isvalid = lambda v : v.lower() in ['y','n','yes','no'])
if reset_finishing_index.lower() in ['y','yes']:
finishing_index = total_files
print('Finishing Index has been reset and all files in the folder will be processed.')
else:
print('Keeping the current index. Processing will end once File ' + str(finishing_index) + ' is done.')
else:
finishing_index = total_files
proceed_with_processing = fn_etc.askforinput(
message = 'When you are ready to proceed type P and Enter (or X to cancel everything and exit)',
errormessage= 'Type P or X',
defaultval= 'P',
isvalid = lambda v : v.lower() in ['p','x'])
if proceed_with_processing.lower() in ['p']:
print('Rightyo, off we go...')
else:
print("That's ok. Maybe next time?")
exit()
#make the folder for the output data
if not os.path.exists(s1_prep_outputpath):
os.makedirs(s1_prep_outputpath)
# process all the files
for fileIdx in range(starting_index, finishing_index):
current_file = files[fileIdx]
output_prefix = os.path.splitext(current_file)[0]
fn_etc.info_msg(str(fileIdx + 1) + ' of ' + str(total_files) + '\t' + current_file)
print('Loading data...', end='', flush=True)
datatable = np.genfromtxt(os.path.join(inputpath, current_file),
delimiter=ps['InputFileDelimiter'],
skip_header=1) # names=True
# will be exporting as tab-delimited from here, so swap out original delimiters in the header for tabs
with open(os.path.join(inputpath, current_file), 'r') as f:
ps['TableHeaders'] = f.readline().strip()
TotalPointsThisImage = datatable.shape[0]
print('Done (' + str(TotalPointsThisImage) + ' points)')
#duplicate xy screening
if doFilterDuplicates:
data_xy = np.concatenate((datatable[:, ps['xCol'], None], datatable[:, ps['yCol'], None]), axis=1)
_, uniq_idx = np.unique(data_xy, axis=0, return_index=True)
if uniq_idx.shape[0] < datatable.shape[0]:
uniq_idx = np.sort(uniq_idx)
datatable = datatable[uniq_idx,:]
oldTotalPoints = TotalPointsThisImage
TotalPointsThisImage = datatable.shape[0]
DuplicatePointsRemoved = oldTotalPoints - TotalPointsThisImage
else:
DuplicatePointsRemoved | |
is None:
vmin = min_toplot
extend = _extend_mapping[vmin <= min_toplot, vmax >= max_toplot]
if pop_ids is not None:
data_pop_ids = model_pop_ids = resid_pop_ids = pop_ids
if len(pop_ids) != 2:
raise ValueError('pop_ids must be of length 2.')
else:
data_pop_ids = masked_data.pop_ids
model_pop_ids = masked_model.pop_ids
if masked_model.pop_ids is None:
model_pop_ids = data_pop_ids
if model_pop_ids == data_pop_ids:
resid_pop_ids = model_pop_ids
else:
resid_pop_ids = None
ax = pylab.subplot(2, 2, 1)
plot_single_2d_sfs(masked_data, vmin=vmin, vmax=vmax,
pop_ids=data_pop_ids, colorbar=False)
ax.set_title('data')
ax2 = pylab.subplot(2, 2, 2, sharex=ax, sharey=ax)
plot_single_2d_sfs(masked_model, vmin=vmin, vmax=vmax,
pop_ids=model_pop_ids, extend=extend)
ax2.set_title('model')
if residual == 'Anscombe':
resid = Inference.Anscombe_Poisson_residual(masked_model, masked_data,
mask=vmin)
elif residual == 'linear':
resid = Inference.linear_Poisson_residual(masked_model, masked_data,
mask=vmin)
else:
raise ValueError("Unknown class of residual '%s'." % residual)
if resid_range is None:
resid_range = max((abs(resid.max()), abs(resid.min())))
resid_extend = _extend_mapping[-resid_range <= resid.min(),
resid_range >= resid.max()]
ax3 = pylab.subplot(2, 2, 3, sharex=ax, sharey=ax)
plot_2d_resid(resid, resid_range, pop_ids=resid_pop_ids,
extend=resid_extend)
ax3.set_title('residuals')
ax = pylab.subplot(2,2,4)
flatresid = numpy.compress(numpy.logical_not(resid.mask.ravel()),
resid.ravel())
ax.hist(flatresid, bins=20, density=True)
ax.set_title('residuals')
ax.set_yticks([])
if show:
pylab.show()
def plot_3d_comp_multinom(model, data, vmin=None, vmax=None,
                          resid_range=None, fig_num=None,
                          pop_ids=None, residual='Anscombe', adjust=True,
                          show=True):
    """
    Multinomial comparison between 3d model and data.

    model: 3-dimensional model SFS
    data: 3-dimensional data SFS
    vmin, vmax: Minimum and maximum values plotted for sfs are vmin and
                vmax respectively.
    resid_range: Residual plot saturates at +- resid_range.
    fig_num: Clear and use figure fig_num for display. If None, an new figure
             window is created.
    pop_ids: If not None, override pop_ids stored in Spectrum.
    residual: 'Anscombe' for Anscombe residuals, which are more normally
              distributed for Poisson sampling. 'linear' for the linear
              residuals, which can be less biased.
    adjust: Should method use automatic 'subplots_adjust'? For advanced
            manipulation of plots, it may be useful to make this False.
    show: If True, execute pylab.show command to make sure plot displays.
          (New parameter, passed through to plot_3d_comp_Poisson; default
          True preserves the previous behavior.)

    This comparison is multinomial in that it rescales the model to optimally
    fit the data.
    """
    # Rescale the model to the data, then delegate to the Poisson plotter.
    model = Inference.optimally_scaled_sfs(model, data)
    plot_3d_comp_Poisson(model, data, vmin=vmin, vmax=vmax,
                         resid_range=resid_range, fig_num=fig_num,
                         pop_ids=pop_ids, residual=residual,
                         adjust=adjust, show=show)
def plot_3d_comp_Poisson(model, data, vmin=None, vmax=None,
                         resid_range=None, fig_num=None, pop_ids=None,
                         residual='Anscombe', adjust=True, show=True):
    """
    Poisson comparison between 3d model and data.

    Draws a 4x3 grid of panels: one column for each pairwise
    marginalization of the 3d spectra, with rows showing the data, the
    model, the 2d residuals, and a histogram of the residuals.

    model: 3-dimensional model SFS
    data: 3-dimensional data SFS
    vmin, vmax: Minimum and maximum values plotted for sfs are vmin and
                vmax respectively.
    resid_range: Residual plot saturates at +- resid_range.
    fig_num: Clear and use figure fig_num for display. If None, an new figure
             window is created.
    pop_ids: If not None, override pop_ids stored in Spectrum.
    residual: 'Anscombe' for Anscombe residuals, which are more normally
              distributed for Poisson sampling. 'linear' for the linear
              residuals, which can be less biased.
    adjust: Should method use automatic 'subplots_adjust'? For advanced
            manipulation of plots, it may be useful to make this False.
    show: If True, execute pylab.show command to make sure plot displays.

    Raises ValueError if residual is not 'Anscombe' or 'linear', or if
    pop_ids is given but not of length 3.
    """
    # Fold the model to match folded data so the spectra are comparable.
    if data.folded and not model.folded:
        model = model.fold()
    # errors if there are zero entries in the data or model, mask them:
    # NOTE(review): this mutates the callers' spectra masks in place.
    model.mask[model == 0] = True
    data.mask[data == 0] = True
    # Only compare entries that are unmasked in both spectra.
    masked_model, masked_data = Numerics.intersect_masks(model, data)
    if fig_num is None:
        f = pylab.gcf()
    else:
        f = pylab.figure(fig_num, figsize=(8, 10))
    pylab.clf()
    if adjust:
        pylab.subplots_adjust(bottom=0.07, left=0.07, top=0.95, right=0.95)
    # Shared color scale across all three 2d marginal spectra.
    modelmax = max(masked_model.sum(axis=sax).max() for sax in range(3))
    datamax = max(masked_data.sum(axis=sax).max() for sax in range(3))
    modelmin = min(masked_model.sum(axis=sax).min() for sax in range(3))
    datamin = min(masked_data.sum(axis=sax).min() for sax in range(3))
    max_toplot = max(modelmax, datamax)
    min_toplot = min(modelmin, datamin)
    if vmax is None:
        vmax = max_toplot
    if vmin is None:
        vmin = min_toplot
    # Colorbar arrows to indicate values clipped by vmin/vmax.
    extend = _extend_mapping[vmin <= min_toplot, vmax >= max_toplot]
    # Calculate the residuals for each 2d marginal spectrum.
    if residual == 'Anscombe':
        resids = [Inference.\
                Anscombe_Poisson_residual(masked_model.sum(axis=2 - sax),
                                          masked_data.sum(axis=2 - sax),
                                          mask=vmin) for sax in range(3)]
    elif residual == 'linear':
        resids =[Inference.\
                linear_Poisson_residual(masked_model.sum(axis=2 - sax),
                                        masked_data.sum(axis=2 - sax),
                                        mask=vmin) for sax in range(3)]
    else:
        raise ValueError("Unknown class of residual '%s'." % residual)
    min_resid = min([r.min() for r in resids])
    max_resid = max([r.max() for r in resids])
    if resid_range is None:
        resid_range = max((abs(max_resid), abs(min_resid)))
    resid_extend = _extend_mapping[-resid_range <= min_resid,
                                   resid_range >= max_resid]
    if pop_ids is not None:
        if len(pop_ids) != 3:
            raise ValueError('pop_ids must be of length 3.')
        data_ids = model_ids = resid_ids = pop_ids
    else:
        data_ids = masked_data.pop_ids
        model_ids = masked_model.pop_ids
        if model_ids is None:
            model_ids = data_ids
        # Only label the residual panels when both id sets agree.
        if model_ids == data_ids:
            resid_ids = model_ids
        else:
            resid_ids = None
    # One column per marginalization: axis 2-sax is summed out.
    for sax in range(3):
        marg_data = masked_data.sum(axis=2 - sax)
        marg_model = masked_model.sum(axis=2 - sax)
        # Drop the label of the summed-out axis from each id list.
        curr_ids = []
        for ids in [data_ids, model_ids, resid_ids]:
            if ids is None:
                ids = ['pop0', 'pop1', 'pop2']
            if ids is not None:
                ids = list(ids)
                del ids[2 - sax]
            curr_ids.append(ids)
        ax = pylab.subplot(4, 3, sax + 1)
        # Only the last column carries the colorbars.
        plot_colorbar = (sax == 2)
        plot_single_2d_sfs(marg_data, vmin=vmin, vmax=vmax, pop_ids=curr_ids[0],
                           extend=extend, colorbar=plot_colorbar)
        pylab.subplot(4, 3, sax + 4, sharex=ax, sharey=ax)
        plot_single_2d_sfs(marg_model, vmin=vmin, vmax=vmax,
                           pop_ids=curr_ids[1], extend=extend, colorbar=False)
        resid = resids[sax]
        pylab.subplot(4, 3, sax + 7, sharex=ax, sharey=ax)
        plot_2d_resid(resid, resid_range, pop_ids=curr_ids[2],
                      extend=resid_extend, colorbar=plot_colorbar)
        # Histogram of the unmasked residuals for this marginalization.
        ax = pylab.subplot(4, 3, sax + 10)
        flatresid = numpy.compress(numpy.logical_not(resid.mask.ravel()),
                                   resid.ravel())
        ax.hist(flatresid, bins=20, density=True)
        ax.set_yticks([])
    if show:
        pylab.show()
def plot_3d_spectrum(fs, fignum=None, vmin=None, vmax=None, pop_ids=None,
show=True):
"""
Logarithmic heatmap of single 3d FS.
Note that this method is slow, because it relies on matplotlib's software
rendering. For faster and better looking plots, use plot_3d_spectrum_mayavi.
fs: FS to plot
vmin: Values in fs below vmin are masked in plot.
vmax: Values in fs above vmax saturate the color spectrum.
fignum: Figure number to plot into. If None, a new figure will be created.
pop_ids: If not None, override pop_ids stored in Spectrum.
show: If True, execute pylab.show command to make sure plot displays.
"""
import mpl_toolkits.mplot3d as mplot3d
fig = pylab.figure(fignum)
ax = mplot3d.Axes3D(fig)
if vmin is None:
vmin = fs.min()
if vmax is None:
vmax = fs.max()
# Which entries should I plot?
toplot = numpy.logical_not(fs.mask)
toplot = numpy.logical_and(toplot, fs.data >= vmin)
# Figure out the color mapping.
normalized = (numpy.log(fs)-numpy.log(vmin))\
/(numpy.log(vmax)-numpy.log(vmin))
normalized = numpy.minimum(normalized, 1)
colors = pylab.cm.hsv(normalized)
# We draw by calculating which faces are visible and including each as a
# polygon.
polys, polycolors = [],[]
for ii in range(fs.shape[0]):
for jj in range(fs.shape[1]):
for kk in range(fs.shape[2]):
if not toplot[ii, jj, kk]:
continue
if kk < fs.shape[2] - 1 and toplot[ii, jj, kk + 1]:
pass
else:
polys.append([[ii - 0.5, jj + 0.5, kk + 0.5], [ii + 0.5, jj + 0.5, kk + 0.5],
[ii + 0.5, jj - 0.5, kk + 0.5], [ii - 0.5, jj - 0.5, kk + 0.5]]
)
polycolors.append(colors[ii, jj, kk])
if kk > 0 and toplot[ii, jj, kk - 1]:
pass
else:
polys.append([[ii - 0.5, jj + 0.5, kk - 0.5], [ii + 0.5, jj + 0.5, kk - 0.5],
[ii + 0.5, jj - 0.5, kk - 0.5], [ii - 0.5, jj - 0.5, kk - 0.5]]
)
polycolors.append(colors[ii, jj, kk])
if jj < fs.shape[1] - 1 and toplot[ii, jj + 1, kk]:
pass
else:
polys.append([[ii - 0.5, jj + 0.5, kk + 0.5], [ii + 0.5, jj + 0.5, kk + 0.5],
[ii + 0.5, jj + 0.5, kk - 0.5], [ii - 0.5, jj + 0.5, kk - 0.5]]
)
polycolors.append(colors[ii, jj, kk])
if jj > 0 and toplot[ii, jj - 1, kk]:
pass
else:
polys.append([[ii - 0.5, jj - 0.5, kk + 0.5], [ii + 0.5, jj - 0.5, kk + 0.5],
[ii + 0.5, jj - 0.5, kk - 0.5], [ii - 0.5, jj - 0.5, kk - 0.5]]
)
polycolors.append(colors[ii, jj, kk])
if ii < fs.shape[0] - 1 and toplot[ii + 1, jj, kk]:
pass
else:
polys.append([[ii + 0.5, jj - 0.5, kk + 0.5], [ii + 0.5, jj + 0.5, kk + 0.5],
[ii + 0.5, jj + 0.5, kk - 0.5], [ii + 0.5, jj - 0.5, kk - 0.5]]
)
polycolors.append(colors[ii, jj, kk])
if ii > 0 and toplot[ii - 1, jj, kk]:
pass
else:
polys.append([[ii - 0.5, jj - 0.5, kk + 0.5], [ii - 0.5, jj + 0.5, kk + 0.5],
[ii - 0.5, jj + 0.5, kk - 0.5], [ii - | |
hasattr(weekly_schedule, "warnings"):
print_messages("Warning", weekly_schedule.warnings)
else:
print("Failed to create schedule {}".format(schedule_name))
def create_monthly_schedule(server, args):
    """Create a monthly Data Acceleration schedule and report the outcome.

    Runs on day args.monthly_interval of each month at the requested
    start time; prints any server warnings attached to the new schedule.
    """
    schedule_name = args.create_schedule
    interval = TSC.MonthlyInterval(
        start_time=time(args.start_hour, args.start_minute),
        interval_value=args.monthly_interval)
    schedule_item = TSC.ScheduleItem(schedule_name, 75,
                                     TSC.ScheduleItem.Type.DataAcceleration,
                                     TSC.ScheduleItem.ExecutionOrder.Parallel,
                                     interval)
    created = server.schedules.create(schedule_item)
    if created is None:
        print("Failed to create schedule {}".format(schedule_name))
        return
    print("Monthly schedule \"{}\" created to run on {}th at {:02d}:{:02d}.".format(
        schedule_name, args.monthly_interval, int(args.start_hour), int(args.start_minute)))
    if hasattr(created, "warnings"):
        print_messages("Warning", created.warnings)
def print_messages(header, messages):
    """Print an optional header followed by a list of messages.

    Multiple messages are printed bulleted, one per line, under a
    "header:" line; a single message is printed inline after "header:".
    Nothing is printed when messages is None or empty.
    """
    if messages is None or len(messages) == 0:
        return
    multiple = len(messages) > 1
    bullet = "* " if multiple else " "
    terminator = "\n" if multiple else ""
    parts = []
    if header is not None and len(header) > 0:
        parts.append(header + (":\n" if multiple else ":"))
    for message in messages:
        parts.append(bullet + message + terminator)
    print("".join(parts))
def create_materialized_view_schedule(server, args):
    """Create a schedule of whichever granularity the args request.

    Checks hourly, daily, then weekly interval arguments in that order
    and falls back to monthly. Returns False when the time arguments are
    invalid or the server rejects the request, True otherwise.
    """
    # Reject invalid start/end time combinations up front.
    if not verify_time_arguments(args):
        return False
    dispatch = [
        (args.hourly_interval, create_hourly_schedule),
        (args.daily_interval, create_daily_schedule),
        (args.weekly_interval, create_weekly_schedule),
    ]
    try:
        for interval_arg, creator in dispatch:
            if interval_arg is not None:
                creator(server, args)
                break
        else:
            create_monthly_schedule(server, args)
    except ServerResponseError as error:
        print("{}: {}".format(error.summary, error.detail))
        return False
    return True
def find_project_path(project, all_projects, path):
    """Build the path from the root project down to `project`.

    Each project stores the id of its parent, so this walks the parent
    links toward the root, prepending each ancestor's name. `path` is the
    already-built suffix (pass "" to get the path of `project` itself).
    """
    current = project
    while True:
        path = current.name if len(path) == 0 else current.name + '/' + path
        if current.parent_id is None:
            return path
        current = all_projects[current.parent_id]
def get_project_id_to_project_path_map(server, projects=None):
    """Map project ids to full root-to-project path strings.

    Fetches all projects once into a dict (projects are assumed few enough
    to hold in memory). When `projects` is None, every project is mapped.
    """
    all_projects = {project.id: project for project in TSC.Pager(server.projects)}
    targets = all_projects.values() if projects is None else projects
    return {project.id: find_project_path(project, all_projects, "")
            for project in targets}
def get_project_path_to_project_map(server, projects):
    """Map full project path strings to their project objects.

    Fetches all projects once into a dict for parent lookups (projects
    are assumed few enough to hold in memory).
    """
    all_projects = {project.id: project for project in TSC.Pager(server.projects)}
    return {find_project_path(project, all_projects, ""): project
            for project in projects}
def print_paths(paths):
    """Print every project path (the dict's keys), one per line."""
    for project_path in paths:
        print(project_path)
def get_and_print_acceleration_enabled_sites(server):
    """Return the set of sites whose data-acceleration mode is not "disable".

    Server admins can page through every site on the server; other users
    only receive the site they belong to, so for them the result covers at
    most their own site.
    """
    return {site for site in TSC.Pager(server.sites)
            if site.data_acceleration_mode != "disable"}
def print_acceleration_enabled_workbooks(server, args, site):
    """Print a table of acceleration-enabled workbooks on `site`.

    When args select specific workbooks, only those are considered;
    otherwise all workbooks on the site are scanned. Returns a list of
    (workbook, project_path) tuples for the enabled workbooks.
    """
    workbook_id_to_workbook_from_args = find_workbook_id_to_workbook(server, args)
    # Individual workbooks can be enabled only when the sites they belong to are enabled too
    workbooks = list()
    project_id_to_project_path = dict()
    project_id_to_project_path.update(get_project_id_to_project_path_map(server))
    workbooks.extend(list(TSC.Pager(server.workbooks)))
    rows = list()
    enabled_workbooks = list()
    # Timestamps are converted to the local timezone for display.
    local_tz = tz.tzlocal()
    for workbook in workbooks:
        # Keep the workbook if it matches the (optional) filter AND has
        # acceleration enabled.
        if (workbook_id_to_workbook_from_args is None or workbook.id in workbook_id_to_workbook_from_args) and \
                workbook.data_acceleration_config['acceleration_enabled']:
            project_path = project_id_to_project_path[workbook.project_id]
            enabled_workbooks.append((workbook, project_path))
            # Prefer the recorded last-updated time; fall back to the end
            # time of the most recent acceleration job, if any.
            last_updated_at = workbook.data_acceleration_config['last_updated_at'].astimezone(local_tz) \
                if workbook.data_acceleration_config['last_updated_at'] is not None else None
            last_running_job = find_last_running_job(server, workbook)
            last_updated_at = last_running_job.ended_at \
                if last_updated_at is None and last_running_job is not None else last_updated_at
            # Job duration is only computable when both endpoints are known.
            last_running_time = (last_running_job.ended_at - last_running_job.started_at).total_seconds() \
                if last_running_job is not None and last_running_job.started_at is not None \
                and last_running_job.ended_at is not None else None
            rows.append([
                normalize_site_content_url(site), '{}/{}'.format(project_path, workbook.name),
                workbook.data_acceleration_config['acceleration_status'],
                last_updated_at,
                last_running_time
            ])
    # Sort rows by the acceleration-status column.
    rows.sort(key=lambda x: x[2])
    header = "\nWorkbook Acceleration is enabled for the following workbooks"
    columns = ["Site", "Project/Workbook", "Status",
               "Last Updated", "Task Running Time (Secs)"]
    print_table(rows, columns, header,
                ["Task Running Time (Secs)"])
    return enabled_workbooks
def show_materialized_views_status(server, args, site_content_url):
    """Print enabled workbooks, then the acceleration tasks scheduled for them."""
    enabled = print_acceleration_enabled_workbooks(server, args, site_content_url)
    workbook_id_to_workbook = {
        workbook.id: (workbook, path) for workbook, path in enabled
    }
    show_materialized_views_tasks(server, args, workbook_id_to_workbook)
def update_project_by_path(server, args, data_acceleration_config, site_content_url):
    """Apply the acceleration config to every workbook under a project path.

    Returns False when the path is missing/unknown or the site has
    acceleration disabled; True after the update is attempted.
    """
    if args.project_path is None:
        print("Use --project_path <project path> to specify the path of the project")
        return False
    if not assert_site_enabled_for_materialized_views(server, site_content_url):
        return False
    # Only the leaf name can be matched server-side; the full path is
    # disambiguated afterwards.
    project_name = args.project_path.split('/')[-1]
    candidates = [project for project in TSC.Pager(server.projects)
                  if project.name == project_name]
    if not assert_project_valid(args.project_path, candidates):
        return False
    path_to_project = get_project_path_to_project_map(server, candidates)
    update_project(path_to_project[args.project_path], server, data_acceleration_config)
    return True
def update_project(project, server, data_acceleration_config):
    """Apply the acceleration config to all workbooks in `project`'s subtree."""
    all_projects = list(TSC.Pager(server.projects))
    target_ids = find_project_ids_to_update(all_projects, project)
    for workbook in TSC.Pager(server.workbooks):
        if workbook.project_id not in target_ids:
            continue
        workbook.data_acceleration_config = data_acceleration_config
        update_workbook_internal(server, workbook)
    print("Updated Workbook Acceleration settings for project: {}".format(project.name))
    print('\n')
def find_project_ids_to_update(all_projects, project):
    """Return the ids of `project` and all of its descendant projects."""
    to_update = []
    find_projects_to_update(project, all_projects, to_update)
    return {candidate.id for candidate in to_update}
def parse_workbook_path(file_path):
    """Read workbook paths from a file.

    Returns a mapping of workbook name -> list of project paths that were
    listed for that name (one path per input line).
    """
    workbook_path_mapping = defaultdict(list)
    for raw_path in sanitize_workbook_list(file_path, "path"):
        # Last segment is the workbook; everything before it is the project path.
        *project_parts, workbook_name = raw_path.rstrip().split('/')
        workbook_path_mapping[workbook_name].append('/'.join(project_parts))
    return workbook_path_mapping
def update_workbook_internal(server, workbook):
    """Push a workbook update to the server without renaming it.

    The REST API interprets a populated name field as a rename request,
    so the name is blanked for the call and always restored afterwards,
    even when the update raises.
    """
    original_name = workbook.name
    workbook.name = None
    try:
        server.workbooks.update(workbook)
    finally:
        workbook.name = original_name
def update_workbook_by_path(workbook_path, server, data_acceleration_config, workbook_id_to_schedules):
    """Apply the acceleration config to the workbooks at `workbook_path`.

    Asks for confirmation per workbook (relevant when disabling a workbook
    that sits on schedules) and prints a summary table of updated workbooks.
    Returns False on the first server error.
    NOTE(review): the success path falls through and returns None, not
    True -- confirm callers do not rely on a truthy success value.
    """
    workbook_id_to_workbook = get_workbook_from_path(server, workbook_path)
    rows = list()
    for workbook, path in workbook_id_to_workbook.values():
        try:
            workbook.data_acceleration_config = data_acceleration_config
            # Only YES / YES_FOR_ALL answers trigger the actual update.
            if confirm_workbook_update(workbook, path, workbook_id_to_schedules, data_acceleration_config, None) \
                    in [UserResponse.YES, UserResponse.YES_FOR_ALL]:
                update_workbook_internal(server, workbook)
                rows.append(["{}/{}".format(path, workbook.name)])
        except ServerResponseError as error:
            print("Unable to {} {}/{}. {}".format(
                "enable" if data_acceleration_config["acceleration_enabled"] else "disable",
                path, workbook.name, error.detail
            ))
            # Abort on the first failing workbook.
            return False
    enabled_or_disabled = data_acceleration_config["acceleration_enabled"]
    print_table(rows, ["Project/Workbook"], "Workbooks {}".format(
        "Enabled" if enabled_or_disabled else "Disabled"))
def get_all_materialized_views_tasks(server):
    """Map workbook id -> names of the acceleration schedules it is on."""
    tasks = list(TSC.Pager(
        lambda options: server.tasks.get(task_type=TSC.TaskItem.Type.DataAcceleration)))
    workbook_id_to_schedules = dict()
    for task in tasks:
        schedule_names = workbook_id_to_schedules.setdefault(task.target.id, list())
        schedule_names.append(task.schedule_item.name)
    return workbook_id_to_schedules
def update_workbook(server, args, data_acceleration_config, site_content_url):
    """Apply the acceleration config to workbooks named via args.

    Workbooks come either from a path-list file (--path-list) or a single
    workbook path. Returns False when neither is supplied or the site has
    acceleration disabled; True otherwise.
    """
    single_path = find_workbook_path(args)
    if args.path_list is None and single_path is None:
        print("Use '--path-list <filename>' or --workbook-path <workbook-path> "
              "to specify the path of workbooks")
        print('\n')
        return False
    if not assert_site_enabled_for_materialized_views(server, site_content_url):
        return False
    # Schedule membership is only needed to warn the user when disabling.
    workbook_id_to_schedules = None
    if not data_acceleration_config["acceleration_enabled"]:
        workbook_id_to_schedules = get_all_materialized_views_tasks(server)
    if args.path_list is not None:
        path_mapping = parse_workbook_path(args.path_list)
        all_projects = {project.id: project for project in TSC.Pager(server.projects)}
        update_workbooks_by_paths(all_projects, data_acceleration_config,
                                  server, path_mapping, workbook_id_to_schedules)
    elif single_path is not None:
        update_workbook_by_path(single_path, server, data_acceleration_config,
                                workbook_id_to_schedules)
    return True
def print_table(rows, columns, header, right_aligned_columns=None):
    """Print `rows` as a pretty table under `header`, paginated.

    rows: list of row value lists; prints "<header>: None" when empty.
    columns: column header names.
    right_aligned_columns: optional subset of `columns` to right-align.
    Pages of PAGE_SIZE rows are printed one at a time, waiting for the
    user to press Enter between pages.
    """
    if rows is None or len(rows) == 0:
        print("{}: None".format(header))
        return
    alignments = ["right" if right_aligned_columns is not None and
                  column in right_aligned_columns else "left"
                  for column in columns]
    num_pages = (len(rows) + PAGE_SIZE - 1) // PAGE_SIZE
    pages = [rows[i * PAGE_SIZE: (i + 1) * PAGE_SIZE] for i in range(num_pages)]
    first_page_printed = False
    for page in pages:
        if first_page_printed:
            # BUG FIX: raw_input() only exists on Python 2 and raised
            # NameError here on Python 3; input() is the Python 3 equivalent.
            input("Press Enter to Continue...")
        table = tabulate(page, columns, tablefmt="pretty", colalign=alignments)
        print("{} {}".format(header, "(Cont.)" if first_page_printed else ""))
        print(table)
        first_page_printed = True
def confirm_workbook_update(workbook, path, workbook_id_to_schedules,
                            data_acceleration_config, previous_confirmation):
    """Return the user's confirmation decision for one workbook update.

    A previous "for all" answer is reused without prompting. Enabling
    never needs confirmation, nor does disabling a workbook that is on no
    schedule; only disabling a scheduled workbook prompts the user.
    """
    if previous_confirmation in [UserResponse.YES_FOR_ALL, UserResponse.NO_FOR_ALL]:
        return previous_confirmation
    if data_acceleration_config["acceleration_enabled"]:
        return UserResponse.YES_FOR_ALL
    if workbook.id not in workbook_id_to_schedules:
        return UserResponse.YES
    prompt = ("{}/{} is on schedules {}. Disabling it will "
              "remove it from the schedules. Would you confirm? \n"
              "Press Y for yes, N for No, A for yes_for_all, Q for no_for_all: "
              .format(path, workbook.name, workbook_id_to_schedules[workbook.id]))
    return confirm(prompt, [UserResponse.YES, UserResponse.NO,
                            UserResponse.YES_FOR_ALL, UserResponse.NO_FOR_ALL])
def update_workbooks_by_paths(all_projects, data_acceleration_config, server,
                              workbook_path_mapping, workbook_id_to_schedules):
    """Apply the acceleration config to the workbooks named in the mapping.

    workbook_path_mapping: workbook name -> list of project paths; a
    workbook is updated only when its actual project path is among the
    listed paths. Paths that match no workbook are reported. A single
    "yes/no for all" answer is remembered across workbooks, and a summary
    table of updated workbooks is printed at the end.
    """
    rows = list()
    update_confirmation = None
    for workbook_name, workbook_paths in workbook_path_mapping.items():
        # Server-side filter: fetch only workbooks with this exact name.
        req_option = TSC.RequestOptions()
        req_option.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name,
                                         TSC.RequestOptions.Operator.Equals,
                                         workbook_name))
        workbooks = list(TSC.Pager(server.workbooks, req_option))
        # Track which requested paths never matched a real workbook.
        all_paths = set(workbook_paths[:])
        for workbook in workbooks:
            path = find_project_path(all_projects[workbook.project_id], all_projects, "")
            if path in workbook_paths:
                all_paths.remove(path)
                workbook.data_acceleration_config = data_acceleration_config
                update_confirmation = confirm_workbook_update(workbook, path, workbook_id_to_schedules,
                                                              data_acceleration_config, update_confirmation)
                if update_confirmation in [UserResponse.YES_FOR_ALL, UserResponse.YES]:
                    try:
                        update_workbook_internal(server, workbook)
                        rows.append(["{}/{}".format(path, workbook.name)])
                    except ServerResponseError as error:
                        # Report the failure but keep processing the rest.
                        print("Unable to {} {}/{} due to {}".format(
                            "enable" if data_acceleration_config["acceleration_enabled"] else "disable",
                            path, workbook.name, error.detail
                        ))
        for path in all_paths:
            print("Cannot find workbook path: {}, each line should only contain one workbook path"
                  .format(path + '/' + workbook_name))
    enabled_or_disabled = "Enabled" if data_acceleration_config["acceleration_enabled"] else "Disabled"
    print_table(rows, ["Project/Workbook"], "Workbooks {}".format(enabled_or_disabled))
def update_site(server, args, site_content_url):
    """Set the site's acceleration mode from --enable/--disable and save it.

    Returns False when site-level options are invalid, True after the
    update succeeds.
    """
    if not assert_site_options_valid(args):
        return False
    site = server.sites.get_by_content_url(site_content_url)
    new_mode = "enable_selective" if args.enable is not None else "disable"
    site.data_acceleration_mode = new_mode
    server.sites.update(site)
    print("Updated Workbook Acceleration settings for site: {}\n".format(site.name))
    return True
def create_data_acceleration_config(args):
    """Build the acceleration config dict from the parsed CLI args.

    Acceleration is enabled only when --enable was given and --disable
    was not; last_updated_at/acceleration_status are server-populated and
    start as None.
    """
    return {
        'acceleration_enabled': args.disable is None and args.enable is not None,
        'accelerate_now': bool(args.accelerate_now),
        'last_updated_at': None,
        'acceleration_status': None,
    }
def assert_site_options_valid(args):
    """Return False (with a message) when workbook-only flags were passed
    for a site-level update; True otherwise."""
    if not args.accelerate_now:
        return True
    print('"--accelerate-now" only applies to workbook/project type')
    return False
def assert_site_enabled_for_materialized_views(server, site_content_url):
    """Return True iff the given site allows Workbook Acceleration."""
    parent_site = server.sites.get_by_content_url(site_content_url)
    if parent_site.data_acceleration_mode != "disable":
        return True
    print('Cannot update workbook/project because site is disabled for Workbook Acceleration')
    return False
def assert_project_valid(project_name, projects):
    """Return True iff at least one matching project was found."""
    if len(projects) > 0:
        return True
    print("Cannot find project: {}".format(project_name))
    return False
def | |
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the number of Script triggers supported by the device. Use this attribute when NIFGEN_ATTR_OUTPUT_MODE is set to NIFGEN_VAL_OUTPUT_SCRIPT.',
},
},
1150273: {
'access': 'read only',
'channel_based': 'False',
'lv_property': 'Instrument:Data Marker Events Count',
'name': 'DATA_MARKER_EVENTS_COUNT',
'resettable': 'No',
'type': 'ViInt32',
'documentation': {
'description': 'Returns the number of Data Marker Events supported by the device.',
},
},
1150274: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Arbitrary Waveform:Data Transfer:Direct DMA:Window Address',
'name': 'DIRECT_DMA_WINDOW_ADDRESS',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the window address (beginning of window) of the waveform data source. This window address is specified by your Direct DMA-compatible data source.',
},
},
1150280: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'StartTriggerType',
'lv_property': 'Triggers:Start:Trigger Type',
'name': 'START_TRIGGER_TYPE',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies whether you want the Start trigger to be a Digital Edge, or Software trigger. You can also choose None as the value for this attribute.',
},
},
1150281: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Triggers:Start:Digital Edge:Source',
'name': 'DIGITAL_EDGE_START_TRIGGER_SOURCE',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the source terminal for the Start trigger. This attribute is used only when NIFGEN_ATTR_START_TRIGGER_TYPE is set to Digital Edge.',
},
},
1150282: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'StartTriggerDigitalEdgeEdge',
'lv_property': 'Triggers:Start:Digital Edge:Edge',
'name': 'DIGITAL_EDGE_START_TRIGGER_EDGE',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the active edge for the Start trigger. This attribute is used only when NIFGEN_ATTR_START_TRIGGER_TYPE is set to Digital Edge.',
},
},
1150283: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Triggers:Start:Output Terminal',
'name': 'EXPORTED_START_TRIGGER_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the destination terminal for exporting the Start trigger.',
},
},
1150290: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'ScriptTriggerType',
'lv_property': 'Triggers:Script:Trigger Type',
'name': 'SCRIPT_TRIGGER_TYPE',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the Script trigger type. Depending upon the value of this attribute, additional attributes may need to be configured to fully configure the trigger.',
},
},
1150291: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Triggers:Script:Digital Edge:Source',
'name': 'DIGITAL_EDGE_SCRIPT_TRIGGER_SOURCE',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the source terminal for the Script trigger. This attribute is used when NIFGEN_ATTR_SCRIPT_TRIGGER_TYPE is set to Digital Edge.',
},
},
1150292: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'ScriptTriggerDigitalEdgeEdge',
'lv_property': 'Triggers:Script:Digital Edge:Edge',
'name': 'DIGITAL_EDGE_SCRIPT_TRIGGER_EDGE',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the active edge for the Script trigger. This attribute is used when NIFGEN_ATTR_SCRIPT_TRIGGER_TYPE is set to Digital Edge.',
},
},
1150293: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Triggers:Script:Digital Level:Source',
'name': 'DIGITAL_LEVEL_SCRIPT_TRIGGER_SOURCE',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the source terminal for the Script trigger. This attribute is used when NIFGEN_ATTR_SCRIPT_TRIGGER_TYPE is set to Digital Level.',
},
},
1150294: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'ScriptTriggerDigitalLevelActiveLevel',
'lv_property': 'Triggers:Script:Digital Level:Active Level',
'name': 'DIGITAL_LEVEL_SCRIPT_TRIGGER_ACTIVE_LEVEL',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the active level for the Script trigger. This attribute is used when NIFGEN_ATTR_SCRIPT_TRIGGER_TYPE is set to Digital Level.',
},
},
1150295: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Triggers:Script:Output Terminal',
'name': 'EXPORTED_SCRIPT_TRIGGER_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': '''
Specifies the output terminal for the exported Script trigger.
Setting this attribute to an empty string means that when you commit the session, the signal is removed from that terminal and, if possible, the terminal is tristated.
''',
},
},
1150310: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Events:Ready For Start:Output Terminal',
'name': 'READY_FOR_START_EVENT_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the destination terminal for the Ready for Start Event.',
},
},
1150311: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'ReadyForStartEventActiveLevel',
'lv_property': 'Events:Ready For Start:Level:Active Level',
'name': 'READY_FOR_START_EVENT_LEVEL_ACTIVE_LEVEL',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the output polarity of the Ready for Start Event.',
},
},
1150312: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Events:Marker:Output Terminal',
'name': 'MARKER_EVENT_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the destination terminal for the Marker Event.',
},
},
1150313: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'MarkerEventPulsePolarity',
'lv_property': 'Events:Marker:Pulse:Polarity',
'name': 'MARKER_EVENT_PULSE_POLARITY',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the output polarity of the Marker Event.',
},
},
1150314: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Events:Started:Output Terminal',
'name': 'STARTED_EVENT_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the destination terminal for the Started Event.',
},
},
1150315: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Events:Done:Output Terminal',
'name': 'DONE_EVENT_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the destination terminal for the Done Event.',
},
},
1150316: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'StartedEventActiveLevel',
'lv_property': 'Events:Started:Level:Active Level',
'name': 'STARTED_EVENT_LEVEL_ACTIVE_LEVEL',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the output polarity of the Started Event.',
},
},
1150317: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'DoneEventActiveLevel',
'lv_property': 'Events:Done:Level:Active Level',
'name': 'DONE_EVENT_LEVEL_ACTIVE_LEVEL',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the output polarity of the Done Event.',
},
},
1150318: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'StartedEventPulsePolarity',
'lv_property': 'Events:Started:Pulse:Polarity',
'name': 'STARTED_EVENT_PULSE_POLARITY',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the output polarity of the Started Event.',
},
},
1150319: {
'access': 'read-write',
'channel_based': 'False',
'enum': 'DoneEventPulsePolarity',
'lv_property': 'Events:Done:Pulse:Polarity',
'name': 'DONE_EVENT_PULSE_POLARITY',
'resettable': 'Yes',
'type': 'ViInt32',
'documentation': {
'description': 'Specifies the output polarity of the Done Event.',
},
},
1150320: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Clocks:Sample Clock:Export Output Terminal',
'name': 'EXPORTED_SAMPLE_CLOCK_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the terminal to which to export the Sample Clock.',
},
},
1150321: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Clocks:Reference Clock:Export Output Terminal',
'name': 'EXPORTED_REFERENCE_CLOCK_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the terminal to which to export the Reference Clock.',
},
},
1150322: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Clocks:Reference Clock:Onboard Reference Clock:Export Output Terminal',
'name': 'EXPORTED_ONBOARD_REFERENCE_CLOCK_OUTPUT_TERMINAL',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': 'Specifies the terminal to which to export the Onboard Reference Clock.',
},
},
1150323: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Output:Filters:Flatness Correction Enabled',
'name': 'FLATNESS_CORRECTION_ENABLED',
'resettable': 'Yes',
'type': 'ViBoolean',
'documentation': {
'description': '''
When VI_TRUE, the signal generator applies a flatness correction factor to the generated sine wave in order to ensure the same output power level at all frequencies.
This attribute should be set to VI_FALSE when performing Flatness Calibration.
''',
},
},
1150324: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Arbitrary Waveform:Data Transfer:Streaming:Streaming Waveform Handle',
'name': 'STREAMING_WAVEFORM_HANDLE',
'resettable': 'No',
'type': 'ViInt32',
'documentation': {
'description': '''
Specifies the waveform handle of the waveform used to continuously stream data during generation. This attribute defaults to -1 when no streaming waveform is specified.
Used in conjunction with NIFGEN_ATTR_STREAMING_SPACE_AVAILABLE_IN_WAVEFORM.
''',
},
},
1150325: {
'access': 'read only',
'channel_based': 'False',
'lv_property': 'Arbitrary Waveform:Data Transfer:Streaming:Space Available in Streaming Waveform',
'name': 'STREAMING_SPACE_AVAILABLE_IN_WAVEFORM',
'resettable': 'No',
'type': 'ViInt32',
'documentation': {
'description': '''
Indicates the space available (in samples) in the streaming waveform for writing new data. During generation, this available space may be in multiple locations with, for example, part of the available space at the end of the streaming waveform and the rest at the beginning. In this situation, writing a block of waveform data the size of the total space available in the streaming waveform causes NI-FGEN to return an error, as NI-FGEN will not wrap the data from the end of the waveform to the beginning and cannot write data past the end of the waveform buffer.
To avoid writing data past the end of the waveform, write new data to the waveform in a fixed size that is an integer divisor of the total size of the streaming waveform.
Used in conjunction with the NIFGEN_ATTR_STREAMING_WAVEFORM_HANDLE or NIFGEN_ATTR_STREAMING_WAVEFORM_NAME attributes.
''',
},
},
1150326: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Arbitrary Waveform:Data Transfer:Streaming:Streaming Waveform Name',
'name': 'STREAMING_WAVEFORM_NAME',
'resettable': 'Yes',
'type': 'ViString',
'documentation': {
'description': '''
Specifies the name of the waveform used to continuously stream data during generation. This attribute defaults to // when no streaming waveform is specified.
Use in conjunction with NIFGEN_ATTR_STREAMING_SPACE_AVAILABLE_IN_WAVEFORM.
''',
},
},
1150327: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Arbitrary Waveform:Arbitrary Waveform Mode:Marker Position',
'name': 'ARB_MARKER_POSITION',
'resettable': 'No',
'type': 'ViInt32',
'documentation': {
'description': '''
Specifies the position for a marker to be asserted in the arbitrary waveform. This attribute defaults to -1 when no marker position is specified. Use this attribute when NIFGEN_ATTR_OUTPUT_MODE is set to NIFGEN_VAL_OUTPUT_ARB.
Use niFgen_ExportSignal to export the marker signal.
''',
},
},
1150328: {
'access': 'read-write',
'channel_based': 'False',
'lv_property': 'Arbitrary Waveform:Arbitrary Waveform Mode:Repeat Count',
'name': 'ARB_REPEAT_COUNT',
'resettable': 'No',
'type': 'ViInt32',
'documentation': {
'description': '''
Specifies number of times to repeat the arbitrary waveform when the triggerMode parameter of nifgen_ConfigureTriggerMode is set to NIFGEN_VAL_SINGLE or NIFGEN_VAL_STEPPED. This attribute is ignored if the triggerMode parameter is set to NIFGEN_VAL_CONTINUOUS or NIFGEN_VAL_BURST. Use this attribute when NIFGEN_ATTR_OUTPUT_MODE is set to | |
client_id, flow_id, next_needed_request=1)
self.assertEqual(requests_for_processing, {})
client_id, flow_id = self._SetupClientAndFlow(next_request_to_process=3)
for request_id in [1, 3, 4, 5, 7]:
request = rdf_flow_objects.FlowRequest(
client_id=client_id,
flow_id=flow_id,
request_id=request_id,
needs_processing=True)
self.db.WriteFlowRequests([request])
# Request 4 has some responses.
responses = [
rdf_flow_objects.FlowResponse(
client_id=client_id, flow_id=flow_id, request_id=4, response_id=i)
for i in range(3)
]
self.db.WriteFlowResponses(responses)
requests_for_processing = self.db.ReadFlowRequestsReadyForProcessing(
client_id, flow_id, next_needed_request=3)
# We expect three requests here. Req #1 is old and should not be there, req
# #7 can't be processed since we are missing #6 in between. That leaves
# requests #3, #4 and #5.
self.assertLen(requests_for_processing, 3)
self.assertEqual(list(requests_for_processing), [3, 4, 5])
for request_id in requests_for_processing:
request, _ = requests_for_processing[request_id]
self.assertEqual(request_id, request.request_id)
self.assertEqual(requests_for_processing[4][1], responses)
def testFlowProcessingRequestsQueue(self):
  """Queued flow processing requests must be delivered to a registered handler."""
  flow_ids = []
  for _ in range(5):
    client_id, flow_id = self._SetupClientAndFlow()
    flow_ids.append(flow_id)

  delivered = queue.Queue()

  def Callback(request):
    self.db.AckFlowProcessingRequests([request])
    delivered.put(request)

  self.db.RegisterFlowProcessingHandler(Callback)
  self.addCleanup(self.db.UnregisterFlowProcessingHandler)

  # NOTE: client_id is the one from the last _SetupClientAndFlow call,
  # matching the original test's behavior.
  requests = [
      rdf_flows.FlowProcessingRequest(client_id=client_id, flow_id=fid)
      for fid in flow_ids
  ]
  self.db.WriteFlowProcessingRequests(requests)

  received = []
  while len(received) < 5:
    try:
      received.append(delivered.get(True, timeout=6))
    except queue.Empty:
      self.fail("Timed out waiting for messages, expected 5, got %d" %
                len(received))

  self.assertCountEqual(requests, received)
def testFlowProcessingRequestsQueueWithDelay(self):
  """Delayed requests are delivered only after their delivery_time has passed."""
  flow_ids = []
  for _ in range(5):
    client_id, flow_id = self._SetupClientAndFlow()
    flow_ids.append(flow_id)

  delivered = queue.Queue()

  def Callback(request):
    self.db.AckFlowProcessingRequests([request])
    delivered.put(request)

  self.db.RegisterFlowProcessingHandler(Callback)
  self.addCleanup(self.db.UnregisterFlowProcessingHandler)

  # Schedule all requests half a second into the future.
  now = rdfvalue.RDFDatetime.Now()
  delivery_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(
      now.AsSecondsSinceEpoch() + 0.5)
  requests = [
      rdf_flows.FlowProcessingRequest(
          client_id=client_id, flow_id=fid, delivery_time=delivery_time)
      for fid in flow_ids
  ]
  self.db.WriteFlowProcessingRequests(requests)

  received = []
  while len(received) < 5:
    try:
      request = delivered.get(True, timeout=6)
    except queue.Empty:
      self.fail("Timed out waiting for messages, expected 5, got %d" %
                len(received))
    received.append(request)
    # No request may arrive before its scheduled delivery time.
    self.assertGreater(rdfvalue.RDFDatetime.Now(), request.delivery_time)

  self.assertCountEqual(requests, received)

  # Every request was acked in the callback, so none should remain stored.
  leftover = self.db.ReadFlowProcessingRequests()
  self.assertEqual(leftover, [])
def testAcknowledgingFlowProcessingRequestsWorks(self):
  """AckFlowProcessingRequests removes acked requests; DeleteAll wipes the rest."""
  flow_ids = []
  for _ in range(5):
    client_id, flow_id = self._SetupClientAndFlow()
    flow_ids.append(flow_id)
  flow_ids.sort()

  # Deliver far in the future so nothing is processed during the test.
  now = rdfvalue.RDFDatetime.Now()
  delivery_time = now + rdfvalue.Duration.From(10, rdfvalue.MINUTES)
  requests = [
      rdf_flows.FlowProcessingRequest(
          client_id=client_id, flow_id=fid, delivery_time=delivery_time)
      for fid in flow_ids
  ]
  self.db.WriteFlowProcessingRequests(requests)

  # All five stored requests should be readable.
  stored_requests = self.db.ReadFlowProcessingRequests()
  stored_requests.sort(key=lambda r: r.flow_id)
  self.assertLen(stored_requests, 5)
  self.assertCountEqual([r.flow_id for r in stored_requests], flow_ids)

  # Ack requests 1 and 2; the other three must remain in the db.
  self.db.AckFlowProcessingRequests(stored_requests[1:3])
  remaining = self.db.ReadFlowProcessingRequests()
  self.assertLen(remaining, 3)
  self.assertCountEqual([r.flow_id for r in remaining],
                        [flow_ids[0], flow_ids[3], flow_ids[4]])

  # DeleteAllFlowProcessingRequests removes everything that is left.
  self.db.DeleteAllFlowProcessingRequests()
  self.assertEqual(self.db.ReadFlowProcessingRequests(), [])

  self.db.UnregisterFlowProcessingHandler()
def _SampleResults(self, client_id, flow_id, hunt_id=None):
  """Builds 10 distinct FlowResults (tags tag_0..tag_9) for the given flow."""
  return [
      rdf_flow_objects.FlowResult(
          client_id=client_id,
          flow_id=flow_id,
          hunt_id=hunt_id,
          tag="tag_%d" % i,
          payload=rdf_client.ClientSummary(
              client_id=client_id,
              system_manufacturer="manufacturer_%d" % i,
              install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
      for i in range(10)
  ]
def _WriteFlowResults(self, sample_results=None, multiple_timestamps=False):
  """Writes sample_results; one db call per result when multiple_timestamps."""
  if multiple_timestamps:
    # Separate writes give each result its own timestamp.
    for result in sample_results:
      self.db.WriteFlowResults([result])
  else:
    # Shuffle in place so tests can't depend on write order when all
    # results end up with a single shared timestamp.
    random.shuffle(sample_results)
    self.db.WriteFlowResults(sample_results)
  return sample_results
def testWritesAndCounts40001FlowResults(self):
  """Writes more results than a typical page size and counts them back."""
  client_id, flow_id = self._SetupClientAndFlow()
  # 40001 references to one result object; the db should store 40001 rows.
  result = rdf_flow_objects.FlowResult(
      client_id=client_id,
      flow_id=flow_id,
      payload=rdf_client.ClientSummary(client_id=client_id))
  self.db.WriteFlowResults([result] * 40001)

  self.assertEqual(self.db.CountFlowResults(client_id, flow_id), 40001)
def testWritesAndReadsSingleFlowResultOfSingleType(self):
  """A single written result reads back with payload and write timestamp."""
  client_id, flow_id = self._SetupClientAndFlow()
  sample = rdf_flow_objects.FlowResult(
      client_id=client_id,
      flow_id=flow_id,
      payload=rdf_client.ClientSummary(client_id=client_id))

  with test_lib.FakeTime(42):
    self.db.WriteFlowResults([sample])

  results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
  self.assertLen(results, 1)
  self.assertEqual(results[0].payload, sample.payload)
  # The stored timestamp should reflect the (faked) wall-clock write time.
  self.assertEqual(results[0].timestamp.AsSecondsSinceEpoch(), 42)
def testWritesAndReadsMultipleFlowResultsOfSingleType(self):
  """Results written in one batch all read back (order unspecified)."""
  client_id, flow_id = self._SetupClientAndFlow()
  sample_results = self._WriteFlowResults(
      self._SampleResults(client_id, flow_id))

  results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
  self.assertLen(results, len(sample_results))
  # All results share one timestamp (single WriteFlowResults call), so only
  # set-equality of payloads is asserted, not ordering.
  self.assertCountEqual(
      [r.payload for r in results],
      [r.payload for r in sample_results],
  )
def testWritesAndReadsMultipleFlowResultsWithDifferentTimestamps(self):
  """Results written separately read back sorted by ascending timestamp."""
  client_id, flow_id = self._SetupClientAndFlow()
  sample_results = self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)

  results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
  self.assertLen(results, len(sample_results))
  # Each result got its own write timestamp, so exact ordering is asserted.
  self.assertEqual(
      [r.payload for r in results],
      [r.payload for r in sample_results],
  )
def testWritesAndReadsMultipleFlowResultsOfMultipleTypes(self):
  """Results of several payload types are all written and all read back."""
  client_id, flow_id = self._SetupClientAndFlow()

  summaries = self._WriteFlowResults(sample_results=[
      rdf_flow_objects.FlowResult(
          client_id=client_id,
          flow_id=flow_id,
          payload=rdf_client.ClientSummary(
              client_id=client_id,
              install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
      for i in range(10)
  ])
  crashes = self._WriteFlowResults(sample_results=[
      rdf_flow_objects.FlowResult(
          client_id=client_id,
          flow_id=flow_id,
          payload=rdf_client.ClientCrash(
              client_id=client_id,
              timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
      for i in range(10)
  ])
  infos = self._WriteFlowResults(sample_results=[
      rdf_flow_objects.FlowResult(
          client_id=client_id,
          flow_id=flow_id,
          payload=rdf_client.ClientInformation(client_version=i))
      for i in range(10)
  ])
  expected = summaries + crashes + infos

  results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
  self.assertLen(results, len(expected))
  self.assertCountEqual(
      [r.payload for r in results],
      [r.payload for r in expected],
  )
def testReadFlowResultsCorrectlyAppliesOffsetAndCountFilters(self):
  """Every (offset, count) window must match the same slice of the samples."""
  client_id, flow_id = self._SetupClientAndFlow()
  sample_results = self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)

  for count in range(1, 11):
    for offset in range(10):
      results = self.db.ReadFlowResults(client_id, flow_id, offset, count)
      expected = sample_results[offset:offset + count]
      result_payloads = [x.payload for x in results]
      expected_payloads = [x.payload for x in expected]
      self.assertEqual(
          result_payloads, expected_payloads,
          "Results differ from expected (from %d, size %d): %s vs %s" %
          (offset, count, result_payloads, expected_payloads))
def testReadFlowResultsCorrectlyAppliesWithTagFilter(self):
  """Tag filtering is exact-match: no unknown tags, no prefix matches."""
  client_id, flow_id = self._SetupClientAndFlow()
  sample_results = self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)

  # An unknown tag matches nothing.
  self.assertFalse(
      self.db.ReadFlowResults(client_id, flow_id, 0, 100, with_tag="blah"))
  # A prefix of a real tag matches nothing either.
  self.assertFalse(
      self.db.ReadFlowResults(client_id, flow_id, 0, 100, with_tag="tag"))

  # An exact tag selects exactly the one matching result.
  results = self.db.ReadFlowResults(
      client_id, flow_id, 0, 100, with_tag="tag_1")
  self.assertEqual([r.payload for r in results], [sample_results[1].payload])
def testReadFlowResultsCorrectlyAppliesWithTypeFilter(self):
  """with_type returns only results whose payload matches the given type."""
  client_id, flow_id = self._SetupClientAndFlow()

  summaries = self._WriteFlowResults(sample_results=[
      rdf_flow_objects.FlowResult(
          client_id=client_id,
          flow_id=flow_id,
          payload=rdf_client.ClientSummary(
              client_id=client_id,
              install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
      for i in range(10)
  ])
  self._WriteFlowResults(sample_results=[
      rdf_flow_objects.FlowResult(
          client_id=client_id,
          flow_id=flow_id,
          payload=rdf_client.ClientCrash(
              client_id=client_id,
              timestamp=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
      for i in range(10)
  ])

  # No results of this type were written, so nothing is returned.
  self.assertFalse(
      self.db.ReadFlowResults(
          client_id,
          flow_id,
          0,
          100,
          with_type=compatibility.GetName(rdf_client.ClientInformation)))

  # Filtering on ClientSummary yields exactly the summary results.
  results = self.db.ReadFlowResults(
      client_id,
      flow_id,
      0,
      100,
      with_type=compatibility.GetName(rdf_client.ClientSummary))
  self.assertCountEqual(
      [r.payload for r in results],
      [r.payload for r in summaries],
  )
def testReadFlowResultsCorrectlyAppliesWithSubstringFilter(self):
  """with_substring matches against serialized payload content."""
  client_id, flow_id = self._SetupClientAndFlow()
  sample_results = self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)

  # A substring present in no payload matches nothing.
  self.assertFalse(
      self.db.ReadFlowResults(
          client_id, flow_id, 0, 100, with_substring="blah"))

  # A substring shared by every payload matches all of them, in order.
  results = self.db.ReadFlowResults(
      client_id, flow_id, 0, 100, with_substring="manufacturer")
  self.assertEqual(
      [r.payload for r in results],
      [r.payload for r in sample_results],
  )

  # A more specific substring narrows the match to a single result.
  results = self.db.ReadFlowResults(
      client_id, flow_id, 0, 100, with_substring="manufacturer_1")
  self.assertEqual([r.payload for r in results], [sample_results[1].payload])
def testReadFlowResultsCorrectlyAppliesVariousCombinationsOfFilters(self):
  """Combined tag/type/substring filters behave as set intersection."""
  client_id, flow_id = self._SetupClientAndFlow()
  sample_results = self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)

  # Each map pairs a filter value with the subset of results it selects;
  # a None key means "this filter is not applied".
  tags = {None: list(sample_results), "tag_1": [sample_results[1]]}
  substrings = {
      None: list(sample_results),
      "manufacturer": list(sample_results),
      "manufacturer_1": [sample_results[1]],
  }
  types = {
      None: list(sample_results),
      compatibility.GetName(rdf_client.ClientSummary): list(sample_results),
  }

  for tag, tag_expected in tags.items():
    for substring, substring_expected in substrings.items():
      for type_name, type_expected in types.items():
        # The combined filter must select the intersection of the subsets.
        expected = [
            r for r in tag_expected
            if r in substring_expected and r in type_expected
        ]
        results = self.db.ReadFlowResults(
            client_id,
            flow_id,
            0,
            100,
            with_tag=tag,
            with_type=type_name,
            with_substring=substring)
        self.assertCountEqual(
            [r.payload for r in expected], [r.payload for r in results],
            "Result items do not match for "
            "(tag=%s, type=%s, substring=%s): %s vs %s" %
            (tag, type_name, substring, expected, results))
def testReadFlowResultsReturnsPayloadWithMissingTypeAsSpecialValue(self):
  """Results whose payload class is unregistered must still be readable.

  Simulates reading results written by a binary that knew a payload type
  this binary does not, by temporarily removing the class from the global
  RDFValue registry.
  """
  client_id, flow_id = self._SetupClientAndFlow()
  sample_results = self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)
  type_name = compatibility.GetName(rdf_client.ClientSummary)
  try:
    # Unregister the payload class for the duration of the read.
    cls = rdfvalue.RDFValue.classes.pop(type_name)
    results = self.db.ReadFlowResults(client_id, flow_id, 0, 100)
  finally:
    # Always restore the registry, even if the read raises, so other tests
    # are not affected.
    rdfvalue.RDFValue.classes[type_name] = cls
  self.assertLen(sample_results, len(results))
  for r in results:
    # Unrecognized payloads are wrapped in a sentinel type instead of being
    # dropped or raising on read.
    self.assertIsInstance(r.payload,
                          rdf_objects.SerializedValueOfUnrecognizedType)
    self.assertEqual(r.payload.type_name, type_name)
def testCountFlowResultsReturnsCorrectResultsCount(self):
  """CountFlowResults agrees with the number of results written."""
  client_id, flow_id = self._SetupClientAndFlow()
  sample_results = self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)

  self.assertEqual(
      self.db.CountFlowResults(client_id, flow_id), len(sample_results))
def testCountFlowResultsCorrectlyAppliesWithTagFilter(self):
  """CountFlowResults honors the with_tag filter."""
  client_id, flow_id = self._SetupClientAndFlow()
  self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)

  # An unknown tag counts zero; each sample tag matches exactly one result.
  self.assertEqual(
      self.db.CountFlowResults(client_id, flow_id, with_tag="blah"), 0)
  self.assertEqual(
      self.db.CountFlowResults(client_id, flow_id, with_tag="tag_1"), 1)
def testCountFlowResultsCorrectlyAppliesWithTypeFilter(self):
  """CountFlowResults honors the with_type filter for each payload type."""
  client_id, flow_id = self._SetupClientAndFlow()
  self._WriteFlowResults(sample_results=[
      rdf_flow_objects.FlowResult(
          client_id=client_id,
          flow_id=flow_id,
          payload=rdf_client.ClientSummary(client_id=client_id))
      for _ in range(10)
  ])
  self._WriteFlowResults(sample_results=[
      rdf_flow_objects.FlowResult(
          client_id=client_id,
          flow_id=flow_id,
          payload=rdf_client.ClientCrash(client_id=client_id))
      for _ in range(10)
  ])

  def CountOfType(payload_cls):
    # Helper: count results whose payload is of the given class.
    return self.db.CountFlowResults(
        client_id, flow_id, with_type=compatibility.GetName(payload_cls))

  self.assertEqual(CountOfType(rdf_client.ClientInformation), 0)
  self.assertEqual(CountOfType(rdf_client.ClientSummary), 10)
  self.assertEqual(CountOfType(rdf_client.ClientCrash), 10)
def testCountFlowResultsCorrectlyAppliesWithTagAndWithTypeFilters(self):
  """with_tag and with_type combine; only one sample satisfies both."""
  client_id, flow_id = self._SetupClientAndFlow()
  self._WriteFlowResults(
      self._SampleResults(client_id, flow_id), multiple_timestamps=True)

  num_results = self.db.CountFlowResults(
      client_id,
      flow_id,
      with_tag="tag_1",
      with_type=compatibility.GetName(rdf_client.ClientSummary))
  self.assertEqual(num_results, 1)
def testCountFlowResultsByTypeReturnsCorrectNumbers(self):
  """CountFlowResultsByType reports a per-payload-type breakdown."""
  client_id, flow_id = self._SetupClientAndFlow()
  summary = rdf_flow_objects.FlowResult(
      client_id=client_id,
      flow_id=flow_id,
      payload=rdf_client.ClientSummary(client_id=client_id))
  crash = rdf_flow_objects.FlowResult(
      client_id=client_id,
      flow_id=flow_id,
      payload=rdf_client.ClientCrash(client_id=client_id))
  self._WriteFlowResults(sample_results=[summary] * 3)
  self._WriteFlowResults(sample_results=[crash] * 5)

  counts_by_type = self.db.CountFlowResultsByType(client_id, flow_id)
  self.assertEqual(counts_by_type, {
      "ClientSummary": 3,
      "ClientCrash": 5,
  })
def _CreateErrors(self, client_id, flow_id, hunt_id=None):
  """Builds 10 distinct FlowErrors (tags tag_0..tag_9) for the given flow."""
  return [
      rdf_flow_objects.FlowError(
          client_id=client_id,
          flow_id=flow_id,
          hunt_id=hunt_id,
          tag="tag_%d" % i,
          payload=rdf_client.ClientSummary(
              client_id=client_id,
              system_manufacturer="manufacturer_%d" % i,
              install_date=rdfvalue.RDFDatetime.FromSecondsSinceEpoch(10 + i)))
      for i in range(10)
  ]
def _WriteFlowErrors(self, sample_errors=None, multiple_timestamps=False):
  """Writes sample_errors; one db call per error when multiple_timestamps."""
  if multiple_timestamps:
    # Separate writes give each error its own timestamp.
    for error in sample_errors:
      self.db.WriteFlowErrors([error])
  else:
    # Shuffle in place so tests can't depend on write order when all
    # errors end up with a single shared timestamp.
    random.shuffle(sample_errors)
    self.db.WriteFlowErrors(sample_errors)
  return sample_errors
def testWritesAndCounts40001FlowErrors(self):
  """Writes more errors than a typical page size and counts them back."""
  client_id, flow_id = self._SetupClientAndFlow()
  # 40001 references to one error object; the db should store 40001 rows.
  error = rdf_flow_objects.FlowError(
      client_id=client_id,
      flow_id=flow_id,
      payload=rdf_client.ClientSummary(client_id=client_id))
  self.db.WriteFlowErrors([error] * 40001)

  self.assertEqual(self.db.CountFlowErrors(client_id, flow_id), 40001)
def testWritesAndReadsSingleFlowErrorOfSingleType(self):
client_id, flow_id = self._SetupClientAndFlow()
sample_error = rdf_flow_objects.FlowError(
client_id=client_id,
flow_id=flow_id,
payload=rdf_client.ClientSummary(client_id=client_id))
with test_lib.FakeTime(42):
self.db.WriteFlowErrors([sample_error])
errors = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.