input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>MWATelescope/eda1
#!/usr/bin/env python
"""
Runs on the Raspberry Pi connected to the Kaelus beamformer via USB, to send pointing commands, etc.
"""
import os
import logging
from logging import handlers
import random
import sys
import time
import astropy
import astropy.time
import astropy.units
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation
import serial
from serial import Serial
import threading
# Pick the common "string" base type for isinstance() checks: 'basestring'
# only exists on Python 2, so its absence tells us we are on Python 3.
try:
    # noinspection PyUnresolvedReferences
    STR_CLASS = basestring
except NameError:
    STR_CLASS = str
# set up the logging
LOGLEVEL_CONSOLE = logging.INFO  # Logging level for console messages (INFO, DEBUG, ERROR, CRITICAL, etc)
LOGLEVEL_LOGFILE = logging.INFO  # Logging level for logfile
LOGLEVEL_REMOTE = logging.INFO  # Logging level for a remote log handler - not used in this chunk, presumably consumed elsewhere; confirm
LOGFILE = "/tmp/kaeslave.log"  # Path of the rotating on-disk log file (see RotatingFileHandler below)
class MWALogFormatter(logging.Formatter):
    """Log formatter producing lines of the form '<LEVEL>: time <unix-seconds> - <message>'."""

    def format(self, record):
        """Render *record* with its level name and a microsecond-resolution unix timestamp."""
        stamp = time.time()
        return "{0}: time {1:10.6f} - {2}".format(record.levelname, stamp, record.getMessage())
mwalf = MWALogFormatter()  # one formatter instance, shared by both handlers

logger = logging.getLogger()  # root logger, so messages from every module are captured
logger.setLevel(logging.DEBUG)  # pass everything through; per-handler levels do the filtering

fh = handlers.RotatingFileHandler(LOGFILE, maxBytes=1000000000,
                                  backupCount=5)  # 1 Gb per file, max of five old log files
fh.setLevel(LOGLEVEL_LOGFILE)
fh.setFormatter(mwalf)

ch = logging.StreamHandler()  # console output (stderr by default)
ch.setLevel(LOGLEVEL_CONSOLE)
ch.setFormatter(mwalf)

# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
import Pyro4

# Route uncaught exceptions through Pyro4 so remote tracebacks are rendered in full.
sys.excepthook = Pyro4.util.excepthook
Pyro4.config.DETAILED_TRACEBACK = True
# NOTE(review): accepting pickle is unsafe against untrusted peers - presumably
# acceptable because this service runs on a trusted internal network; confirm.
Pyro4.config.SERIALIZERS_ACCEPTED.add('pickle')
import pyslave
import beamformer
import pointing
TILEID = 99  # Tile id for this station - consumed elsewhere (not in this chunk); confirm
CLIENTNAME = 'Kaelus'  # Name used to identify this client - presumably to the pointing master; confirm
SIMULATE = False  # Set to False to control a real Kaelus beamformer
TEST = False  # Test-mode flag - not referenced in this chunk, presumably used elsewhere; confirm
STRICT = False  # Passed as the 'strict' flag to pointing.calc_delays() in doPointing()
ONLYBFs = None  # Currently-enabled first-stage beamformers (None = all); updated by KaelusBeamformer.onlybfs()
# ONLYBFs = ['E']
CPOS = (0.0, 0.0, 0.0)  # Offset from geometric centre, in metres, to use as delay centre for pointing calculations
SLAVEPORT = 19987  # TCP port - presumably for the Pyro4 pointing-slave daemon; confirm
DEVICE0 = '/dev/ttyUSB0'  # Serial device for one polarisation board (X/Y assignment decided by serial number at startup)
DEVICE1 = '/dev/ttyUSB1'  # Serial device for the other polarisation board
DEFATTEN = 0  # Default channel attenuation, 0-255
DIPOLEFILE = None  # Filename containing dipole offsets from the centre in metres, or None to use MWA tile spacings
MWAPOS = EarthLocation.from_geodetic(lon="116:40:14.93", lat="-26:42:11.95", height=377.8)  # MWA site location
# Timeout for PyController comms to the PointingSlave instance
PS_TIMEOUT = 60
class KaelusBeamformer(object):
"""Represents a single Kaelus beamformer. Two attributes (.X and .Y) contain
instances of the 'Beamformer' class from beamformer.py, to handle serial
communications.
"""
def __init__(self, simulate=False, dipolefile=DIPOLEFILE):
    """
    Create an instance of a Kaelus beamformer.

    Opens both serial ports, initialises both polarisation boards in parallel
    threads, then assigns them to self.X / self.Y based on their hardware
    serial numbers.

    :param simulate: If True, don't talk to the actual hardware, just simulate a physical box
    :param dipolefile: File name to read dipole physical locations from.
    """
    self.simulate = simulate
    if not self.simulate:
        try:
            serial0 = Serial(DEVICE0)  # serial port for X pol board
            serial1 = Serial(DEVICE1)  # serial port for Y pol board
        except serial.serialutil.SerialException:
            logger.critical("Error opening serial port, exiting")
            sys.exit()
        A = beamformer.Beamformer(serial0, name=DEVICE0)
        B = beamformer.Beamformer(serial1, name=DEVICE1)
        # Create two threads to initialise the X and Y beamformer boxes in parallel. Wait until both
        # have finished before we exit. We don't know which is X and which is Y until after the initialisation,
        # when we can read the serial numbers.
        athread = threading.Thread(target=self._init_pol, args=(A,))
        bthread = threading.Thread(target=self._init_pol, args=(B,))
        athread.start()
        bthread.start()
        athread.join()
        bthread.join()
        # Decide which physical box is X and which is Y from the hardware serial number:
        # board TX2150200007 is X, board TX2150200008 is Y.
        if 'TX2150200007' in A.BFSerial:
            self.X = A
            self.Y = B
        elif 'TX2150200008' in A.BFSerial:
            self.X = B
            self.Y = A
        else:
            # Neither known serial matched - fall back to port order, but warn loudly.
            logger.critical('Unable to find a valid BFSerial value, X/Y polarisations could be wrong: %s, %s' % (A.BFSerial, B.BFSerial))
            self.X = A
            self.Y = B
        logger.info('Assigned delay board TX2150200007 as X, TX2150200008 as Y')
        self.X.name = 'X'
        self.Y.name = 'Y'
        self.channels = [self.X, self.Y]
    else:
        logger.critical("Running in simulation mode, not controlling the actual Kaelus hardware!")
        self.X = None
        self.Y = None
        self.channels = []
    self.offsets = pointing.getOffsets(dipolefile=dipolefile)  # Read dipole offsets in from file
def _init_pol(self, pol):
    """
    Initialise one physical Kaelus component (X or Y).

    Clears alarms and writes default enables, delays and attenuations to all
    16 channels, then pushes the settings to the hardware. Run in its own
    thread by __init__ so both boards initialise in parallel.

    :param pol: An instance of beamformer.Beamformer, either self.X or self.Y
    """
    logger.info("Initialising %s" % pol.name)
    with pol.lock:  # serialise access to this board's serial link
        pol.OpenConnection()
        pol.ClearAlarms()
        pol.CombinerLNA = 1  # combiner LNA setting - presumably 1 = enabled; confirm against beamformer.py
        # Per-channel enable bitmask: 0x01 is LNA2, 0x02 is LNA1, 0x04 is output, 0x08 is input, 0x10 is antenna LNA.
        # 15 (0x0F) sets the first four bits; the antenna-LNA bit (0x10) is left clear here.
        pol.ChannelEnableDiag = [15] * 16
        pol.ChannelDelay = [128] * 16  # mid-range: delays are written as signed values rescaled by +128 (see doPointing)
        pol.ChannelAttenuators = [DEFATTEN] * 16
        pol.UpdateSettingsDiagnostics()  # push the staged settings to the hardware
        pol.ReadAll()
        pol.PrintInfo(diag=1)
        pol.CloseConnection()
    logger.info("%s initialisation finished." % pol.name)
def onlybfs(self, bfids=None):
    """Set which of the MWA beamformers contribute to the EDA output. Unused inputs
    are disabled to avoid adding in noise.

    If called with bfids=None, enables all first stage inputs to the Kaelus beamformer. If bfids
    is a list or string of single hex digits, disable all Kaelus inputs except the ones specified.

    The state is stored in a global variable, and returned by the get_status call in the PointingSlave
    class.

    Result is True if the call succeeded, False if there was a problem with the bfids parameter.

    :param bfids: A list or a string of hex digits specifying inputs to use, or None to use all of them.
    :return: True on success, False if bfids was invalid (hardware untouched in that case).
    """
    global ONLYBFs
    onlybfs = None
    if bfids is None:
        logger.info('Enabling all channels')
        enables = [15] * 16
    elif isinstance(bfids, (list, STR_CLASS)):  # was type(bfids) == list: isinstance also accepts list subclasses
        enables = [0] * 16
        onlybfs = []
        for bfid in bfids:
            if isinstance(bfid, STR_CLASS) and (len(bfid) == 1):
                code = bfid.upper()
                if code in pointing.HEXD:
                    onlybfs.append(code)
                    enables[pointing.HEXD.index(code)] = 15
                else:
                    logger.error("Invalid BFID code: %s" % bfid)
                    return False
            else:
                logger.error("Invalid BFID: %s" % bfid)
                return False
        logger.info('Enabling only beamformers: %s' % bfids)
    else:
        # BUGFIX: an unrecognised bfids type previously fell through silently and
        # enabled ALL channels; reject it explicitly instead.
        logger.error("Invalid bfids argument (expected None, list or string): %s" % (bfids,))
        return False
    for pol in [self.X, self.Y]:
        with pol.lock:
            pol.ChannelEnableDiag = enables
            pol.OpenConnection()
            pol.UpdateSettingsDiagnostics()
            pol.ReadAll()
            pol.PrintInfo(diag=1)
            pol.CloseConnection()
    # BUGFIX: log line previously named a nonexistent method 'only1bf'.
    logger.info('Finished KaelusBeamformer.onlybfs(bfids=%s) --> %s, %s' % (bfids, onlybfs, enables))
    ONLYBFs = onlybfs
    return True
def MarcinHack(self):
    """Enable a couple of specific Kaelus inputs for testing, and disable the rest.

    Enables input 5 (0-based index 4) on X, and input 2 (0-based index 1) on Y.

    :return: True always.
    """
    # BUGFIX: the opening log message had the polarisations swapped relative to the
    # code (and to the closing log line): the code enables input 5 on X and input 2 on Y.
    logger.info('MarcinHack: Enabling input 5 in X, and 2 in Y (both indexed from 1)')
    for pol in [self.X, self.Y]:
        with pol.lock:
            enables = [0] * 16  # All channels disabled
            if pol is self.X:
                enables[4] = 1  # input 5 (0-based index 4)
            elif pol is self.Y:
                enables[1] = 1  # input 2 (0-based index 1)
            pol.ChannelEnableDiag = enables
            pol.OpenConnection()
            pol.UpdateSettingsDiagnostics()
            pol.ReadAll()
            pol.PrintInfo(diag=1)
            pol.CloseConnection()
    logger.info('Finished KaelusBeamformer.MarcinHack with 2Y and 5X enabled.')
    return True
def doPointing(self, starttime=0, xaz=0.0, xel=90.0, xdelays=None):
    """Given coordinates or delay settings, repoint the tile.

    NOTE that yaz and yel are ignored - only xaz and xel parameters are used to point BOTH polarisations. This is to
    save time, as the delay calculations can take a significant length of time on a Raspberry Pi.

    The X and Y polarisations are pointed in independent threads in parallel, to save time.

    Result is True for pointed OK, False for below 'horizon', None for simulated.

    :param starttime: If supplied, wait until this unix timestamp before actually pointing the tile, then return
    :param xaz: azimuth (in degrees) to point to
    :param xel: elevation (in degrees) to point to
    :param xdelays: raw delays - either None (to use az/el), or a dict (full EDA delays, as returned by pointing.calc_delays()
    :return: True on success, False if there was a problem with the parameters, None if simulated
    """
    if xdelays and (type(xdelays) == dict):  # If delays is a dict, they are EDA delays, so use them. If a list, they are normal MWA tile delays
        logger.info("Received raw delays to send to beamformers")
        ydelays = xdelays  # the same delay set is applied to both polarisations
    else:
        # No usable raw delays supplied - compute them from az/el for every dipole offset.
        xdelays, diagnostics = pointing.calc_delays(offsets=self.offsets, az=xaz, el=xel, verbose=True,
                                                    strict=STRICT, cpos=CPOS)
        if diagnostics is not None:
            delays, delayerrs, sqe, maxerr, offcount = diagnostics
            if offcount > 0:
                logger.warning('Elevation low - %d dipoles disabled because delays were too large to reach in hardware.' % offcount)
        ydelays = xdelays
    if (xdelays is None) or (ydelays is None):
        # No delay solution (e.g. the target is unreachable/below the horizon).
        return False
    if self.simulate:
        return None  # simulated mode: report 'simulated' rather than success/failure
    else:
        # 'K' entry presumably holds the Kaelus first-stage delay per hex input id - confirm
        # against pointing.calc_delays(). Values are signed, so +128 rescales to 0-255.
        self.X.ChannelDelay = [xdelays['K'][bfid] + 128 for bfid in pointing.HEXD]  # Add 128 to rescale from signed (-128 to +127) values
        self.Y.ChannelDelay = [ydelays['K'][bfid] + 128 for bfid in pointing.HEXD]
        now = time.time()
        if starttime > now:
            time.sleep(starttime - now)  # delay the actual hardware update until the scheduled start time
        xthread = threading.Thread(target=self._point_pol, args=(self.X,))
        ythread = threading.Thread(target=self._point_pol, args=(self.Y,))
        xthread.start()
        ythread.start()
        xthread.join()
        ythread.join()
        # self.LogAlarms()  # TODO - Comms errors when reading alarms, since the lightning strike in Jan 2017. Re-enable after repairs?
        return True
def PrintInfo(self, diag=1):
"""
Get status and version details from the Kaelus hardware, and print it to standard out.
:param diag: 1 (the default) to print diagnostic values, 0 to print calibrated values.
:return: None
"""
print("Beamformer status for X-pol:")
with self.X.lock:
self.X.OpenConnection()
self.X.PrintInfo(diag)
self.X.CloseConnection()
print("\n\nBeamformer status for Y-pol:")
with self.Y.lock:
self.Y.OpenConnection()
self.Y.PrintInfo(diag)
self.Y.CloseConnection()
print('\n')
def LogAlarms(self):
    """
    Write current hardware alarm status to the log file.

    :return: None
    """
    # Read and log the alarm registers of each polarisation board in turn.
    for tag, pol in (('X', self.X), ('Y', self.Y)):
        with pol.lock:
            pol.OpenConnection()
            pol.ReadAlarms()
            pol.CloseConnection()
        logger.info("Alarm status for %s-pol: %s" % (tag, pol.AlarmStatus))
def _point_pol(self, pol):
"""
Point the given polarisation (self.X or self.Y) using the previously supplied delay switch settings
:param pol: An | |
dest1d_min) / 2
mean_src_y = (src1d_max + src1d_min) / 2
# Tx = (dest1d_max + dest1d_min)/2 - (src1d_max + src1d_min)/2
Sy = (dest1d_max - dest1d_min + 1) / float(src1d_max - src1d_min + 1)
# apply forward transformation (in pixel space)
# below: only for debugging purpose
# coord_src2d_scaleX = np.copy(coord_src2d) # need to use np.copy to avoid copying pointer
# coord_src2d_scaleX[:, 0] = (coord_src2d[:, 0] - mean_src) * Sx + mean_dest
# coord_init_pix_scaleY = np.copy(coord_init_pix) # need to use np.copy to avoid copying pointer
# coord_init_pix_scaleY[:, 0] = (coord_init_pix[:, 0] - mean_src ) * Sx + mean_dest
range_x = list(range(ix * ny, ix * ny + nx))
coord_init_pix_scaleY[range_x, 1] = (coord_init_pix[range_x, 1] - mean_src_y) * Sy + mean_dest_y
coord_init_pix_scaleYinv[range_x, 1] = (coord_init_pix[range_x, 1] - mean_dest_y) / float(Sy) + mean_src_y
# apply transformation to image
col_scaleYinv = np.reshape(coord_init_pix_scaleYinv[:, 1], [nx, ny])
src2d_scaleXY = warp(src2d, np.array([row_scaleXinv, col_scaleYinv]), order=1)
# regularize Y warping fields
from skimage.filters import gaussian
col_scaleY = np.reshape(coord_init_pix_scaleY[:, 1], [nx, ny])
col_scaleYsmooth = gaussian(col_scaleY, smoothWarpXY)
col_scaleYinvsmooth = gaussian(col_scaleYinv, smoothWarpXY)
# apply smoothed transformation to image
src2d_scaleXYsmooth = warp(src2d, np.array([row_scaleXinv, col_scaleYinvsmooth]), order=1)
# reshape warping field as 1d
coord_init_pix_scaleY[:, 1] = col_scaleYsmooth.ravel()
coord_init_pix_scaleYinv[:, 1] = col_scaleYinvsmooth.ravel()
# display
if verbose == 2:
# FIG 1
plt.figure(figsize=(15, 3))
# plot #1
ax = plt.subplot(141)
plt.imshow(np.swapaxes(src2d, 1, 0), cmap=plt.cm.gray, interpolation='none')
plt.hold(True) # add other layer
plt.imshow(np.swapaxes(dest2d, 1, 0), cmap=plt.cm.copper, interpolation='none', alpha=0.5)
plt.title('src')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(mean_dest_x - 15, mean_dest_x + 15)
plt.ylim(mean_dest_y - 15, mean_dest_y + 15)
ax.grid(True, color='w')
# plot #2
ax = plt.subplot(142)
plt.imshow(np.swapaxes(src2d_scaleX, 1, 0), cmap=plt.cm.gray, interpolation='none')
plt.hold(True) # add other layer
plt.imshow(np.swapaxes(dest2d, 1, 0), cmap=plt.cm.copper, interpolation='none', alpha=0.5)
plt.title('src_scaleX')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(mean_dest_x - 15, mean_dest_x + 15)
plt.ylim(mean_dest_y - 15, mean_dest_y + 15)
ax.grid(True, color='w')
# plot #3
ax = plt.subplot(143)
plt.imshow(np.swapaxes(src2d_scaleXY, 1, 0), cmap=plt.cm.gray, interpolation='none')
plt.hold(True) # add other layer
plt.imshow(np.swapaxes(dest2d, 1, 0), cmap=plt.cm.copper, interpolation='none', alpha=0.5)
plt.title('src_scaleXY')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(mean_dest_x - 15, mean_dest_x + 15)
plt.ylim(mean_dest_y - 15, mean_dest_y + 15)
ax.grid(True, color='w')
# plot #4
ax = plt.subplot(144)
plt.imshow(np.swapaxes(src2d_scaleXYsmooth, 1, 0), cmap=plt.cm.gray, interpolation='none')
plt.hold(True) # add other layer
plt.imshow(np.swapaxes(dest2d, 1, 0), cmap=plt.cm.copper, interpolation='none', alpha=0.5)
plt.title('src_scaleXYsmooth (s=' + str(smoothWarpXY) + ')')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(mean_dest_x - 15, mean_dest_x + 15)
plt.ylim(mean_dest_y - 15, mean_dest_y + 15)
ax.grid(True, color='w')
# save figure
plt.savefig(os.path.join(path_qc, 'register2d_columnwise_image_z' + str(iz) + '.png'))
plt.close()
# ============================================================
# CALCULATE TRANSFORMATIONS
# ============================================================
# calculate forward transformation (in physical space)
coord_init_phy_scaleX = np.array(im_dest.transfo_pix2phys(coord_init_pix_scaleX))
coord_init_phy_scaleY = np.array(im_dest.transfo_pix2phys(coord_init_pix_scaleY))
# calculate inverse transformation (in physical space)
coord_init_phy_scaleXinv = np.array(im_src.transfo_pix2phys(coord_init_pix_scaleXinv))
coord_init_phy_scaleYinv = np.array(im_src.transfo_pix2phys(coord_init_pix_scaleYinv))
# compute displacement per pixel in destination space (for forward warping field)
warp_x[:, :, iz] = np.array([coord_init_phy_scaleXinv[i, 0] - coord_init_phy[i, 0] for i in range(nx * ny)]).reshape((nx, ny))
warp_y[:, :, iz] = np.array([coord_init_phy_scaleYinv[i, 1] - coord_init_phy[i, 1] for i in range(nx * ny)]).reshape((nx, ny))
# compute displacement per pixel in source space (for inverse warping field)
warp_inv_x[:, :, iz] = np.array([coord_init_phy_scaleX[i, 0] - coord_init_phy[i, 0] for i in range(nx * ny)]).reshape((nx, ny))
warp_inv_y[:, :, iz] = np.array([coord_init_phy_scaleY[i, 1] - coord_init_phy[i, 1] for i in range(nx * ny)]).reshape((nx, ny))
# Generate forward warping field (defined in destination space)
generate_warping_field(fname_dest, warp_x, warp_y, fname_warp, verbose)
# Generate inverse warping field (defined in source space)
generate_warping_field(fname_src, warp_inv_x, warp_inv_y, fname_warp_inv, verbose)
def register2d(fname_src, fname_dest, fname_mask='', fname_warp='warp_forward.nii.gz',
fname_warp_inv='warp_inverse.nii.gz',
paramreg=Paramreg(step='0', type='im', algo='Translation', metric='MI', iter='5', shrink='1', smooth='0',
gradStep='0.5'),
ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '',
'translation': '', 'bspline': ',10', 'gaussiandisplacementfield': ',3,0',
'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'},
verbose=0):
"""
Slice-by-slice registration of two images.
:param fname_src: name of moving image (type: string)
:param fname_dest: name of fixed image (type: string)
:param fname_mask: name of mask file (type: string) (parameter -x of antsRegistration)
:param fname_warp: name of output 3d forward warping field
:param fname_warp_inv: name of output 3d inverse warping field
:param paramreg: Class Paramreg()
:param ants_registration_params: dict: specific algorithm's parameters for antsRegistration
:param verbose:
:return:
if algo==translation:
x_displacement: list of translation along x axis for each slice (type: list)
y_displacement: list of translation along y axis for each slice (type: list)
if algo==rigid:
x_displacement: list of translation along x axis for each slice (type: list)
y_displacement: list of translation along y axis for each slice (type: list)
theta_rotation: list of rotation angle in radian (and in ITK's coordinate system) for each slice (type: list)
if algo==affine or algo==syn or algo==bsplinesyn:
creation of two 3D warping fields (forward and inverse) that are the concatenations of the slice-by-slice
warps.
"""
# set metricSize
# TODO: create internal function get_metricSize()
if paramreg.metric == 'MI':
metricSize = '32' # corresponds to number of bins
else:
metricSize = '4' # corresponds to radius (for CC, MeanSquares...)
# Get image dimensions and retrieve nz
logger.info(f"\nGet image dimensions of destination image...")
nx, ny, nz, nt, px, py, pz, pt = image.Image(fname_dest).dim
logger.info(f" matrix size: {str(nx)} x {str(ny)} x {str(nz)}")
logger.info(f" voxel size: {str(px)}mm x {str(py)}mm x {str(nz)}mm")
# Split input volume along z
logger.info(f"\nSplit input volume...")
im_src = image.Image(fname_src)
split_source_list = image.split_img_data(im_src, 2)
for im in split_source_list:
im.save()
# Split destination volume along z
logger.info(f"\nSplit destination volume...")
im_dest = image.Image(fname_dest)
split_dest_list = image.split_img_data(im_dest, 2)
for im in split_dest_list:
im.save()
# Split mask volume along z
if fname_mask != '':
logger.info(f"\nSplit mask volume...")
im_mask = image.Image('mask.nii.gz')
split_mask_list = image.split_img_data(im_mask, 2)
for im in split_mask_list:
im.save()
# initialization
if paramreg.algo in ['Translation']:
x_displacement = [0 for i in range(nz)]
y_displacement = [0 for i in range(nz)]
theta_rotation = [0 for i in range(nz)]
if paramreg.algo in ['Rigid', 'Affine', 'BSplineSyN', 'SyN']:
list_warp = []
list_warp_inv = []
# loop across slices
for i in range(nz):
# set masking
logger.info(f"Registering slice {str(i)}/{str(nz-1)}...")
num = numerotation(i)
prefix_warp2d = 'warp2d_' + num
# if mask is used, prepare command for ANTs
if fname_mask != '':
masking = ['-x', 'mask_Z' + num + '.nii.gz']
else:
masking = []
# main command for registration
# TODO fixup isct_ants* parsers
cmd = ['isct_antsRegistration',
'--dimensionality', '2',
'--transform', paramreg.algo + '[' + str(paramreg.gradStep) + ants_registration_params[paramreg.algo.lower()] + ']',
'--metric', paramreg.metric + '[dest_Z' + num + '.nii' + ',src_Z' + num + '.nii' + ',1,' + metricSize + ']', #[fixedImage,movingImage,metricWeight +nb_of_bins (MI) or radius (other)
'--convergence', str(paramreg.iter),
'--shrink-factors', str(paramreg.shrink),
'--smoothing-sigmas', str(paramreg.smooth) + 'mm',
'--output', '[' + prefix_warp2d + ',src_Z' + num + '_reg.nii]', #--> file.mat (contains Tx,Ty, theta)
'--interpolation', 'BSpline[3]',
'--verbose', '1',
] + masking
# add init translation
if not paramreg.init == '':
init_dict = {'geometric': '0', 'centermass': '1', 'origin': '2'}
cmd += ['-r', '[dest_Z' + num + '.nii' + ',src_Z' + num + '.nii,' + init_dict[paramreg.init] + ']']
try:
# run registration
run_proc(cmd, is_sct_binary=True)
if paramreg.algo in ['Translation']:
file_mat = prefix_warp2d + '0GenericAffine.mat'
matfile = loadmat(file_mat, struct_as_record=True)
array_transfo = matfile['AffineTransform_double_2_2']
x_displacement[i] = array_transfo[4][0] # Tx in ITK'S coordinate system
y_displacement[i] = array_transfo[5][0] # Ty in ITK'S and fslview's coordinate systems
theta_rotation[i] = asin(array_transfo[2]) # angle of rotation theta in ITK'S coordinate system (minus theta for fslview)
if paramreg.algo in ['Rigid', 'Affine', 'BSplineSyN', 'SyN']:
# List names of 2d warping fields for subsequent merge along Z
file_warp2d = prefix_warp2d + '0Warp.nii.gz'
file_warp2d_inv = prefix_warp2d + '0InverseWarp.nii.gz'
list_warp.append(file_warp2d)
list_warp_inv.append(file_warp2d_inv)
if paramreg.algo in ['Rigid', 'Affine']:
# Generating null 2d warping field (for subsequent concatenation with affine transformation)
# TODO fixup isct_ants* parsers
run_proc(['isct_antsRegistration',
'-d', '2',
'-t', 'SyN[1,1,1]',
'-c', '0',
'-m', 'MI[dest_Z' + num + '.nii,src_Z' + num + '.nii,1,32]',
'-o', 'warp2d_null',
'-f', '1',
'-s', '0',
], is_sct_binary=True)
# --> outputs: warp2d_null0Warp.nii.gz, warp2d_null0InverseWarp.nii.gz
file_mat = prefix_warp2d + '0GenericAffine.mat'
# Concatenating mat transfo and null 2d warping field to obtain 2d warping field of affine transformation
run_proc(['isct_ComposeMultiTransform', '2', file_warp2d, '-R', 'dest_Z' + num + '.nii', 'warp2d_null0Warp.nii.gz', file_mat], is_sct_binary=True)
run_proc(['isct_ComposeMultiTransform', '2', file_warp2d_inv, '-R', 'src_Z' + num + '.nii', 'warp2d_null0InverseWarp.nii.gz', '-i', file_mat], is_sct_binary=True)
# if an exception occurs with ants, take the last value for the transformation
# TODO: DO WE NEED TO DO THAT??? (julien 2016-03-01)
except Exception as e:
# TODO | |
'''
This class implements a 3D FCN for the task of generating CT from MRI
By <NAME> and <NAME>
Oct., 2016
'''
from __future__ import division
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from utils import *
from loss_functions import *
from scipy.misc import imsave
import collections
import datetime
class MR2CT(object):
def __init__(self, sess, batch_size=10, height_MR=64,width_MR=64, height_CT=48,
             width_CT=48, l_num=2, wd=0.0005, checkpoint_dir=None, path_patients_h5=None, learning_rate=2e-8,lr_step=30000,
             lam_lp=1, lam_gdl=1, lam_adv=1, alpha=2):
    """
    2D conditional-GAN model that predicts CT patches from multi-slice MR patches.

    Args:
        sess: TensorFlow session
        batch_size: The size of batch. Should be specified before training.
        height_MR, width_MR: spatial size of the input MR patches
        height_CT, width_CT: spatial size of the predicted CT patches
        l_num: presumably the exponent of the lp reconstruction loss (2 -> L2) - defined in loss_functions; confirm
        wd: weight-decay coefficient passed to the conv/fc layer builders
        checkpoint_dir: directory used for saving/restoring model checkpoints
        path_patients_h5: path to the HDF5 training data consumed by Generator_2D_slices
        learning_rate: Adam learning rate for both G and D optimisers
        lr_step: step size for the (currently commented-out) learning-rate decay schedule
        lam_lp, lam_gdl, lam_adv: presumably the weights of the lp, gradient-difference
            and adversarial terms in the combined generator loss - confirm in combined_loss_G
        alpha: presumably an exponent used by the gradient-difference loss - confirm
    """
    self.sess = sess
    self.lam_lp=lam_lp
    self.lam_gdl=lam_gdl
    self.lam_adv=lam_adv
    self.alpha=alpha
    self.lr_step=lr_step
    self.l_num=l_num
    self.wd=wd
    self.learning_rate=learning_rate
    self.batch_size=batch_size
    self.height_MR=height_MR
    self.width_MR=width_MR
    self.height_CT=height_CT
    self.width_CT=width_CT
    self.checkpoint_dir = checkpoint_dir
    # Yields (MR, CT) training batches from the HDF5 file.
    self.data_generator = Generator_2D_slices(path_patients_h5, self.batch_size)
    self.build_model()
def build_model(self):
    """Build the TF graph: placeholders, generator, discriminator, losses and optimisers."""
    with tf.device('/gpu:0'):
        self.inputMR=tf.placeholder(tf.float32, shape=[None, self.height_MR, self.width_MR, 5])#5 chans input
        self.CT_GT=tf.placeholder(tf.float32, shape=[None, self.height_CT, self.width_CT, 1])
        batch_size_tf = tf.shape(self.inputMR)[0] #variable batchsize so we can test here
        self.train_phase = tf.placeholder(tf.bool, name='phase_train')
        self.G, self.layer = self.generator(self.inputMR,batch_size_tf)
        print 'G shape ',self.G.get_shape
        self.D, self.D_logits = self.discriminator(self.CT_GT)#real CT data
        self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)#fake generated CT data
        # Standard GAN discriminator loss: real patches labelled 1, generated patches labelled 0.
        self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits, labels=tf.ones_like(self.D)))
        self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_, labels=tf.zeros_like(self.D_)))
        self.d_loss=self.d_loss_real+self.d_loss_fake
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        # Combined generator loss (lp + gdl + adversarial terms) - defined in loss_functions.
        self.g_loss, self.lpterm, self.gdlterm, self.bceterm=self.combined_loss_G(batch_size_tf)
        # Split trainable variables by the 'd_'/'g_' name prefixes used in the layer builders,
        # so each optimiser only updates its own network.
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        with tf.variable_scope(tf.get_variable_scope(),reuse=False):
            self.d_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5) \
                .minimize(self.d_loss, var_list=self.d_vars)
            # Only the generator step advances global_step (one increment per iteration).
            self.g_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5) \
                .minimize(self.g_loss, var_list=self.g_vars, global_step=self.global_step)
        print 'shape output G ',self.G.get_shape()
        #print 'shape output D ',self.D.get_shape()
        print 'learning rate ',self.learning_rate
        #self.learning_rate_tensor = tf.train.exponential_decay(self.learning_rate, self.global_step, #self.lr_step, 0.1, staircase=True)
        #self.g_optim = tf.train.GradientDescentOptimizer(self.learning_rate_tensor).minimize(self.g_loss, global_step=self.global_step)
        #self.g_optim = tf.train.MomentumOptimizer(self.learning_rate_tensor, 0.9).minimize(self.g_loss, global_step=self.global_step)
        self.merged = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter("./summaries", self.sess.graph)
        self.saver = tf.train.Saver()
def generator(self,inputMR,batch_size_tf, reuse = False):
    """
    Fully-convolutional generator mapping an MR patch to a single-channel CT patch.

    :param inputMR: input tensor of shape [batch, height_MR, width_MR, 5]
    :param batch_size_tf: dynamic batch-size tensor (not used in the current body)
    :param reuse: if True, reuse the variables of the 'generator' scope
    :return: (output, layer) - both are the final conv output conv9_a; the second
        value is returned so callers can inspect an intermediate activation.
    """
    with tf.variable_scope('generator') as scope:
        if (reuse):
            tf.get_variable_scope().reuse_variables()
        ######## FCN for the 32x32x32 to 24x24x24 ###################################
        print 'input shape, ',inputMR.get_shape()
        # VALID convolutions shrink the spatial size; the later SAME ones preserve it.
        conv1_a = conv_op_bn(inputMR, name="g_conv1_a", kh=7, kw=7, n_out=128, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)#30
        conv2_a = conv_op_bn(conv1_a, name="g_conv2_a", kh=5, kw=5, n_out=128, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)
        conv3_a = conv_op_bn(conv2_a, name="g_conv3_a", kh=3, kw=3, n_out=256, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)#28
        conv4_a = conv_op_bn(conv3_a, name="g_conv4_a", kh=3, kw=3, n_out=256, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)#28
        conv5_a = conv_op_bn(conv4_a, name="g_conv5_a", kh=3, kw=3, n_out=128, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)
        conv6_a = conv_op_bn(conv5_a, name="g_conv6_a", kh=3, kw=3, n_out=128, dh=1, dw=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)#26
        conv7_a = conv_op_bn(conv6_a, name="g_conv7_a", kh=3, kw=3, n_out=128, dh=1, dw=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)
        conv8_a = conv_op_bn(conv7_a, name="g_conv8_a", kh=3, kw=3, n_out=64, dh=1, dw=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)
        #conv7_a = conv_op_3d_bn(conv6_a, name="conv7_a", kh=3, kw=3, n_out=1, dh=1, dw=1, wd=self.wd, padding='SAME',train_phase=self.train_phase)#24
        # Final layer: single-channel linear output (no batch norm, no activation).
        conv9_a = conv_op(conv8_a, name="g_conv9_a", kh=3, kw=3, n_out=1, dh=1, dw=1, wd=self.wd, padding='SAME',activation=False)#24 I modified it here,dong
        print 'conv9a shape, ',conv9_a.get_shape()
        #self.MR_16_downsampled=conv7_a#JUST FOR TEST
        return conv9_a,conv9_a
def discriminator(self, inputCT, reuse=False):
    """
    Convolutional discriminator classifying CT patches as real or generated.

    :param inputCT: CT patch tensor of shape [batch, height_CT, width_CT, 1]
    :param reuse: if True, reuse the 'discriminator' scope variables (needed because
        the graph calls this twice - once on real CT and once on generated CT)
    :return: (probability, logits) - sigmoid output and the raw pre-sigmoid logits
    """
    with tf.variable_scope('discriminator') as scope:
        if reuse:
            tf.get_variable_scope().reuse_variables()
        print 'ct shape ',inputCT.get_shape()
        # Conv + max-pool trunk, then three fully-connected layers down to one logit.
        h0=conv_op_bn(inputCT, name="d_conv_dis_1_a", kh=5, kw=5, n_out=32, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)
        print 'h0 shape ',h0.get_shape()
        m0=mpool_op(h0, 'pool0', kh=2, kw=2, dh=2, dw=2)
        print 'm0 shape ',m0.get_shape()
        h1 = conv_op_bn(m0, name="d_conv2_dis_a", kh=5, kw=5, n_out=64, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)
        print 'h1 shape ',h1.get_shape()
        m1=mpool_op(h1, 'pool1', kh=2, kw=2, dh=2, dw=2)
        print 'mi shape ',m1.get_shape()
        h2 = conv_op_bn(m1, name="d_conv3_dis_a", kh=5, kw=5, n_out=128, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)#28
        h3 = conv_op_bn(h2, name="d_conv4_dis_a", kh=5, kw=5, n_out=256, dh=1, dw=1, wd=self.wd, padding='VALID',train_phase=self.train_phase)
        fc1=fullyconnected_op(h3, name="d_fc1", n_out=512, wd=self.wd, activation=True)
        fc2=fullyconnected_op(fc1, name="d_fc2", n_out=128, wd=self.wd, activation=True)
        fc3=fullyconnected_op(fc2, name="d_fc3", n_out=1, wd=self.wd, activation=False)
        return tf.nn.sigmoid(fc3), fc3
def train(self, config):
path_test='/home/dongnie/warehouse/prostate/ganData64to24Test'
print 'global_step ', self.global_step.name
print 'lr_step ',self.lr_step
print 'trainable vars '
for v in tf.trainable_variables():
print v.name
if self.load(self.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
self.sess.run(tf.global_variables_initializer())
self.sess.graph.finalize()
start = self.global_step.eval() # get last global_step
print("Start from:", start)
for it in range(start,config.iterations):
X,y=self.data_generator.next()
# Update D network
_, loss_eval_D, = self.sess.run([self.d_optim, self.d_loss],
feed_dict={ self.inputMR: X, self.CT_GT:y, self.train_phase: True })
#### maybe we need to get a different batch???########
# Update G network
_, loss_eval_G, lp_eval,gdl_eval,bce_eval, layer_out_eval = self.sess.run([self.g_optim,
self.g_loss, self.lpterm, self.gdlterm, self.bceterm, self.layer],
feed_dict={ self.inputMR: X, self.CT_GT:y, self.train_phase: True })
if it%config.show_every==0:#show loss every show_every its
#curr_lr=self.sess.run(self.learning_rate_tensor)
#print 'lr= ',curr_lr
print 'time ',datetime.datetime.now(),' it ',it,'loss D bce ',loss_eval_D
print 'loss total G ',loss_eval_G
print 'loss lp G ',lp_eval
print 'loss gdl G',gdl_eval
print 'loss bce G ',bce_eval
print 'layer min ', np.min(layer_out_eval)
print 'layer max ', np.max(layer_out_eval)
print 'layer mean ', np.mean(layer_out_eval)
# print 'trainable vars '
# for v in tf.trainable_variables():
# print v.name
# data_var=self.sess.run(v)
# grads = tf.gradients(self.g_loss, v)
# var_grad_val = self.sess.run(grads, feed_dict={self.inputMR: X, self.CT_GT:y })
# print 'grad min ', np.min(var_grad_val)
# print 'grad max ', np.max(var_grad_val)
# print 'grad mean ', np.mean(var_grad_val)
# #print 'shape ',data_var.shape
# print 'filter min ', np.min(data_var)
# print 'filter max ', np.max(data_var)
# print 'filter mean ', np.mean(data_var)
#self.writer.add_summary(summary, it)
# print 'trainable vars '
if it%config.test_every==0 and it!=0:#==0:#test one subject
mr_test_itk=sitk.ReadImage(os.path.join(path_test,'prostate_1to1_MRI.nii'))
ct_test_itk=sitk.ReadImage(os.path.join(path_test,'prostate_1to1_CT.nii'))
mrnp=sitk.GetArrayFromImage(mr_test_itk)
#mu=np.mean(mrnp)
#mrnp=(mrnp-mu)/(np.max(mrnp)-np.min(mrnp))
ctnp=sitk.GetArrayFromImage(ct_test_itk)
print mrnp.dtype
print ctnp.dtype
ct_estimated=self.test_1_subject(mrnp,ctnp,[64,64,5],[48,48,1],[2,5,5])
psnrval=psnr(ct_estimated,ctnp)
print ct_estimated.dtype
print ctnp.dtype
print 'psnr= ',psnrval
volout=sitk.GetImageFromArray(ct_estimated)
sitk.WriteImage(volout,'ct_estimated_{}'.format(it)+'.nii.gz')
if it%config.save_every==0:#save weights every save_every iterations
self.save(self.checkpoint_dir, it)
def evaluate(self, patch_MR):
    """Run the generator on a single multi-channel MR patch.

    :param patch_MR: numpy array of shape [H, W, nchans]
    :return: predicted CT patch with singleton dimensions squeezed out
    """
    # The graph expects a batch axis: [1, H, W, nchans].
    batch_input = np.expand_dims(patch_MR, axis=0)
    feeds = {self.inputMR: batch_input, self.train_phase: False}
    prediction, _layer_val = self.sess.run([self.G, self.layer], feed_dict=feeds)
    # Drop the batch (and any other singleton) axes before returning.
    return np.squeeze(prediction)
    def test_1_subject(self,MR_image,CT_GT,MR_patch_sz,CT_patch_sz,step):
        """
        receives an MR image and returns an estimated CT image of the same size

        Sliding-window inference: the MR volume is padded by the margin
        between the (larger) input patch and the (smaller) output patch,
        a patch is evaluated at every stride position, and overlapping
        predictions are averaged voxel-wise.

        MR_image: 3D numpy array (the input MR volume)
        CT_GT: 3D numpy array; only its shape is used to size the output
        MR_patch_sz: network input patch size per axis
        CT_patch_sz: network output patch size per axis
        step: sliding-window stride per axis
        """
        matFA=MR_image
        matSeg=CT_GT
        dFA=MR_patch_sz
        dSeg=CT_patch_sz
        # eps keeps the final division well-defined for voxels that are
        # never covered by any output patch.
        eps=1e-5
        [row,col,leng]=matFA.shape
        # Half the size difference between input and output patch, per axis.
        margin1=int((dFA[0]-dSeg[0])/2)
        margin2=int((dFA[1]-dSeg[1])/2)
        margin3=int((dFA[2]-dSeg[2])/2)
        cubicCnt=0
        marginD=[margin1,margin2,margin3]
        print 'matFA shape is ',matFA.shape
        # Padded copy of the MR volume. NOTE: the border slabs below are
        # copied, not mirrored, despite the "flip" comments.
        matFAOut=np.zeros([row+2*marginD[0],col+2*marginD[1],leng+2*marginD[2]])
        print 'matFAOut shape is ',matFAOut.shape
        matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA
        matFAOut[0:marginD[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[0:marginD[0],:,:] #we'd better flip it along the first dimension
        matFAOut[row+marginD[0]:matFAOut.shape[0],marginD[1]:col+marginD[1],marginD[2]:leng+marginD[2]]=matFA[row-marginD[0]:matFA.shape[0],:,:] #we'd better flip it along the 1st dimension
        matFAOut[marginD[0]:row+marginD[0],0:marginD[1],marginD[2]:leng+marginD[2]]=matFA[:,0:marginD[1],:] #we'd better flip it along the 2nd dimension
        matFAOut[marginD[0]:row+marginD[0],col+marginD[1]:matFAOut.shape[1],marginD[2]:leng+marginD[2]]=matFA[:,col-marginD[1]:matFA.shape[1],:] #we'd better to flip it along the 2nd dimension
        matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],0:marginD[2]]=matFA[:,:,0:marginD[2]] #we'd better flip it along the 3rd dimension
        matFAOut[marginD[0]:row+marginD[0],marginD[1]:col+marginD[1],marginD[2]+leng:matFAOut.shape[2]]=matFA[:,:,leng-marginD[2]:matFA.shape[2]]
        # Accumulated predictions and per-voxel coverage counts.
        matOut=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))
        used=np.zeros((matSeg.shape[0],matSeg.shape[1],matSeg.shape[2]))+eps
        #fid=open('trainxxx_list.txt','a');
        print 'last i ',row-dSeg[0]
        for i in range(0,row-dSeg[0]+1,step[0]):
            print 'i ',i
            for j in range(0,col-dSeg[1]+1,step[1]):
                for k in range(0,leng-dSeg[2]+1,step[2]):
                    volSeg=matSeg[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]
                    #print 'volSeg shape is ',volSeg.shape
                    # Input patch includes the margin on every side.
                    volFA=matFAOut[i:i+dSeg[0]+2*marginD[0],j:j+dSeg[1]+2*marginD[1],k:k+dSeg[2]+2*marginD[2]]
                    #print 'volFA shape is ',volFA.shape
                    #mynet.blobs['dataMR'].data[0,0,...]=volFA
                    #mynet.forward()
                    #temppremat = mynet.blobs['softmax'].data[0].argmax(axis=0) #Note you have add softmax layer in deploy prototxt
                    # Network prediction for this patch.
                    temppremat=self.evaluate(volFA)
                    if len(temppremat.shape)==2:
                        # Single-slice outputs come back as [H,W]; restore the depth axis.
                        temppremat=np.expand_dims(temppremat,axis=2)
                    #print 'patchout shape ',temppremat.shape
                    #temppremat=volSeg
                    # Accumulate the prediction and the coverage count.
                    matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=matOut[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+temppremat;
                    used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]=used[i:i+dSeg[0],j:j+dSeg[1],k:k+dSeg[2]]+1;
        # Average overlapping predictions.
        matOut=matOut/used
        return matOut
def save(self, checkpoint_dir, step):
model_name = "MR2CT.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,
os.path.join(checkpoint_dir, model_name),
global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
return True
else:
return False
def combined_loss_G(self,batch_size_tf):
"""
Calculates the sum of the combined adversarial, lp and GDL losses in the given proportion. Used
for training the generative model.
@param gen_frames: A list of tensors of the generated frames at each scale.
@param gt_frames: A list of tensors of the ground truth frames at each scale.
@param d_preds: A list of tensors of the classifications made by the discriminator model at each
scale.
@param lam_adv: The percentage of the adversarial loss to use in the combined loss.
@param lam_lp: The percentage of the lp loss to use in the combined loss.
@param lam_gdl: The percentage of the GDL loss to use in the combined loss.
@param l_num: 1 or 2 for l1 and l2 loss, respectively).
@param alpha: The power to which each gradient term is raised in GDL loss.
@return: The combined adversarial, lp and GDL losses.
"""
lpterm=lp_loss(self.G, self.CT_GT, self.l_num, batch_size_tf)
gdlterm=gdl_loss(self.G, self.CT_GT, self.alpha,batch_size_tf)
bceterm=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,labels=tf.ones_like(self.D_)))
loss_=self.lam_lp*lpterm + self.lam_gdl*gdlterm + self.lam_adv*bceterm
tf.add_to_collection('losses', loss_)
loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
return | |
= []
for i, (model_group, models_g) in enumerate(model_struct.items()):
logging.info(
f"-- Group {i} ({model_group}) out of {len(model_struct)}")
# Take metadata information about the group from the group name
groups.append(model_group)
metadata_i = model_group.split('_')
n_topics.append(metadata_i[2])
alpha.append(metadata_i[4])
interval.append(metadata_i[6])
# Number of runs
n_run.append(len(models_g))
# Loop over the graphs in model_group
for j, model in enumerate(models_g):
self.SG.activate_snode(model)
n_nodes = self.SG.snodes[model].n_nodes
Wj = self.SG.snodes[model].get_matrix()
n_e = self.SG.snodes[model].n_edges
n_n = self.SG.snodes[model].n_nodes
print(f"-- Number of nodes: {n_n}")
print(f"-- Number of edges: {n_e}")
print(f"-- Number of edges (II): {len(Wj.data)}")
print(f"-- Min-weight: {min(Wj.data)}")
print(f"-- Max-weight: {max(Wj.data)}")
if j == 0:
epn_i = [n_e / n_n]
th_50 = np.percentile(Wj.data, 50)
th_80 = np.percentile(Wj.data, 80)
th_90 = np.percentile(Wj.data, 90)
Wi = copy.copy(Wj)
else:
epn_i.append(n_e / n_n)
th_50 = max(th_50, np.percentile(Wj.data, 50))
th_80 = max(th_80, np.percentile(Wj.data, 80))
th_90 = max(th_90, np.percentile(Wj.data, 90))
# Compute the component-wise minimum of all Wj
Wi = Wi.minimum(Wj)
self.SG.deactivate_snode(model)
# Get parameters aggregating data from all graphs in the group
epn.append(np.mean(epn_i))
data = Wi.data
w00.append(np.sum(data))
c00.append(np.count_nonzero(data))
data = data * (data > th_50)
# data = data * (data > 0.5)
w50.append(np.sum(data))
c50.append(np.count_nonzero(data))
data = data * (data > th_80)
# data = data * (data > 0.8)
w80.append(np.sum(data))
c80.append(np.count_nonzero(data))
data = data * (data > th_90)
# data = data * (data > 0.9)
w90.append(np.sum(data))
c90.append(np.count_nonzero(data))
# ############
# Save results
# Sort result variables by number of topics
# We need to save the original ordering of the number of topics to
# sort the cd metrics afterwards.
(n_topics, models, epn, alpha, interval, n_run,
w00, c00, w50, c50, w80, c80, w90, c90) = tuple(zip(*sorted(
zip(n_topics, models, epn, alpha, interval, n_run,
w00, c00, w50, c50, w80, c80, w90, c90))))
# Create summary table
df = pd.DataFrame({'Model': groups,
'No. of topics': n_topics,
'Average edges per node': epn,
'alpha': alpha,
'interval': interval,
'n_run': n_run,
'w00': w00, 'c00': c00,
'w50': w50, 'c50': c50,
'w80': w80, 'c80': c80,
'w90': w90, 'c90': c90})
print("Summary of results:")
print(df)
# Save summary table
preffix = f'{corpus}_{n_nodes}_{self.epn}'
fname = f'{preffix}.xls'
if not os.path.exists(self.path2var):
os.makedirs(self.path2var)
out_path = os.path.join(self.path2var, fname)
df.to_excel(out_path)
return
    def analyze_variability(self):
        """
        Analyzes the influence of the topic model on the quality of the
        similarity graphs.

        The similarity graph is validated using a citations graph.
        Takes no parameters: the corpus configuration (``ref_col`` and
        ``path2models``) is read from ``self.model``, and
        ``self.corpus_name`` identifies the corpus being analyzed.
        """
        corpus_data = self.model
        ref_col = corpus_data['ref_col']
        path2models = corpus_data['path2models']
        # Parameters
        print(f"Number of edges per node: {self.epn}")
        # Validate models, one by one
        self._analyze_variability(
            path2models, self.corpus_name, ref_col=ref_col)
        return
def show_validation_results(self):
"""
Shows the results of the topic model validation in
self.validate_topic_models()
Parameters
----------
path: str
Path to data
"""
# ###############
# Read data files
# ###############
# Read the file names in the folder containing the xls reports
data_dir = self.path2rgs
data_files = sorted(os.listdir(data_dir))
# Read all result files
# sim, rescale, n_nodes, n_edges, ig, tm_class = [], [], [], [], [], []
n_nodes, epn = [], []
alpha, n_run, interval = [], [], []
params, df_dict = {}, {}
# fname_struct = ['corpus', 'sim', 'rescale', 'n_nodes', 'n_edges',
# 'ig', 'tm_class']
no_files = True
for f in data_files:
if f.endswith('.xls'):
fname = os.path.splitext(f)[0]
fname_parts = fname.split('_')
# Read parameters from the file name
n_nodes_f = fname_parts[1]
epn_f = fname_parts[2]
n_nodes.append(n_nodes_f)
epn.append(epn_f)
# Read report from file
fpath = os.path.join(data_dir, f)
df_dict[fname] = pd.read_excel(fpath)
params[fname] = {'n': n_nodes_f, 'e': epn_f}
alpha += df_dict[fname]['alpha'].tolist()
n_run += df_dict[fname]['n_run'].tolist()
interval += df_dict[fname]['interval'].tolist()
no_files = False
if no_files:
logging.warning('-- -- No result files available')
# ############
# Plot results
# ############
# Ordered list of parameters.
n_nodes = sorted(list(set(n_nodes)))
epn = sorted(list(set(epn)))
alpha = sorted(list(set(alpha)))
n_run = sorted(list(set(n_run)))
interval = sorted(list(set(interval)))
# Get the list of all possiblo dataframe columns to visualize
cols = []
for name, df in df_dict.items():
cols += df.columns.tolist()
# Remove columns that will not become y-coordinates
cols = list(set(cols) - {'Unnamed: 0', 'Model', 'No. of topics',
'Number of nodes'})
# Dictionary of abreviations (for the file names)
cols2plot = list(set(cols) - {'alpha', 'interval', 'n_run'})
abbreviation = {x: x for x in cols2plot}
abbreviation.update({'Radius': 'Rad',
'Time': 't',
'Number of edges': 'ne',
'Connected components': 'cc',
'Largest component': 'lc',
'Relative max CC': 'rc',
'Ref. graph similarity': 'cs'})
# The following nested loop is aimed to make multiple plots from xls
# files inside the same directory. It is actually not needed if all
# xls files in the given folder have the same parameter values.
for n in n_nodes:
for e in epn:
fnames = [x for x in df_dict if
params[x]['n'] == n and params[x]['e'] == e]
if len(fnames) == 0:
continue
for var, ab in abbreviation.items():
# #################################################
# Plot figure for the current value of e, n and var
fig, ax = plt.subplots()
print(fnames)
for fname in fnames:
df_f = df_dict[fname]
for a in alpha:
df_a = df_f[df_f['alpha'] == a]
for i in interval:
df = df_a[df_a['interval'] == i]
x = df['No. of topics']
y = df[var]
base_line, = ax.plot(x, y, '.')
df_av = df.groupby('No. of topics').mean()
x = df_av.index
y = df_av[var]
ax.plot(x, y, '.-', label=f'a={a}, i={i}',
color=base_line.get_color())
ax.set_xlabel('No. of topics')
ax.set_ylabel(var)
ax.set_title(f'Nodes: {n}, Edges per node: {e}, ')
ax.legend()
ax.grid()
plt.show(block=False)
# Save figure
out_dir = os.path.join(self.path2rgs, 'figs')
tag = '_'.join(fname_parts[0:5])
fname = f'{tag}_{ab}.png'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, fname)
plt.savefig(out_path)
return
def show_variability_results(self):
"""
Shows the results of the topic model validation in
self.validate_topic_models()
Parameters
----------
path: str
Path to data
"""
# ###############
# Read data files
# ###############
# Read the file names in the folder containing the xls reports
data_dir = self.path2var
data_files = sorted(os.listdir(data_dir))
# Read all result files
n_nodes, epn = [], []
alpha, interval = [], []
params, df_dict = {}, {}
# fname_struct = ['corpus', 'n_nodes', 'epn']
for f in data_files:
if f.endswith('.xls'):
fname = os.path.splitext(f)[0]
fname_parts = fname.split('_')
# Read parameters from the file name
n_nodes_f = fname_parts[1]
epn_f = fname_parts[2]
n_nodes.append(n_nodes_f)
epn.append(epn_f)
# Read report from file
fpath = os.path.join(data_dir, f)
df_dict[fname] = pd.read_excel(fpath)
params[fname] = {'n': n_nodes_f, 'e': epn_f}
alpha += df_dict[fname]['alpha'].tolist()
interval += df_dict[fname]['interval'].tolist()
# ############
# Plot results
# ############
# Ordered list of parameters.
n_nodes = sorted(list(set(n_nodes)))
epn = sorted(list(set(epn)))
alpha = sorted(list(set(alpha)))
interval = sorted(list(set(interval)))
# Get the list of all possiblo dataframe columns to visualize
cols = []
for name, df in df_dict.items():
cols += df.columns.tolist()
# Remove columns that will not become y-coordinates
cols = list(set(cols) - {'Unnamed: 0', 'Model', 'No. of topics',
'Number of nodes'})
# Dictionary of abreviations (for the file names)
cols2plot = list(set(cols) - {'alpha', 'interval', 'n_run'})
abbreviation = {x: x for x in cols2plot}
# abbreviation.update({'Radius': 'Rad',
# 'Time': 't',
# 'Number of edges': 'ne',
# 'Connected components': 'cc',
# 'Largest component': 'lc',
# 'Relative max CC': 'rc',
# 'Ref. graph similarity': 'cs'})
# Index(['Unnamed: 0', 'Model', 'No. of topics',
# 'Average edges per node', 'alpha', 'interval', 'n_run',
# 'w00', 'c00', 'w50', 'c50', 'w80', 'c80', 'w90', 'c90'],
# The following nested loop is aimed to make multiple plots from xls
# files inside the same directory. It is actually not needed if all
# xls files in the given folder have the same parameter values.
for n in n_nodes:
for e in epn:
fnames = [x for x in df_dict if
params[x]['n'] == n and params[x]['e'] == e]
if len(fnames) == 0:
continue
for var, ab in abbreviation.items():
# #################################################
# Plot figure for the current value of e, n and var
fig, ax = plt.subplots()
print(fnames)
for fname in fnames:
df_f = df_dict[fname]
for a in alpha:
df_a = df_f[df_f['alpha'] == a]
for i in interval:
df = df_a[df_a['interval'] == i]
x = df['No. of topics']
y = df[var]
base_line, = ax.plot(x, y, '.')
df_av = df.groupby('No. of topics').mean()
x = df_av.index
y = df_av[var]
ax.plot(x, y, '.-', label=f'a={a}, i={i}',
color=base_line.get_color())
ax.set_xlabel('No. of topics')
ax.set_ylabel(var)
| |
from __future__ import division
import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as mp
from .ops import *
from .util import *
from progressbar import ETA, Bar, Percentage, ProgressBar
class STGConvnet(object):
    def __init__(self, sess, config):
        """Store the TF session and copy all run settings out of `config`,
        then prepare the output directory layout and a clean log dir."""
        self.sess = sess
        self.net_type = config.net_type
        self.state_cold_start = config.state_cold_start
        self.batch_size = config.batch_size
        self.dense_layer = config.dense_layer
        self.num_frames = config.num_frames
        self.num_chain = config.num_chain
        self.num_epochs = config.num_epochs
        self.lr = config.lr
        self.beta1 = config.beta1
        self.step_size = config.step_size
        self.sample_steps = config.sample_steps
        # NOTE(review): this copies config.step_size, not
        # config.action_step_size -- looks like a possible copy-paste slip;
        # confirm intent before changing.
        self.action_step_size = config.step_size
        # Total Langevin steps for actions scale with the state sample steps.
        self.action_sample_steps = config.action_sample_steps * self.sample_steps
        self.action_size = 3
        self.action_cold_start = config.action_cold_start
        self.category = config.category
        self.data_path = os.path.join(config.data_path)  # , config.category)
        self.log_step = config.log_step
        # Per-category output tree: logs, observed/synthesized sequences,
        # checkpoints and final results.
        self.output_dir = os.path.join(config.output_dir, config.category)
        self.log_dir = os.path.join(self.output_dir, 'log')
        self.train_dir = os.path.join(self.output_dir, 'observed_sequence')
        self.sample_dir = os.path.join(self.output_dir, 'synthesis_sequence')
        self.model_dir = os.path.join(self.output_dir, 'model')
        self.result_dir = os.path.join(self.output_dir, 'final_result')
        # Start every run with an empty log directory.
        if tf.gfile.Exists(self.log_dir):
            tf.gfile.DeleteRecursively(self.log_dir)
        tf.gfile.MakeDirs(self.log_dir)
def descriptor(self, inputs, reuse=False, input_action=None, dense_layer=True):
with tf.variable_scope('des', reuse=reuse):
if self.net_type == 'STG_5_xzz':
# STG_action V0.4 20180217
conv1 = conv3d_leaky_relu(inputs, 50, (3, 5, 5), strides=(1, 2, 3), padding="VALID", name="conv1")
conv2 = conv3d_leaky_relu(conv1, 50, (3, 5, 5), strides=(1, 2, 2), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_leaky_relu(conv2, 27, (1, 11, 14), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
if input_action is not None:
conv3 = tf.concat([input_action, tf.layers.flatten(conv3)], 1)
dense1 = tf.layers.dense(conv3, 128, activation=tf.nn.tanh, name="dense1/w")
dense2 = tf.layers.dense(dense1, 128, activation=tf.nn.tanh, name="dense2/w")
dense = tf.layers.dense(dense2, 1, activation=tf.nn.tanh, name="dense/w")
return dense
if self.net_type == 'STG_5_V2.0':
"""
STG_action V2.0 20180227 V1.3-2 + more fc before concat
"""
conv1 = conv3d_leaky_relu(inputs, 120, (3, 5, 5), strides=(1, 2, 3), padding="VALID", name="conv1")
conv2 = conv3d_leaky_relu(conv1, 30, (3, 5, 5), strides=(1, 2, 2), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_leaky_relu(conv2, 25, (1, 11, 14), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
fc1 = tf.layers.dense(input_action, 50, activation=tf.nn.leaky_relu, name="fc1/w")
fc2 = tf.layers.dense(fc1, 25, activation=tf.nn.leaky_relu, name="fc2/w")
concat_layer = tf.concat([fc2, tf.layers.flatten(conv3)], 1)
fc3 = tf.layers.dense(concat_layer, 50, activation=tf.nn.leaky_relu, name="fc3/w")
fc4 = tf.layers.dense(fc3, 1, activation=tf.nn.leaky_relu, name="fc4/w")
return fc4
if self.net_type == 'STG_5_V1.3-2':
"""
STG_action V1.3 20180220 V1.2 + concat less.
"""
conv1 = conv3d_leaky_relu(inputs, 120, (3, 5, 5), strides=(1, 2, 3), padding="VALID", name="conv1")
conv2 = conv3d_leaky_relu(conv1, 30, (3, 5, 5), strides=(1, 2, 2), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_leaky_relu(conv2, 6, (1, 11, 14), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
if input_action is not None:
conv3 = tf.concat([input_action, tf.layers.flatten(conv3)], 1)
dense1 = tf.layers.dense(conv3, 50, activation=tf.nn.leaky_relu, name="dense1/w")
dense2 = tf.layers.dense(dense1, 1, activation=tf.nn.leaky_relu, name="dense2/w")
return dense2
if self.net_type == 'STG_5_V1.3':
"""
STG_action V1.3 20180220 V1.2 + concat less.
"""
conv1 = conv3d_leaky_relu(inputs, 120, (3, 5, 5), strides=(1, 2, 3), padding="VALID", name="conv1")
conv2 = conv3d_leaky_relu(conv1, 30, (3, 5, 5), strides=(1, 2, 2), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_leaky_relu(conv2, 3, (1, 11, 14), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
if input_action is not None:
conv3 = tf.concat([input_action, tf.layers.flatten(conv3)], 1)
dense1 = tf.layers.dense(conv3, 50, activation=tf.nn.leaky_relu, name="dense1/w")
dense2 = tf.layers.dense(dense1, 1, activation=tf.nn.leaky_relu, name="dense2/w")
return dense2
if self.net_type == 'STG_5_V1.2':
"""
STG_action V1.2 20180220 V1.1 + more fc layer
"""
conv1 = conv3d_leaky_relu(inputs, 50, (3, 5, 5), strides=(1, 2, 3), padding="VALID", name="conv1")
conv2 = conv3d_leaky_relu(conv1, 50, (3, 5, 5), strides=(1, 2, 2), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_leaky_relu(conv2, 27, (1, 11, 14), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
if input_action is not None:
conv3 = tf.concat([input_action, tf.layers.flatten(conv3)], 1)
dense1 = tf.layers.dense(conv3, 50, activation=tf.nn.leaky_relu, name="dense1/w")
dense2 = tf.layers.dense(dense1, 1, activation=tf.nn.leaky_relu, name="dense2/w")
return dense2
if self.net_type == 'STG_5_V1.1':
"""
STG_action V1.1 20180220 V1.0 + leaky relu
"""
conv1 = conv3d_leaky_relu(inputs, 50, (3, 5, 5), strides=(1, 2, 3), padding="VALID", name="conv1")
conv2 = conv3d_leaky_relu(conv1, 50, (3, 5, 5), strides=(1, 2, 2), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_leaky_relu(conv2, 27, (1, 11, 14), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
if input_action is not None:
conv3 = tf.concat([input_action, tf.layers.flatten(conv3)], 1)
dense = tf.layers.dense(conv3, 1, activation=None, name="dense/w")
return dense
if self.net_type == 'STG_5_V1':
"""
STG_action V1.0 20180220 After V1.0, the image input will be 5 frame, 55*100*3
"""
conv1 = conv3d_relu(inputs, 50, (3, 5, 5), strides=(1, 2, 3), padding="VALID", name="conv1")
conv2 = conv3d_relu(conv1, 50, (3, 5, 5), strides=(1, 2, 2), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_relu(conv2, 27, (1, 11, 14), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
if input_action is not None:
conv3 = tf.concat([input_action, tf.layers.flatten(conv3)], 1)
dense = tf.layers.dense(conv3, 1, activation=None, name="dense/w")
return dense
if self.net_type == 'STG_3_demo_4':
"""
STG_action V0.4 20180217
"""
conv1 = conv3d_relu(inputs, 60, (3, 7, 7), strides=(1, 3, 3), padding="SAME", name="conv1")
conv2 = conv3d_relu(conv1, 60, (3, 5, 5), strides=(1, 2, 3), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_relu(conv2, 37, (1, 17, 21), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
if input_action is not None:
conv3 = tf.concat([input_action, tf.layers.flatten(conv3)], 1)
dense = tf.layers.dense(conv3, 1, activation=None, name="dense/w")
return dense
if self.net_type == 'STG_3_demo_2':
"""
STG_action V0.3 20180215
"""
conv1 = conv3d(inputs, 60, (3, 7, 7), strides=(1, 3, 3), padding="SAME", name="conv1")
conv1 = tf.nn.relu(conv1)
conv2 = conv3d(conv1, 60, (3, 25, 25), strides=(1, 2, 3), padding=(0, 0, 0), name="conv2")
conv2 = tf.nn.relu(conv2)
conv3 = conv3d(conv2, 8, (1, 5, 10), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
conv3 = tf.nn.relu(conv3)
if input_action is not None:
conv3 = tf.layers.flatten(conv3)
conv3 = tf.concat([input_action, conv3], 1)
if dense_layer:
dense = tf.layers.dense(conv3, 50, activation=tf.nn.relu, name="dense/w")
return dense
elif self.net_type == 'STG_3_demo_1':
"""
STG_action V0.2 20180214
"""
conv1 = conv3d(inputs, 60, (3, 7, 7), strides=(1, 3, 3), padding="SAME", name="conv1")
conv1 = tf.nn.relu(conv1)
conv2 = conv3d(conv1, 60, (3, 25, 25), strides=(1, 2, 3), padding=(0, 0, 0), name="conv2")
conv2 = tf.nn.relu(conv2)
conv3 = conv3d(conv2, 47, (1, 7, 15), strides=(1, 1, 1), padding=(0, 0, 0), name="conv3")
conv3 = tf.nn.relu(conv3)
if input_action is not None:
conv3 = tf.layers.flatten(conv3)
conv3 = tf.concat([input_action, conv3], 1)
if dense_layer:
dense = tf.layers.dense(conv3, 50, activation=tf.nn.relu, name="dense/w")
return dense
elif self.net_type == 'STG_20180212':
"""
STG_action V0.1 20180212
"""
conv1 = conv3d_relu(inputs, 120, (3, 7, 7), strides=(1, 3, 3), padding="SAME", name="conv1")
conv2 = conv3d_relu(conv1, 30, (3, 25, 25), strides=(1, 2, 3), padding=(0, 0, 0), name="conv2")
conv3 = conv3d_relu(conv2, 15, (1, 7, 15), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
if input_action is not None:
conv3 = tf.layers.flatten(conv3)
conv3 = tf.concat([input_action, conv3], 1)
dense = tf.layers.dense(conv3, 50, activation=tf.nn.relu, name="dense/w")
return dense
elif self.net_type == 'STG_5':
"""
This is for small frame
"""
conv1 = conv3d(inputs, 120, (3, 7, 7), strides=(1, 3, 3), padding="SAME", name="conv1")
conv1 = tf.nn.relu(conv1)
conv2 = conv3d(conv1, 30, (3, 30, 30), strides=(1, 2, 3), padding=(0, 0, 0), name="conv2")
conv2 = tf.nn.relu(conv2)
conv3 = conv3d(conv2, 5, (1, 6, 9), strides=(1, 2, 2), padding=(0, 0, 0), name="conv3")
conv3 = tf.nn.relu(conv3)
elif self.net_type == 'ST':
"""
This is the spatial temporal model used for synthesizing dynamic textures with both spatial and temporal
stationarity. e.g. sea, ocean.
"""
conv1 = conv3d(inputs, 120, (15, 15, 15), strides=(7, 7, 7), padding="SAME", name="conv1")
conv1 = tf.nn.relu(conv1)
conv2 = conv3d(conv1, 40, (7, 7, 7), strides=(3, 3, 3), padding="SAME", name="conv2")
conv2 = tf.nn.relu(conv2)
conv3 = conv3d(conv2, 20, (2, 3, 3), strides=(1, 2, 2), padding="SAME", name="conv3")
conv3 = tf.nn.relu(conv3)
elif self.net_type == 'FC_S':
"""
This is the spatial fully connected model used for synthesizing dynamic textures with only temporal
stationarity with image size of 100. e.g. fire pot, flashing lights.
"""
conv1 = conv3d(inputs, 120, (7, 7, 7), strides=(2, 2, 2), padding="SAME", name="conv1")
conv1 = tf.nn.relu(conv1)
conv2 = conv3d(conv1, 30, (5, 50, 50), strides=(2, 2, 2), padding=(2, 0, 0), name="conv2")
conv2 = tf.nn.relu(conv2)
conv3 = conv3d(conv2, 5, (2, 1, 1), strides=(1, 2, 2), padding=(1, 0, 0), name="conv3")
conv3 = tf.nn.relu(conv3)
elif self.net_type == 'FC_S_large':
"""
This is the spatial fully connected model for images with size of 224.
"""
conv1 = conv3d(inputs, 120, (7, 7, 7), strides=(3, 3, 3), padding="SAME", name="conv1")
conv1 = tf.nn.relu(conv1)
conv2 = conv3d(conv1, 30, (4, 75, 75), strides=(2, 1, 1), padding=(2, 0, 0), name="conv2")
conv2 = tf.nn.relu(conv2)
conv3 = conv3d(conv2, 5, (2, 1, 1), strides=(1, 1, 1), padding=(1, 0, 0), name="conv3")
conv3 = tf.nn.relu(conv3)
else:
return NotImplementedError
return conv3
def langevin_dynamics(self, samples, sample_a, gradient, gradient_a, batch_id,
update_state=True, update_action=True):
for i in range(self.sample_steps):
if update_state:
noise = np.random.randn(*samples.shape)
grad | |
#!BPY
"""
Name: 'TikZ (.tex)...'
Blender: 245
Group: 'Export'
Tooltip: 'Export selected curves as TikZ paths for use with (La)TeX'
"""
__author__ = '<NAME>'
__version__ = "1.0"
__url__ = ("Documentation, http://www.fauskes.net/code/blend2tikz/documentation/",
"Author's homepage, http://www.fauskes.net/",
"TikZ examples, http://www.fauskes.net/pgftikzexamples/")
__bpydoc__ = """\
This script exports selected curves and empties to TikZ format for use with TeX.
PGF and TikZ is a powerful macro package for creating high quality illustrations
and graphics for use with (La|Con)TeX.
Important: TikZ is primarily for creating 2D illustrations. This script will
therefore only export the X and Y coordinates. However, the Z coordinate is used
to determine draw order.
Usage:
Select the objects you want to export and invoke the script from the
"File->Export" menu[1]. Alternatively you can load and run the script from
inside Blender.
A dialog box will pop up with various options:<br>
- Draw: Insert a draw operation in the generated path.<br>
- Fill: Insert a fill operation in the generated path.<br>
- Transform: Apply translation and scale transformations.<br>
- Materials: Export materials assigned to curves.<br>
- Empties: Export empties as named coordinates.<br>
- Only properties: Use only the style property of materials if set.<br>
- Standalone: Create a standalone document.<br>
- Only code: Generate only code for drawing paths.<br>
- Clipboard: Copy generated code to the clipboard. <br>
Properties:
If an object is assigned a ID property or game property named 'style' of type
string, its value will be added to the path as an option. You can use the
Help->Property ID browser to set this value, or use the Logic panel to
add a game property.
Materials:
The exporter has basic support for materials. By default the material's RGB
value is used as fill or draw color. You can also set the alpha value for
transparency effects.
An alternative is to specify style options
directly by putting the values in a 'style' property assigned to the material.
You can use the Help->Property ID browser to set this value.
Issues:<br>
- Only bezier and polyline curves are supported.<br>
- A full Python install is required for clipboard support on Windows. Other platforms
need the standard subprocess module (requires Python 2.4 or later). Additionally:<br>
* Windows users need to install the PyWin32 module.<br>
* Unix-like users need the xclip command line tool or the PyGTK_ module installed.<br>
* OS X users need the pbcopy command line tool installed.<br>
[1] Requires you to put the script in Blender's scripts folder. Blender will
then automatically detect the script.
"""
import Blender
from Blender import sys as bsys
from itertools import izip
import itertools, math
from Blender import Mesh, Mathutils, Registry, Scene, Material, Group
from textwrap import wrap
from string import Template
# Curve types
TYPE_POLY = 0
TYPE_BEZIER = 1
TYPE_NURBS = 4
# Degrees per radian, for converting Blender's radian angles.
R2D = 180.0 / math.pi
# Start of configuration section -------
# Templates
# Template for a complete, compilable standalone LaTeX document.
standalone_template = r"""
\documentclass{article}
\usepackage{tikz}
%(preamble)s
%(materials)s
\begin{document}
\begin{tikzpicture}
%(pathcode)s
\end{tikzpicture}
\end{document}
"""
# Template for a bare tikzpicture, to be \input into an existing document.
fig_template = r"""
%(materials)s
\begin{tikzpicture}
%(pathcode)s
\end{tikzpicture}
"""
# Key under which the exporter options are stored in Blender's registry.
REG_KEY = 'tikz_export'
# config options:
# Default option values; overridden from the registry below if present.
STANDALONE = True
CODE_ONLY = False
DRAW_CURVE = True
FILL_CLOSED_CURVE = True
TRANSFORM_CURVE = True
CLIPBOARD_OUTPUT = False
EMPTIES = True
EXPORT_MATERIALS = False
ONLY_PROPERTIES = False
USE_PLOTPATH = False
WRAP_LINES = True
# Tooltip text for each option toggle in the GUI popup (see draw_GUI).
tooltips = {
    'STANDALONE': 'Output a standalone document',
    'DRAW_CURVE':
        'Draw curves',
    'FILL_CLOSED_CURVE':
        'Fill closed curves',
    'TRANSFORM_CURVE':
        'Apply transformations',
    'CLIPBOARD_OUTPUT':
        'Put generated code on clipboard',
    'CODE_ONLY':
        'Output pathcode only',
    'EMPTIES': 'Export empties',
    'EXPORT_MATERIALS': 'Apply materials to curves',
    'ONLY_PROPERTIES':
        'Use only properties for materials with the style property set',
    'USE_PLOTPATH':
        'Use the plot path operations for polylines',
    'WRAP_LINES':
        'Wrap long lines',
}
def update_registry():
    """Persist the current exporter option values to Blender's registry."""
    options = dict(
        STANDALONE=STANDALONE,
        DRAW_CURVE=DRAW_CURVE,
        FILL_CLOSED_CURVE=FILL_CLOSED_CURVE,
        TRANSFORM_CURVE=TRANSFORM_CURVE,
        CLIPBOARD_OUTPUT=CLIPBOARD_OUTPUT,
        CODE_ONLY=CODE_ONLY,
        EMPTIES=EMPTIES,
        EXPORT_MATERIALS=EXPORT_MATERIALS,
        ONLY_PROPERTIES=ONLY_PROPERTIES,
        USE_PLOTPATH=USE_PLOTPATH,
        WRAP_LINES=WRAP_LINES,
    )
    # Third argument True = cache the key to disk so it survives restarts.
    Registry.SetKey(REG_KEY, options, True)
# Looking for a saved key in Blender.Registry dict:
rd = Registry.GetKey(REG_KEY, True)
if rd:
    # A saved configuration exists: load every option from it.
    try:
        STANDALONE = rd['STANDALONE']
        DRAW_CURVE = rd['DRAW_CURVE']
        FILL_CLOSED_CURVE = rd['FILL_CLOSED_CURVE']
        TRANSFORM_CURVE = rd['TRANSFORM_CURVE']
        CLIPBOARD_OUTPUT = rd['CLIPBOARD_OUTPUT']
        CODE_ONLY = rd['CODE_ONLY']
        EMPTIES = rd['EMPTIES']
        EXPORT_MATERIALS = rd['EXPORT_MATERIALS']
        ONLY_PROPERTIES = rd['ONLY_PROPERTIES']
        USE_PLOTPATH = rd['USE_PLOTPATH']
        WRAP_LINES = rd['WRAP_LINES']
    except KeyError:
        # An option key was missing (e.g. options added in a newer script
        # version): rewrite the registry entry from the current values.
        print "Keyerror"
        update_registry()
else:
    # No saved configuration yet: store the defaults.
    print "update registry"
    update_registry()
# Start of GUI section ------------------------------------------------
from Blender import Draw
def draw_GUI():
    """Pop up the exporter options dialog and store the chosen values.

    Updates the module-level option globals and persists them to the
    registry when the user confirms. Returns the PupBlock result
    (falsy if the dialog was cancelled).
    """
    global STANDALONE, DRAW_CURVE, FILL_CLOSED_CURVE, TRANSFORM_CURVE
    global CLIPBOARD_OUTPUT, CODE_ONLY, EMPTIES, EXPORT_MATERIALS
    global ONLY_PROPERTIES
    global USE_PLOTPATH
    global WRAP_LINES
    # One toggle button per option, seeded with the current value.
    standalonetog = Draw.Create(STANDALONE)
    codeonlytog = Draw.Create(CODE_ONLY)
    drawcurvetog = Draw.Create(DRAW_CURVE)
    fillcurvetog = Draw.Create(FILL_CLOSED_CURVE)
    transformcurvetog = Draw.Create(TRANSFORM_CURVE)
    clipboardtog = Draw.Create(CLIPBOARD_OUTPUT)
    emptiestog = Draw.Create(EMPTIES)
    materialstog = Draw.Create(EXPORT_MATERIALS)
    onlyproptog = Draw.Create(ONLY_PROPERTIES)
    useplotpathtog = Draw.Create(USE_PLOTPATH)
    wraplinestog = Draw.Create(WRAP_LINES)
    block = []
    #block.append("Export:")
    block.append(("Draw", drawcurvetog, tooltips['DRAW_CURVE']))
    block.append(("Fill", fillcurvetog, tooltips['FILL_CLOSED_CURVE']))
    block.append(("Transform", transformcurvetog, tooltips['TRANSFORM_CURVE']))
    block.append(("Use plot path", useplotpathtog, tooltips['USE_PLOTPATH']))
    block.append("Export:")
    block.append(("Materials", materialstog, tooltips['EXPORT_MATERIALS']))
    block.append(("Empties", emptiestog, tooltips['EMPTIES']))
    block.append("Material options:")
    block.append(("Only properties", onlyproptog, tooltips['ONLY_PROPERTIES']))
    # Typo fix: section label read 'Ouput options'.
    block.append('Output options')
    block.append(("Standalone", standalonetog, tooltips['STANDALONE']))
    block.append(("Only code", codeonlytog, tooltips['CODE_ONLY']))
    block.append(("Clipboard", clipboardtog, tooltips['CLIPBOARD_OUTPUT']))
    block.append(("Wrap lines", wraplinestog, tooltips['WRAP_LINES']))
    retval = Blender.Draw.PupBlock("Blend2TikZ options", block)
    if retval:
        # set options
        STANDALONE = standalonetog.val
        DRAW_CURVE = drawcurvetog.val
        FILL_CLOSED_CURVE = fillcurvetog.val
        TRANSFORM_CURVE = transformcurvetog.val
        CLIPBOARD_OUTPUT = clipboardtog.val
        CODE_ONLY = codeonlytog.val
        EMPTIES = emptiestog.val
        EXPORT_MATERIALS = materialstog.val
        ONLY_PROPERTIES = onlyproptog.val
        USE_PLOTPATH = useplotpathtog.val
        WRAP_LINES = wraplinestog.val
        # Persist the new choices so the next invocation starts from them.
        update_registry()
    return retval
# End of GUI section ----------------------
# End of configuration section ---------
# Indices of the x and y components in a coordinate tuple.
X = 0
Y = 1
# NOTE(review): populated elsewhere in the file; appears to collect the
# materials actually used by the exported objects -- confirm at the call sites.
used_materials = {}
# Utility functions
def nsplit(seq, n=2):
    """Split a sequence into pieces of length n.

    If the length of the sequence isn't a multiple of n, the rest is
    discarded.  Note that nsplit will split strings into individual
    characters.

    Examples:
    >>> nsplit('aabbcc')
    [('a', 'a'), ('b', 'b'), ('c', 'c')]
    >>> nsplit('aabbcc', n=3)
    [('a', 'a', 'b'), ('b', 'c', 'c')]

    # Note that cc is discarded
    >>> nsplit('aabbcc', n=4)
    [('a', 'a', 'b', 'b')]
    """
    # zip over n references to the *same* iterator yields consecutive
    # n-tuples and drops any incomplete trailing group.  The builtin zip
    # replaces the Python-2-only izip; the resulting list is identical.
    return list(zip(*[iter(seq)] * n))
def mreplace(s, chararray, newchararray):
    """Apply paired character replacements to *s*.

    Each character in ``chararray`` is replaced by the character at the same
    position in ``newchararray``.  Replacements are applied sequentially, so
    later pairs see the output of earlier ones.
    """
    result = s
    for old_char, new_char in zip(chararray, newchararray):
        result = result.replace(old_char, new_char)
    return result
def tikzify(s):
    """Return *s* as a TikZ-safe name.

    Maps the characters ``\\ , : .`` to ``- + _ *`` respectively; a blank or
    whitespace-only string yields the empty string.
    """
    if not s.strip():
        return ""
    # Sequential single-character substitutions (same pairs, same order,
    # as mreplace(s, r'\,:.', '-+_*')).
    safe = s
    for unsafe_char, safe_char in zip('\\,:.', '-+_*'):
        safe = safe.replace(unsafe_char, safe_char)
    return safe
def copy_to_clipboard(text):
    """Copy text to the clipboard.

    Returns True if successful, False otherwise.
    Works on Windows, *nix and Mac. Tries the following:
    1. Use the win32clipboard module from the win32 package.
    2. Calls the xclip command line tool (*nix)
    3. Calls the pbcopy command line tool (Mac)
    4. Calls the xsel command line tool (*nix)
    5. Try pygtk
    """
    def _pipe_to(command):
        # Pipe ``text`` into an external command's stdin.
        # True only when the tool exits successfully (the original ignored
        # the exit code and could report success for a failed copy).
        import subprocess
        p = subprocess.Popen(command, stdin=subprocess.PIPE)
        p.stdin.write(text)
        p.stdin.close()
        return p.wait() == 0

    # try windows first
    try:
        import win32clipboard
        win32clipboard.OpenClipboard()
        win32clipboard.EmptyClipboard()
        win32clipboard.SetClipboardText(text)
        win32clipboard.CloseClipboard()
        return True
    except Exception:
        pass
    # try the command line tools: xclip, pbcopy (OS X), then xsel.
    # except Exception (not a bare except) so Ctrl-C still propagates.
    for command in (['xclip', '-selection', 'c'], ['pbcopy'], ['xsel']):
        try:
            if _pipe_to(command):
                return True
        except Exception:
            pass
    # try pygtk
    try:
        # Code from
        # http://www.vector-seven.com/2007/06/27/
        # passing-data-between-gtk-applications-with-gtkclipboard/
        import pygtk
        pygtk.require('2.0')
        import gtk
        # get the clipboard
        clipboard = gtk.clipboard_get()
        # set the clipboard text data
        clipboard.set_text(text)
        # make our data available to other applications
        clipboard.store()
        # Bug fix: the original fell through here and returned None,
        # reporting failure even when the pygtk copy succeeded.
        return True
    except Exception:
        return False
def get_property(obj, name):
    """Get named object property.

    Looks first in custom ID properties (``obj.properties``), then in game
    properties (``obj.getProperty``).  Returns a list of the values found,
    which may be empty.
    """
    prop_value = []
    # except Exception (not bare except) so KeyboardInterrupt/SystemExit
    # are not swallowed; missing keys/attributes still fall through.
    try:
        prop_value.append(obj.properties[name])
    except Exception:
        pass
    try:
        # look for game properties; only non-blank string values are used
        prop = obj.getProperty(name)
        if prop.type == "STRING" and prop.data.strip():
            prop_value.append(prop.data)
    except Exception:
        pass
    return prop_value
def get_material(material):
    """Convert material to TikZ options.

    Returns the material's TikZ-safe style name (empty string when no
    material is given) and records the material in the module-level
    ``used_materials`` dict so write_materials() can emit its definition.
    """
    if not material:
        return ""
    # (removed unused local ``opts`` from the original)
    mat_name = tikzify(material.name)
    used_materials[mat_name] = material
    return mat_name
def write_materials(used_materials):
    """Return TikZ code for the used materials.

    Args:
        used_materials: dict mapping tikzified names to Blender materials.

    For each material a ``\\definecolor`` and a ``\\tikzstyle`` line are
    emitted, unless the material carries both an 'onlyproperties' flag and a
    'style' property, in which case only the custom style options are used.
    """
    c = "% Materials section \n"
    for material in used_materials.values():
        mat_name = tikzify(material.name)
        matopts = ''
        proponly = ONLY_PROPERTIES
        try:
            proponly = material.properties['onlyproperties']
            # The flag may be stored as a string; treat '0'/'false' as False.
            if proponly and isinstance(proponly, str):
                proponly = proponly.lower() not in ('0', 'false')
        except Exception:  # no custom 'onlyproperties' property
            pass
        try:
            matopts = material.properties['style']
        except Exception:  # no custom 'style' property
            pass
        rgb = material.rgbCol
        spec = material.specCol  # currently unused; kept from original
        alpha = material.alpha
        flags = material.getMode()  # currently unused; kept from original
        options = []
        if not (proponly and matopts):
            c += "\\definecolor{%s_col}{rgb}{%s,%s,%s}\n" \
                 % tuple([mat_name] + rgb)
            options.append('%s_col' % mat_name)
            # NOTE(review): opacity assumed tied to the generated color
            # option — confirm against upstream blend2tikz formatting.
            if alpha < 1.0:
                options.append('opacity=%s' % alpha)
        if matopts:
            options += [matopts]
        c += "\\tikzstyle{%s}= [%s]\n" % (mat_name, ",".join(options))
    return c
def write_object(obj, empties):
"""Write Curves"""
s = ""
name = obj.name
prop = obj.properties
mtrx = obj.matrix.rotationPart()
x, y, z = obj.getLocation('worldspace')
rot = obj.getEuler('worldspace')
scale_x, scale_y, scale_z = obj.matrix.scalePart()
# Convert to degrees
rot_z = rot.z * R2D
if obj.type not in ["Curve", "Empty"]:
return s
ps = ""
if obj.type == 'Curve':
curvedata = obj.data
s += "%% %s\n" % name
for curnurb in curvedata:
if curnurb.type == TYPE_BEZIER:
knots = []
handles = []
# Build lists of knots and handles
for | |
doesn't hang things up later...
del self.callback_event
self.callback_stop = True
self.notify_event.set()
ct.join()
class VirtualTimeBase(object):
    """Shared tests for the virtual time functions.

    Subclasses mix in RunPatched/RunUnpatched, which provide
    ``self.virtual_time_enabled``; the run_time_* helpers use that flag to
    decide whether setting the time should take effect or not.
    """
    def test_datetime_init(self):
        """tests the basic instantiation of datetime objects."""
        datetime.datetime(2012, 7, 25) # Richardg's birthday...hooray
        datetime.datetime(year=2012, month=7, day=25, hour=10, minute=27, second=3, microsecond=100, tzinfo=pytz.timezone('Africa/Johannesburg'))
        # test args, kwargs
        args = (2012,7,25)
        kwargs = {'hour':10, 'minute':27, 'second':3}
        kwargs_only = {'year':2012, 'month':7, 'day': 25, 'hour':10, 'minute':27, 'second':3, 'microsecond':100, 'tzinfo': pytz.timezone('Africa/Johannesburg')}
        datetime.datetime(*args)
        datetime.datetime(*args, **kwargs)
        datetime.datetime(**kwargs_only)
    def test_time(self):
        """tests that we can set time"""
        run_time_function_tst(time.time, virtualtime.set_time, 100, enabled=self.virtual_time_enabled)
    def test_localtime(self):
        """tests that we can set time and it affects localtime"""
        run_time_derived_function_tst(time.localtime, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
    def test_gmtime(self):
        """tests that we can set time and it affects gmtime"""
        run_time_derived_function_tst(time.gmtime, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
    def test_asctime(self):
        """tests that we can set time and it affects asctime"""
        # reslice so string ordering matches chronological ordering
        order_preserving_asctime = lambda: order_preserving_timestr_reslice(time.asctime())
        run_time_derived_function_tst(order_preserving_asctime, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
    def test_ctime(self):
        """tests that we can set time and it affects ctime"""
        order_preserving_ctime = lambda: order_preserving_timestr_reslice(time.ctime())
        run_time_derived_function_tst(order_preserving_ctime, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
    def test_strftime(self):
        """tests that we can set time and it affects strftime"""
        # ISO format is already order-preserving, no reslicing needed
        strftime_iso = lambda: time.strftime("%Y-%m-%d %H:%M:%S")
        run_time_derived_function_tst(strftime_iso, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
    def test_datetime_now(self):
        """tests that setting time and datetime are both possible"""
        run_time_function_tst(datetime.datetime.now, virtualtime.set_local_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
    def test_datetime_utcnow(self):
        """tests that setting time and datetime are both possible"""
        run_time_function_tst(datetime.datetime.utcnow, virtualtime.set_utc_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
    def test_datetime_tz_now(self):
        """tests that setting time and datetime are both possible"""
        run_time_function_tst(datetime_tz.datetime_tz.now, virtualtime.set_local_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
    def test_datetime_tz_utcnow(self):
        """tests that setting time and datetime are both possible"""
        run_time_function_tst(datetime_tz.datetime_tz.utcnow, virtualtime.set_utc_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
    def test_datetime_tz_now_other_tz(self):
        """tests that setting the time affects now() viewed in other timezones"""
        for tz_name in ["Asia/Tokyo", "Europe/London", "America/Chicago"]:
            tz = pytz.timezone(tz_name)
            tz_now = lambda: datetime_tz.datetime_tz.now().astimezone(tz)
            run_time_derived_function_tst(tz_now, datetime_tz.datetime_tz.utcnow, virtualtime.set_utc_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
class TestDisabledVirtualTime(VirtualTimeBase, RunUnpatched):
    """Tests that virtual time functions have no effect when VirtualTime is disabled"""
class TestVirtualTime(VirtualTimeBase, RunPatched):
    """Tests that virtual time functions take effect when VirtualTime is enabled"""
class SleepBase(object):
    """Shared tests for time.sleep interaction with virtual time.

    Subclasses mix in RunPatched/RunUnpatched to choose whether virtualtime
    is active (``self.virtual_time_enabled``).
    """
    def setup_method(self, method): # This is a wrapper of setUp for py.test (py.test and nose take different method setup methods)
        self.setUp()
    def setUp(self):
        # Baseline count of threads waiting on the virtualtime condition, so
        # wait_sleep_started() can detect this test's own sleepers starting.
        self.initial_waiter_count = self.count_waiters()
    def teardown_method(self, method): # This is a wrapper of tearDown for py.test (py.test and nose take different method setup methods)
        self.tearDown()
    def tearDown(self):
        del self.initial_waiter_count
    # threading.Condition's private waiter list is name-mangled differently
    # on Python 2 and 3, so select the right accessor at class-definition time.
    if sys.version_info.major < 3:
        def count_waiters(self):
            """Number of threads currently waiting on the virtual time state."""
            return len(virtualtime._virtual_time_state._Condition__waiters)
    else:
        def count_waiters(self):
            """Number of threads currently waiting on the virtual time state."""
            return len(virtualtime._virtual_time_state._waiters)
    def wait_sleep_started(self, sleep_count, max_wait=5.0):
        """Waits for the given number of sleeps to start before continuing (with a timeout)"""
        if not self.virtual_time_enabled:
            # Unpatched sleeps don't wait on the virtualtime condition,
            # so there is nothing to observe.
            return
        start_wait_check = virtualtime._original_time()
        while self.count_waiters() < self.initial_waiter_count + sleep_count:
            virtualtime._original_sleep(0.001)
            delay = virtualtime._original_time() - start_wait_check
            if delay > max_wait:
                raise ValueError("Not enough sleepers started waiting in time...")
    @restore_time_after
    def test_sleep(self):
        """Tests that sleep comes back quicker than normal when time is advanced"""
        first_time = time.time()
        sleeper_thread = threading.Thread(target=time.sleep, args=(3,), name="test_sleep_sleeper")
        sleeper_thread.start()
        self.wait_sleep_started(1, 0.2)
        # Jump past the sleep deadline: a patched sleep should wake early.
        virtualtime.set_time(first_time + 5)
        sleeper_thread.join()
        virtualtime.restore_time()
        join_time = time.time()
        if self.virtual_time_enabled:
            assert join_time - first_time < 0.5
        else:
            assert join_time - first_time >= 3
    @restore_time_after
    def test_parallel_sleeps(self):
        """Tests that sleep comes back quicker than normal when time is advanced, and that this works with lots of threads"""
        first_time = virtualtime._original_time()
        sleeper_threads = {}
        REPEATS = 100
        for n in range(REPEATS):
            sleeper_threads[n] = sleeper_thread = threading.Thread(target=time.sleep, args=(3,), name="test_sleep_sleeper_%d" % n)
            sleeper_thread.start()
        self.wait_sleep_started(REPEATS, 0.5)
        thread_time = virtualtime._original_time()
        setup_duration = thread_time - first_time
        assert setup_duration < 0.5
        virtualtime.set_time(thread_time + 20)
        for n in range(REPEATS):
            sleeper_threads[n].join()
        join_time = virtualtime._original_time()
        sleep_duration = join_time - thread_time
        virtualtime.restore_time()
        if self.virtual_time_enabled:
            assert sleep_duration < 0.2
        else:
            assert sleep_duration >= 3
class TestDisabledSleep(SleepBase, RunUnpatched):
    """Sleep tests with virtualtime disabled: sleeps take real time."""
    pass
class TestSleep(SleepBase, RunPatched):
    """Sleep tests with virtualtime patching enabled."""

    @attr('long_running')
    def test_many_parallel_sleeps(self):
        """Tests that sleep comes back quicker than normal when time is advanced, and that this works with lots of threads when repeated many times"""
        # Re-run the parallel sleep test 100 times to shake out races.
        repetitions = 100
        for _ in range(repetitions):
            self.test_parallel_sleeps()
class TestFastForward(RunPatched):
    """Tests for virtualtime fast_forward_* stepping, change notification and delays."""
    def fast_forward_catcher(self, event, msg_dict):
        # Worker thread: record the current time offset each time the notify
        # event fires, until msg_dict gains a "stop" key.
        offsets = msg_dict['offsets']
        while "stop" not in msg_dict:
            event.wait()
            offsets.append(virtualtime._time_offset)
            event.clear()
    @restore_time_after
    def test_fast_forward_time(self):
        """Test that fast forwarding the time works properly"""
        event = threading.Event()
        virtualtime.notify_on_change(event)
        offsets = []
        msg_dict = {'offsets': offsets}
        catcher_thread = threading.Thread(target=self.fast_forward_catcher, args=(event, msg_dict))
        catcher_thread.start()
        start_time = virtualtime._original_time()
        virtualtime.fast_forward_time(1)
        assert virtualtime._time_offset == 1
        virtualtime.fast_forward_time(2.5)
        assert virtualtime._time_offset == 3.5
        virtualtime.fast_forward_time(target=start_time + 9.1, step_size=2.0)
        assert 9 <= virtualtime._time_offset <= 9.2
        virtualtime.restore_time()
        virtualtime.fast_forward_time(-1.3, step_size=0.9)
        virtualtime.restore_time()
        msg_dict['stop'] = True
        event.set()
        catcher_thread.join()
        # Each intermediate step (bounded by step_size) is notified separately.
        assert offsets[:6] == [1.0, 2.0, 3.0, 3.5, 5.5, 7.5]
        assert 9 <= offsets[6] <= 9.2
        assert offsets[7:11] == [0, -0.9, -1.3, 0]
        # depends on how long the stop event takes?
        assert (not offsets[11:]) or offsets[11:] == [0]
    @attr('long_running')
    @restore_time_after
    def test_fast_forward_time_long(self):
        """Test that fast forwarding the time a long way works properly"""
        event = threading.Event()
        virtualtime.notify_on_change(event)
        offsets = []
        msg_dict = {'offsets': offsets}
        catcher_thread = threading.Thread(target=self.fast_forward_catcher, args=(event, msg_dict))
        catcher_thread.start()
        start_time = virtualtime._original_time()
        # 1000 one-second steps, each individually notified.
        virtualtime.fast_forward_time(1000, step_size=1)
        virtualtime.restore_time()
        msg_dict['stop'] = True
        event.set()
        catcher_thread.join()
        assert offsets == list(range(1, 1001)) + [0]
    @restore_time_after
    def test_fast_forward_datetime_style(self):
        """Test that fast forwarding the time works properly when using datetime-style objects"""
        event = threading.Event()
        virtualtime.notify_on_change(event)
        offsets = []
        msg_dict = {'offsets': offsets}
        catcher_thread = threading.Thread(target=self.fast_forward_catcher, args=(event, msg_dict))
        catcher_thread.start()
        start_time = virtualtime._original_datetime_now()
        utc_start_time = datetime_tz.localize(start_time).astimezone(pytz.utc)
        virtualtime.fast_forward_timedelta(datetime.timedelta(seconds=1))
        assert virtualtime._time_offset == 1
        virtualtime.fast_forward_timedelta(datetime.timedelta(seconds=2.5))
        assert virtualtime._time_offset == 3.5
        virtualtime.fast_forward_local_datetime(target=start_time + datetime.timedelta(seconds=9.1), step_size=datetime.timedelta(seconds=2.0))
        assert 9 <= virtualtime._time_offset <= 9.2
        virtualtime.fast_forward_utc_datetime(target=utc_start_time + datetime.timedelta(seconds=18.2), step_size=datetime.timedelta(seconds=20.0))
        assert 18 <= virtualtime._time_offset <= 18.3
        virtualtime.restore_time()
        virtualtime.fast_forward_timedelta(datetime.timedelta(seconds=-1.3), step_size=datetime.timedelta(seconds=0.9))
        virtualtime.restore_time()
        msg_dict['stop'] = True
        event.set()
        catcher_thread.join()
        assert offsets[:6] == [1.0, 2.0, 3.0, 3.5, 5.5, 7.5]
        assert 9 <= offsets[6] <= 9.2
        assert 18 <= offsets[7] <= 18.3
        assert offsets[8:12] == [0, -0.9, -1.3, 0]
        # depends on how long the stop event takes?
        assert (not offsets[12:]) or offsets[12:] == [0]
    def fast_forward_delayer(self, notify_event, delay_event, msg_dict):
        # Worker thread like fast_forward_catcher, but when instructed via
        # ``positions`` it asks virtualtime to delay the fast-forward until
        # delay_event is set, simulating a slow job reacting to time changes.
        offsets = msg_dict['offsets']
        positions = msg_dict['positions']
        while "stop" not in msg_dict:
            notify_event.wait()
            offsets.append(virtualtime._time_offset)
            position = positions.pop(0) if positions else ""
            if position == "start_job":
                virtualtime.delay_fast_forward_until_set(delay_event)
                virtualtime._original_sleep(0.1)
                delay_event.set()
            notify_event.clear()
    @restore_time_after
    def test_fast_forward_delay(self):
        """Test that fast forwarding the time works properly"""
        notify_event = threading.Event()
        virtualtime.notify_on_change(notify_event)
        delay_event = threading.Event()
        offsets = []
        positions = ["start_job", ""]
        msg_dict = {'offsets': offsets, 'positions': positions}
        catcher_thread = threading.Thread(target=self.fast_forward_delayer, args=(notify_event, delay_event, msg_dict))
        catcher_thread.start()
        start_time = virtualtime._original_time()
        virtualtime.fast_forward_time(2)
        assert virtualtime._time_offset == 2
        virtualtime.restore_time()
        msg_dict['stop'] = True
        notify_event.set()
        catcher_thread.join()
        completion_time = virtualtime._original_time()
        assert offsets[:3] == [1.0, 2.0, 0]
        # depends on how long the stop event takes?
        assert (not offsets[3:]) or offsets[3:] == [0]
        # the delayed step must still complete promptly
        assert completion_time - start_time < 0.2
        assert delay_event.is_set()
class TestInheritance(object):
    """Tests how detection of inheritance works for datetime classes"""
    def setup_method(self, method): # This is a wrapper of setUp for py.test (py.test and nose take different method setup methods)
        """Ensure that virtualtime is disabled when starting each test"""
        self.setUp()
    def setUp(self):
        # enable() may nest; loop until fully disabled
        while virtualtime.enabled():
            virtualtime.disable()
    def teardown_method(self, method): # This is a wrapper of tearDown for py.test (py.test and nose take different method setup methods)
        self.tearDown()
    def tearDown(self):
        """Ensure that virtualtime is disabled after running each test"""
        while virtualtime.enabled():
            virtualtime.disable()
    def test_disabled(self):
        """datetime_tz subclasses the unpatched datetime type."""
        assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
    def test_enabled(self):
        """The subclass relationship survives patching datetime.datetime."""
        virtualtime.enable()
        assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
    def test_switching(self):
        """Subclass checks hold for classes defined before, during and after enabling."""
        orig_datetime = datetime.datetime
        class derived_datetime(datetime.datetime):
            pass
        assert issubclass(datetime_tz.datetime_tz, orig_datetime)
        assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
        assert issubclass(derived_datetime, orig_datetime)
        assert issubclass(derived_datetime, datetime.datetime)
        virtualtime.enable()
        # defined while the patched datetime.datetime is in place
        class derived_datetime2(datetime.datetime):
            pass
        assert issubclass(datetime_tz.datetime_tz, orig_datetime)
        assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
        assert issubclass(derived_datetime, orig_datetime)
        assert issubclass(derived_datetime, datetime.datetime)
        assert issubclass(derived_datetime2, orig_datetime)
        assert issubclass(derived_datetime2, datetime.datetime)
        virtualtime.disable()
        assert issubclass(datetime_tz.datetime_tz, orig_datetime)
        assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
        assert issubclass(derived_datetime, orig_datetime)
        assert issubclass(derived_datetime, datetime.datetime)
        assert issubclass(derived_datetime2, orig_datetime)
        assert issubclass(derived_datetime2, datetime.datetime)
    def test_switching_values(self):
        """Values constructed before/after enabling keep the expected types and compare equal."""
        now = datetime_tz.datetime_tz.now()
        assert isinstance(now, datetime.datetime)
        assert isinstance(now, datetime_tz.datetime_tz)
        later = now + datetime.timedelta(hours=1)
        assert isinstance(later, datetime.datetime)
        assert isinstance(later, datetime_tz.datetime_tz)
        start = datetime.datetime.combine(now.date(), now.time())
        assert isinstance(start, datetime.datetime)
        local_start = datetime_tz.localize(start)
        assert local_start == now
        assert isinstance(local_start, datetime_tz.datetime_tz)
        start_tz = datetime_tz.datetime_tz.combine(now.date(), now.time(), datetime_tz.localtz())
        assert isinstance(start_tz, datetime_tz.datetime_tz)
        local_start_tz = datetime_tz.localize(start_tz)
        assert local_start_tz == now
        assert isinstance(local_start_tz, datetime_tz.datetime_tz)
        assert isinstance(datetime_tz.datetime_tz.min, datetime_tz.datetime_tz)
        assert isinstance(datetime_tz.datetime_tz.max, datetime_tz.datetime_tz)
        virtualtime.enable()
        # repeat the same checks with virtualtime enabled
        now = datetime_tz.datetime_tz.now()
        assert isinstance(now, datetime.datetime)
        assert isinstance(now, datetime_tz.datetime_tz)
        later = now + datetime.timedelta(hours=1)
        assert isinstance(later, datetime.datetime)
        assert isinstance(later, datetime_tz.datetime_tz)
        start = datetime.datetime.combine(now.date(), now.time())
        assert isinstance(start, datetime.datetime)
        local_start = datetime_tz.localize(start)
        assert local_start == now
        assert isinstance(local_start, datetime_tz.datetime_tz)
        start_tz = datetime_tz.datetime_tz.combine(now.date(), now.time(), datetime_tz.localtz())
        assert isinstance(start_tz, datetime_tz.datetime_tz)
        local_start_tz = datetime_tz.localize(start_tz)
        assert local_start_tz == now
        assert isinstance(local_start_tz, datetime_tz.datetime_tz)
        assert isinstance(datetime_tz.datetime_tz.min, datetime_tz.datetime_tz)
        assert isinstance(datetime_tz.datetime_tz.max, datetime_tz.datetime_tz)
_original_datetime_module = virtualtime._original_datetime_module
_original_datetime_type = virtualtime._original_datetime_type
_original_datetime_now | |
<gh_stars>1-10
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks import (NORM_LAYERS, PLUGIN_LAYERS, ConvModule,
build_activation_layer, build_norm_layer,
build_upsample_layer)
from mmcv.cnn.utils import normal_init
from torch.nn.init import _calculate_correct_fan
from mmgen.models.builder import MODULES
from mmgen.models.common import AllGatherLayer
class EqualizedLR:
    r"""Equalized Learning Rate.

    This trick is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    The general idea is to dynamically rescale the weight in training instead
    of in initializing so that the variance of the responses in each layer is
    guaranteed with some statistical properties.

    Note that this function is always combined with a convolution module which
    is initialized with :math:`\mathcal{N}(0, 1)`.

    Args:
        name (str | optional): The name of weights. Defaults to 'weight'.
        gain (float, optional): Gain factor applied to the rescaled weight
            (as in Kaiming initialization). Defaults to ``2**0.5``.
        mode (str, optional): The mode of computing ``fan`` which is the
            same as ``kaiming_init`` in pytorch. You can choose one from
            ['fan_in', 'fan_out']. Defaults to 'fan_in'.
        lr_mul (float, optional): Learning-rate multiplier applied on top of
            the rescaled weight. Defaults to 1.0.
    """

    def __init__(self, name='weight', gain=2**0.5, mode='fan_in', lr_mul=1.0):
        self.name = name
        self.mode = mode
        self.gain = gain
        self.lr_mul = lr_mul

    def compute_weight(self, module):
        """Compute weight with equalized learning rate.

        Args:
            module (nn.Module): A module that is wrapped with equalized lr.

        Returns:
            torch.Tensor: Updated weight.
        """
        weight = getattr(module, self.name + '_orig')
        if weight.ndim == 5:
            # weight in shape of [b, out, in, k, k]; use one sample's weight
            # to compute fan
            fan = _calculate_correct_fan(weight[0], self.mode)
        else:
            assert weight.ndim <= 4
            fan = _calculate_correct_fan(weight, self.mode)
        # He-style rescaling: w * gain / sqrt(fan), times the lr multiplier
        weight = weight * torch.tensor(
            self.gain, device=weight.device) * torch.sqrt(
                torch.tensor(1. / fan, device=weight.device)) * self.lr_mul
        return weight

    def __call__(self, module, inputs):
        """Standard interface for forward pre hooks."""
        # Recompute the scaled weight from '<name>_orig' before every forward.
        setattr(module, self.name, self.compute_weight(module))

    @staticmethod
    def apply(module, name, gain=2**0.5, mode='fan_in', lr_mul=1.):
        """Apply function.

        This function is to register an equalized learning rate hook in an
        ``nn.Module``.

        Args:
            module (nn.Module): Module to be wrapped.
            name (str | optional): The name of weights. Defaults to 'weight'.
            gain (float, optional): Gain factor. Defaults to ``2**0.5``.
            mode (str, optional): The mode of computing ``fan`` which is the
                same as ``kaiming_init`` in pytorch. You can choose one from
                ['fan_in', 'fan_out']. Defaults to 'fan_in'.
            lr_mul (float, optional): Learning-rate multiplier. Defaults to 1.

        Returns:
            EqualizedLR: The registered hook object (note: the hook, not the
            module, is returned).
        """
        # sanity check for duplicated hooks.
        for _, hook in module._forward_pre_hooks.items():
            if isinstance(hook, EqualizedLR):
                raise RuntimeError(
                    'Cannot register two equalized_lr hooks on the same '
                    f'parameter {name} in {module} module.')
        fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul)
        weight = module._parameters[name]
        # Move the real parameter to '<name>_orig'; the scaled tensor is
        # exposed under the original name by the pre-hook.
        delattr(module, name)
        module.register_parameter(name + '_orig', weight)
        # We still need to assign weight back as fn.name because all sorts of
        # things may assume that it exists, e.g., when initializing weights.
        # However, we can't directly assign as it could be an nn.Parameter and
        # gets added as a parameter. Instead, we register weight.data as a
        # plain attribute.
        setattr(module, name, weight.data)
        module.register_forward_pre_hook(fn)
        # TODO: register load state dict hook
        return fn
def equalized_lr(module, name='weight', gain=2**0.5, mode='fan_in', lr_mul=1.):
    r"""Equalized Learning Rate.

    This trick is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    The general idea is to dynamically rescale the weight in training instead
    of in initializing so that the variance of the responses in each layer is
    guaranteed with some statistical properties.

    Note that this function is always combined with a convolution module which
    is initialized with :math:`\mathcal{N}(0, 1)`.

    Args:
        module (nn.Module): Module to be wrapped.
        name (str | optional): The name of weights. Defaults to 'weight'.
        gain (float, optional): Gain factor applied to the rescaled weight.
            Defaults to ``2**0.5``.
        mode (str, optional): The mode of computing ``fan`` which is the
            same as ``kaiming_init`` in pytorch. You can choose one from
            ['fan_in', 'fan_out']. Defaults to 'fan_in'.
        lr_mul (float, optional): Learning-rate multiplier. Defaults to 1.

    Returns:
        nn.Module: Module that is registered with equalized lr hook.
    """
    EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul)
    return module
def pixel_norm(x, eps=1e-6):
    """Pixel Normalization.

    This normalization is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        x (torch.Tensor): Tensor to be normalized; normalized along dim 1.
        eps (float, optional): Epsilon to avoid dividing zero.
            Defaults to 1e-6.

    Returns:
        torch.Tensor: Normalized tensor.
    """
    # Feature-detect ``torch.linalg.norm`` (added in torch 1.7) instead of
    # comparing version strings: the original lexicographic comparison
    # ``torch.__version__ >= '1.7.0'`` is wrong for torch >= 1.10
    # ('1.10.0' < '1.7.0' as strings), silently selecting the legacy path.
    if hasattr(torch, 'linalg') and hasattr(torch.linalg, 'norm'):
        norm = torch.linalg.norm(x, ord=2, dim=1, keepdim=True)
    # support older pytorch version
    else:
        norm = torch.norm(x, p=2, dim=1, keepdim=True)
    # Divide by sqrt(C) so the scale is the RMS over dim 1, not the raw norm.
    norm = norm / torch.sqrt(torch.tensor(x.shape[1]).to(x))
    return x / (norm + eps)
@MODULES.register_module()
@NORM_LAYERS.register_module()
class PixelNorm(nn.Module):
    """Pixel Normalization.

    This module is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Args:
        in_channels (int | None, optional): Not used by this layer; accepted
            for interface compatibility with the norm-layer builder, which
            passes a channel count. Defaults to None.
        eps (float, optional): Epsilon value. Defaults to 1e-6.
    """
    # Abbreviation used by mmcv when naming norm layers built from config.
    _abbr_ = 'pn'

    def __init__(self, in_channels=None, eps=1e-6):
        super().__init__()
        self.eps = eps

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): Tensor to be normalized.

        Returns:
            torch.Tensor: Normalized tensor.
        """
        return pixel_norm(x, self.eps)
@PLUGIN_LAYERS.register_module()
class EqualizedLRConvModule(ConvModule):
    r"""Equalized LR ConvModule.

    In this module, we inherit default ``mmcv.cnn.ConvModule`` and adopt
    equalized lr in convolution. The equalized learning rate is proposed in:
    Progressive Growing of GANs for Improved Quality, Stability, and Variation

    Note that, the initialization of ``self.conv`` will be overwritten as
    :math:`\mathcal{N}(0, 1)`.

    Args:
        equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``.
            If ``None``, equalized learning rate is ignored. Defaults to
            dict(mode='fan_in').
    """

    def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs):
        super().__init__(*args, **kwargs)
        self.with_equalized_lr = equalized_lr_cfg is not None
        if self.with_equalized_lr:
            # Wrap the conv first, then re-init: equalized_lr moves the real
            # parameter to 'weight_orig', which normal_init then targets.
            self.conv = equalized_lr(self.conv, **equalized_lr_cfg)
            # initialize the conv weight with standard Gaussian noise.
            self._init_conv_weights()

    def _init_conv_weights(self):
        """Initialize conv weights as described in PGGAN (N(0, 1))."""
        normal_init(self.conv)
@PLUGIN_LAYERS.register_module()
class EqualizedLRConvUpModule(EqualizedLRConvModule):
    r"""Equalized LR (Upsample + Conv) Module.

    In this module, we inherit ``EqualizedLRConvModule`` and adopt
    upsampling before convolution. As for upsampling, in addition to the
    sampling layer in MMCV, we also offer the "fused_nn" type. "fused_nn"
    denotes fusing upsampling and convolution. The fusion is modified from
    the official Tensorflow implementation in:
    https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L86

    Args:
        upsample (dict | None, optional): Config for upsampling operation. If
            ``None``, upsampling is ignored. If you need a faster fused version as
            the official PGGAN in Tensorflow, you should set it as
            ``dict(type='fused_nn')``. Defaults to
            ``dict(type='nearest', scale_factor=2)``.
    """

    def __init__(self,
                 *args,
                 upsample=dict(type='nearest', scale_factor=2),
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.with_upsample = upsample is not None
        if self.with_upsample:
            if upsample.get('type') == 'fused_nn':
                # Fused mode: no explicit upsample layer. A forward pre-hook
                # rewrites the transposed-conv weight so the deconvolution
                # itself performs the upsampling.
                assert isinstance(self.conv, nn.ConvTranspose2d)
                self.conv.register_forward_pre_hook(
                    EqualizedLRConvUpModule.fused_nn_hook)
            else:
                self.upsample_layer = build_upsample_layer(upsample)

    def forward(self, x, **kwargs):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        # Only present when a non-fused upsample config was given.
        if hasattr(self, 'upsample_layer'):
            x = self.upsample_layer(x)
        return super().forward(x, **kwargs)

    @staticmethod
    def fused_nn_hook(module, inputs):
        """Standard interface for forward pre hooks.

        Pads the weight and sums the four shifted copies so the transposed
        convolution fuses nearest-neighbour upsampling into its kernel.
        """
        weight = module.weight
        # pad the last two dimensions
        weight = F.pad(weight, (1, 1, 1, 1))
        weight = weight[..., 1:, 1:] + weight[..., 1:, :-1] + weight[
            ..., :-1, 1:] + weight[..., :-1, :-1]
        # Valid only because equalized_lr replaced ``weight`` with a plain
        # attribute (assigning a Tensor to an nn.Parameter slot would raise).
        module.weight = weight
@PLUGIN_LAYERS.register_module()
class EqualizedLRConvDownModule(EqualizedLRConvModule):
    r"""Equalized LR (Conv + Downsample) Module.

    In this module, we inherit ``EqualizedLRConvModule`` and adopt
    downsampling after convolution. As for downsampling, we provide two modes
    of "avgpool" and "fused_pool". "avgpool" denotes the commonly used average
    pooling operation, while "fused_pool" represents fusing downsampling and
    convolution. The fusion is modified from the official Tensorflow
    implementation in:
    https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L109

    Args:
        downsample (dict | callable | None, optional): Config for downsampling
            operation. If ``None``, downsampling is ignored. A callable is
            used directly as the downsampling operation. Currently, we
            support the types of ["avgpool", "fused_pool"]. Defaults to
            dict(type='fused_pool').
    """

    def __init__(self, *args, downsample=dict(type='fused_pool'), **kwargs):
        super().__init__(*args, **kwargs)
        self.with_downsample = downsample is not None
        if self.with_downsample:
            if callable(downsample):
                # Bug fix: check the callable case *before* treating
                # ``downsample`` as a config dict. The original popped
                # 'type' first, which raised AttributeError for callables
                # and made this branch unreachable.
                self.downsample = downsample
            else:
                # Copy so the (shared, mutable default) config dict is not
                # mutated by the pop below.
                downsample_cfg = deepcopy(downsample)
                type_ = downsample_cfg.pop('type')
                if type_ == 'avgpool':
                    self.downsample = nn.AvgPool2d(2, 2)
                elif type_ == 'fused_pool':
                    # Fuse 2x2 average pooling into the conv weight via a
                    # forward pre-hook; no separate downsample layer needed.
                    self.conv.register_forward_pre_hook(
                        EqualizedLRConvDownModule.fused_avgpool_hook)
                else:
                    raise NotImplementedError(
                        'Currently, we only support ["avgpool", "fused_pool"] as '
                        f'the type of downsample, but got {type_} instead.')

    def forward(self, x, **kwargs):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        x = super().forward(x, **kwargs)
        # Only present for 'avgpool' or a user-supplied callable.
        if hasattr(self, 'downsample'):
            x = self.downsample(x)
        return x

    @staticmethod
    def fused_avgpool_hook(module, inputs):
        """Standard interface for forward pre hooks.

        Pads the conv weight and averages the four shifted copies so the
        convolution itself performs a fused 2x2 average-pool downsample.
        """
        weight = module.weight
        # pad the last two dimensions
        weight = F.pad(weight, (1, 1, 1, 1))
        weight = (weight[..., 1:, 1:] + weight[..., 1:, :-1] +
                  weight[..., :-1, 1:] + weight[..., :-1, :-1]) * 0.25
        # Valid only because equalized_lr replaced ``weight`` with a plain
        # attribute (assigning a Tensor to an nn.Parameter slot would raise).
        module.weight = weight
@PLUGIN_LAYERS.register_module()
class EqualizedLRLinearModule(nn.Linear):
r"""Equalized LR LinearModule.
In this module, we adopt equalized lr in ``nn.Linear``. The equalized
learning rate is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Note that, the initialization of ``self.weight`` will be overwritten as
:math:`\mathcal{N}(0, 1)`.
Args:
equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``.
If ``None``, | |
<filename>script-InvestmentFundamentalsAndDataAnalytics.py
# one stock -> simple return is common but not for many
# $$
# \frac{P_1 - P_0}{P_0} = \frac{P_1}{P_0} - 1
# $$
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
'''from AssetTypes.BaseAsset import BaseAsset
from AssetTypes.EquityShare import EquityShare
from AssetTypes.GovernmentBond import GovernmentBond'''
# Download Procter & Gamble daily prices (Yahoo Finance) from 1995 onwards.
PG = wb.DataReader('PG', data_source='yahoo', start='1995-1-1')
# with iex key`
# PG = wb.DataReader('PG', data_source='iex', start='2015-1-1')
# csv
# PG = pd.read_csv('Section-11_PG_1995-03_23_2017.csv')
# PG = PG.set_index('Date')
# calculate simple return: P_t / P_(t-1) - 1
PG['simple_return'] = (PG['Adj Close'] / PG['Adj Close'].shift(1)) - 1
print(PG['simple_return'])
# plot simple return
PG['simple_return'].plot(figsize=(8, 5))
# plt.show()
# Calculate the simple average daily return.
avg_returns_d = PG['simple_return'].mean()
# Estimate the simple average annual return (~250 trading days per year).
avg_returns_a = PG['simple_return'].mean() * 250
# Print the annual return as a percentage (rounded to 5 decimals before the
# *100 conversion, i.e. 3 decimal places of the percentage).
print (str(round(avg_returns_a, 5) * 100) + ' %')
# calculate log return: ln(P_t / P_(t-1))
PG['log_return'] = np.log(PG['Adj Close'] / PG['Adj Close'].shift(1))
print (PG['log_return'])
# plot log return
PG['log_return'].plot(figsize=(8, 5))
# plt.show()
# Calculate the log average daily return.
avg_returns_d = PG['log_return'].mean()
# Estimate the log average annual return.
avg_returns_a = PG['log_return'].mean() * 250
# Print the log annual return as a percentage (same rounding as above).
print (str(round(avg_returns_a, 5) * 100) + ' %')
# --- Quandl / CSV / Excel import-export demos ---
import quandl
# Fetch US GDP from FRED via Quandl.
mydata_01 = quandl.get("FRED/GDP")
mydata_01.tail()
mydata_01.head()
mydata_01.to_csv('Section-10_57-ImportingandOrganizingYourDatainPython-PartIII-Data_01.csv')
# Round-trip: read back with 'Date' as the index ...
mydata_01 = pd.read_csv('Section-10_57-ImportingandOrganizingYourDatainPython-PartIII-Data_01.csv', index_col='Date')
mydata_01.tail()
# ... and again without specifying an index column.
mydata_01 = pd.read_csv('Section-10_57-ImportingandOrganizingYourDatainPython-PartIII-Data_01.csv')
mydata_01.tail()
# NOTE(review): set_index returns a new frame; without assignment (or
# inplace=True) this result is discarded.
mydata_01.set_index('Date')
mydata_01.tail()
mydata_02 = pd.read_csv('Section-10_57-ImportingandOrganizingYourDatainPython-PartIII-Data_02.csv', index_col='Date')
mydata_02.head()
mydata_02.tail()
# Write the same data out as an Excel workbook.
mydata_02.to_excel('Section-10_57-ImportingandOrganizingYourDatainPython-PartIII-Data_02.xlsx')
mydata_02.info()
mydata_03 = pd.read_excel('Section-10_57-ImportingandOrganizingYourDatainPython-PartIII-Data_03.xlsx')
mydata_03.info()
# NOTE(review): likewise discarded — no assignment.
mydata_03.set_index('Year')
mydata_03.info()
########## Return of Indices
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
# Dow Jones, S&P 500, NASDAQ Composite, DAX.
tickers = ['^DJI', '^GSPC', '^IXIC', '^GDAXI']
ind_data = pd.DataFrame()
for t in tickers:
    ind_data[t] = wb.DataReader(t, data_source='yahoo', start='2000-1-1')['Adj Close']
ind_data.head()
ind_data.tail()
# Normalize the data to 100 (base = first row) and plot the results on a graph.
(ind_data / ind_data.iloc[0] * 100).plot(figsize=(15, 6));
plt.show()
# How would you explain the common and the different parts of the behavior of the three indices?
# Obtain the simple returns of the indices.
ind_returns = (ind_data / ind_data.shift(1)) - 1
ind_returns.tail()
# Estimate the average annual return of each index (~250 trading days/year).
annual_ind_returns = ind_returns.mean() * 250
annual_ind_returns
########## Portfolio return and risk (PG vs. BEI.DE)
'''baseAsset: BaseAsset = EquityShare('PG')
print(baseAsset.ShortType)
print(baseAsset.AssetType)
print(baseAsset.AssetName)
print(baseAsset.getSimpleReturn(PG))'''
tickers = ['PG', 'MSFT', 'F', 'GE']
yahoo_df = pd.DataFrame()
iex_df = pd.DataFrame()
for t in tickers:
    yahoo_df[t] = wb.DataReader(t, data_source='yahoo', start='1995-1-1')['Adj Close']
    # iex_df[t] = wb.DataReader(t, data_source='iex', start='2002-1-1')['Close']
# NOTE(review): the downloaded data above is immediately discarded — yahoo_df is
# overwritten by this CSV, which holds PG and BEI.DE (not the tickers above).
yahoo_df = pd.read_csv('Section-12_PG_BEI.DE_2007_2017.csv', index_col='Date')
yahoo_df.tail()
yahoo_df.head()
# newDataFrame.to_csv('Section-10_57-ImportingandOrganizingYourDatainPython-PartIII-example_01.csv')
# newDataFrame.to_excel('Section-10_57-ImportingandOrganizingYourDatainPython-PartIII-example_01.xlsx')'''
yahoo_df.iloc[0]
# Normalize to 100 and plot both price series.
(yahoo_df / yahoo_df.iloc[0] * 100).plot(figsize = (15, 6));
yahoo_df.plot(figsize=(15,6))
yahoo_df.loc['2007-01-03']
yahoo_df.iloc[0]
## Calculating the Return of a Portfolio of Securities
simple_returns = (yahoo_df / yahoo_df.shift(1)) - 1
simple_returns.head()
# Equal weighting over the two securities.
weights = np.array([0.5, 0.5])
np.dot(simple_returns, weights)
simple_returns_anual = simple_returns.mean() * 250
simple_returns_anual
np.dot(simple_returns_anual, weights)
# Portfolio annual return for the 50/50 weighting.
pfolio_1 = str(round(np.dot(simple_returns_anual, weights), 5) * 100) + ' %'
print (pfolio_1)
weights_2 = np.array([0.75, 0.25])
pfolio_2 = str(round(np.dot(simple_returns_anual, weights_2), 5) * 100) + ' %'
print (pfolio_2)
## Calculating the Risk of a Portfolio of Securities Section-11_MSFT_2000_2017.csv
log_returns = np.log(yahoo_df / yahoo_df.shift(1))
log_returns.head()
# PG: mean daily log return
log_returns['PG'].mean()
# PG: annualized mean log return
log_returns['PG'].mean()*250
# PG: daily risk (std of log returns)
log_returns['PG'].std()
# PG: annualized risk (std scales with sqrt of time)
log_returns['PG'].std() * 250 ** 0.5
# BEI.DE: mean daily log return
log_returns['BEI.DE'].mean()
# BEI.DE: annualized mean log return
log_returns['BEI.DE'].mean()*250
# BEI.DE: daily risk
log_returns['BEI.DE'].std()
# BEI.DE: annualized risk
log_returns['BEI.DE'].std() * 250 ** 0.5
# Repeat the process we went through in the lecture for these two stocks. How would you explain the difference between their means and their standard deviations?
log_returns[['PG', 'BEI.DE']].mean() * 250
# Store the volatilities of the two stocks in an array called "vols".
volatilities = log_returns[['PG', 'BEI.DE']].std() * 250 ** 0.5
volatilities
# ## Covariance and Correlation on returns
# \begin{eqnarray*}
# Covariance Matrix: \ \
# \Sigma = \begin{bmatrix}
#         \sigma_{1}^2 \ \sigma_{12} \ \dots \ \sigma_{1I} \\
#         \sigma_{21} \ \sigma_{2}^2 \ \dots \ \sigma_{2I} \\
#         \vdots \ \vdots \ \ddots \ \vdots \\
#         \sigma_{I1} \ \sigma_{I2} \ \dots \ \sigma_{I}^2
#     \end{bmatrix}
# \end{eqnarray*}
# variance on returns
ms_var = log_returns['PG'].var()
ms_var
ms_var_anual = log_returns['PG'].var() * 250
ms_var_anual
pg_var = log_returns['BEI.DE'].var()
pg_var
pg_var_anual = log_returns['BEI.DE'].var() * 250
pg_var_anual
# covariance on returns
cov_matrix = log_returns.cov()
cov_matrix
cov_matrix_anual = log_returns.cov() * 250
cov_matrix_anual
# correlation on returns — dimensionless, so no annualization by 250 needed
corr_matrix = log_returns.corr()
corr_matrix
# ## Calculating Portfolio Risk
# Weigthing scheme:
weights = np.array([0.25, 0.75])
# Portfolio Variance: w' * Sigma * w (annualized covariance matrix)
pfolio_var = np.dot(weights.T, np.dot(log_returns.cov() * 250, weights))
pfolio_var
# Portfolio Volatility: sqrt of the portfolio variance
pfolio_vol = (np.dot(weights.T, np.dot(log_returns.cov() * 250, weights))) ** 0.5
pfolio_vol
print (str(round(pfolio_vol, 5) * 100) + ' %')
# systematic = un diversifiable risk
# unsystematic = diversifiable risk = idiosyncratic -> diversification
## Calculating Diversifiable and Non-Diversifiable Risk of a Portfolio
# Diversifiable Risk: portfolio variance minus the weighted individual variances
ms_var_anual = log_returns['PG'].var() * 250
ms_var_anual
pg_var_anual = log_returns['BEI.DE'].var() * 250
pg_var_anual
diversifable_risk = pfolio_var - (weights[0] ** 2 * ms_var_anual) - (weights[1] ** 2 * pg_var_anual)
diversifable_risk
print (str(round(diversifable_risk*100, 3)) + ' %')
# Non-Diversifiable Risk: computed two equivalent ways as a sanity check
n_dr_1 = pfolio_var - diversifable_risk
n_dr_1
n_dr_2 = (weights[0] ** 2 * ms_var_anual) + (weights[1] ** 2 * pg_var_anual)
n_dr_2
# NOTE(review): exact float equality — may print False due to rounding error.
n_dr_1 == n_dr_2
# --- Univariate OLS regressions (housing prices, IQ test scores) ---
import numpy as np
import pandas as pd
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
data = pd.read_excel('Section-13_Housing.xlsx')
data[['House Price', 'House Size (sq.ft.)']]
# univariate regression: price explained by size
X = data['House Size (sq.ft.)']
Y = data['House Price']
plt.scatter(X,Y)
plt.axis([0, 2500, 0, 1500000])
plt.ylabel('House Price')
plt.xlabel('House Size (sq.ft)')
# linear OLS regression; add_constant supplies the intercept column
X1 = sm.add_constant(X)
reg = sm.OLS(Y, X1).fit()
reg.summary()
### Alpha, Beta, R^2:
slope, intercept, r_value, p_value, std_err = stats.linregress(X,Y)
# Fitted regression line overlaid on the scatter plot.
line = intercept + slope * X
plt.plot(X,line)
print(slope)
print(intercept)
print(r_value)
print(r_value**2)
print(p_value)
print(std_err)
# Same workflow for IQ vs. a test score.
data = pd.read_excel('Section-13_IQ_data.xlsx')
data[['IQ', 'Test 1']]
X = data['Test 1']
Y = data['IQ']
plt.scatter(X,Y)
plt.axis([0, 120, 0, 150])
plt.ylabel('IQ')
plt.xlabel('Test 1')
# linear OLS regression
X1 = sm.add_constant(X)
reg = sm.OLS(Y, X1).fit()
reg.summary()
### Alpha, Beta, R^2:
slope, intercept, r_value, p_value, std_err = stats.linregress(X,Y)
line = intercept + slope * X
plt.plot(X,line)
print(slope)
print(intercept)
print(r_value)
print(r_value**2)
print(p_value)
print(std_err)
####################
# ## Obtaining the Efficient Frontier - Part I
# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*
# We are in the middle of a set of 3 Python lectures that will help you reproduce the Markowitz Efficient Frontier. Let’s split this exercise into 3 parts and cover the first part here.
# Begin by loading data for Walmart and Facebook from the 1st of January 2014 until today.
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
# NOTE(review): get_ipython() only exists inside IPython/Jupyter — this line
# fails when the script is run with plain python.
get_ipython().run_line_magic('matplotlib', 'inline')
assets = ['PG', '^GSPC']
# assets = ['WMT', 'FB']
pf_data = pd.DataFrame()
# for a in assets:
#     pf_data[a] = wb.DataReader(a, data_source = 'yahoo', start = '2010-1-1')['Adj Close']
pf_data = pd.read_csv('Section-14_Markowitz_Data.csv', index_col = 'Date')
# pf_data = pd.read_csv('Section-12_Walmart_FB_2014_2017.csv', index_col='Date')
pf_data.tail()
(pf_data / pf_data.iloc[0] * 100).plot(figsize=(10, 5))
log_returns = np.log(pf_data / pf_data.shift(1))
log_returns.mean() * 250
log_returns.cov() * 250
log_returns.corr()
# In[10]:
# One random portfolio: weights drawn uniformly, normalized to sum to 1.
num_assets = len(assets)
weights = np.random.random(num_assets)
weights /= np.sum(weights)
weights
weights[0] + weights[1]
# Now, estimate the expected Portfolio Return, Variance, and Volatility.
# Expected Portfolio Return:
np.sum(weights * log_returns.mean()) * 250
# Expected Portfolio Variance:
np.dot(weights.T, np.dot(log_returns.cov() * 250, weights))
# Expected Portfolio Volatility:
np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights)))
# ***
# The rest of this exercise will be a reproduction of what we did in the previous video.
# 1) Create two empty lists. Name them pf_returns and pf_volatilites.
pfolio_returns = []
pfolio_volatilities = []
# 2) Create a loop with 1,000 iterations that will generate random weights, summing to 1, and will append the obtained values for the portfolio returns and the portfolio volatilities to pf_returns and pf_volatilities, respectively.
for x in range (1000):
    weights = np.random.random(num_assets)
    weights /= np.sum(weights)
    pfolio_returns.append(np.sum(weights * log_returns.mean()) * 250)
    pfolio_volatilities.append(np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights))))
pfolio_returns, pfolio_volatilities
# 3) Transform the obtained lists into NumPy arrays and reassign them to pf_returns and pf_volatilites. Once you have done that, the two objects will be NumPy arrays.
pfolio_returns = np.array(pfolio_returns)
pfolio_volatilities = np.array(pfolio_volatilities)
pfolio_returns, pfolio_volatilities
# In[21]:
portfolios = pd.DataFrame({'Return': pfolio_returns, 'Volatility': pfolio_volatilities})
portfolios.head()
portfolios.tail()
# In[24]:
# Scatter of the 1,000 simulated portfolios — the Markowitz bullet.
portfolios.plot(x='Volatility', y='Return', kind='scatter', figsize=(10, 6));
plt.xlabel('Expected Volatility')
plt.ylabel('Expected Return')
######### Section-14_87-ObtainingtheEfficientFrontier-PartIII-Solution_CSV
# Same simulation repeated for Walmart / Facebook from CSV data.
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
assets = ['WMT', 'FB']
pf_data = pd.read_csv('Section-14_Walmart_FB_2014_2017.csv', index_col='Date')
# pf_data = pd.DataFrame()
# for a in assets:
#     pf_data[a] = wb.DataReader(a, data_source = 'yahoo', start = '2014-1-1')['Adj Close']
pf_data.tail()
(pf_data / pf_data.iloc[0] * 100).plot(figsize=(10, 5))
log_returns = np.log(pf_data / pf_data.shift(1))
log_returns.mean() * 250
log_returns.cov() * 250
log_returns.corr()
# In[10]:
num_assets = len(assets)
weights = np.random.random(num_assets)
weights /= np.sum(weights)
weights
weights[0] + weights[1]
# Now, estimate the expected Portfolio Return, Variance, and Volatility.
# Expected Portfolio Return:
np.sum(weights * log_returns.mean()) * 250
# Expected Portfolio Variance:
np.dot(weights.T, np.dot(log_returns.cov() * 250, weights))
# Expected Portfolio Volatility:
np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights)))
# The rest of this exercise will be a reproduction of what we did in the previous video.
# 1) Create two empty lists. Name them pf_returns and pf_volatilites.
pf_returns = []
pf_volatilities = []
# 2) Create a loop with 1,000 iterations that will generate random weights, summing to 1, and will append the obtained values for the portfolio returns and the portfolio volatilities to pf_returns and pf_volatilities, respectively.
for x in range (1000):
    weights = np.random.random(num_assets)
    weights /= np.sum(weights)
    pf_returns.append(np.sum(weights * log_returns.mean()) * 250)
    pf_volatilities.append(np.sqrt(np.dot(weights.T,np.dot(log_returns.cov() * 250, weights))))
pf_returns, pf_volatilities
# 3) Transform the obtained lists into NumPy arrays and reassign them to pf_returns and pf_volatilites. Once you have done that, the two objects will be NumPy arrays.
# In[8]:
pf_returns = np.array(pf_returns)
pf_volatilities = np.array(pf_volatilities)
pf_returns, pf_volatilities
# Now, create a dictionary, called portfolios, whose keys are the strings “Return” and “Volatility” and whose values are the NumPy arrays pf_returns and pf_volatilities.
# In[9]:
portfolios = pd.DataFrame({'Return': pf_returns, 'Volatility': | |
<filename>Arkanoid.py
#!/usr/bin/python3.9
# Setup Python ----------------------------------------------- #
import sys
import os
import pygame
from random import randrange as rnd
import time
import threading
from threading import Timer
import pyperclip
import twitch_bot
import Audio_assistant as aa
import webbrowser
##f = open('config.py', 'w')
##f.write('HOST = "irc.twitch.tv"\n')
##f.write('PORT = 6667\n')
##f.write('NICK = "bot"\n')
##f.write('RATE = (20/30)\n')
##f.write('oplist = {"username":[""]}\n')
##f.close()
def resource_path(relative_path):
    """Resolve *relative_path* against the application's resource directory.

    When running from a PyInstaller bundle, resources are unpacked into the
    temporary directory exposed as ``sys._MEIPASS``; otherwise fall back to
    the current working directory.
    """
    base_path = getattr(sys, '_MEIPASS', os.path.abspath('.'))
    return os.path.join(base_path, relative_path)
# window icon (must be set before the display is created)
icon_win = resource_path('resource/vdk.bmp')
pygame.display.set_icon(pygame.image.load(icon_win))
WIDTH, HEIGHT = 1280, 720
# Fonts and pre-rendered text surfaces
pygame.font.init()
tf2build_font1 = resource_path('resource/tf2build.ttf')
tf2secondary_font1 = resource_path('resource/tf2secondary.ttf')
smallfon = pygame.font.Font(tf2build_font1, 18)
myfont = pygame.font.Font(tf2build_font1, 16)
font = pygame.font.Font(tf2build_font1, 30)
font2 = pygame.font.Font(tf2build_font1, 50)
# (Russian original of the prompt below: "Write in chat: !left or !right to start the game!")
# textsurface = myfont.render('Пишите в чат: !left или !right чтобы начать игру!', False, (255, 255, 0))
textsurface2 = myfont.render('Write to chat: !Left or !Right to start the game!', False, (255, 0, 255))
# Captures chater/message at import time only; live values are rendered by show_command().
textpaddle = myfont.render(f'{twitch_bot.chater}: {twitch_bot.message}', False, (255, 255, 80))
# paddle settings
# NOTE(review): these module-level paddle/ball/block objects are shadowed by
# identical locals inside game() — confirm whether they are still needed here.
paddle_w = 500
paddle_h = 50
paddle_speed = 15
paddle = pygame.Rect(WIDTH // 2 - paddle_w // 2, HEIGHT - paddle_h, paddle_w, paddle_h)
# ball settings
ball_radius = 20
ball_speed = 1
ball_rect = int(ball_radius * 2 ** 0.5)
ball = pygame.Rect(rnd(ball_rect, WIDTH - ball_rect), HEIGHT // 2, ball_rect, ball_rect)
dx, dy = 1, -1
# blocks settings: 10 columns x 2 rows of bricks with random colors
block_list = [pygame.Rect(50 + 120 * i, 10 + 70 * j, 100, 50) for i in range(10) for j in range(2)]
color_list = [(rnd(30, 256), rnd(30, 200), rnd(30, 256)) for i in range(10) for j in range(2)]
pygame.init()
sc = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
# background image
background = resource_path('resource/screen.png')
img = pygame.image.load(background).convert()
def detect_collision(dx, dy, ball, rect):
    """Return the ball's new (dx, dy) direction after it hits *rect*.

    Compares the penetration depth along each axis to decide whether the
    ball struck near a corner (reflect both axes), a horizontal face
    (reflect dy), or a vertical face (reflect dx).
    """
    # Depth of overlap along each axis, measured from the side the ball
    # is travelling towards.
    delta_x = ball.right - rect.left if dx > 0 else rect.right - ball.left
    delta_y = ball.bottom - rect.top if dy > 0 else rect.bottom - ball.top
    if abs(delta_x - delta_y) < 10:
        # Near-corner hit: reflect on both axes.
        return -dx, -dy
    if delta_x > delta_y:
        # Shallower vertical overlap: hit a top/bottom face.
        return dx, -dy
    if delta_y > delta_x:
        # Shallower horizontal overlap: hit a left/right face.
        return -dx, dy
    return dx, dy
# Notification sound played when a chat command arrives
pygame.mixer.init()
sms_sound = resource_path("sound/sms.wav")
sms = pygame.mixer.Sound(sms_sound)
def show_command(x, y, chater, message):
    """Render the latest chat command as 'chater: message' in cyan at (x, y)."""
    rendered = myfont.render(f'{chater}: {message}', True, (0, 255, 255))
    sc.blit(rendered, (x, y))
def show_chat(x, y, chater, message):
    """Render one chat history line as 'chater: message' in white at (x, y).

    Fix: removed the unused ``global count_chat`` declaration — the function
    never read or wrote ``count_chat``, so the statement was dead code.
    """
    rendered = smallfon.render(f'{chater}: {message}', True, (255, 255, 255))
    sc.blit(rendered, (x, y))
# Activate background modules: Twitch chat bot and audio assistant,
# each on its own daemon-less thread.
thread1 = threading.Thread(target=twitch_bot.run, args=())
thread1.start()
thread2 = threading.Thread(target=aa.run, args=())
thread2.start()
# Setup pygame/window ---------------------------------------- #
mainClock = pygame.time.Clock()
from pygame.locals import *
pygame.init()
pygame.display.set_caption('Arkanoid for Twitch chat play')
# NOTE(review): set_mode was already called above for `sc`; pygame returns the
# same display surface, so `screen` and `sc` refer to one window — confirm.
screen = pygame.display.set_mode((1280, 720),0,32)
# Colors
white = (255, 255, 255)
green = (0, 255, 0)
blue = (0, 0, 255)
red = (255, 0, 0)
def draw_text(text, font, color, surface, x, y):
    """Render *text* with *font* in *color* and blit it onto *surface*
    with its top-left corner at (x, y)."""
    rendered = font.render(text, 1, color)
    bounds = rendered.get_rect()
    bounds.topleft = (x, y)
    surface.blit(rendered, bounds)
# Shared menu state: set True for one frame on left-mouse-button-down.
click = False
# Placeholder label for the OAuth password entry screen.
text_hind = 'OAuth Password'
def main_menu():
    """Run the main-menu loop: draw buttons (Game / Chanel / Password / link)
    and dispatch to the matching screen when one is clicked.

    Uses the module-global ``click`` flag, which is set by the event loop
    below and consumed at most once per frame.
    """
    while True:
        global click
        screen.fill((0, 255, 0))
        draw_text('Menu', font, (0, 0, 255), screen, 80, 30)
        mx, my = pygame.mouse.get_pos()
        # Hit boxes for the four menu entries.
        button_1 = pygame.Rect(50, 100, 200, 50)
        button_2 = pygame.Rect(50, 200, 250, 50)
        button_3 = pygame.Rect(50, 300, 300, 50)
        button_4 = pygame.Rect(50, 500, 400, 50)
        if button_1.collidepoint((mx, my)):
            if click:
                game()
        if button_2.collidepoint((mx, my)):
            if click:
                chanel()
        if button_3.collidepoint((mx, my)):
            if click:
                # NOTE(review): password() is not defined in this file's visible
                # portion — confirm it exists elsewhere in the module.
                password()
        if button_4.collidepoint((mx, my)):
            if click:
                print('Go to my chanel')
                webbrowser.open('https://www.twitch.tv/vdk45')
        pygame.draw.rect(screen, (255, 0, 0), button_1)
        pygame.draw.rect(screen, (255, 0, 0), button_2)
        pygame.draw.rect(screen, (255, 0, 0), button_3)
        pygame.draw.rect(screen, (255, 0, 0), button_4)
        click = False
        for event in pygame.event.get():
            if event.type == QUIT:
                twitch_bot.loop_true = False # Stop twitch bot
                twitch_bot.send_mess('ARKANOID has stopped')
                aa.assis = False # Audio assis stop
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    twitch_bot.loop_true = False # Stop twitch bot
                    twitch_bot.send_mess('ARKANOID has stopped')
                    aa.assis = False # Audio assis stop
                    pygame.quit()
                    sys.exit()
            if event.type == MOUSEBUTTONDOWN:
                if event.button == 1:
                    click = True
        # Button captions drawn on top of the red rectangles.
        draw_text('Game', font, (255, 255, 255), screen, 80, 115)
        draw_text('Chanel', font, (255, 255, 255), screen, 80, 215)
        draw_text('Password', font, (255, 255, 255), screen, 80, 315)
        draw_text('twitch.tv/vdk45', font, (255, 255, 255), screen, 80, 515)
        pygame.display.update()
        mainClock.tick(60)
def game():
    """Run one Arkanoid session until Escape is pressed.

    The paddle is driven both by the local keyboard (arrow keys) and by
    Twitch chat commands (!left/!l, !right/!r) read from the twitch_bot
    module's globals. After a win or game-over the ball is parked on the
    paddle (``reset``) and relaunched by the next movement input.
    """
    # paddle settings
    paddle_w = 500
    paddle_h = 50
    paddle_speed = 15
    paddle = pygame.Rect(WIDTH // 2 - paddle_w // 2, HEIGHT - paddle_h, paddle_w, paddle_h)
    # ball settings
    ball_radius = 20
    ball_speed = 1
    ball_rect = int(ball_radius * 2 ** 0.5)
    ball = pygame.Rect(rnd(ball_rect, WIDTH - ball_rect), HEIGHT // 2, ball_rect, ball_rect)
    dx, dy = 1, -1
    # blocks settings: 10 x 2 bricks with random colors
    block_list = [pygame.Rect(50 + 120 * i, 10 + 70 * j, 100, 50) for i in range(10) for j in range(2)]
    color_list = [(rnd(30, 256), rnd(30, 200), rnd(30, 256)) for i in range(10) for j in range(2)]
    reset = False
    running = True
    while running:
        screen.fill((0,255,0))
        for event in pygame.event.get():
            if event.type == QUIT:
                twitch_bot.loop_true = False # Stop twitch bot
                twitch_bot.send_mess('ARKANOID has stopped')
                aa.assis = False # Audio assis stop
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    # Leave the game loop and return to the menu.
                    running = False
        # drawing world (comprehension used for its side effect of drawing)
        [pygame.draw.rect(sc, color_list[color], block) for color, block in enumerate(block_list)]
        pygame.draw.rect(sc, pygame.Color('darkorange'), paddle)
        pygame.draw.circle(sc, pygame.Color('white'), ball.center, ball_radius)
        # ball movement
        ball.x += ball_speed * dx
        ball.y += ball_speed * dy
        # collision left right
        if ball.centerx < ball_radius or ball.centerx > WIDTH - ball_radius:
            dx = -dx
        # collision top
        if ball.centery < ball_radius:
            dy = -dy
        # collision paddle (only when the ball is moving downwards)
        if ball.colliderect(paddle) and dy > 0:
            dx, dy = detect_collision(dx, dy, ball, paddle)
        # collision blocks: remove the hit brick and reflect the ball
        hit_index = ball.collidelist(block_list)
        if hit_index != -1:
            hit_rect = block_list.pop(hit_index)
            hit_color = color_list.pop(hit_index)
            dx, dy = detect_collision(dx, dy, ball, hit_rect)
        # win, game over: park the ball on the paddle and rebuild the wall
        if ball.bottom > HEIGHT:
            print('GAME OVER!')
            reset = True
            ball.x = paddle.left + paddle_w // 2 - 25
            ball.y = 636
            block_list.clear()
            block_list = [pygame.Rect(50 + 120 * i, 10 + 70 * j, 100, 50) for i in range(10) for j in range(2)]
            color_list = [(rnd(30, 256), rnd(30, 200), rnd(30, 256)) for i in range(10) for j in range(2)]
            dx, dy = 0, 0
            #exit()
        elif not len(block_list):
            print('WIN!!!')
            reset = True
            ball.x = paddle.left + paddle_w // 2 - 25
            ball.y = 636
            block_list.clear()
            block_list = [pygame.Rect(50 + 120 * i, 10 + 70 * j, 100, 50) for i in range(10) for j in range(2)]
            color_list = [(rnd(30, 256), rnd(30, 200), rnd(30, 256)) for i in range(10) for j in range(2)]
            dx, dy = 0, 0
            #exit()
        # control (keyboard); after a reset, the first move relaunches the ball
        key = pygame.key.get_pressed()
        if key[pygame.K_LEFT] and paddle.left > 0:
            paddle.left -= paddle_speed
            if reset == True:
                dx, dy = 1, -1
                reset = False
        if key[pygame.K_RIGHT] and paddle.right < WIDTH:
            paddle.right += paddle_speed
            if reset == True:
                dx, dy = -1, -1
                reset = False
        if key[pygame.K_BACKSPACE]:
            # NOTE(review): `loop` is assigned but never read — likely meant
            # `running = False`; confirm intended behavior of Backspace.
            loop = False
            twitch_bot.loop_true = False # Stop twitch bot
            aa.assis = False # Audio assis stop
        # Twitch control
        if twitch_bot.command == '!left' and paddle.left > 0 or twitch_bot.command == '!l' and paddle.left > 0:
            paddle.left -= paddle_speed
            if reset == True:
                dx, dy = 1, -1
                reset = False
        # NOTE(review): relaunch direction here is (1, -1) while keyboard-right
        # uses (-1, -1) — confirm whether the asymmetry is intentional.
        if twitch_bot.command == '!right' and paddle.right < WIDTH or twitch_bot.command == '!r' and paddle.right < WIDTH:
            paddle.left += paddle_speed
            if reset == True:
                dx, dy = 1, -1
                reset = False
        try:
            if twitch_bot.sound == True :
                sms.play()
        except IndexError:
            continue
        # SHOW COMMANDS at paddle
        if reset == True:
            #sc.blit(textsurface,(50,570))
            sc.blit(textsurface2,(paddle.left + 20,700))
        if twitch_bot.message[:1] == '!' and twitch_bot.chater != 'nightbot' and len(twitch_bot.command) <= 10:
            show_command(paddle.left + 170, 680, twitch_bot.chater, twitch_bot.message)
        # Last four chat lines (newest at the bottom).
        show_chat(15, 390, twitch_bot.lst_chat[6], twitch_bot.lst_chat[7])
        show_chat(15, 410, twitch_bot.lst_chat[4], twitch_bot.lst_chat[5])
        show_chat(15, 430, twitch_bot.lst_chat[2], twitch_bot.lst_chat[3])
        show_chat(15, 450, twitch_bot.lst_chat[0], twitch_bot.lst_chat[1])
        # update screen
        pygame.display.flip()
##        pygame.display.update()
        mainClock.tick(60)
    # NOTE(review): these post-loop reads are discarded — likely leftovers.
    keys = pygame.key.get_pressed()
    keys_pres = pygame.key.get_pressed()
    # pygame.time.delay(50)
def chanel():
running = True
font = pygame.font.Font(tf2build_font1, 30)
clock = pygame.time.Clock()
input_box = pygame.Rect(100, 100, 140, 32)
color_inactive = pygame.Color('lightskyblue3')
color_active = pygame.Color('dodgerblue2')
color = color_inactive
active = False
text = ''
while running:
screen.fill((0,255,0))
for event in pygame.event.get():
if event.type == QUIT:
twitch_bot.loop_true = False # Stop twitch bot
twitch_bot.send_mess('ARKANOID has stopped')
aa.assis = False # Audio assis stop
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
# If the user clicked on the input_box rect.
if input_box.collidepoint(event.pos):
# Toggle the active variable.
active = not active
else:
active = False
# Change the current color of the input | |
<gh_stars>1-10
#!/usr/bin/env python
# Stock Python modules.
import os
import os.path
import shutil
import sys
import tempfile
# cdf extension modules.
from .. import interface as cdf
from .. import internal
from .. import typing
###
# TODO: The complex interactions of the various strategies are not well
# documented and are in fact quite fragile. The system evolved as I
# tried to encode all the information about when various ISTP attributes
# are actually required and what they are required to be. A more studied
# approach to encoding this data might add clarity.
###
# This error indicates that user input is required to fill in some
# missing data.
class InferenceError(Exception):
    """User input is required to fill in missing attribute data."""
    pass
# This error indicates that user input is required to resolve
# ambiguous contradictory data.
class RedundancyError(Exception):
    """User input is required to resolve ambiguous or contradictory data."""
    pass
# This error is for internal use, and indicates to the autofilling
# function that there is at this moment insufficient data to
# guess the right value. There may be sufficient data if we retry
# later.
class _MissingPrerequisite(Exception):
    """Internal: not enough data yet to infer a value; retry later."""
    pass
# This error is for internal use, and indicates to the autofilling
# function that although this var is listed as being potentially
# required, we have determined that it is not.
class _NotRequired(Exception):
    """Internal: this attribute turned out not to be required."""
    pass
# This error is for internal use, and is not an error at all. It
# indicates to the autofilling function that this var has been
# inferred correctly and that its requirement is met.
class _InferenceSuccessful(Exception):
    """Internal: the attribute was inferred and its requirement is met."""
    pass
class fillStrategy:
    """Base class for ISTP attribute autofill strategies.

    Subclasses implement ``__call__(archive, attr, var=None)`` and either
    fill the attribute, raise one of the inference exceptions, or return.
    """
    def __call__(self, archive, attr, var = None):
        # The base class performs no inference; subclasses must override.
        return NotImplemented
class userInput(fillStrategy):
    """Strategy for attributes only the user can supply.

    If the attribute is absent (on the variable when *var* is given,
    otherwise on the archive) raise InferenceError to demand user input;
    if it is already present, do nothing.
    """
    def __call__(self, archive, attr, var = None):
        attrs = archive[var].attributes if var is not None else archive.attributes
        if attr not in attrs:
            raise InferenceError
class defaultValue(fillStrategy):
    """Fill a missing attribute with a fixed default value.

    An attribute that is already present is never overwritten.
    """
    def __init__(self, value):
        self._value = value
    def __call__(self, archive, attr, var = None):
        attrs = archive[var].attributes if var is not None else archive.attributes
        if attr not in attrs:
            attrs[attr] = self._value
class autoIncrement(fillStrategy):
    """Seed a missing attribute with an initial value, or bump an existing
    one by a fixed step (e.g. Data_version).
    """
    def __init__(self, value, step = 1):
        self._value = value
        self._step = step
    def __call__(self, archive, attr, var = None):
        attrs = archive[var].attributes if var is not None else archive.attributes
        if attr in attrs:
            attrs[attr] += self._step
        else:
            attrs[attr] = self._value
class selectFromList(fillStrategy):
    """Fill a missing attribute from a fixed set of allowed choices.

    Fix: the previous ``__call__`` body was copy-pasted from autoIncrement
    and referenced ``self._step``/``self._value``, which this class never
    defines — every call raised AttributeError, and ``choices``/``default``
    were stored but never used.

    Behavior: if the attribute is absent, assign ``default`` when one was
    given, otherwise raise InferenceError so the user picks a choice. If
    the attribute is present but not one of the allowed choices, raise
    InferenceError so the user resolves it.
    NOTE(review): validation of an existing value against ``choices`` is an
    interpretation of the class's intent — confirm against the ISTP rules.
    """
    def __init__(self, choices, default = None):
        self._choices = choices
        self._default = default
    def __call__(self, archive, attr, var = None):
        attrs = archive[var].attributes if var is not None else archive.attributes
        if attr not in attrs:
            if self._default is not None:
                attrs[attr] = self._default
            else:
                # No default available: the user must choose.
                raise InferenceError
        elif attrs[attr] not in self._choices:
            # Present but not an allowed value: the user must resolve it.
            raise InferenceError
class archiveName(fillStrategy):
    """Default a missing attribute to the archive's most recent file name
    (e.g. Logical_file_id)."""
    def __call__(self, archive, attr, var = None):
        name = archive._filenames[-1]
        attrs = archive[var].attributes if var is not None else archive.attributes
        if attr not in attrs:
            attrs[attr] = name
class varName(fillStrategy):
    """Default a missing per-variable attribute to the variable's own name
    (e.g. FIELDNAM, LABLAXIS)."""
    def __call__(self, archive, attr, var):
        attrs = archive[var].attributes
        if attr not in attrs:
            attrs[attr] = var
class primaryDataOnly(fillStrategy):
    """Require the attribute only for primary-data variables.

    With the attribute already set, nothing happens. Otherwise the outcome
    depends on VAR_TYPE: 'data' demands user input (InferenceError), an
    unset VAR_TYPE defers the decision (_MissingPrerequisite), and anything
    else waives the requirement (_NotRequired).
    """
    def __call__(self, archive, attr, var):
        if attr in archive[var].attributes:
            return
        var_type = archive[var].attributes.get('VAR_TYPE', None)
        if var_type is None:
            raise _MissingPrerequisite
        if var_type == 'data':
            raise InferenceError
        raise _NotRequired
class fillValStrategy(fillStrategy):
    """Default a missing FILLVAL to the conventional fill value for the
    variable's CDF data type.

    NOTE(review): unlike formatStrategy/validminStrategy/validmaxStrategy,
    this strategy does not raise _InferenceSuccessful after assigning —
    confirm whether that asymmetry is intentional.
    """
    # CDF type code -> conventional fill value.
    fillvals = {
        internal.CDF_CHAR: '.',
        internal.CDF_BYTE: -128,
        internal.CDF_UINT1: 255,
        internal.CDF_UINT2: 65535,
        internal.CDF_UINT4: 4294967295,
        internal.CDF_INT1: -128,
        internal.CDF_INT2: -32768,
        internal.CDF_INT4: -2147483648,
        internal.CDF_REAL4: -1.0*10**31,
        internal.CDF_REAL8: -1.0*10**31,
        internal.CDF_EPOCH: '31-Dec-9999 23:59:59.999',
        internal.CDF_EPOCH16: '31-Dec-9999 23:59:59.999',
    }
    def __call__(self, archive, attr, var):
        # Map the variable's numpy dtype back to a CDF type code, then
        # look up the matching fill value.
        if attr not in archive[var].attributes:
            archive[var].attributes[attr] \
                = self.fillvals[typing._typeConversions[archive[var]._dtype.type]]
class formatStrategy(fillStrategy):
    """Default a missing FORMAT to a printf-style specifier matching the
    variable's CDF data type, then signal success.

    NOTE(review): '%Lf' (CDF_REAL8) is the C long-double specifier and
    '%ld'/'%lu' assume a 32-bit long — confirm these strings against the
    consumers of the FORMAT attribute.
    """
    # CDF type code -> display format specifier.
    formats = {
        internal.CDF_CHAR: '%s',
        internal.CDF_BYTE: '%c',
        internal.CDF_UINT1: '%u',
        internal.CDF_UINT2: '%u',
        internal.CDF_UINT4: '%lu',
        internal.CDF_INT1: '%d',
        internal.CDF_INT2: '%d',
        internal.CDF_INT4: '%ld',
        internal.CDF_REAL4: '%f',
        internal.CDF_REAL8: '%Lf',
        internal.CDF_EPOCH: '%s',
        internal.CDF_EPOCH16: '%s',
    }
    def __call__(self, archive, attr, var):
        if attr not in archive[var].attributes:
            archive[var].attributes[attr] \
                = self.formats[typing._typeConversions[archive[var]._dtype.type]]
        # Tell the autofiller this requirement is now satisfied.
        raise _InferenceSuccessful
class validminStrategy(fillStrategy):
    """Default a missing VALIDMIN to the smallest representable value for
    the variable's CDF data type, then signal success."""
    # CDF type code -> minimum valid value.
    fillvals = {
        internal.CDF_CHAR: '.',
        internal.CDF_BYTE: -128,
        internal.CDF_UINT1: 0,
        internal.CDF_UINT2: 0,
        internal.CDF_UINT4: 0,
        internal.CDF_INT1: -128,
        internal.CDF_INT2: -32768,
        internal.CDF_INT4: -2147483648,
        internal.CDF_REAL4: -1.0*10**31,
        internal.CDF_REAL8: -1.0*10**31,
        internal.CDF_EPOCH: '01-Jan-0000 00:00:00.000',
        internal.CDF_EPOCH16: '01-Jan-0000 00:00:00.000',
    }
    def __call__(self, archive, attr, var):
        if attr not in archive[var].attributes:
            archive[var].attributes[attr] \
                = self.fillvals[typing._typeConversions[archive[var]._dtype.type]]
        # Tell the autofiller this requirement is now satisfied.
        raise _InferenceSuccessful
class validmaxStrategy(fillStrategy):
    """Default a missing VALIDMAX to the largest representable value for
    the variable's CDF data type, then signal success."""
    # CDF type code -> maximum valid value.
    fillvals = {
        internal.CDF_CHAR: '.',
        internal.CDF_BYTE: 127,
        internal.CDF_UINT1: 255,
        internal.CDF_UINT2: 65535,
        internal.CDF_UINT4: 4294967295,
        internal.CDF_INT1: 127,
        internal.CDF_INT2: 32767,
        internal.CDF_INT4: 2147483647,
        internal.CDF_REAL4: 1.0*10**31,
        internal.CDF_REAL8: 1.0*10**31,
        internal.CDF_EPOCH: '31-Dec-9999 23:59:59.999',
        internal.CDF_EPOCH16: '31-Dec-9999 23:59:59.999',
    }
    def __call__(self, archive, attr, var):
        if attr not in archive[var].attributes:
            archive[var].attributes[attr] \
                = self.fillvals[typing._typeConversions[archive[var]._dtype.type]]
        # Tell the autofiller this requirement is now satisfied.
        raise _InferenceSuccessful
class varTypeStrategy(fillStrategy):
    """Default a variable's VAR_TYPE to 'support_data' when it is unset."""
    def __call__(self, archive, attr, var):
        attrs = archive[var].attributes
        if attr not in attrs:
            attrs[attr] = 'support_data'
class notRequired(fillStrategy):
    """Strategy for attributes that are never required: always waives the
    requirement by raising _NotRequired."""
    def __call__(self, *args, **kwargs):
        raise _NotRequired
class required(fillStrategy):
    """Express 'this attribute is unnecessary provided another is present'.

    Checks for *attr* (or the redirected name given at construction) on the
    variable when *var* is supplied, else on the archive. Always raises:
    _MissingPrerequisite when the checked attribute is absent (retry later),
    _NotRequired when it is present (requirement satisfied elsewhere).
    Intended for use inside one_of().
    """
    def __init__(self, attr = None):
        self._attr = attr
    def __call__(self, archive, attr, var = None):
        # Optionally redirect the existence check at a different attribute.
        if self._attr is not None:
            attr = self._attr
        if var is not None:
            if attr not in archive[var].attributes:
                raise _MissingPrerequisite
        else:
            if attr not in archive.attributes:
                raise _MissingPrerequisite
        # The prerequisite exists, so this attribute itself is not needed.
        raise _NotRequired
# The contents of this attr must refer to an existing var
# in the archive.
class refersToVariable(fillStrategy):
    """Require the attribute's value to name an existing variable.

    An optional default variable name may be supplied at construction; it
    is only used (and only accepted) if that variable exists. Raises
    cdf.CoherenceError for dangling references and InferenceError when no
    value can be inferred. Returns True on success.
    """
    def __init__(self, attr = None):
        # Despite the parameter name, this is a default *value* (a variable
        # name), not a redirected attribute name as in `required`.
        self._attr = attr
    def __call__(self, archive, attr, var):
        if attr not in archive[var].attributes:
            # The attr does not exist.
            # If we have a default value, assign it.
            if self._attr is not None:
                # Remember, it must be a valid variable.
                if self._attr in archive:
                    archive[var].attributes[attr] = self._attr
                # Otherwise, this is an error.
                else:
                    raise cdf.CoherenceError
            # Otherwise, this is an error.
            else:
                raise InferenceError
        elif archive[var].attributes[attr] not in archive:
            # The attr exists but is not valid.
            raise cdf.CoherenceError
        return True
class timeSeriesStrategy(fillStrategy):
    """Require the attribute (e.g. DEPEND_0) only for data variables that
    are displayed as a time series.

    Unset VAR_TYPE defers the decision (_MissingPrerequisite); support or
    ignored data waives it (_NotRequired); otherwise the attribute must
    name an existing variable when DISPLAY_TYPE is 'time_series'.
    """
    def __call__(self, archive, attr, var):
        var_type = archive[var].attributes.get('VAR_TYPE', None)
        if var_type is None:
            raise _MissingPrerequisite
        elif var_type == 'ignore_data' or var_type == 'support_data':
            raise _NotRequired
        else:
            display_type = archive[var].attributes.get('DISPLAY_TYPE', None)
            if display_type == 'time_series':
                # Must reference an existing (epoch) variable.
                refersToVariable()(archive, attr, var)
            else:
                raise _NotRequired
class dimensionStrategy(fillStrategy):
    """Apply a secondary strategy only when the variable has at least
    ``dim`` dimensions (e.g. DEPEND_2 applies only to 2-D+ variables);
    otherwise waive the requirement."""
    def __init__(self, dim, strategy):
        self._dim = dim
        self._strategy = strategy
    def __call__(self, archive, attr, var):
        if len(archive[var]._dimSizes) < self._dim:
            # Too few dimensions: the attribute does not apply.
            raise _NotRequired
        # Delegate to the wrapped strategy.
        return self._strategy(archive, attr, var)
class one_of(fillStrategy):
    """Try several strategies in order; the first to return (or to raise
    _NotRequired/_InferenceSuccessful) wins.

    Failures are collected and the mildest one is re-raised afterwards:
    _MissingPrerequisite outranks (is preferred over) InferenceError,
    RedundancyError and CoherenceError because a retry may still succeed.
    """
    def __init__(self, *args):
        # args is already a tuple; the slice makes a defensive copy.
        self._strategies = args[:]
    def __call__(self, archive, attr, var):
        # Call strategies one by one until something succeeds.
        # If nothing succeeds, return the least traumatic exception we saw.
        exceptions = []
        for strategy in self._strategies:
            try:
                return strategy(archive, attr, var)
            except _MissingPrerequisite as e:
                # Pretty mild, really.
                exceptions.insert(0, e)
            except InferenceError as e:
                exceptions.append(e)
            except RedundancyError as e:
                exceptions.append(e)
            except cdf.CoherenceError as e:
                exceptions.append(e)
            # Do not trap _NotRequired or _InferenceSucceeded, as
            # these will be used at a higher level and are close
            # enough to success that we need not try any other cases.
        if len(exceptions) > 0:
            raise exceptions[0]
        else:
            # No strategies were supplied at all.
            raise InferenceError
# Catalogue of ISTP-style metadata attributes, split into 'global' (archive
# level) and 'var' (per-variable) sections.  'required' maps each attribute
# name to the fillStrategy instance used to infer it; 'recommended' and
# 'optional' are name lists only and are never auto-filled.
attributes = {
    'global':{
        'required':{
            'Project': userInput(),
            'Source_name': userInput(),
            'Discipline': userInput(),
            'Data_type': userInput(),
            'Descriptor': userInput(),
            'Data_version': autoIncrement(1),
            'Logical_file_id': archiveName(),
            'PI_name': userInput(),
            'PI_affiliation': userInput(),
            'TEXT': userInput(),
            'Instrument_type': userInput(),
            'Mission_group': userInput(),
            'Logical_source': userInput(),
            'Logical_source_description': userInput(),
        },
        'recommended':[
            'Acknowledgement',
            'ADID_ref',
            'Generated_by',
            'Generation_date',
            'HTTP_LINK',
            'LINK_TEXT',
            'LINK_TITLE',
            'MODS',
            'Rules_of_use',
            'Time_resolution',
        ],
        'optional':[
            'Parents',
            'Skeleton_version',
            'Software_version',
            'TITLE',
            'Validate',
        ],
    },
    # Per-variable attributes: several (FORMAT/FORM_PTR, LABLAXIS/LABL_PTR_1,
    # UNITS/UNIT_PTR) come in either-or pairs expressed via one_of().
    'var':{
        'required':{
            'CATDESC': varName(),
            'DEPEND_0': timeSeriesStrategy(),
            'DEPEND_1': dimensionStrategy(1, refersToVariable()),
            'DEPEND_2': dimensionStrategy(2, refersToVariable()),
            'DEPEND_3': dimensionStrategy(3, refersToVariable()),
            'DISPLAY_TYPE': notRequired(),
            'FIELDNAM': varName(),
            'FILLVAL': fillValStrategy(),
            'FORMAT': one_of(
                refersToVariable('FORM_PTR'),
                formatStrategy()),
            'FORM_PTR': one_of(
                refersToVariable(),
                required('FORMAT')),
            'LABLAXIS': varName(),
            'LABL_PTR_1': one_of(
                required('LABLAXIS'),
                refersToVariable()),
            'LABL_PTR_2': notRequired(),
            'LABL_PTR_3': notRequired(),
            'UNITS': defaultValue(' '),
            'UNIT_PTR': one_of(
                required('UNITS'),
                refersToVariable()),
            'VALIDMIN': validminStrategy(),
            'VALIDMAX': validmaxStrategy(),
            'VAR_TYPE': varTypeStrategy(),
        },
        'recommended':[
            'SCALETYP',
            'SCAL_PTR',
            'VAR_NOTES',
        ],
        'optional':[
            'AVG_TYPE',
            'DELTA_PLUS_VAR',
            'DELTA_MINUS_VAR',
            'DICT_KEY',
            'MONOTON',
            'SCALEMIN',
            'SCALEMAX',
            'V_PARENT',
            'DERIVN',
            'sig_digits',
            'SI_conv',
        ]
    },
}
# Attempt to fill in attributes/variables of the target archive based
# on the contents of the skeleton file.
def autofill(arc, skt):
try:
if skt is not None:
sys.path.append(os.path.dirname(skt))
dir = tempfile.mkdtemp()
sktfile = os.path.join(dir, 'sktfile.py')
shutil.copy(skt, sktfile)
sys.path.append(dir)
import sktfile
skeleton = sktfile.skeleton
else:
skeleton = {'variables':{}, 'attributes':{'global':{}, 'variable':{}}}
# Fill in static variables
for var in skeleton['variables']:
if var not in arc:
arc[var] = skeleton['variables'][var]
# Fill in global attributes
required = attributes['global']['required'].keys()
retry = []
while len(required) > 0:
for attr in required:
try:
if attr not in arc.attributes:
if attr in skeleton['attributes']['global']:
arc.attributes[attr] = skeleton['attributes']['global'][attr]
else:
attributes['global']['required'][attr](arc, attr)
if attr not in arc.attributes:
raise InferenceError
except InferenceError:
raise InferenceError('Unable to infer value of '
+ 'global attr "' + str(attr) + '"')
except _MissingPrerequisite:
retry.append(attr)
except _NotRequired:
# Good enough.
pass
except _InferenceSuccessful:
# Perfect!
pass
if len(required) == len(retry):
# This pass has resolved nothing, abort.
raise InferenceError('Unable to infer value of | |
<filename>bayesalpha/dists.py<gh_stars>10-100
import theano.tensor as tt
import theano
import theano.tensor.extra_ops
import theano.sparse
import theano.scalar
import pymc3 as pm
import numpy as np
from scipy import sparse, interpolate
from pymc3.distributions.distribution import draw_values
from pymc3.distributions.dist_math import bound
class NormalNonZero(pm.Normal):
    """Normal distribution whose logp ignores exact-zero observations."""
    def logp(self, value):
        # Elementwise log-density from the plain Normal.
        elemwise_logp = super(NormalNonZero, self).logp(value)
        # Exact zeros mark missing observations: they contribute nothing.
        return tt.switch(tt.eq(value, 0), 0., elemwise_logp)
class ScaledSdMvNormalNonZero(pm.MvNormal):
    """MvNormal with a per-dimension `scale_sd` rescaling of the spread.

    Rows of `value` containing any exact zero are treated as missing and
    contribute zero to the log-likelihood.
    """
    def __init__(self, *args, **kwargs):
        # Extra per-dimension multiplier applied on top of the base covariance.
        self.scale_sd = kwargs.pop('scale_sd')
        assert not args
        # Keep the true mean aside and hand a zero mean to the base class:
        # logp()/random() below apply mu and scale_sd manually.
        self._mu = kwargs.pop('mu')
        if isinstance(self._mu, tt.Variable):
            kwargs['mu'] = tt.zeros_like(self._mu)
        else:
            kwargs['mu'] = np.zeros_like(self._mu)
        super(ScaledSdMvNormalNonZero, self).__init__(**kwargs)
    def logp(self, value):
        scale_sd = self.scale_sd
        mu = self._mu
        # properly broadcast values to work in unified way:
        # promote scalar -> vector -> row so scale_sd is (1, k) or (n, k).
        if scale_sd.ndim == 0:
            scale_sd = tt.repeat(scale_sd, value.shape[-1])
        if scale_sd.ndim == 1:
            scale_sd = scale_sd[None, :]
        # Jacobian correction for the z = (value - mu) / scale_sd transform.
        detfix = -tt.log(scale_sd).sum(axis=-1)
        z = (value - mu)/scale_sd
        logp = super(ScaledSdMvNormalNonZero, self).logp(z) + detfix
        # Rows with any exact zero are treated as missing -> logp of 0.
        logp = tt.switch(tt.eq(value, 0).any(-1), 0., logp)
        return logp
    def random(self, point=None, size=None):
        # Draw from the zero-mean base distribution, then rescale and shift.
        r = super(ScaledSdMvNormalNonZero, self).random(point=point, size=size)
        shape = r.shape
        scale_sd, mu = draw_values([self.scale_sd, self._mu], point=point)
        if scale_sd.ndim == 0:
            scale_sd = np.repeat(scale_sd, r.shape[-1])
        if scale_sd.ndim == 1:
            scale_sd = scale_sd[None, :]
        r *= scale_sd
        r += mu
        # reshape back just in case
        return r.reshape(shape)
class GPExponential(pm.Continuous):
    """Gaussian process with exponentially decaying (AR(1)-like) correlation.

    The precision matrix of such a process on a regular grid is
    tri-diagonal; logp() exploits this via an explicit bidiagonal
    Cholesky factor instead of a dense matrix inverse.
    """
    def __init__(self, mu, alpha, sigma, *args, **kwargs):
        # alpha controls the correlation decay: corr = exp(-alpha).
        self._mu = tt.as_tensor_variable(mu)
        self._alpha = tt.as_tensor_variable(alpha)
        self._sigma = tt.as_tensor_variable(sigma)
        self.mean = self.median = self.mode = mu
        super(GPExponential, self).__init__(*args, **kwargs)
    def logp(self, value):
        mu, alpha, sigma = self._mu, self._alpha, self._sigma
        # Flatten leading axes: treat input as k independent series of length n.
        value = value.reshape((-1, value.shape[-1]))
        k, n = value.shape # TODO other shapes!
        delta = (value - mu) / sigma[..., None]
        corr = tt.exp(-alpha)
        # Off-diagonal entries of the tri-diagonal precision matrix.
        mdiag_tau = - corr / (1 - corr ** 2)
        # diag_tau_middle = 1 - 2 * corr * mdiag_tau
        diag_tau_first = 1 - corr * mdiag_tau
        # Compute the cholesky decomposition of tau
        diag_chol = tt.sqrt(diag_tau_first)
        mdiag_chol = mdiag_tau / diag_chol
        if sigma.ndim == 1:
            # NOTE(review): np.log applied to a theano tensor, and the
            # division by sigma in the log-determinant, both look suspect
            # (one would expect an additive 2*sum(log(sigma)) term) —
            # confirm against the upstream derivation before relying on it.
            logdet = 2 * k * n * np.log(diag_chol) / sigma
        else:
            logdet = 2 * n * (np.log(diag_chol) / sigma).sum()
        delta_trans = diag_chol * delta
        # Apply the sub-diagonal Cholesky band to all but the first column.
        delta_trans = tt.set_subtensor(
            delta_trans[:, 1:],
            delta_trans[:, 1:] + mdiag_chol * delta[:, :-1]
        )
        return -0.5 * (logdet + (delta_trans ** 2).sum())
def bspline_basis(n, eval_points, degree=3):
    """Evaluate `n` clamped B-spline basis functions over [0, 1].

    Parameters
    ----------
    n : int
        Number of basis functions (spline coefficients).
    eval_points : array-like
        Points in [0, 1] at which to evaluate the basis.
    degree : int, optional
        Polynomial degree of the spline (default: cubic).

    Returns
    -------
    scipy.sparse.csr_matrix
        Shape (len(eval_points), n); row i holds the values of every
        basis function at eval_points[i].
    """
    # Clamped (open uniform) knot vector: the boundary knots are repeated
    # `degree` extra times so the spline interpolates the endpoints.
    num_interior = n + degree + 1 - 2 * degree
    interior = np.linspace(0, 1, num_interior)
    knots = np.concatenate([np.zeros(degree), interior, np.ones(degree)])
    # Identity coefficients: evaluating yields each basis function in turn.
    basis = interpolate.BSpline(knots, np.eye(n), k=degree)
    return sparse.csr_matrix(basis(eval_points))
# The following is adapted from theano.sparse.basic, to fix Theano/Theano#6522
class Dot(theano.gof.op.Op):
    """Sparse-aware dot product op; at least one operand must be sparse."""
    # See doc in instance of this Op or function after this class definition.
    __props__ = ()
    def __str__(self):
        return "Sparse" + self.__class__.__name__
    def infer_shape(self, node, shapes):
        # Output shape follows standard matrix/vector product rules.
        xshp, yshp = shapes
        x, y = node.inputs
        if x.ndim == 2 and y.ndim == 2:
            return [(xshp[0], yshp[1])]
        if x.ndim == 1 and y.ndim == 2:
            return [(yshp[1],)]
        if x.ndim == 2 and y.ndim == 1:
            return [(xshp[0],)]
        if x.ndim == 1 and y.ndim == 1:
            return [()]
        raise NotImplementedError()
    def make_node(self, x, y):
        dtype_out = theano.scalar.upcast(x.dtype, y.dtype)
        # Sparse dot product should have at least one sparse variable
        # as input. If the other one is not sparse, it has to be converted
        # into a tensor.
        if isinstance(x, sparse.spmatrix):
            x = theano.sparse.as_sparse_variable(x)
        if isinstance(y, sparse.spmatrix):
            y = theano.sparse.as_sparse_variable(y)
        x_is_sparse_var = theano.sparse.basic._is_sparse_variable(x)
        y_is_sparse_var = theano.sparse.basic._is_sparse_variable(y)
        if not x_is_sparse_var and not y_is_sparse_var:
            raise TypeError(
                "Sparse dot product should have at least one "
                "sparse variable as inputs, but the inputs are "
                "%s (%s) and %s (%s)." % (x, x.type, y, y.type))
        if x_is_sparse_var:
            broadcast_x = (False,) * x.ndim
        else:
            x = tt.as_tensor_variable(x)
            broadcast_x = x.type.broadcastable
            # x is dense, so y must be the sparse operand.
            assert y.format in ["csr", "csc"]
        if x.ndim not in (1, 2):
            raise TypeError(
                'theano.sparse.Dot: input 0 (0-indexed) must have ndim of '
                '1 or 2, %d given.' % x.ndim)
        if y_is_sparse_var:
            broadcast_y = (False,) * y.ndim
        else:
            y = tt.as_tensor_variable(y)
            broadcast_y = y.type.broadcastable
            # y is dense, so x must be the sparse operand.
            assert x.format in ["csr", "csc"]
        if y.ndim not in (1, 2):
            raise TypeError(
                'theano.sparse.Dot: input 1 (1-indexed) must have ndim of '
                '1 or 2, %d given.' % y.ndim)
        # Output broadcast pattern: drop x's last axis, keep y's trailing axes.
        if len(broadcast_y) == 2:
            broadcast_out = broadcast_x[:-1] + broadcast_y[1:]
        elif len(broadcast_y) == 1:
            broadcast_out = broadcast_x[:-1]
        return theano.gof.Apply(
            self, [x, y], [tt.tensor(dtype=dtype_out,
                                     broadcastable=broadcast_out)])
    def perform(self, node, inputs, out):
        x, y = inputs
        out = out[0]
        x_is_sparse = theano.sparse.basic._is_sparse(x)
        y_is_sparse = theano.sparse.basic._is_sparse(y)
        if not x_is_sparse and not y_is_sparse:
            raise TypeError(x)
        # scipy's `*` is matrix multiplication when an operand is sparse.
        rval = x * y
        if x_is_sparse and y_is_sparse:
            rval = rval.toarray()
        out[0] = theano._asarray(rval, dtype=node.outputs[0].dtype)
    def grad(self, inputs, gout):
        (x, y) = inputs
        (gz,) = gout
        assert (theano.sparse.basic._is_sparse_variable(x)
                or theano.sparse.basic._is_sparse_variable(y))
        rval = []
        # Dense operands use the regular tensor dot; sparse ones reuse `dot`.
        if theano.sparse.basic._is_dense_variable(y):
            rval.append(tt.dot(gz, y.T))
        else:
            rval.append(dot(gz, y.T))
        if theano.sparse.basic._is_dense_variable(x):
            rval.append(tt.dot(x.T, gz))
        else:
            rval.append(dot(x.T, gz))
        return rval
# Module-level singleton; `dot` below routes every call through it.
_dot = Dot()
def dot(x, y):
    """
    Operation for efficiently calculating the dot product when
    one or all operands is sparse. Supported formats are CSC and CSR.
    The output of the operation is dense.
    Parameters
    ----------
    x
        Sparse or dense matrix variable.
    y
        Sparse or dense matrix variable.
    Returns
    -------
    The dot product `x`.`y` in a dense format.
    Notes
    -----
    The grad implemented is regular, i.e. not structured.
    At least one of `x` or `y` must be a sparse matrix.
    When the operation has the form dot(csr_matrix, dense)
    the gradient of this operation can be performed inplace
    by UsmmCscDense. This leads to significant speed-ups.
    """
    # Accept raw scipy.sparse matrices (detected via their getnnz method)
    # by wrapping them into theano sparse variables first.
    if hasattr(x, 'getnnz'):
        x = theano.sparse.as_sparse_variable(x)
    if hasattr(y, 'getnnz'):
        y = theano.sparse.as_sparse_variable(y)
    x_is_sparse_variable = theano.sparse.basic._is_sparse_variable(x)
    y_is_sparse_variable = theano.sparse.basic._is_sparse_variable(y)
    if not x_is_sparse_variable and not y_is_sparse_variable:
        raise TypeError()
    return _dot(x, y)
class BatchedMatrixInverse(tt.Op):
    r"""Computes the inverse of a matrix :math:`A`.
    Given a square matrix :math:`A`, ``matrix_inverse`` returns a square
    matrix :math:`A_{inv}` such that the dot product :math:`A \cdot A_{inv}`
    and :math:`A_{inv} \cdot A` equals the identity matrix :math:`I`.
    Notes
    -----
    When possible, the call to this op will be optimized to the call
    of ``solve``.
    """
    __props__ = ()
    def __init__(self):
        pass
    def make_node(self, x):
        x = tt.as_tensor_variable(x)
        # Expect a batch of square matrices: shape (batch, n, n).
        # Fixed: theano TensorVariable exposes `.ndim`, not `.dim`; the
        # original `assert x.dim == 3` raised AttributeError on every call.
        assert x.ndim == 3
        return tt.Apply(self, [x], [x.type()])
    def perform(self, node, inputs, outputs):
        (x,) = inputs
        (z,) = outputs
        # np.linalg.inv vectorizes over the leading batch axis.
        z[0] = np.linalg.inv(x).astype(x.dtype)
    def grad(self, inputs, g_outputs):
        r"""The gradient function should return
        .. math:: V\frac{\partial X^{-1}}{\partial X},
        where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
        ``inputs``. Using the `matrix cookbook
        <http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
        one can deduce that the relation corresponds to
        .. math:: (X^{-1} \cdot V^{T} \cdot X^{-1})^T.
        """
        x, = inputs
        xi = self.__call__(x)
        gz, = g_outputs
        # TT.dot(gz.T,xi)
        gx = tt.batched_dot(xi, gz.transpose(0, 2, 1))
        gx = tt.batched_dot(gx, xi)
        gx = -gx.transpose(0, 2, 1)
        return [gx]
    def R_op(self, inputs, eval_points):
        r"""The gradient function should return
        .. math:: \frac{\partial X^{-1}}{\partial X}V,
        where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
        ``inputs``. Using the `matrix cookbook
        <http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
        one can deduce that the relation corresponds to
        .. math:: X^{-1} \cdot V \cdot X^{-1}.
        """
        x, = inputs
        xi = self.__call__(x)
        ev, = eval_points
        if ev is None:
            return [None]
        r = tt.batched_dot(xi, ev)
        r = tt.batched_dot(r, xi)
        r = -r
        return [r]
    def infer_shape(self, node, shapes):
        # Inverse preserves the (batch, n, n) shape.
        return shapes
batched_matrix_inverse = BatchedMatrixInverse()
class EQCorrMvNormal(pm.Continuous):
def __init__(self, mu, std, corr, clust, nonzero=True, *args, **kwargs):
super(EQCorrMvNormal, self).__init__(*args, **kwargs)
self.mu, self.std, self.corr, self.clust = map(
tt.as_tensor_variable, [mu, std, corr, clust]
)
self.nonzero = nonzero
def logp(self, x):
# -1/2 (x-mu) @ Sigma^-1 @ (x-mu)^T - 1/2 log(2pi^k|Sigma|)
# Sigma = diag(std) @ Corr @ diag(std)
# Sigma^-1 = diag(std^-1) @ Corr^-1 @ diag(std^-1)
# Corr is a block matrix of special form
# +----------+
# Corr = [[ | 1, b1, b1|, 0, 0, 0,..., 0]
# [ |b1, 1, b1|, 0, 0, 0,..., 0]
# [ |b1, b1, 1|, 0, 0, 0,..., 0]
# +-----------+----------+
# [ 0, 0, 0, | 1, b2, b2|,..., 0]
# [ 0, 0, 0, |b2, 1, b2|,..., 0]
# [ 0, 0, 0, |b2, b2, 1|,..., 0]
# +----------+
# [ ... ]
# [ 0, 0, 0, 0, 0, 0 ,..., 1]]
#
# Corr = [[B1, 0, 0, ..., 0]
# [ 0, B2, 0, ..., 0]
# [ 0, 0, B3, ..., 0]
# [ ... ]
# [ 0, 0, 0, ..., Bk]]
#
# Corr^-1 = [[B1^-1, 0, 0, | |
<gh_stars>1000+
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# WikiGraphs is licensed under the terms of the Creative Commons
# Attribution-ShareAlike 4.0 International (CC BY-SA 4.0) license.
#
# WikiText-103 data (unchanged) is licensed by Salesforce.com, Inc. under the
# terms of the Creative Commons Attribution-ShareAlike 4.0 International
# (CC BY-SA 4.0) license. You can find details about CC BY-SA 4.0 at:
#
# https://creativecommons.org/licenses/by-sa/4.0/legalcode
#
# Freebase data is licensed by Google LLC under the terms of the Creative
# Commons CC BY 4.0 license. You may obtain a copy of the License at:
#
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# ==============================================================================
"""Transformer embedding modules."""
from typing import List, Optional
import haiku as hk
from haiku import initializers as init
import jax
import jax.numpy as jnp
import jraph
from wikigraphs.model import graph_net as gn
def get_pos_start(timesteps: int, batch_size: int) -> jnp.ndarray:
  """Find the right slice of positional embeddings for incremental sampling."""
  # Per-example cursor into the position axis, persisted in Haiku state.
  start_indices = hk.get_state(
      'cache_progress_idx', [batch_size], dtype=jnp.int32, init=jnp.zeros)
  # Advance the cursor so the next incremental step resumes where we stop.
  hk.set_state('cache_progress_idx', start_indices + timesteps)
  return start_indices
class SinusoidalPositionEmbedding(hk.Module):
  """Position encoding, using mixture of sinusoidal signals."""
  def __init__(self,
               dim: int,
               cache_steps: int = 0,
               reverse_order: bool = False,
               clamp_len: Optional[int] = None,
               name: Optional[str] = None):
    """Initialize a SinusoidalPositionEmbedding.
    Args:
      dim: Embedding dimension.
      cache_steps: The length of the memory.
      reverse_order: If set to True, position index is reversed.
      clamp_len: position beyond clamp_len will be reset to clamp_len, default
        to not clamping.
      name: Optional name for this Haiku module.
    """
    super(SinusoidalPositionEmbedding, self).__init__(name=name)
    self._dim = dim
    self._cache_steps = cache_steps
    self._reverse_order = reverse_order
    self._clamp_len = clamp_len
    # Geometric frequency ladder 1 / 10000^(2i/dim), one per even channel.
    self._inv_freq = 1.0 / (
        10000 ** (jnp.arange(0, dim, 2).astype(jnp.float32) / dim))
  def __call__(self, timesteps: int, batch_size: int) -> jnp.ndarray:
    """Computes the sinusoidal position embedding.
    Args:
      timesteps: The length of the sequence.
      batch_size: The size of the batch.
    Returns:
      Sinusoidal position embedding.
    """
    full_length = timesteps + self._cache_steps
    if self._reverse_order:
      # Descending positions (full_length-1 .. 0), used by the relative
      # position embedding to encode distances from the current token.
      positions = jnp.arange(full_length - 1, -1, -1)
      positions = jnp.repeat(positions[None, :], batch_size, axis=0)
    else:
      if self._cache_steps > 0:
        # Incremental sampling: offset by each example's cached progress.
        positions = (get_pos_start(timesteps, batch_size)[:, None]
                     + jnp.arange(timesteps)[None, :])
      else:
        positions = jnp.arange(0, full_length)
        positions = jnp.repeat(positions[None, :], batch_size, axis=0)
    if self._clamp_len is not None:
      positions = jnp.minimum(positions, self._clamp_len)
    # Outer product position x frequency, then [sin | cos] -> (B, T, dim).
    scaled_time = positions[:, :, None] * self._inv_freq[None, None, :]
    return jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=2)
def relative_shift(x: jnp.ndarray) -> jnp.ndarray:
  """Shift the relative logits.

  Pads one zero column on the left of the last axis, folds it into the
  second-to-last axis, drops the first row and restores the original shape,
  realigning each query's logits by its position (the Transformer-XL trick).
  """
  batch, heads, q_len, k_len = x.shape
  padded = jnp.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
  folded = jnp.reshape(padded, [batch, heads, k_len + 1, q_len])
  return jnp.reshape(folded[:, :, 1:, :], [batch, heads, q_len, k_len])
class RelativePositionEmbedding(hk.Module):
  """Position encoding, using relative positions than absolute positions."""
  def __init__(self,
               dim: int,
               dropout_rate: float,
               r_w_bias: jnp.ndarray,
               r_r_bias: jnp.ndarray,
               init_scale: float = 0.02,
               clamp_len: Optional[int] = None,
               name: Optional[str] = None):
    """Initialize a RelativePositionEmbedding.
    Args:
      dim: Embedding dimension.
      dropout_rate: dropout rate.
      r_w_bias: global content bias.
      r_r_bias: global positional bias.
      init_scale: the initialization scale of the RandomNormal used for the
        linear layer.
      clamp_len: position beyond clamp_len will be reset to clamp_len, default
        to not clamping.
      name: Optional name for this Haiku module.
    """
    super(RelativePositionEmbedding, self).__init__(name=name)
    self._dim = dim
    self._dropout_rate = dropout_rate
    self._r_w_bias = r_w_bias
    self._r_r_bias = r_r_bias
    self._init_scale = init_scale
    # reverse_order=True yields distances from the current token, which is
    # what the relative attention score below expects.
    self._sinusoidal_pos_emb = SinusoidalPositionEmbedding(
        dim=dim,
        reverse_order=True,
        clamp_len=clamp_len,
        name=name)
  def __call__(self, q: jnp.ndarray, k: jnp.ndarray) -> jnp.ndarray:
    """Computes the relative position embedding.
    Args:
      q: The query.
      k: The key.
    Returns:
      Relative position embedding.
    """
    # Use key instead of query to obtain the length.
    # Assumed layout is (batch, time, heads, head_dim) — matches the einsum
    # index convention b/t(T)/h/d used below.
    batch_size, key_length, num_heads, head_dim = list(k.shape)
    # Content based addressing and global content bias
    content_score = jnp.einsum('bthd,bThd->bhtT', q + self._r_w_bias, k)
    # Relative position encoding
    positional_encodings = self._sinusoidal_pos_emb(key_length, batch_size)
    positional_encodings = hk.dropout(hk.next_rng_key(), self._dropout_rate,
                                      positional_encodings)
    # Kernel-size-1 Conv1D acts as a position-wise linear layer (no bias)
    # projecting the sinusoidal features to per-head embeddings.
    rel_pos_emb = hk.Conv1D(
        output_channels=self._dim, kernel_shape=1, with_bias=False,
        w_init=init.RandomNormal(stddev=self._init_scale))(positional_encodings)
    rel_pos_emb = jnp.reshape(rel_pos_emb, [
        batch_size, key_length, num_heads, head_dim])
    # Content dependent positional bias and global positional bias
    rel_pos_score = jnp.einsum('bthd,bThd->bhtT', q + self._r_r_bias,
                               rel_pos_emb)
    # Realign so index T means relative offset rather than absolute position.
    rel_pos_score = relative_shift(rel_pos_score)
    assert content_score.shape == rel_pos_score.shape
    return content_score + rel_pos_score
def hierarchical_logprobs(
    logits: jnp.ndarray,
    class_logits: jnp.ndarray,
    cutoffs: List[int]) -> jnp.ndarray:
  """Hierarchical log-probs for adaptive softmax.

  The head softmax covers the most frequent tokens plus one "class" slot per
  tail bucket; each tail distributes its class probability over its tokens.
  Item 0 (the NULL token) is suppressed with a large negative bias.

  Args:
    logits: raw logits over the full vocabulary, shape [..., vocab].
    class_logits: one logit per tail bucket, shape [..., num_tails].
    cutoffs: bucket boundaries, starting with 0 and ending with vocab size.
  Returns:
    Normalized log-probabilities over the full vocabulary, same shape as
    `logits`.
  """
  bucket_sizes = [hi - lo for lo, hi in zip(cutoffs[:-1], cutoffs[1:])]
  num_tails = len(bucket_sizes) - 1
  pieces = jnp.split(logits, cutoffs[1:-1], axis=-1)
  head_and_classes = jnp.concatenate([pieces[0], class_logits], -1)
  # Mask out item 0, the NULL token.
  null_mask = jnp.concatenate(
      [jnp.full([1], -10, dtype=logits.dtype),
       jnp.zeros([bucket_sizes[0] + num_tails - 1], dtype=logits.dtype)], 0)
  joint_logprobs = jax.nn.log_softmax(head_and_classes + null_mask)
  head_logprobs = joint_logprobs[..., :bucket_sizes[0]]
  class_logprobs = joint_logprobs[..., bucket_sizes[0]:]
  # Tail bucket i: log p(bucket i) + log p(token | bucket i).
  tail_logprobs = [jax.nn.log_softmax(pieces[i + 1]) + class_logprobs[..., [i]]
                   for i in range(num_tails)]
  return jnp.concatenate([head_logprobs] + tail_logprobs, -1)
class AdaptiveSoftmaxEmbedding(hk.Module):
  """Adaptive inputs and softmax (https://arxiv.org/abs/1809.10853)."""
  def __init__(self,
               dim: int,
               vocab_size: int,
               cutoffs: List[int],
               tail_shrink_factor: int = 4,
               hierarchical: bool = True,
               init_std: float = 0.02,
               init_proj_std: float = 0.01,
               dtype: jnp.dtype = jnp.float32,
               name: Optional[str] = None):
    """Initialize a AdaptiveSoftmaxEmbedding.
    Args:
      dim: dimensionality of the hidden space.
      vocab_size: the size of the vocabulary.
      cutoffs: the cutoff indices of the vocabulary used for the adaptive
        softmax embedding.
      tail_shrink_factor: how many times to shrink the hidden dimensionality
        for low-frequency vocabulary after each cutoff.
      hierarchical: whether to use hierarchical softmax.
      init_std: standard deviation of the Normal distribution used to initialize
        the embedding weights.
      init_proj_std: standard deviation of the Normal distribution used to
        initialize the projection weights.
      dtype: Optional data type default to jnp.float32.
      name: Optional name for this Haiku module.
    """
    super(AdaptiveSoftmaxEmbedding, self).__init__(name=name)
    self._hidden_size = dim
    self._vocab_size = vocab_size
    # Add sentinel cutoffs so consecutive pairs delimit the vocabulary
    # buckets [l_cutoff, r_cutoff); bucket 0 holds the frequent tokens.
    self._cutoffs = [0] + list(cutoffs) + [self._vocab_size]
    self._tail_shrink_factor = tail_shrink_factor
    self._hierarchical = hierarchical
    self._dtype = dtype
    self._embeddings = []
    self._projections = []
    # Single output bias shared across the whole vocabulary.
    self._bias = hk.get_parameter(
        'bias', [self._vocab_size], dtype=self._dtype, init=jnp.zeros)
    l_cutoffs = self._cutoffs[:-1]
    r_cutoffs = self._cutoffs[1:]
    for i, (l_cutoff, r_cutoff) in enumerate(zip(l_cutoffs, r_cutoffs)):
      # Each successive (rarer) bucket uses a smaller embedding size.
      hidden_size = self._hidden_size // (self._tail_shrink_factor ** i)
      embedding = hk.get_parameter(
          f'embeddings_{l_cutoff}_{r_cutoff}',
          [r_cutoff - l_cutoff, hidden_size],
          dtype=self._dtype,
          init=hk.initializers.RandomNormal(stddev=init_std))
      self._embeddings += [embedding]
      if self._tail_shrink_factor != 1:
        # Per-bucket projection back up to the full hidden size.
        projection = hk.get_parameter(
            f'projection_{l_cutoff}_{r_cutoff}',
            [hidden_size, self._hidden_size],
            dtype=self._dtype,
            init=hk.initializers.RandomNormal(stddev=init_proj_std))
        self._projections += [projection]
    if self._tail_shrink_factor != 1:
      # The head bucket gets its own projection on the output side.
      self._output_projection = hk.get_parameter(
          'output_head_projection',
          [self._hidden_size, self._hidden_size],
          dtype=self._dtype,
          init=hk.initializers.RandomNormal(stddev=init_proj_std))
    if self._hierarchical:
      # One class logit per tail bucket for the hierarchical softmax.
      self._class_weights = hk.get_parameter(
          'tail_class_weights',
          [self._hidden_size, len(cutoffs)],
          init=hk.initializers.RandomNormal(stddev=init_std))
      self._class_bias = hk.get_parameter(
          'tail_class_bias',
          [len(cutoffs)],
          dtype=self._dtype,
          init=jnp.zeros)
  @hk.transparent
  def build_embeddings(self):
    """Builds input embeddings."""
    if self._projections:
      # Project every (possibly shrunk) bucket embedding to hidden size.
      embedding_mat = [
          jnp.dot(emb, proj) for emb, proj in zip(self._embeddings,
                                                  self._projections)]
    else:
      embedding_mat = self._embeddings
    input_embeddings = jnp.concatenate(embedding_mat, 0)
    return input_embeddings
  @hk.transparent
  def build_output_embeddings(self):
    """Builds separate output embeddings."""
    if self._projections:
      # Head bucket uses the dedicated output projection; tails reuse theirs.
      projections = [self._output_projection] + self._projections[1:]
      embedding_mat = [jnp.dot(emb, proj)
                       for emb, proj in zip(self._embeddings, projections)]
    else:
      embedding_mat = self._embeddings
    output_embeddings = jnp.concatenate(embedding_mat, 0)
    return jnp.transpose(output_embeddings)
  def embed_input(self, input_tokens: jnp.ndarray) -> jnp.ndarray:
    """Embeds the input."""
    assert jnp.issubdtype(input_tokens.dtype, jnp.integer)
    input_embeddings = self.build_embeddings()
    embedded_inputs = input_embeddings[input_tokens]
    # Scale embeddings by sqrt(hidden_size).
    return embedded_inputs * self._hidden_size ** 0.5
  def embed_output(self, inputs: jnp.ndarray) -> jnp.ndarray:
    """Outputs logits."""
    output_embs = self.build_output_embeddings()
    logits = jnp.einsum('btd,dv->btv', inputs, output_embs) + self._bias
    if self._hierarchical:
      class_logits = jnp.dot(inputs, self._class_weights) + self._class_bias
      # Note: hierarchical mode returns normalized log-probs, not raw logits.
      logprobs = hierarchical_logprobs(logits, class_logits, self._cutoffs)
      return logprobs
    else:
      return logits
class GraphEmbeddingModel(hk.Module):
"""A single graph network for embedding graph data."""
  def __init__(self,
               embed_dim: int,
               num_layers: int,
               msg_hidden_size_factor: int = 2,
               use_layer_norm: bool = False,
               name: Optional[str] = None):
    """Constructor.
    Args:
      embed_dim: node embedding size.
      num_layers: number of message passing layers to use.
      msg_hidden_size_factor: size of the message network hiddens as a factor
        of embed_dim.
      use_layer_norm: whether to apply layer norm on node updates.
      name: optional name for this module.
    """
    super().__init__(name=name)
    # Configuration is stored as-is; it is consumed when the module is
    # called on a batched jraph.GraphsTuple.
    self._embed_dim = embed_dim
    self._num_layers = num_layers
    self._msg_hidden_size_factor = msg_hidden_size_factor
    self._use_layer_norm = use_layer_norm
def __call__(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Compute embeddings for each node in the graphs.
Args:
graphs: a set of graphs batched into a single graph. The nodes and edges
are represented as feature tensors.
Returns:
graphs: new graph with node embeddings updated (shape [n_nodes,
embed_dim]).
"""
nodes = hk.Linear(self._embed_dim)(graphs.nodes)
edges = hk.Linear(self._embed_dim)(graphs.edges)
nodes = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(
jax.nn.gelu(nodes))
edges = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(
jax.nn.gelu(edges))
graphs = graphs._replace(nodes=nodes, edges=edges)
| |
city, New Mexico",2539),
("Los Alamos CDP, New Mexico",12373),
("Los Cerrillos CDP, New Mexico",260),
("Los Chaves CDP, New Mexico",5890),
("Los Luceros CDP, New Mexico",483),
("Los Lunas village, New Mexico",15428),
("Los Ojos CDP, New Mexico",52),
("Los Ranchos de Albuquerque village, New Mexico",6131),
("Loving village, New Mexico",1212),
("Lovington city, New Mexico",11575),
("Lower Frisco CDP, New Mexico",0),
("<NAME> CDP, New Mexico",191),
("Lumberton CDP, New Mexico",186),
("Luna CDP, New Mexico",43),
("Lyden CDP, New Mexico",138),
("McCartys Village CDP, New Mexico",72),
("McIntosh CDP, New Mexico",1089),
("Madrid CDP, New Mexico",201),
("Madrone CDP, New Mexico",682),
("Magdalena village, New Mexico",599),
("Malaga CDP, New Mexico",148),
("Manzano CDP, New Mexico",0),
("Manzano Springs CDP, New Mexico",229),
("Maxwell village, New Mexico",259),
("Mayhill CDP, New Mexico",59),
("Meadow Lake CDP, New Mexico",4253),
("Melrose village, New Mexico",595),
("Mescalero CDP, New Mexico",1361),
("Mesilla town, New Mexico",2138),
("Mesita CDP, New Mexico",857),
("Mesquite CDP, New Mexico",414),
("Middle Frisco CDP, New Mexico",21),
("Midway CDP, New Mexico",841),
("Milan village, New Mexico",3636),
("Mimbres CDP, New Mexico",536),
("Monterey Park CDP, New Mexico",1493),
("Monument CDP, New Mexico",96),
("Moquino CDP, New Mexico",43),
("Mora CDP, New Mexico",907),
("Moriarty city, New Mexico",2223),
("Morningside CDP, New Mexico",857),
("Mosquero village, New Mexico",71),
("Mountainair town, New Mexico",974),
("Mountain View CDP, New Mexico",135),
("Nadine CDP, New Mexico",328),
("Nageezi CDP, New Mexico",261),
("Nakaibito CDP, New Mexico",352),
("Nambe CDP, New Mexico",1814),
("Napi Headquarters CDP, New Mexico",744),
("Nara Visa CDP, New Mexico",47),
("Naschitti CDP, New Mexico",328),
("Navajo CDP, New Mexico",1498),
("Navajo Dam CDP, New Mexico",329),
("Nenahnezad CDP, New Mexico",671),
("Newcomb CDP, New Mexico",368),
("Newkirk CDP, New Mexico",0),
("Nogal CDP, New Mexico",133),
("North Acomita Village CDP, New Mexico",192),
("North Hobbs CDP, New Mexico",6068),
("North Hurley CDP, New Mexico",134),
("North Light Plant CDP, New Mexico",348),
("North San Ysidro CDP, New Mexico",0),
("North Valley CDP, New Mexico",12242),
("Oasis CDP, New Mexico",63),
("O<NAME> CDP, New Mexico",1356),
("O<NAME> CDP, New Mexico",643),
("Organ CDP, New Mexico",200),
("Orogrande CDP, New Mexico",91),
("Paguate CDP, New Mexico",420),
("Pajarito Mesa CDP, New Mexico",289),
("Paradise Hills CDP, New Mexico",5079),
("Paraje CDP, New Mexico",831),
("Pastura CDP, New Mexico",8),
("Peak Place CDP, New Mexico",412),
("Pecan Park CDP, New Mexico",0),
("Pecos village, New Mexico",1594),
("Peña Blanca CDP, New Mexico",605),
("Peñasco CDP, New Mexico",663),
("Peralta town, New Mexico",3577),
("Picuris Pueblo CDP, New Mexico",115),
("Pie Town CDP, New Mexico",93),
("Pinehill CDP, New Mexico",62),
("Pinon CDP, New Mexico",0),
("Pinos Altos CDP, New Mexico",365),
("Placitas CDP (Doña Ana County), New Mexico",591),
("Placitas CDP (Sandoval County), New Mexico",4441),
("Playas CDP, New Mexico",44),
("Pleasanton CDP, New Mexico",193),
("Pojoaque CDP, New Mexico",2018),
("Polvadera CDP, New Mexico",225),
("Ponderosa CDP, New Mexico",288),
("Ponderosa Pine CDP, New Mexico",1057),
("Portales city, New Mexico",11974),
("Pueblito CDP, New Mexico",167),
("Pueblitos CDP, New Mexico",370),
("Pueblo CDP, New Mexico",0),
("Pueblo of Sandia Village CDP, New Mexico",377),
("Pueblo Pintado CDP, New Mexico",404),
("Puerto de Luna CDP, New Mexico",57),
("Pulpotio Bareas CDP, New Mexico",283),
("Quemado CDP, New Mexico",69),
("Questa village, New Mexico",2019),
("Radium Springs CDP, New Mexico",1731),
("Ramah CDP, New Mexico",359),
("Rancho Grande CDP, New Mexico",52),
("Ranchos de Taos CDP, New Mexico",2460),
("Raton city, New Mexico",6145),
("Red River town, New Mexico",418),
("Regina CDP, New Mexico",57),
("Reserve village, New Mexico",669),
("Ribera CDP, New Mexico",307),
("Rincon CDP, New Mexico",413),
("Rio Communities city, New Mexico",4587),
("Rio en Medio CDP, New Mexico",205),
("Rio Lucio CDP, New Mexico",474),
("Rio Rancho city, New Mexico",94765),
("Rivers CDP, New Mexico",0),
("Rock Springs CDP, New Mexico",410),
("Rodeo CDP, New Mexico",68),
("Rodey CDP, New Mexico",359),
("Rosedale CDP, New Mexico",313),
("Roswell city, New Mexico",48186),
("Rowe CDP, New Mexico",150),
("Roy village, New Mexico",184),
("Ruidoso village, New Mexico",7757),
("Ruidoso Downs city, New Mexico",2597),
("Sacramento CDP, New Mexico",118),
("Salem CDP, New Mexico",650),
("San Acacia CDP, New Mexico",83),
("San Antonio CDP, New Mexico",11),
("San Antonito CDP (Bernalillo County), New Mexico",1179),
("San Antonito CDP (Socorro County), New Mexico",11),
("San Cristobal CDP, New Mexico",259),
("Sandia Heights CDP, New Mexico",3366),
("Sandia Knolls CDP, New Mexico",1566),
("Sandia Park CDP, New Mexico",141),
("San Felipe Pueblo CDP, New Mexico",2194),
("San Fidel CDP, New Mexico",439),
("San Ildefonso Pueblo CDP, New Mexico",742),
("San Jon village, New Mexico",235),
("San Jose CDP (Rio Arriba County), New Mexico",632),
("San Jose CDP (San Miguel County), New Mexico",217),
("San Lorenzo CDP, New Mexico",148),
("San Luis CDP, New Mexico",204),
("San Mateo CDP, New Mexico",341),
("San Miguel CDP, New Mexico",1054),
("Sanostee CDP, New Mexico",434),
("San Pablo CDP, New Mexico",1046),
("San Pedro CDP, New Mexico",93),
("San Rafael CDP, New Mexico",1251),
("Santa Ana Pueblo CDP, New Mexico",602),
("Santa Clara village, New Mexico",1783),
("Santa Clara Pueblo CDP, New Mexico",789),
("Santa Cruz CDP, New Mexico",261),
("Santa Fe city, New Mexico",83847),
("Santa Rosa city, New Mexico",3426),
("Santa Teresa CDP, New Mexico",5696),
("<NAME> Pueblo CDP, New Mexico",2379),
("San Ysidro CDP, New Mexico",2091),
("San Ysidro village, New Mexico",193),
("Sausal CDP, New Mexico",1944),
("Seama CDP, New Mexico",336),
("Seboyeta CDP, New Mexico",157),
("Sedillo CDP, New Mexico",911),
("Sena CDP, New Mexico",151),
("Sheep Springs CDP, New Mexico",268),
("Shiprock CDP, New Mexico",8966),
("Silver City town, New Mexico",9783),
("Skyline-Ganipa CDP, New Mexico",1208),
("Socorro city, New Mexico",8554),
("Soham CDP, New Mexico",80),
("Sombrillo CDP, New Mexico",232),
("South Acomita Village CDP, New Mexico",47),
("South Valley CDP, New Mexico",41011),
("Spencerville CDP, New Mexico",1746),
("Springer town, New Mexico",811),
("Sunland Park city, New Mexico",16602),
("Sunshine CDP, New Mexico",203),
("Tajique CDP, New Mexico",263),
("Talpa CDP, New Mexico",636),
("Taos town, New Mexico",6021),
("Taos Pueblo CDP, New Mexico",1518),
("Taos Ski Valley village, New Mexico",95),
("Tatum town, New Mexico",772),
("Tecolote CDP, New Mexico",205),
("Tecolotito CDP, New Mexico",233),
("Tesuque CDP, New Mexico",837),
("Tesuque Pueblo CDP, New Mexico",260),
("Texico city, New Mexico",1229),
("Thoreau CDP, New Mexico",1666),
("Tierra Amarilla CDP, New Mexico",653),
("Tijeras village, New Mexico",655),
("Timberon CDP, New Mexico",301),
("Tohatchi CDP, New Mexico",825),
("Tome CDP, New Mexico",2172),
("Torreon CDP (Sandoval County), New Mexico",215),
("Torreon CDP (Torrance County), New Mexico",231),
("Trout Valley CDP, New Mexico",0),
("Truchas CDP, New Mexico",229),
("Truth or Consequences city, New Mexico",5968),
("Tse Bonito CDP, New Mexico",176),
("Tucumcari city, New Mexico",4958),
("Tularosa village, New Mexico",2931),
("Twin Forks CDP, New Mexico",200),
("Twin Lakes CDP, New Mexico",1008),
("Tyrone CDP, New Mexico",551),
("University Park CDP, New Mexico",3121),
("Upper Fruitland CDP, New Mexico",1672),
("Ute Park CDP, New Mexico",219),
("Vadito CDP, New Mexico",295),
("Vado CDP, New Mexico",2579),
("Valencia CDP, New Mexico",1826),
("Vaughn town, New Mexico",151),
("Veguita CDP, New Mexico",80),
("Velarde CDP, New Mexico",119),
("Ventura CDP, New Mexico",403),
("Villanueva CDP, New Mexico",362),
("Virden village, New Mexico",152),
("Wagon Mound village, New Mexico",266),
("Waterflow CDP, New Mexico",1769),
("Watrous CDP, New Mexico",124),
("Weed CDP, New Mexico",0),
("West Hammond CDP, New Mexico",2787),
("White Rock CDP, New Mexico",5809),
("White Sands CDP, New Mexico",1137),
("Whites City CDP, New Mexico",173),
("White Signal CDP, New Mexico",272),
("Willard village, New Mexico",202),
("Williamsburg village, New Mexico",448),
("Windmill CDP, New Mexico",85),
("Winston CDP, New Mexico",64),
("Yah-ta-hey CDP, New Mexico",648),
("Young Place CDP, New Mexico",214),
("Youngsville CDP, New Mexico",42),
("Zia Pueblo CDP, New Mexico",934),
("Zuni Pueblo CDP, New Mexico",7590),
("Accord CDP, New York",521),
("Adams village, New York",1661),
("Adams Center CDP, New York",1451),
("Addison village, New York",1753),
("Afton village, New York",986),
("Airmont village, New York",8776),
("Akron village, New York",2857),
("Albany city, New York",97889),
("Albertson CDP, New York",5254),
("Albion village, New York",5439),
("Alden village, New York",2580),
("Alexander village, New York",589),
("Alexandria Bay village, New York",831),
("Alfred village, New York",4317),
("Allegany village, New York",1922),
("Almond village, New York",494),
("Altamont village, New York",1755),
("Altmar CDP, New York",442),
("Altona CDP, New York",759),
("Amagansett CDP, New York",750),
("Amenia CDP, New York",1107),
("Ames village, New York",132),
("Amityville village, New York",9452),
("Amsterdam city, New York",17927),
("Andes CDP, New York",243),
("Andover village, New York",818),
("Angelica village, New York",759),
("Angola village, New York",2199),
("Angola on the Lake CDP, New York",1819),
("Antwerp village, New York",543),
("Apalachin CDP, New York",1146),
("Aquebogue CDP, New York",1690),
("Arcade village, New York",2182),
("Ardsley village, New York",4534),
("Argyle village, New York",555),
("Arkport village, New York",853),
("Arlington CDP, New York",3556),
("Armonk CDP, New York",4381),
("Asharoken village, New York",443),
("Athens village, New York",1559),
("Atlantic Beach village, New York",1398),
("Attica village, New York",3190),
("Auburn city, New York",26779),
("Aurora village, New York",655),
("Au Sable Forks CDP, New York",426),
("Averill Park CDP, New York",1330),
("Avoca village, New York",960),
("Avon village, New York",3261),
("Babylon village, New York",12089),
("Bainbridge village, New York",1442),
("Baiting Hollow CDP, New York",1654),
("Baldwin CDP, New York",25134),
("Baldwin Harbor CDP, New York",7690),
("Baldwinsville village, New York",7550),
("Ballston Spa village, New York",5443),
("Balmville CDP, New York",3093),
("Bardonia CDP, New York",3839),
("Barker village, New York",588),
("Barneveld CDP, New York",319),
("Barnum Island CDP, New York",2247),
("Batavia city, New York",14719),
("Bath village, New York",5572),
("Baxter Estates village, New York",1065),
("Bay Park CDP, New York",1614),
("Bayport CDP, New York",8392),
("Bay Shore CDP, New York",30685),
("Bayville village, New York",6708),
("Baywood CDP, New York",7389),
("Beacon city, New York",14279),
("Beaver Dam Lake CDP, New York",1654),
("Bedford CDP, New York",2074),
("Bedford Hills CDP, New York",3522),
("Belfast CDP, New York",715),
("Bellerose village, New York",1178),
("Bellerose Terrace CDP, New York",2055),
("Belle Terre village, New York",681),
("Belleville CDP, New York",306),
("Bellmore CDP, New York",15730),
("Bellport village, New York",2008),
("Belmont village, New York",807),
("Bemus Point village, New York",301),
("Bergen village, New York",1118),
("Bethpage CDP, New York",17215),
("Big Flats CDP, New York",5212),
("Billington Heights CDP, New York",1384),
("Binghamton city, New York",45503),
("Binghamton University CDP, New York",6149),
("Black River village, New York",1210),
("Blasdell village, New York",2629),
("Blauvelt CDP, New York",5620),
("Bliss CDP, New York",521),
("Blodgett Mills CDP, New York",236),
("Bloomfield village, New York",1467),
("Bloomingburg village, New York",392),
("Bloomville CDP, New York",160),
("Blue Point CDP, New York",4882),
("Bohemia CDP, New York",9293),
("Bolivar village, New York",965),
("Bolton Landing CDP, New York",507),
("Boonville village, New York",2063),
("Brasher Falls CDP, New York",734),
("Breesport CDP, New York",614),
("Brentwood CDP, New York",62942),
("Brewerton CDP, New York",3931),
("Brewster village, New York",2087),
("Brewster Hill CDP, New York",1945),
("Briarcliff Manor village, New York",7697),
("Bridgehampton CDP, New York",1322),
("Bridgeport CDP, New York",1568),
("Bridgewater CDP, New York",529),
("Brighton CDP, New York",36447),
("Brightwaters village, New York",3069),
("Brinckerhoff CDP, New York",2430),
("Broadalbin village, New York",1466),
("Brockport village, New York",8350),
("Brocton village, New York",1319),
("Bronxville village, New York",6394),
("Brookhaven CDP, New York",3531),
("Brookville village, New York",3564),
("Brownville village, New York",1047),
("Brushton village, New York",456),
("Buchanan village, New York",2132),
("Buffalo city, New York",257518),
("Burdett village, New York",437),
("Burke village, New York",225),
("Busti CDP, New York",334),
("Byersville CDP, New York",29),
("Cairo CDP, New York",1015),
("Calcium CDP, New York",3841),
("Caledonia village, New York",2102),
("Callicoon CDP, New York",224),
("Calverton CDP, New York",7154),
("Cambridge village, New York",1806),
("Camden village, New | |
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36039269-p36053267.7z"),
page_ids=range(36039269, 36053268),
darus_id=93466,
sha1="4a65b307703e63be8947a07ec6e2792325c31e7e",
size=68744982,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36053268-p36104657.7z"),
page_ids=range(36053268, 36104658),
darus_id=93467,
sha1="bd96b574bd870c3fbcfdfc13f1ce85edfedf9ff7",
size=229963408,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36104658-p36168970.7z"),
page_ids=range(36104658, 36168971),
darus_id=93469,
sha1="c7fdb7db23187217d48980fee1e07462ec073cc1",
size=254911964,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36168971-p36235529.7z"),
page_ids=range(36168971, 36235530),
darus_id=93470,
sha1="acac054b0a1b1548002acb88854efdde01acebb8",
size=262267363,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36235530-p36365957.7z"),
page_ids=range(36235530, 36365958),
darus_id=93471,
sha1="8fac47e75792271e0f2c5fc3516f29a3f2cfebc0",
size=370661327,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36365958-p36429171.7z"),
page_ids=range(36365958, 36429172),
darus_id=93472,
sha1="6a63710746478f9677df00f2831419570fe48b12",
size=249921400,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36429172-p36471219.7z"),
page_ids=range(36429172, 36471220),
darus_id=93474,
sha1="38fae8db0a86523dc44d4821b9b4851f23dabc5c",
size=214857867,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36471220-p36513111.7z"),
page_ids=range(36471220, 36513112),
darus_id=93475,
sha1="20070218ea1fd34e9080ef632fd93eba6b72b73f",
size=214818550,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36513112-p36553083.7z"),
page_ids=range(36513112, 36553084),
darus_id=93476,
sha1="94ea9533bc07fd2cab3889213fcd95253fd69933",
size=207729773,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36553084-p36596433.7z"),
page_ids=range(36553084, 36596434),
darus_id=93477,
sha1="b907c3226537bca33847301de03270786cb27db7",
size=219621859,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36596434-p36636496.7z"),
page_ids=range(36596434, 36636497),
darus_id=93478,
sha1="d160e8451222e087eebf764546448368663a9109",
size=200774080,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36636497-p36713234.7z"),
page_ids=range(36636497, 36713235),
darus_id=93479,
sha1="9060bf07041c560855cd0bcde869425d21996368",
size=274065439,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36713235-p36885529.7z"),
page_ids=range(36713235, 36885530),
darus_id=93480,
sha1="e35643f5d4f961b3cf7d95091c29c9565858d892",
size=431095668,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36885530-p36958439.7z"),
page_ids=range(36885530, 36958440),
darus_id=93482,
sha1="913c05b5a6822b54862beb0020dca7b86c7fc18f",
size=271045472,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p36958440-p37005400.7z"),
page_ids=range(36958440, 37005401),
darus_id=93483,
sha1="f8e37ac830f7dcc6e7b417c7d0c757d621103ebe",
size=227793814,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37005401-p37060002.7z"),
page_ids=range(37005401, 37060003),
darus_id=93484,
sha1="d29a3a658438560f862adf9d2172b9d517f8b9c4",
size=245248497,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37060003-p37124890.7z"),
page_ids=range(37060003, 37124891),
darus_id=93485,
sha1="cd765b1fd2e1e0297fc90641f1b9523201d3d210",
size=252411217,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37124891-p37177070.7z"),
page_ids=range(37124891, 37177071),
darus_id=93486,
sha1="239300fcc415a634a40a7a5c754a26841550c35c",
size=237359574,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37177071-p37228850.7z"),
page_ids=range(37177071, 37228851),
darus_id=93487,
sha1="4c62fd1e7cbbd012e5833dcf9ce1c3997cc22d0f",
size=240757203,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37228851-p37270283.7z"),
page_ids=range(37228851, 37270284),
darus_id=93489,
sha1="2fb1382793a9c1fd72cc8ab68096740908a6e39a",
size=209339018,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37270284-p37315029.7z"),
page_ids=range(37270284, 37315030),
darus_id=93491,
sha1="64219f845cb1d85c3c6688c440a863e2683cbef9",
size=221961646,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37315030-p37366189.7z"),
page_ids=range(37315030, 37366190),
darus_id=93492,
sha1="57e50fea21914fb592fd0fd0538b44b502593fc3",
size=231269628,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37366190-p37414744.7z"),
page_ids=range(37366190, 37414745),
darus_id=93493,
sha1="4aea331913bfc61378d2fb41252210b0c243a2f5",
size=222141965,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37414745-p37464749.7z"),
page_ids=range(37414745, 37464750),
darus_id=93494,
sha1="b0a4b24f5a035a26bf4c406d801f28838a9541e6",
size=228792104,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37464750-p37506459.7z"),
page_ids=range(37464750, 37506460),
darus_id=93495,
sha1="1dd3adccb357aa15a18c12bfeb82dea1013499ae",
size=206156266,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37506460-p37556821.7z"),
page_ids=range(37506460, 37556822),
darus_id=93496,
sha1="8d31bdebe1169372772c4a651f62965e8137b912",
size=226891632,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37556822-p37602983.7z"),
page_ids=range(37556822, 37602984),
darus_id=93497,
sha1="e147e48bf2d3fc2e2bdb0fc2ce993fae45436dba",
size=220386430,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37602984-p37647173.7z"),
page_ids=range(37602984, 37647174),
darus_id=93499,
sha1="82b2438cc22ab6c41f3fab8d65a98a9ee9d08cd9",
size=215899495,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37647174-p37687727.7z"),
page_ids=range(37647174, 37687728),
darus_id=93500,
sha1="0b0c1c0aeb23cd0106d84ce2a5b4753ec0e90d0a",
size=207724150,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37687728-p37728222.7z"),
page_ids=range(37687728, 37728223),
darus_id=93501,
sha1="135b4e62a2dcee88d88431495344b3c3449f474b",
size=205012890,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37728223-p37769747.7z"),
page_ids=range(37728223, 37769748),
darus_id=93502,
sha1="d95e0010e05631e28fd79428fed9efc21b54fc94",
size=210337547,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37769748-p37814464.7z"),
page_ids=range(37769748, 37814465),
darus_id=93503,
sha1="ea7163b2543d5c60f421c3833c62b98a9940a04e",
size=216908679,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37814465-p37864993.7z"),
page_ids=range(37814465, 37864994),
darus_id=93504,
sha1="a4ba3e945dd2c0b13ea9302a1f15ca1d2b0ef1cc",
size=235764604,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37864994-p37918932.7z"),
page_ids=range(37864994, 37918933),
darus_id=93505,
sha1="676202d232d4926027d4e2d3a83c4e5a9cc4306c",
size=238630662,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37918933-p37975644.7z"),
page_ids=range(37918933, 37975645),
darus_id=93506,
sha1="fbefee22029558f425e19bb2c2b7743725734c3e",
size=240127859,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p37975645-p38031796.7z"),
page_ids=range(37975645, 38031797),
darus_id=93507,
sha1="a07af20e739513229edd6d58b4f37d96e8883141",
size=243722143,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38031797-p38090367.7z"),
page_ids=range(38031797, 38090368),
darus_id=93508,
sha1="20ed5073aa05e8c18638067b0ad71ccf8c270900",
size=251980501,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38090368-p38142966.7z"),
page_ids=range(38090368, 38142967),
darus_id=93510,
sha1="d8fb86b4d269dd75683cf81b68ff174ed1d29277",
size=234912444,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38142967-p38197646.7z"),
page_ids=range(38142967, 38197647),
darus_id=93511,
sha1="01eee6d4838cd70f7f82451cbdb8a23a138a3b90",
size=243211229,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38197647-p38252279.7z"),
page_ids=range(38197647, 38252280),
darus_id=93512,
sha1="a41ac7648e426e9026dca3d2d4e5a2d38fa07f59",
size=248213653,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38252280-p38305713.7z"),
page_ids=range(38252280, 38305714),
darus_id=93513,
sha1="b30a078c93e5fcf73614d1f0b5de7efff1820db7",
size=253860141,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38305714-p38354751.7z"),
page_ids=range(38305714, 38354752),
darus_id=93514,
sha1="9d2d610588cea501b7b09a3d9e1239c366e24f07",
size=239015115,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38354752-p38402815.7z"),
page_ids=range(38354752, 38402816),
darus_id=93515,
sha1="136320b96d60190a5f952ac4ed6b5315ee898462",
size=225508472,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38402816-p38450655.7z"),
page_ids=range(38402816, 38450656),
darus_id=93516,
sha1="8b76b9005fd86486e6c9b6c189bc4539eb4396a8",
size=217819227,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38450656-p38495318.7z"),
page_ids=range(38450656, 38495319),
darus_id=93517,
sha1="8d6d042d617707c39179a18f058398f2f0fc636c",
size=206479953,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38495319-p38535378.7z"),
page_ids=range(38495319, 38535379),
darus_id=93518,
sha1="c66a8fb5e24bd77544748629085451a9aa07457d",
size=186127456,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38535379-p38577979.7z"),
page_ids=range(38535379, 38577980),
darus_id=93520,
sha1="5359bccec604b53205e77c148d80fab06857f76f",
size=197790333,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38577980-p38618663.7z"),
page_ids=range(38577980, 38618664),
darus_id=93521,
sha1="66965311e0ed6bd8213c288e9c52bc9e09bdeee1",
size=191889188,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38618664-p38660578.7z"),
page_ids=range(38618664, 38660579),
darus_id=93522,
sha1="0c9d1f78c3294dc2f8e5d481d61083a6a0aed4f1",
size=196440112,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38660579-p38701800.7z"),
page_ids=range(38660579, 38701801),
darus_id=93523,
sha1="5eaf1c97738eaf1ef3c2c378b8f8c4a3dbc29b3b",
size=192876180,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38701801-p38739960.7z"),
page_ids=range(38701801, 38739961),
darus_id=93524,
sha1="e750be50af02e49ab53050721fe544bcaa629757",
size=179809428,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38739961-p38779372.7z"),
page_ids=range(38739961, 38779373),
darus_id=93525,
sha1="91b202f8feb6350c96e2bc9057bfb976b48342e9",
size=181453005,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38779373-p38820897.7z"),
page_ids=range(38779373, 38820898),
darus_id=93526,
sha1="ee68db0fc23a54072d39f60975074e76fc66c956",
size=198209698,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38820898-p38871723.7z"),
page_ids=range(38820898, 38871724),
darus_id=93527,
sha1="87fc885f8076425c029940ceccf5f823e016c635",
size=246572622,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38871724-p38924956.7z"),
page_ids=range(38871724, 38924957),
darus_id=93528,
sha1="e2d8ff281b0b1d570f3c000163cc844401a762f8",
size=259410370,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38924957-p38970438.7z"),
page_ids=range(38924957, 38970439),
darus_id=93529,
sha1="56674018098407fd575c11e32c6afb1c95bc22ee",
size=213112979,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p38970439-p39006737.7z"),
page_ids=range(38970439, 39006738),
darus_id=93531,
sha1="93ab1cc65385266142743fad1d2e7a6ff699a7cf",
size=166809190,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39006738-p39051754.7z"),
page_ids=range(39006738, 39051755),
darus_id=93532,
sha1="22965f37eee3d89cccb14815ce361ef5ce6c65a0",
size=186847319,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39051755-p39086005.7z"),
page_ids=range(39051755, 39086006),
darus_id=93533,
sha1="46a2f6327fd0dd711d070a6b95f3aa335726bc5f",
size=157868750,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39086006-p39139180.7z"),
page_ids=range(39086006, 39139181),
darus_id=93534,
sha1="9d62c397eccb8fd5e482795884655e443618fec4",
size=202655029,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39139181-p39227918.7z"),
page_ids=range(39139181, 39227919),
darus_id=93535,
sha1="e4eda2a539a3a23e9c5ad13e463da31bf7e291e3",
size=296950501,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39227919-p39333654.7z"),
page_ids=range(39227919, 39333655),
darus_id=93536,
sha1="ab71f5fe0675ee12f2d1100ddc5d855f9a267553",
size=344122391,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39333655-p39405692.7z"),
page_ids=range(39333655, 39405693),
darus_id=93538,
sha1="48fb7b09e53f5842f7b3c33a0ce6a684f75cac49",
size=263172159,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39405693-p39472185.7z"),
page_ids=range(39405693, 39472186),
darus_id=93539,
sha1="98a35d6b1c34d9f38cd2b3cba82ab7a0b5b101da",
size=251357931,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39472186-p39534989.7z"),
page_ids=range(39472186, 39534990),
darus_id=93540,
sha1="f0667b8c9e34b6499cf67b8672cb1bc0690656e6",
size=238166500,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39534990-p39592016.7z"),
page_ids=range(39534990, 39592017),
darus_id=93542,
sha1="de0021b29926c71abf7bd2166ceecbd33f668bbc",
size=227602494,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39592017-p39654904.7z"),
page_ids=range(39592017, 39654905),
darus_id=93543,
sha1="a69d4baa52b3aef14355816c682223e4ed94f58c",
size=251828400,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39654905-p39727621.7z"),
page_ids=range(39654905, 39727622),
darus_id=93544,
sha1="075552c9ee7fedd9534f4e691bab6373556b7418",
size=298391468,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39727622-p39799897.7z"),
page_ids=range(39727622, 39799898),
darus_id=93547,
sha1="6495d39e2765f14cf9c82ee59447721a26077d1d",
size=292351382,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39799898-p39900705.7z"),
page_ids=range(39799898, 39900706),
darus_id=93548,
sha1="8cc60e65c9958138f3cf33bbc7d83168437dfbde",
size=366652748,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39900706-p39984567.7z"),
page_ids=range(39900706, 39984568),
darus_id=93549,
sha1="da7b40cc528ae6026d921b962198da9003b9537c",
size=322770624,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p39984568-p40036256.7z"),
page_ids=range(39984568, 40036257),
darus_id=93550,
sha1="fa03f56a3771567cb54d6d017b2df258074e5bc8",
size=229946469,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40036257-p40100898.7z"),
page_ids=range(40036257, 40100899),
darus_id=93551,
sha1="8eac08d740880cd6a9f9ab54f41b898929e9ed9e",
size=279564412,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40100899-p40175109.7z"),
page_ids=range(40100899, 40175110),
darus_id=93553,
sha1="5a89eaac20fa2020c18c83d442124c4bea5923a2",
size=307466061,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40175110-p40222795.7z"),
page_ids=range(40175110, 40222796),
darus_id=93554,
sha1="720c987869b5a773b9d819c31cf1b1436b8cbc6b",
size=205201349,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40222796-p40299538.7z"),
page_ids=range(40222796, 40299539),
darus_id=93555,
sha1="19ad6878218e4117dd17f572c5369c118977c8bc",
size=323389064,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40299539-p40374659.7z"),
page_ids=range(40299539, 40374660),
darus_id=93556,
sha1="50486b37a950d3a7ec44451c27600ab1f2566cf6",
size=317793331,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40374660-p40470100.7z"),
page_ids=range(40374660, 40470101),
darus_id=93557,
sha1="27975d07a7dba0751c4241ab2708057982610605",
size=347679151,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40470101-p40561140.7z"),
page_ids=range(40470101, 40561141),
darus_id=93558,
sha1="2c3a3b95db9581e0153de3ca0d418254691bc31b",
size=352824355,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40561141-p40653502.7z"),
page_ids=range(40561141, 40653503),
darus_id=93560,
sha1="ea7bdc1d60fb5703bf60b2e04f773e12a61b6f26",
size=362625642,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40653503-p40746073.7z"),
page_ids=range(40653503, 40746074),
darus_id=93562,
sha1="5e96eedb8f0033ba17c13e367e08b0f701d1c94d",
size=369706642,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40746074-p40841894.7z"),
page_ids=range(40746074, 40841895),
darus_id=93563,
sha1="b97e7451e14d02a024fbd9a090f01ce55fb0da2f",
size=375785023,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40841895-p40953053.7z"),
page_ids=range(40841895, 40953054),
darus_id=93564,
sha1="27131e046944831d5758fd85f748b0f304d9882c",
size=416473303,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p40953054-p41065960.7z"),
page_ids=range(40953054, 41065961),
darus_id=93566,
sha1="ecc991c1f30e2fc8d09bab3d5c4a0dcb15add989",
size=420803100,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p41065961-p41140055.7z"),
page_ids=range(41065961, 41140056),
darus_id=93567,
sha1="e974576cd0fa6b1d2086a1c851614a0c40bf08bc",
size=286244386,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p41140056-p41253534.7z"),
page_ids=range(41140056, 41253535),
darus_id=93568,
sha1="f16d8f49455b9a1bfdcc527af9098228fdb386bc",
size=435901989,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p41253535-p41371464.7z"),
page_ids=range(41253535, 41371465),
darus_id=93569,
sha1="bbc38a95d0bd5e2d3774efbdcc8cd9896dc4c7d1",
size=447973328,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p41371465-p41472564.7z"),
page_ids=range(41371465, 41472565),
darus_id=93572,
sha1="84d7f80048f1417b908f4c67937b3ca038b95ebf",
size=384428220,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p41472565-p41601112.7z"),
page_ids=range(41472565, 41601113),
darus_id=93573,
sha1="5843b7eed409384cd200abf653c8fb2bb97e7863",
size=460377024,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p41601113-p41739539.7z"),
page_ids=range(41601113, 41739540),
darus_id=93574,
sha1="0c19a7c8441b0e4e064bac5d1318941e29109a91",
size=481023198,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p41739540-p41868272.7z"),
page_ids=range(41739540, 41868273),
darus_id=93576,
sha1="576815aca2b861f050262745f1afbe2c7d3b7dea",
size=469359485,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p41868273-p42020811.7z"),
page_ids=range(41868273, 42020812),
darus_id=93577,
sha1="e9dc5696bbb5e1c8110966de8d7e7525f08e60c1",
size=523025932,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p42020812-p42171852.7z"),
page_ids=range(42020812, 42171853),
darus_id=93578,
sha1="2751b1ad590d9c94e511cee2c2f2896e71f73556",
size=531851985,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p42171853-p42259443.7z"),
page_ids=range(42171853, 42259444),
darus_id=93580,
sha1="f356f83415b68419045c641e2f8a7427702584b2",
size=314183180,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p42259444-p42412936.7z"),
page_ids=range(42259444, 42412937),
darus_id=93581,
sha1="3c25cdeb96cd443f5674850d59dbb1567d2020c0",
size=521683130,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p42412937-p42570729.7z"),
page_ids=range(42412937, 42570730),
darus_id=93582,
sha1="e5ada297e0ff56f60bd7e57dd21e6c327752ea24",
size=515020680,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p42570730-p42729701.7z"),
page_ids=range(42570730, 42729702),
darus_id=93584,
sha1="6772a703bf23f5c6fe06e600142d19cbc144142d",
size=480995048,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p42729702-p42881274.7z"),
page_ids=range(42729702, 42881275),
darus_id=93585,
sha1="e691fa062b2bf0c8d9c4ab1f090d0f847e3a644c",
size=505121486,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix | |
# Week 3 PA 1: Your first CNN on CIFAR-10 (week3_task1_first_cnn_cifar10_clean.py)
# coding: utf-8
# # Your first CNN on CIFAR-10
#
# In this task you will:
# * define your first CNN architecture for CIFAR-10 dataset
# * train it from scratch
# * visualize learnt filters
#
# CIFAR-10 dataset contains 32x32 color images from 10 classes: __airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck__:
# <img src="images/cifar10.jpg" style="width:80%">
# # Import stuff
# In[1]:
import sys
# Make the course's shared helper modules (one directory up) importable.
sys.path.append("..")
import grading
import download_utils
# In[2]:
# Link the pre-downloaded Keras resources (weights/datasets) into place so the
# notebook does not re-download them -- presumably a course-environment helper;
# see download_utils for details.
download_utils.link_all_keras_resources()
# In[3]:
import tensorflow as tf
import keras
from keras import backend as K
import numpy as np
# Notebook-export artifact: enables inline matplotlib rendering in Jupyter.
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
# Record framework versions for reproducibility/debugging.
print(tf.__version__)
print(keras.__version__)
import grading_utils
import keras_utils
# # Fill in your Coursera token and email
# To successfully submit your answers to our grader, please fill in your Coursera submission token and email
# In[4]:
grader = grading.Grader(assignment_key="s1B1I5DuEeeyLAqI7dCYkg",
all_parts=["7W4tu", "nQOsg", "96eco"])
# In[29]:
# token expires every 30 min
COURSERA_TOKEN = "cnM8yfby4NgOgesV"
COURSERA_EMAIL = "<EMAIL>"
# # Load dataset
# In[6]:
from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# In[7]:
print("Train samples:", x_train.shape, y_train.shape)
print("Test samples:", x_test.shape, y_test.shape)
# In[8]:
NUM_CLASSES = 10
cifar10_classes = ["airplane", "automobile", "bird", "cat", "deer",
"dog", "frog", "horse", "ship", "truck"]
# In[9]:
# show random images from train (a cols x rows grid of randomly sampled examples)
cols = 8
rows = 2
fig = plt.figure(figsize=(2 * cols - 1, 2.5 * rows - 1))
for i in range(cols):
    for j in range(rows):
        random_index = np.random.randint(0, len(y_train))
        ax = fig.add_subplot(rows, cols, i * rows + j + 1)
        # BUG FIX: ax.grid('off') used the long-deprecated string form — the
        # string is rejected by modern matplotlib; pass the boolean instead.
        ax.grid(False)
        ax.axis('off')  # hide ticks and frame around each thumbnail
        ax.imshow(x_train[random_index, :])
        ax.set_title(cifar10_classes[y_train[random_index, 0]])
plt.show()
# # Prepare data

# We need to normalize inputs like this: $$x_{norm} = \frac{x}{255} - 0.5$$
#
# We need to convert class labels to one-hot encoded vectors. Use __keras.utils.to_categorical__.

# In[10]:
# normalize inputs into roughly [-0.5, 0.5]
# NOTE(review): assumes raw pixels are integers in [0, 255]; division by 255.
# promotes to float — confirm against the loader's dtype.
x_train2 = x_train / 255. - 0.5
x_test2 = x_test / 255. - 0.5

# convert class labels to one-hot encoded, should have shape (?, NUM_CLASSES)
y_train2 = keras.utils.to_categorical(y_train, num_classes=NUM_CLASSES)
y_test2 = keras.utils.to_categorical(y_test, num_classes=NUM_CLASSES)
# # Define CNN architecture
# In[11]:
# import necessary building blocks
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, Dropout
from keras.layers.advanced_activations import LeakyReLU
# Convolutional networks are built from several types of layers:
# - [Conv2D](https://keras.io/layers/convolutional/#conv2d) - performs convolution:
# - **filters**: number of output channels;
# - **kernel_size**: an integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window;
# - **padding**: padding="same" adds zero padding to the input, so that the output has the same width and height, padding='valid' performs convolution only in locations where kernel and the input fully overlap;
# - **activation**: "relu", "tanh", etc.
# - **input_shape**: shape of input.
# - [MaxPooling2D](https://keras.io/layers/pooling/#maxpooling2d) - performs 2D max pooling.
# - [Flatten](https://keras.io/layers/core/#flatten) - flattens the input, does not affect the batch size.
# - [Dense](https://keras.io/layers/core/#dense) - fully-connected layer.
# - [Activation](https://keras.io/layers/core/#activation) - applies an activation function.
# - [LeakyReLU](https://keras.io/layers/advanced-activations/#leakyrelu) - applies leaky relu activation.
# - [Dropout](https://keras.io/layers/core/#dropout) - applies dropout.
# You need to define a model which takes __(None, 32, 32, 3)__ input and predicts __(None, 10)__ output with probabilities for all classes. __None__ in shapes stands for batch dimension.
#
# Simple feed-forward networks in Keras can be defined in the following way:
#
# ```python
# model = Sequential() # start feed-forward model definition
# model.add(Conv2D(..., input_shape=(32, 32, 3))) # first layer needs to define "input_shape"
#
# ... # here comes a bunch of convolutional, pooling and dropout layers
#
# model.add(Dense(NUM_CLASSES)) # the last layer with neuron for each class
# model.add(Activation("softmax")) # output probabilities
# ```
#
# Stack __4__ convolutional layers with kernel size __(3, 3)__ with growing number of filters __(16, 32, 32, 64)__, use "same" padding.
#
# Add __2x2__ pooling layer after every 2 convolutional layers (conv-conv-pool scheme).
#
# Use __LeakyReLU__ activation with recommended parameter __0.1__ for all layers that need it (after convolutional and dense layers):
# ```python
# model.add(LeakyReLU(0.1))
# ```
#
# Add a dense layer with __256__ neurons and a second dense layer with __10__ neurons for classes. Remember to use __Flatten__ layer before first dense layer to reshape input volume into a flat vector!
#
# Add __Dropout__ after every pooling layer (__0.25__) and between dense layers (__0.5__).
# In[12]:
def make_model():
    """
    Define the CIFAR-10 CNN architecture required by the assignment.

    Architecture:
        [Conv 16 -> Conv 32 -> MaxPool 2x2 -> Dropout 0.25]
        [Conv 32 -> Conv 64 -> MaxPool 2x2 -> Dropout 0.25]
        Flatten -> Dense 256 -> Dropout 0.5 -> Dense NUM_CLASSES -> softmax

    All convolutions are 3x3 with "same" padding; every conv layer and the
    first dense layer are followed by LeakyReLU(0.1) as the spec requires.

    Returns `Sequential` model.
    """
    model = Sequential()
    # block 1: conv-conv-pool (strides default to (1, 1))
    model.add(Conv2D(16, (3, 3), padding="same", name='conv1', input_shape=(32, 32, 3)))
    model.add(LeakyReLU(0.1))
    model.add(Conv2D(32, (3, 3), padding="same", name='conv2'))
    model.add(LeakyReLU(0.1))
    model.add(MaxPooling2D((2, 2), name='max_pool_1'))
    model.add(Dropout(0.25, seed=0))  # fixed seed for reproducible dropout masks
    # block 2: conv-conv-pool
    model.add(Conv2D(32, (3, 3), padding="same", name='conv3'))
    model.add(LeakyReLU(0.1))
    model.add(Conv2D(64, (3, 3), padding="same", name='conv4'))
    model.add(LeakyReLU(0.1))
    model.add(MaxPooling2D((2, 2), name='max_pool_2'))
    model.add(Dropout(0.25, seed=0))
    # classifier head
    model.add(Flatten())
    model.add(Dense(256, name='fc1'))
    # BUG FIX: the assignment asks for LeakyReLU after dense layers as well;
    # the original omitted it after fc1 (the parameter count is unaffected).
    model.add(LeakyReLU(0.1))
    model.add(Dropout(0.5, seed=0))
    model.add(Dense(NUM_CLASSES))  # the last layer with neuron for each class
    model.add(Activation("softmax"))  # output probabilities
    return model
# In[13]:
# describe model
K.clear_session()  # clear default graph
model = make_model()
model.summary()

# In[21]:
## GRADED PART, DO NOT CHANGE!
# Number of model parameters
grader.set_answer("7W4tu", grading_utils.model_total_params(model))

# In[22]:
# you can make submission with answers so far to check yourself at this stage
grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)

# # Train model

# Training of your model can take approx. 4-8 minutes per epoch.
#
# During training you should observe the decrease in reported loss on training and validation.
#
# If the loss on training is not decreasing with epochs you should revise your model definition and learning rate.

# In[23]:
INIT_LR = 5e-3  # initial learning rate
BATCH_SIZE = 32
EPOCHS = 10

K.clear_session()  # clear default graph
# don't call K.set_learning_phase() !!! (otherwise will enable dropout in train/test simultaneously)
model = make_model()  # define our model

# prepare model for fitting (loss, optimizer, etc)
model.compile(
    loss='categorical_crossentropy',  # we train 10-way classification
    optimizer=keras.optimizers.adamax(lr=INIT_LR),  # Adamax (lowercase alias in Keras 2.x), not SGD
    metrics=['accuracy']  # report accuracy during training
)
def lr_scheduler(epoch):
    """Exponential decay schedule: shrink the initial learning rate by 10% per epoch."""
    decay_factor = 0.9 ** epoch
    return INIT_LR * decay_factor
# callback for printing of actual learning rate used by optimizer
class LrHistory(keras.callbacks.Callback):
    """Prints the learning rate the optimizer will use at the start of each epoch."""

    def on_epoch_begin(self, epoch, logs=None):
        # BUG FIX: `logs={}` was a mutable default argument; Keras passes `logs`
        # explicitly and `None` is the conventional default.
        # Read the model from the callback (Keras attaches it as `self.model`
        # during fit) instead of relying on the module-level `model` global.
        print("Learning rate:", K.get_value(self.model.optimizer.lr))
# fit model
model.fit(
    x_train2, y_train2,  # prepared data
    batch_size=BATCH_SIZE,
    epochs=EPOCHS,
    callbacks=[keras.callbacks.LearningRateScheduler(lr_scheduler), LrHistory(), keras_utils.TqdmProgressCallback()],
    validation_data=(x_test2, y_test2),
    shuffle=True,
    verbose=0  # progress is reported by TqdmProgressCallback instead
)

# In[24]:
# save weights to file
model.save_weights("weights.h5")

# In[25]:
# load weights from file (can call without model.fit)
model.load_weights("weights.h5")

# # Evaluate model

# In[26]:
# make test predictions
# NOTE(review): predict_proba was removed in later Keras versions; model.predict
# returns the same softmax probabilities here — confirm before upgrading Keras.
y_pred_test = model.predict_proba(x_test2)
y_pred_test_classes = np.argmax(y_pred_test, axis=1)  # most likely class per sample
y_pred_test_max_probas = np.max(y_pred_test, axis=1)  # its probability

# In[27]:
# confusion matrix and accuracy
from sklearn.metrics import confusion_matrix, accuracy_score
plt.figure(figsize=(7, 6))
plt.title('Confusion matrix', fontsize=16)
plt.imshow(confusion_matrix(y_test, y_pred_test_classes))
plt.xticks(np.arange(10), cifar10_classes, rotation=45, fontsize=12)
plt.yticks(np.arange(10), cifar10_classes, fontsize=12)
plt.colorbar()
plt.show()
print("Test accuracy:", accuracy_score(y_test, y_pred_test_classes))

# In[28]:
## GRADED PART, DO NOT CHANGE!
# Accuracy on validation data
grader.set_answer("nQOsg", accuracy_score(y_test, y_pred_test_classes))

# In[30]:
# you can make submission with answers so far to check yourself at this stage
grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)

# In[ ]:
# inspect predictions: show random test images with predicted vs. true labels
cols = 8
rows = 2
fig = plt.figure(figsize=(2 * cols - 1, 3 * rows - 1))
for i in range(cols):
    for j in range(rows):
        random_index = np.random.randint(0, len(y_test))
        ax = fig.add_subplot(rows, cols, i * rows + j + 1)
        # BUG FIX: ax.grid('off') used the long-deprecated string form — the
        # string is rejected by modern matplotlib; pass the boolean instead.
        ax.grid(False)
        ax.axis('off')
        ax.imshow(x_test[random_index, :])
        pred_label = cifar10_classes[y_pred_test_classes[random_index]]
        pred_proba = y_pred_test_max_probas[random_index]
        true_label = cifar10_classes[y_test[random_index, 0]]
        ax.set_title("pred: {}\nscore: {:.3}\ntrue: {}".format(
            pred_label, pred_proba, true_label
        ))
plt.show()
# # Visualize maximum stimuli

# We want to find input images that provide maximum activations for particular layers of our network.
#
# We will find those maximum stimuli via gradient ascent in image space.
#
# For that task we load our model weights, calculate the layer output gradient with respect to image input and shift input image in that direction.

# In[31]:
K.clear_session()  # clear default graph
K.set_learning_phase(0)  # disable dropout (inference mode for gradient ascent)
model = make_model()
model.load_weights("weights.h5")  # that were saved after model.fit

# In[32]:
# all weights we have
model.summary()

# In[33]:
def find_maximum_stimuli(layer_name, is_conv, filter_index, model, iterations=20, step=1., verbose=True):
def image_values_to_rgb(x):
# normalize x: center on 0 (np.mean(x_train2)), ensure std is 0.25 (np.std(x_train2))
# so that it looks like a normalized image input for our network
x = (x - np.mean(x_train2)) / np.std(x_train2)
# do reverse normalization to RGB values: x = (x_norm + 0.5) * 255
x = (x + 0.5) * 255
# clip values to [0, 255] and convert to bytes
x = np.clip(x, 0, 255).astype('uint8')
return x
# this is the placeholder for the input image
input_img | |
# Repository: tweak-com-public/tweak-api-client-python
# coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class BillingSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self, id=None, object='source', amount=None, client_secret=None, created=None, currency=None, flow=None, livemode=None, owner=None, receiver=None, statement_descriptor=None, status=None, type=None, usage=None, ach_credit_transfer=None, sepa_debit=None, sofort=None, redirect=None, token=None):
        """
        BillingSource - a model defined in Swagger

        Every field is optional and defaults to None, except `object` which
        defaults to the literal 'source'.

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # declared attribute types, consumed by to_dict()/serialization helpers
        self.swagger_types = {
            'id': 'str',
            'object': 'str',
            'amount': 'float',
            'client_secret': 'str',
            'created': 'float',
            'currency': 'str',
            'flow': 'str',
            'livemode': 'bool',
            'owner': 'BillingSourceOwner',
            'receiver': 'BillingSourceReceiver',
            'statement_descriptor': 'str',
            'status': 'str',
            'type': 'str',
            'usage': 'str',
            'ach_credit_transfer': 'BillingSourceAchCreditTransfer',
            'sepa_debit': 'BillingSourceSepaDebit',
            'sofort': 'BillingSourceSofort',
            'redirect': 'BillingSourceRedirect',
            'token': 'str'
        }
        # python attribute name -> JSON key in the wire format
        self.attribute_map = {
            'id': 'id',
            'object': 'object',
            'amount': 'amount',
            'client_secret': 'clientSecret',
            'created': 'created',
            'currency': 'currency',
            'flow': 'flow',
            'livemode': 'livemode',
            'owner': 'owner',
            'receiver': 'receiver',
            'statement_descriptor': 'statementDescriptor',
            'status': 'status',
            'type': 'type',
            'usage': 'usage',
            'ach_credit_transfer': 'achCreditTransfer',
            'sepa_debit': 'sepaDebit',
            'sofort': 'sofort',
            'redirect': 'redirect',
            'token': 'token'
        }
        # NOTE: values are assigned to the private backing fields directly, so
        # property setters never run here — in particular the `type` setter's
        # allowed-values validation is bypassed for constructor arguments.
        self._id = id
        self._object = object
        self._amount = amount
        self._client_secret = client_secret
        self._created = created
        self._currency = currency
        self._flow = flow
        self._livemode = livemode
        self._owner = owner
        self._receiver = receiver
        self._statement_descriptor = statement_descriptor
        self._status = status
        self._type = type
        self._usage = usage
        self._ach_credit_transfer = ach_credit_transfer
        self._sepa_debit = sepa_debit
        self._sofort = sofort
        self._redirect = redirect
        self._token = token
@property
def id(self):
"""
Gets the id of this BillingSource.
:return: The id of this BillingSource.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this BillingSource.
:param id: The id of this BillingSource.
:type: str
"""
self._id = id
@property
def object(self):
"""
Gets the object of this BillingSource.
:return: The object of this BillingSource.
:rtype: str
"""
return self._object
@object.setter
def object(self, object):
"""
Sets the object of this BillingSource.
:param object: The object of this BillingSource.
:type: str
"""
self._object = object
@property
def amount(self):
"""
Gets the amount of this BillingSource.
:return: The amount of this BillingSource.
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""
Sets the amount of this BillingSource.
:param amount: The amount of this BillingSource.
:type: float
"""
self._amount = amount
@property
def client_secret(self):
"""
Gets the client_secret of this BillingSource.
:return: The client_secret of this BillingSource.
:rtype: str
"""
return self._client_secret
@client_secret.setter
def client_secret(self, client_secret):
"""
Sets the client_secret of this BillingSource.
:param client_secret: The client_secret of this BillingSource.
:type: str
"""
self._client_secret = client_secret
@property
def created(self):
"""
Gets the created of this BillingSource.
:return: The created of this BillingSource.
:rtype: float
"""
return self._created
@created.setter
def created(self, created):
"""
Sets the created of this BillingSource.
:param created: The created of this BillingSource.
:type: float
"""
self._created = created
@property
def currency(self):
"""
Gets the currency of this BillingSource.
:return: The currency of this BillingSource.
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""
Sets the currency of this BillingSource.
:param currency: The currency of this BillingSource.
:type: str
"""
self._currency = currency
@property
def flow(self):
"""
Gets the flow of this BillingSource.
:return: The flow of this BillingSource.
:rtype: str
"""
return self._flow
@flow.setter
def flow(self, flow):
"""
Sets the flow of this BillingSource.
:param flow: The flow of this BillingSource.
:type: str
"""
self._flow = flow
@property
def livemode(self):
"""
Gets the livemode of this BillingSource.
:return: The livemode of this BillingSource.
:rtype: bool
"""
return self._livemode
@livemode.setter
def livemode(self, livemode):
"""
Sets the livemode of this BillingSource.
:param livemode: The livemode of this BillingSource.
:type: bool
"""
self._livemode = livemode
@property
def owner(self):
"""
Gets the owner of this BillingSource.
:return: The owner of this BillingSource.
:rtype: BillingSourceOwner
"""
return self._owner
@owner.setter
def owner(self, owner):
"""
Sets the owner of this BillingSource.
:param owner: The owner of this BillingSource.
:type: BillingSourceOwner
"""
self._owner = owner
@property
def receiver(self):
"""
Gets the receiver of this BillingSource.
:return: The receiver of this BillingSource.
:rtype: BillingSourceReceiver
"""
return self._receiver
@receiver.setter
def receiver(self, receiver):
"""
Sets the receiver of this BillingSource.
:param receiver: The receiver of this BillingSource.
:type: BillingSourceReceiver
"""
self._receiver = receiver
@property
def statement_descriptor(self):
"""
Gets the statement_descriptor of this BillingSource.
:return: The statement_descriptor of this BillingSource.
:rtype: str
"""
return self._statement_descriptor
@statement_descriptor.setter
def statement_descriptor(self, statement_descriptor):
"""
Sets the statement_descriptor of this BillingSource.
:param statement_descriptor: The statement_descriptor of this BillingSource.
:type: str
"""
self._statement_descriptor = statement_descriptor
@property
def status(self):
"""
Gets the status of this BillingSource.
:return: The status of this BillingSource.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this BillingSource.
:param status: The status of this BillingSource.
:type: str
"""
self._status = status
@property
def type(self):
"""
Gets the type of this BillingSource.
:return: The type of this BillingSource.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this BillingSource.
:param type: The type of this BillingSource.
:type: str
"""
allowed_values = ["sepa_debit", "sofort"]
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}"
.format(type, allowed_values)
)
self._type = type
@property
def usage(self):
"""
Gets the usage of this BillingSource.
:return: The usage of this BillingSource.
:rtype: str
"""
return self._usage
@usage.setter
def usage(self, usage):
"""
Sets the usage of this BillingSource.
:param usage: The usage of this BillingSource.
:type: str
"""
self._usage = usage
@property
def ach_credit_transfer(self):
"""
Gets the ach_credit_transfer of this BillingSource.
:return: The ach_credit_transfer of this BillingSource.
:rtype: BillingSourceAchCreditTransfer
"""
return self._ach_credit_transfer
@ach_credit_transfer.setter
def ach_credit_transfer(self, ach_credit_transfer):
"""
Sets the ach_credit_transfer of this BillingSource.
:param ach_credit_transfer: The ach_credit_transfer of this BillingSource.
:type: BillingSourceAchCreditTransfer
"""
self._ach_credit_transfer = ach_credit_transfer
@property
def sepa_debit(self):
"""
Gets the sepa_debit of this BillingSource.
:return: The sepa_debit of this BillingSource.
:rtype: BillingSourceSepaDebit
"""
return self._sepa_debit
@sepa_debit.setter
def sepa_debit(self, sepa_debit):
"""
Sets the sepa_debit of this BillingSource.
:param sepa_debit: The sepa_debit of this BillingSource.
:type: BillingSourceSepaDebit
"""
self._sepa_debit = sepa_debit
@property
def sofort(self):
"""
Gets the sofort of this BillingSource.
:return: The sofort of this BillingSource.
:rtype: BillingSourceSofort
"""
return self._sofort
@sofort.setter
def sofort(self, sofort):
"""
Sets the sofort of this BillingSource.
:param sofort: The sofort of this BillingSource.
:type: BillingSourceSofort
"""
self._sofort = sofort
@property
def redirect(self):
"""
Gets the redirect of this BillingSource.
:return: The redirect of this BillingSource.
:rtype: BillingSourceRedirect
"""
return self._redirect
@redirect.setter
def redirect(self, redirect):
"""
Sets the redirect of this BillingSource.
:param redirect: The redirect of this BillingSource.
:type: BillingSourceRedirect
"""
self._redirect = redirect
@property
def token(self):
"""
Gets the token of this BillingSource.
:return: The token of this BillingSource.
:rtype: str
"""
return self._token
@token.setter
def token(self, token):
"""
Sets the token of this BillingSource.
:param token: The token of this BillingSource.
:type: str
"""
self._token = token
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
| |
# gh_stars: 1-10
# -*- coding: utf-8 -*-
import datetime
from .client import ETGClient
from .models import (
GuestData,
)
class ETGHotelsClient(ETGClient):
def autocomplete(self, query,
language=None):
"""Finds regions and hotels by a part of their names.
:param query: part of hotel or region name.
:type query: str
:param language: (optional) language of the response, e.g. 'en', 'ru'.
:type language: str
:return: suggested hotels and regions, no more than 5 objects for each category.
:rtype: dict
"""
endpoint = 'api/b2b/v3/search/multicomplete/'
data = {
'query': query,
'language': language,
}
response = self.request('POST', endpoint, data=data)
return response
def search(self, ids, checkin, checkout, guests,
currency=None, residency=None, timeout=None, upsells=None,
language=None):
"""Searches hotels with available accommodation that meets the given conditions.
It is not recommended to let the users choose the rates from this method response.
:param ids: list of hotels identifiers or region identifier.
:type ids: list[str] or int
:param checkin: check-in date, no later than 366 days from today.
:type checkin: datetime.date
:param checkout: check-out date, no later than 30 days from check-in date.
:type checkout: datetime.date
:param guests: list of guests in the rooms.
The max number of rooms in one request is 6.
:type guests: list[GuestData]
:param currency: (optional) currency code of the rooms rate in the response, e.g. 'GBP', 'USD', 'RUB'.
Default value is contract currency.
:type currency: str or None
:param residency: (optional) guest's (or multiple guests') nationality.
Use it in case there are doubts regarding country/hotel policy towards citizens of a specific country.
Value format is specified by standard 'ISO 3166-1 alpha-2', e.g. 'gb', 'us', 'ru'.
:type residency: str or None
:param timeout: (optional) response timeout in seconds.
:type timeout: int or None
:param upsells: (optional) additional services request.
:type upsells: dict or None
:param language: (optional) language of static information in the response, e.g. 'en', 'ru'.
Default value is contract language.
:type language: str or None
:return: list of available hotels.
:rtype: list
"""
endpoint = None
if isinstance(ids, list):
endpoint = 'api/b2b/v3/search/serp/hotels/'
elif isinstance(ids, int):
endpoint = 'api/b2b/v3/search/serp/region/'
data = {
'ids': ids,
'region_id': ids,
'checkin': checkin.strftime('%Y-%m-%d'),
'checkout': checkout.strftime('%Y-%m-%d'),
'guests': guests,
'currency': currency,
'residency': residency,
'timeout': timeout,
'upsells': upsells if upsells is not None else {},
'language': language,
}
response = self.request('POST', endpoint, data=data)
hotels = list()
if isinstance(response, dict):
hotels = response.get('hotels')
return hotels
def search_by_hotels(self, ids, checkin, checkout, guests, **kwargs):
"""Searches hotels with available accommodation that meets the given conditions.
:param ids: list of hotels identifiers.
:type ids: list[str]
:param checkin: check-in date, no later than 366 days from today.
:type checkin: datetime.date
:param checkout: check-out date, no later than 30 days from check-in date.
:type checkout: datetime.date
:param guests: list of guests in the rooms.
The max number of rooms in one request is 6.
:type guests: list[GuestData]
:param kwargs: optional parameters.
For more information, see the description of ``self.search`` method.
:return: list of available hotels (Hotels Search Engine Results Page).
:rtype: list
"""
return self.search(ids, checkin, checkout, guests, **kwargs)
def search_by_region(self, region_id, checkin, checkout, guests, **kwargs):
"""Searches hotels with available accommodation that meets the given conditions.
:param region_id: region identifier.
:type region_id: int
:param checkin: check-in date, no later than 366 days from today.
:type checkin: datetime.date
:param checkout: check-out date, no later than 30 days from check-in date.
:type checkout: datetime.date
:param guests: list of guests in the rooms.
The max number of rooms in one request is 6.
:type guests: list[GuestData]
:param kwargs: optional parameters.
For more information, see the description of ``self.search`` method.
:return: list of available hotels (Region Search Engine Results Page).
:rtype: list
"""
return self.search(region_id, checkin, checkout, guests, **kwargs)
def hotelpage(self, hotel_id, checkin, checkout, guests,
currency=None, residency=None, upsells=None, language=None):
"""Returns actual rates for the given hotel.
This request is necessary to make a booking via API.
Value of `book_hash` in results of this API method can be passed as `book_hash` when sending booking requests.
:param hotel_id: hotel identifier.
:type hotel_id: str
:param checkin: check-in date, no later than 366 days from today.
:type checkin: datetime.date
:param checkout: check-out date, no later than 30 days from check-in date.
:type checkout: datetime.date
:param guests: list of guests in the rooms.
The max number of rooms in one request is 6.
:type guests: list[GuestData]
:param currency: (optional) currency code of the rooms rate in the response, e.g. 'GBP', 'USD', 'RUB'.
Default value is contract currency.
:type currency: str or None
:param residency: (optional) guest's (or multiple guests') nationality.
Use it in case there are doubts regarding country/hotel policy towards citizens of a specific country.
Value format is specified by standard 'ISO 3166-1 alpha-2', e.g. 'gb', 'us', 'ru'.
:type residency: str or None
:param timeout: (optional) response timeout in seconds.
:type timeout: int or None
:param upsells: (optional) additional services request.
:type upsells: dict or None
:param language: (optional) language of static information in the response, e.g. 'en', 'ru'.
Default value is contract language.
:type language: str or None
:return: hotel info with actual available rates.
:rtype: dict or None
"""
endpoint = 'api/b2b/v3/search/hp/'
data = {
'id': hotel_id,
'checkin': checkin.strftime('%Y-%m-%d'),
'checkout': checkout.strftime('%Y-%m-%d'),
'guests': guests,
'currency': currency,
'residency': residency,
'upsells': upsells if upsells is not None else {},
'language': language,
}
response = self.request('POST', endpoint, data=data)
hotel = None
if isinstance(response, dict) and isinstance(response.get('hotels'), list) and len(response.get('hotels')):
hotel = response.get('hotels')[0]
return hotel
def make_reservation(self, partner_order_id, book_hash, language, user_ip):
"""Makes a new reservation.
:param partner_order_id: unique order id on partner side, e.g. '0a0f4e6d-b337-43be-a5f8-484492ebe033'.
:type partner_order_id: str
:param book_hash: unique identifier of the rate from hotelpage response.
:type book_hash: str
:param language: language of the reservation, e.g. 'en', 'ru'.
:type language: str
:param user_ip: customer IP address, e.g. '8.8.8.8'.
:type user_ip: str
:return: reservation info.
:rtype: dict
"""
endpoint = 'api/b2b/v3/hotel/order/booking/form/'
data = {
'partner_order_id': partner_order_id,
'book_hash': book_hash,
'language': language,
'user_ip': user_ip,
}
response = self.request('POST', endpoint, data=data)
return response
def finish_reservation(self, partner, payment_type, rooms, user, language,
arrival_datetime=None, upsell_data=None, return_path=None):
"""Completes the reservation.
:param partner: partner information.
partner_order_id: partner order id.
comment: (optional) partner booking inner comment. It is visible only to the partner himself.
amount_sell_b2b2c: (optional) reselling price for the client in contract currency.
:type partner: dict
:param payment_type: payment information.
type: payment type option, possible values: 'now', 'hotel', 'deposit'.
amount: amount of the order.
currency_code: ISO currency code, e.g. 'EUR'.
init_uuid: (optional) token of the booking payment operation.
pay_uuid: (optional) token of the booking payment check.
:type payment_type: dict
:param rooms: guest data by the rooms.
:type rooms: list
:param user: guest additional information.
email: partner manager email.
phone: guest telephone number.
comment: (optional) guest comment sent to the hotel.
:type user: dict
:param language: language of the reservation, e.g. 'en', 'ru'.
:type language: str
:param arrival_datetime: (optional) estimated arrival time to the hotel.
:type arrival_datetime: datetime.datetime
:param upsell_data: (optional) upsell information.
:type upsell_data: list or None
:param return_path: (optional) URL on the partner side to which the user will be forwarded
by the payment gateway after 3D Secure verification.
:type return_path: str
:return: True if the reservation is completed.
:rtype: bool
"""
endpoint = 'api/b2b/v3/hotel/order/booking/finish/'
data = {
'partner': partner,
'payment_type': payment_type,
'rooms': rooms,
'user': user,
'language': language,
'arrival_datetime': arrival_datetime,
'upsell_data': upsell_data if upsell_data is not None else [],
'return_path': return_path,
}
self.request('POST', endpoint, data=data)
return True
def check_reservation_status(self, partner_order_id):
endpoint = 'api/b2b/v3/hotel/order/booking/finish/status/'
data = {
'partner_order_id': partner_order_id,
}
response = self.request('POST', endpoint, data=data)
return response
def cancel(self, partner_order_id):
"""Cancels reservation.
:param partner_order_id: partner order id, e.g. '0a0f4e6d-b337-43be-a5f8-484492ebe033'.
:type partner_order_id: str
:return: True if the reservation is canceled.
:rtype: bool
"""
endpoint = 'api/b2b/v3/hotel/order/cancel/'
data = {
'partner_order_id': partner_order_id,
}
self.request('POST', endpoint, data=data)
return True
def region_list(self, last_id=None, limit=None, types=None):
"""Returns information about regions.
:param last_id: (optional) all retrieved regions will have an ID that exceeds the given value.
:type last_id: int or None
:param limit: (optional) maximum number of regions in a response, cannot exceed 10000, default | |
based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
def _assert_has_capability(module, func):
if not hasattr(module, func):
raise EnvironmentError(
'It looks like like your version of '
'Theano is out of date. '
'Install the latest version with:\n'
'pip install git+git://github.com/Theano/Theano.git '
'--upgrade --no-deps')
def elu(x, alpha=1.0):
    """ Exponential linear unit

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar, slope applied on the negative side.

    # Returns
        A tensor.
    """
    # guard against Theano builds that predate the elu op
    _assert_has_capability(T.nnet, 'elu')
    return T.nnet.elu(x, alpha)
def relu(x, alpha=0., max_value=None, threshold=0.):
    """Rectified linear unit.

    With default values, it returns element-wise `max(x, 0)`.

    # Arguments
        x: A tensor or variable.
        alpha: A scalar, slope of negative section (default=`0.`).
        max_value: Saturation threshold.
        threshold: float. Values at or below it are cut off (and, when
            `alpha` is non-zero, re-added with slope `alpha`).

    # Returns
        A tensor.
    """
    _assert_has_capability(T.nnet, 'relu')
    if alpha != 0.:
        # leaky case: capture the (shifted) negative part BEFORE x is
        # rectified below, so it can be re-applied with slope `alpha`
        if threshold != 0.:
            negative_part = T.nnet.relu(-x + threshold)
        else:
            negative_part = T.nnet.relu(-x)
    if threshold != 0.:
        # zero out everything at or below the threshold
        x = x * T.cast(T.gt(x, threshold), floatx())
    else:
        x = T.nnet.relu(x)
    if max_value is not None:
        # saturate the positive side at max_value
        x = T.clip(x, 0.0, max_value)
    if alpha != 0.:
        # subtract the scaled negative part (leaky slope below the threshold)
        x -= alpha * negative_part
    return x
def softmax(x, axis=-1):
    """Softmax along `axis` (the last axis by default).

    The common 2D/last-axis case is dispatched to Theano's optimized
    `T.nnet.softmax`; all other cases use an explicit max-shifted
    (numerically stable) formulation.
    """
    if x.ndim == 2 and (axis == -1 or axis == x.ndim - 1):
        return T.nnet.softmax(x)
    shifted = x - x.max(axis=axis, keepdims=True)
    e = T.exp(shifted)
    return e / e.sum(axis=axis, keepdims=True)
def softmax_3d(x):
    """Softmax on the last axis of a 2d or 3d tensor.
    # Arguments
        x: A tensor or variable.
    # Returns
        A tensor.
    # Raises
        Exception: If the input tensor is not 2D or 3D.
    """
    nd = ndim(x)
    if nd == 2:
        return softmax(x)
    elif nd == 3:
        # Max-subtraction trick for numerical stability on the last axis.
        e = exp(x - max(x, axis=-1, keepdims=True))
        s = sum(e, axis=-1, keepdims=True)
        return e / s
    else:
        raise Exception('Cannot apply softmax to a tensor that is not 2D or 3D. ' +
                        'Here, ndim=' + str(nd))
def softplus(x):
    """Softplus of a tensor, element-wise: softplus(x) = log(1 + exp(x)).
    # Arguments
        x: A tensor or variable.
    # Returns
        A tensor.
    """
    return T.nnet.softplus(x)
def softsign(x):
    """Softsign of a tensor.
    # Arguments
        x: A tensor or variable.
    # Returns
        A tensor.
    """
    # NOTE(review): T_softsign is presumably an alias for Theano's softsign
    # op imported earlier in this module -- confirm against the imports.
    return T_softsign(x)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Categorical crossentropy between an output tensor and a target tensor.
    # Arguments
        target: A tensor of the same shape as `output`.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1` corresponds to the
            last dimension.
    # Returns
        Output tensor.
    # Raises
        ValueError: if `axis` is neither -1 nor one of the axes of `output`.
    """
    output_dimensions = list(range(len(int_shape(output))))
    if axis != -1 and axis not in output_dimensions:
        raise ValueError(
            '{}{}{}'.format(
                'Unexpected channels axis {}. '.format(axis),
                'Expected to be -1 or one of the axes of `output`, ',
                'which has {} dimensions.'.format(len(int_shape(output)))))
    # If the channels are not in the last axis, move them to be there:
    if axis != -1 and axis != output_dimensions[-1]:
        permutation = output_dimensions[:axis]
        permutation += output_dimensions[axis + 1:] + [axis]
        output = permute_dimensions(output, permutation)
        target = permute_dimensions(target, permutation)
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # scale preds so that the class probas of each sample sum to 1
        output /= output.sum(axis=-1, keepdims=True)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, epsilon(), 1.0 - epsilon())
    return T.nnet.categorical_crossentropy(output, target)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Categorical crossentropy with integer targets.
    # Arguments
        target: An integer tensor.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1` corresponds to the
            last dimension.
    # Returns
        Output tensor.
    # Raises
        ValueError: if `axis` is neither -1 nor one of the axes of `output`.
    """
    output_dimensions = list(range(len(int_shape(output))))
    if axis != -1 and axis not in output_dimensions:
        raise ValueError(
            '{}{}{}'.format(
                'Unexpected channels axis {}. '.format(axis),
                'Expected to be -1 or one of the axes of `output`, ',
                'which has {} dimensions.'.format(len(int_shape(output)))))
    # If the channels are not in the last axis, move them to be there:
    if axis != -1 and axis != output_dimensions[-1]:
        permutation = output_dimensions[:axis]
        permutation += output_dimensions[axis + 1:] + [axis]
        output = permute_dimensions(output, permutation)
        target = permute_dimensions(target, permutation)
    # Expand integer targets to a one-hot tensor shaped like `output`.
    target = T.cast(T.flatten(target), 'int32')
    target = T.extra_ops.to_one_hot(target, nb_class=output.shape[-1])
    target = reshape(target, shape(output))
    # Channels were already moved to the last axis above, hence axis=-1.
    return categorical_crossentropy(target, output, from_logits, axis=-1)
def binary_crossentropy(target, output, from_logits=False):
    """Binary crossentropy between an output tensor and a target tensor.
    # Arguments
        target: A tensor with the same shape as `output`.
        output: A tensor.
        from_logits: Whether `output` is expected to be a logits tensor.
            By default, we consider that `output`
            encodes a probability distribution.
    # Returns
        A tensor.
    """
    if from_logits:
        # Convert logits to probabilities before applying the loss.
        output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, epsilon(), 1.0 - epsilon())
    return T.nnet.binary_crossentropy(output, target)
def weighted_binary_crossentropy(target, output, from_logits=False, lambda_w_rec=1.0, lambda_w_pre=1.0):
    """Weighted crossentropy of binary random variables.
    # Arguments
        target: A tensor with the same shape as `output`.
        output: A tensor.
        from_logits: Whether `output` is expected to be a logits tensor.
            By default, we consider that `output`
            encodes a probability distribution.
        lambda_w_rec: Float. Weight of the positive (target == 1) term.
        lambda_w_pre: Float. Weight of the negative (target == 0) term.
    # Returns
        A tensor (element-wise weighted negative log-likelihood).
    """
    if from_logits:
        output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping.
    # Fixed: the original called `_epsilon()`, which is not defined here --
    # every other loss in this backend clips with the public `epsilon()`.
    output = T.clip(output, epsilon(), 1.0 - epsilon())
    return -(lambda_w_rec * target * T.log(output) + lambda_w_pre * (1.0 - target) * T.log(1.0 - output))
def sigmoid(x):
    """Element-wise sigmoid (logistic function, output in (0, 1)).
    # Arguments
        x: A tensor or variable.
    # Returns
        A tensor.
    """
    return T.nnet.sigmoid(x)
def hard_sigmoid(x):
    """Segment-wise linear approximation of sigmoid.
    Faster than sigmoid.
    Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
    In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
    # Arguments
        x: A tensor or variable.
    # Returns
        A tensor.
    """
    # Piecewise-linear, hence cheaper than the exact sigmoid (see above).
    return T.nnet.hard_sigmoid(x)
def tanh(x):
    """Element-wise tanh (output in (-1, 1)).
    # Arguments
        x: A tensor or variable.
    # Returns
        A tensor.
    """
    return T.tanh(x)
def dropout(x, level, noise_shape=None, seed=None):
    """Sets entries in `x` to zero at random, while scaling the entire tensor.
    # Arguments
        x: tensor
        level: fraction of the entries in the tensor
            that will be set to 0.
        noise_shape: shape for randomly generated keep/drop flags,
            must be broadcastable to the shape of `x`
        seed: random seed to ensure determinism.
    # Returns
        A tensor.
    # Raises
        ValueError: if `level` is outside the interval `[0, 1)`.
    """
    if level < 0. or level >= 1:
        raise ValueError('Dropout level must be in interval [0, 1[.')
    if seed is None:
        # np.random.randint expects integer bounds; the original passed the
        # float literal `10e6`, which is deprecated in recent NumPy releases.
        seed = np.random.randint(1, 10000000)
    if isinstance(noise_shape, list):
        noise_shape = tuple(noise_shape)
    rng = RandomStreams(seed=seed)
    # Surviving entries are scaled by 1/retain_prob to keep the expectation.
    retain_prob = 1. - level
    if noise_shape is None:
        random_tensor = rng.binomial(x.shape, p=retain_prob, dtype=x.dtype)
    else:
        random_tensor = rng.binomial(noise_shape, p=retain_prob, dtype=x.dtype)
        # Mark size-1 noise dims broadcastable so the mask expands over `x`.
        random_tensor = T.patternbroadcast(random_tensor,
                                           [dim == 1 for dim in noise_shape])
    x *= random_tensor
    x /= retain_prob
    return x
def l2_normalize(x, axis=None):
    """Normalizes a tensor wrt the L2 norm alongside the specified axis.
    # Arguments
        x: Tensor or variable.
        axis: axis along which to perform normalization.
    # Returns
        A tensor.
    """
    square_sum = T.sum(T.square(x), axis=axis, keepdims=True)
    # epsilon() floor guards against division by zero for all-zero slices.
    norm = T.sqrt(T.maximum(square_sum, epsilon()))
    return x / norm
def l1_normalize(x, axis):
    """Normalizes a tensor wrt the L1 norm alongside the specified axis.
    # Arguments
        x: Tensor or variable.
        axis: axis along which to perform normalization.
    # Returns
        A tensor.
    """
    # NOTE(review): this divides by the *global max* of the per-axis L1 sums
    # (a scalar), not by each slice's own L1 sum -- confirm this is intended
    # rather than `T.sum(abs(x), axis=axis, keepdims=True)` alone.
    norm = T.max(T.sum(abs(x), axis=axis, keepdims=True))
    return x / norm
def in_top_k(predictions, targets, k):
    """Returns whether the `targets` are in the top `k` `predictions`.
    # Arguments
        predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
        targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
        k: An `int`, number of top elements to consider.
    # Returns
        A 1D tensor of length `batch_size` and type `bool`.
        `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
        values of `predictions[i]`.
    """
    # handle k < 1 and k >= predictions.shape[1] cases to match TF behavior
    if k < 1:
        # Nothing can be inside an empty top-k set.
        # dtype='bool' is only available since Theano 0.9.0
        try:
            return T.zeros_like(targets, dtype='bool')
        except TypeError:
            return T.zeros_like(targets, dtype='int8')
    if k >= int_shape(predictions)[1]:
        # k covers every class, so every target is trivially in the top-k.
        try:
            return T.ones_like(targets, dtype='bool')
        except TypeError:
            return T.ones_like(targets, dtype='int8')
    # k-th largest value per row; `>=` admits ties, matching TF.
    predictions_k = T.sort(predictions)[:, -k]
    targets_values = predictions[T.arange(targets.shape[0]), targets]
    return T.ge(targets_values, predictions_k)
# CONVOLUTIONS
def _preprocess_conv2d_input(x, data_format):
"""Transpose and cast the input before the conv2d.
# Arguments
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
A tensor.
"""
if data_format == 'channels_last':
# TF uses the last dimension | |
<filename>tellet/handlers.py
import tornado.web
import json
import difflib
import pandas as pd
from datetime import datetime
import os.path as op
class BaseHandler(tornado.web.RequestHandler):
    """Common base for all handlers: keeps the logged-in user in a secure cookie."""

    def get_current_user(self):
        # Tornado calls this to resolve `self.current_user`; returns the raw
        # secure-cookie bytes (e.g. b'"Cha"') or None when not logged in.
        return self.get_secure_cookie("user")

    def set_current_user(self, user):
        # Persist the login cookie, or clear it when `user` is falsy.
        if user:
            # NOTE(review): relies on `tornado.escape` being reachable through
            # the `tornado.web` import -- consider an explicit
            # `import tornado.escape` at the top of the file.
            self.set_secure_cookie("user", tornado.escape.json_encode(user))
        else:
            self.clear_cookie("user")
class My404Handler(tornado.web.RequestHandler):
    """Catch-all handler: bounces any unknown URL back to the home page."""
    # Override prepare() instead of get() to cover all possible HTTP methods.
    def prepare(self):
        # NOTE(review): redirect() replies with a 302, which overrides the 404
        # status set just above -- confirm the 404 is actually intended.
        self.set_status(404)
        self.redirect('/')
def _initialize(self, fp):
    # Shared `initialize` implementation for all handlers below: `fp` is the
    # path of the JSON data file holding the lists and the activity log.
    self.fp = fp
def get_color_ndays(n, cutoffs=(7, 15), ascending=True):
    """Map a day count to a Bootstrap callout CSS class.

    Args:
        n: number of days elapsed.
        cutoffs: (mini, maxi) thresholds separating the three colour bands.
            (Changed from a mutable list default to a tuple; accepted values
            are unchanged.)
        ascending: if True, larger `n` is worse (info -> warning -> danger);
            if False, the extreme bands are swapped (danger -> warning -> info).

    Returns:
        str: a `bs-callout-*` CSS class name.
    """
    mini, maxi = cutoffs
    if n < mini:
        # Below the first threshold: good when ascending, bad otherwise.
        return 'bs-callout-info' if ascending else 'bs-callout-danger'
    if n < maxi:
        return 'bs-callout-warning'
    # n >= maxi. (The original used `elif n > maxi` and silently returned
    # None when n == maxi -- fixed here.)
    return 'bs-callout-danger' if ascending else 'bs-callout-info'
class MainHandler(BaseHandler):
    """Landing page: shows app version plus reminder callouts (bins, WC, laundry)."""

    @tornado.web.authenticated
    def get(self):
        # current_user holds the secure-cookie bytes, e.g. b'"Cha"'; strip quotes.
        username = str(self.current_user[1:-1], 'utf-8')
        print('\n*** %s has just logged in.' % username)
        import git
        # Version string = timestamp of the latest git commit of this checkout.
        repo = git.Repo(op.dirname(op.dirname(__file__)))
        commit = list(repo.iter_commits(max_count=1))[0]
        dt = datetime.fromtimestamp(commit.committed_date)
        version = datetime.strftime(dt, '%Y%m%d-%H%M%S')
        loglist = json.load(open(self.fp))['log']
        columns = ['ts', 'who', 'action', 'what', 'where']
        df = pd.DataFrame(loglist, columns=columns).set_index('ts')
        df = df.sort_index(ascending=False)
        # Keep only completed chore reports, newest first.
        rp = df.query('action == "did" & where == "reports"')
        callout = ''
        if not rp.empty:
            # 'what' is ';'-separated; first field names the chore.
            # NOTE(review): assigning on a query() result may raise pandas'
            # SettingWithCopyWarning -- confirm it is harmless here.
            rp['what2'] = rp.apply(lambda row: row.what.split(';')[0], axis=1)
            # last poubelle
            lp = rp.query('what2 == "poubelles"')
            if not lp.empty:
                lp = lp.iloc[0]
                # The row name (index) is the report's timestamp string.
                dt = datetime.now() - datetime.strptime(lp.name, '%Y%m%d_%H%M%S')
                comments = lp.what.split(';')[-1]
                if comments != '': comments = ' ' + comments
                opt = {'ndays': dt.days,
                       'who': lp.who,
                       'comments': comments,
                       'color': get_color_ndays(dt.days, [7,15], False)}
                callout = '<div class="bs-callout bs-callout-info {color}">'\
                          'Dernière poubelle il y a <strong>{ndays} jours</strong>'\
                          ' ({who}{comments})</div>'''.format(**opt)
            # last wc
            lw = rp.query('what2 == "wc"')
            if not lw.empty:
                lw = lw.iloc[0]
                dt = datetime.now() - datetime.strptime(lw.name, '%Y%m%d_%H%M%S')
                comments = lw.what.split(';')[-1]
                if comments != '': comments = ' ' + comments
                opt = {'ndays': dt.days,
                       'who': lw.who,
                       'comments': comments,
                       'color': get_color_ndays(dt.days, [7, 15], True)}
                callout = callout + '<div class="bs-callout {color}">'\
                          'Dernier nettoyage WC il y a <strong>{ndays} jours</strong>'\
                          ' ({who}{comments})</div>'''.format(**opt)
        # is it laundry day (weekday(): Monday == 0, so 2 is Wednesday)
        wd = datetime.now().weekday()
        if wd == 2:
            callout = callout + '<div class="bs-callout bs-callout-warning">'\
                      '<strong>Jour de lessive</strong> '\
                      '<span class="badge bg-warning">Rappel</span></div>'''
        elif wd == 1:
            callout = callout + '<div class="bs-callout bs-callout-warning">'\
                      'Demain jour de lessive '\
                      '<span class="badge bg-warning">Rappel</span></div>'''
        self.render("html/index.html", version=version, callout=callout)

    def initialize(self, **kwargs):
        # Delegate to the module-level shared initializer (sets self.fp).
        _initialize(self, **kwargs)
class ListHandler():
    """Mixin with shared list logic; subclasses set `_id` and `_action_label`."""

    def remove_from_list(self, what, which_list, action):
        """Fuzzy-match `what` in `which_list`, remove it, and log the action.

        Returns the updated list, or None when nothing matched (the attempt
        is still logged with a 'tried_to_<action>' action).
        """
        username = str(self.current_user[1:-1], 'utf-8')
        that_list = json.load(open(self.fp))[which_list]
        # Fuzzy match tolerates typos in the posted item text.
        matches = difflib.get_close_matches(what, that_list)
        print(what, that_list, matches)
        dt = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
        j = json.load(open(self.fp))
        if len(matches) == 0:
            action = 'tried_to_%s' % action
            res = None
        else:
            that_list.remove(matches[0])
            j[which_list] = that_list
            res = that_list
        entry = (dt, username, action, what, which_list)
        j['log'].append(entry)
        json.dump(j, open(self.fp, 'w'), indent=4)
        return res

    def perform_action(self):
        """Handle a POST: either re-render the list ('show') or remove an item."""
        username = str(self.current_user[1:-1], 'utf-8')
        action = str(self.get_argument("action", ""))
        print(str(self.get_argument("what", "\n")))
        # Only the first line of the posted text is the item itself.
        what = str(self.get_argument("what", "\n")).split('\n')[0]
        if action == 'show':
            html = self.get_list_html()
            self.write(html)
            return
        print((username, action, what))
        that_list = self.remove_from_list(what, self._id, action)
        is_found = that_list is not None
        sl = None
        if is_found:
            sl = 'La liste est vide.'
            if len(that_list) != 0:
                sl = self.get_list_html()
        # Client receives [matched?, refreshed-list-HTML-or-None].
        self.write(json.dumps([is_found, sl]))

    def _get_(self):
        """Render the list page, injecting modal dialog HTML fragments."""
        username = str(self.current_user[1:-1], 'utf-8')
        print('\n*** %s is looking at %s.' % (username, self._id))
        shopping = json.load(open(self.fp))[self._id]
        if len(shopping) == 0:
            sl = '<div id="itemlist">Liste vide !</div>'
        else:
            sl = self.get_list_html()
        from glob import glob
        files = glob(op.join(op.dirname(op.dirname(__file__)),
                             'web/html/modals/*.html'))
        modals = '\n'.join([open(e).read() for e in files])
        self.render("html/%s.html" % self._id, list=sl, modals=modals)

    def get_list_html(self):
        """Build the <ul> HTML for the current list (expiry-aware for some lists)."""
        def sort_dates(j):
            # Each entry is 'label;q;original_q;unit;expiry-date'.
            # Partition by date validity, sort the valid ones by expiry.
            res1 = []  # with correct dates
            res2 = []  # with wrong dates
            res3 = []  # without dates
            for each in j:
                label, q, original_q, unit, ed = each.split(';')
                if ed != '':
                    try:
                        ed = datetime.strptime(ed, '%d%m%y')
                        res1.append((label, q, original_q, unit, ed))
                    except ValueError:
                        res2.append(each)
                else:
                    res3.append(each)
            columns = ['label', 'q', 'q1', 'unit', 'ed']
            df = pd.DataFrame(res1, columns=columns).sort_values(by='ed')
            res1b = [e.to_list() for _, e in df.iterrows()]
            res1 = []
            for label, q, original_q, unit, ed in res1b:
                ed = ed.strftime('%d%m%y')
                e = [label, q, original_q, unit, ed]
                res1.append(';'.join(e))
            res = []
            for each in (res1, res2, res3):
                res.extend(each)
            return res
        j = json.load(open(self.fp))[self._id]
        if len(j) == 0:
            list_html = '<div id="itemlist">Liste vide !</div>'
            return list_html
        if self._id in ('fridge', 'pharmacy'):
            # These lists carry expiry dates; colour-code by days remaining.
            sl = []
            j = sort_dates(j)
            for each in j:
                label, q, original_q, unit, ed = each.split(';')
                if ed != '':
                    try:
                        ed = datetime.strptime(ed, '%d%m%y')
                        dt = ed - datetime.now()
                        style = ' (%s)'
                        if dt.days < 0:
                            style = ' <span style="color:red; font-weight:600">(%s)</span>'
                        elif dt.days < 2:
                            style = ' <span style="color:red">(%s)</span>'
                        elif dt.days < 7:
                            style = ' <span style="color:darksalmon">(%s)</span>'
                        ed = 'exp. %s' % ed.strftime('%d-%m-20%y')
                        ed = style % ed
                    except ValueError:
                        # Unparseable date: show it, flagged in red.
                        ed = ' <span style="color:red; font-weight:600">(%s)</span>' % ed
                if unit == 'units':
                    unit = ''
                    original_q = '/'+original_q
                else:
                    unit = '%'
                    original_q = ''
                sl.append('''<li class="list-group-item d-flex justify-content-between
                align-items-center" data-data="%s">
                %s – %s%s%s%s
                <span>
                <span class="badge bg-danger">Editer</span>
                <span class="badge bg-success">%s </span></span>
                </li>''' % (each, label, q, original_q, unit, ed,
                            self._action_label))
        else:
            # Plain lists (shopping, todo): no quantities or dates.
            sl = ['''<li class="list-group-item d-flex justify-content-between
                align-items-center">
                %s
                <span>
                <span class="badge bg-danger">Editer</span>
                <span class="badge bg-success">%s </span></span>
                </li>''' % (each, self._action_label) for each in j]
        list_html = '<div id="itemlist"><ul class="list-group">%s</ul></div>' % ''.join(sl)
        return list_html
class ShoppingHandler(BaseHandler, ListHandler):
    """Handler for the shopping list page ('Acheté' removes an item)."""

    _id = 'shopping'
    _action_label = 'Acheté'

    def initialize(self, **kwargs):
        # Shared module-level initializer stores the JSON data file path.
        _initialize(self, **kwargs)

    @tornado.web.authenticated
    def get(self):
        # Render the shopping list page.
        self._get_()

    def post(self):
        # Dispatch the add/remove/show actions posted by the page's JS.
        self.perform_action()
class PharmacyHandler(BaseHandler, ListHandler):
    """Handler for the pharmacy list page ('Utiliser' removes an item)."""

    _id = 'pharmacy'
    _action_label = 'Utiliser'

    def initialize(self, **kwargs):
        # Shared module-level initializer stores the JSON data file path.
        _initialize(self, **kwargs)

    @tornado.web.authenticated
    def get(self):
        # Render the pharmacy list page (expiry-date aware).
        self._get_()

    def post(self):
        # Dispatch the add/remove/show actions posted by the page's JS.
        self.perform_action()
class FridgeHandler(BaseHandler, ListHandler):
    """Handler for the fridge contents page ('Utiliser' removes an item)."""

    _id = 'fridge'
    _action_label = 'Utiliser'

    def initialize(self, **kwargs):
        # Shared module-level initializer stores the JSON data file path.
        _initialize(self, **kwargs)

    @tornado.web.authenticated
    def get(self):
        # Render the fridge list page (expiry-date aware).
        self._get_()

    def post(self):
        # Dispatch the add/remove/show actions posted by the page's JS.
        self.perform_action()
class TodoHandler(BaseHandler, ListHandler):
    """Handler for the todo list page ('Fait' removes an item)."""

    _id = 'todo'
    _action_label = 'Fait'

    def initialize(self, **kwargs):
        # Shared module-level initializer stores the JSON data file path.
        _initialize(self, **kwargs)

    @tornado.web.authenticated
    def get(self):
        # Render the todo list page.
        self._get_()

    def post(self):
        # Dispatch the add/remove/show actions posted by the page's JS.
        self.perform_action()
class AddHandler(BaseHandler):
    """Adds an item to one of the lists, or records a 'did' report/log entry."""

    def add_to_list(self, what, which_list):
        """Append `what` to `which_list` in the JSON store and log the addition."""
        username = str(self.current_user[1:-1], 'utf-8')
        j = json.load(open(self.fp))
        that_list = j[which_list]
        that_list.append(what)
        j[which_list] = that_list
        dt = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
        entry = (dt, username, 'add', what, which_list)
        j['log'].append(entry)
        json.dump(j, open(self.fp, 'w'), indent=4)
        return that_list

    @tornado.web.authenticated
    def post(self):
        username = str(self.current_user[1:-1], 'utf-8')
        to = str(self.get_argument("to", ""))
        what = str(self.get_argument("what", ""))
        # then = str(self.get_argument("then", to))
        print((username, 'adding', what.split('\n')[0], 'to', to))
        if to not in ['log', 'reports']:
            shopping = self.add_to_list(what, to)
            print(shopping)
            self.write(json.dumps(True))
        else: # log
            # Reports/log entries are appended directly with action 'did'.
            # NOTE(review): this branch sends no response body, unlike the
            # branch above -- confirm the client ignores the reply.
            j = json.load(open(self.fp))
            dt = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
            entry = (dt, username, 'did', what, to)
            j['log'].append(entry)
            json.dump(j, open(self.fp, 'w'), indent=4)

    def initialize(self, **kwargs):
        # Delegate to the module-level shared initializer (sets self.fp).
        _initialize(self, **kwargs)
class EditHandler(BaseHandler):
    """Replaces an existing entry of a list with an edited value."""

    def add_to_list(self, what, which_list, item):
        """Swap `item` for `what` at the same position in `which_list`; log it."""
        username = str(self.current_user[1:-1], 'utf-8')
        data = json.load(open(self.fp))
        entries = data[which_list]
        # In-place replacement keeps the item's original position.
        entries[entries.index(item)] = what
        data[which_list] = entries
        stamp = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
        data['log'].append((stamp, username, 'edit', what, which_list))
        json.dump(data, open(self.fp, 'w'), indent=4)
        return entries

    @tornado.web.authenticated
    def post(self):
        username = str(self.current_user[1:-1], 'utf-8')
        to = str(self.get_argument("to", ""))
        what = str(self.get_argument("what", ""))
        item = str(self.get_argument("item", ""))
        print('to', to)
        print((username, to, what.split('\n')[0]))
        if to == 'log':
            # Direct log entry: record a 'did' action, no reply body.
            data = json.load(open(self.fp))
            stamp = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
            data['log'].append((stamp, username, 'did', what, to))
            json.dump(data, open(self.fp, 'w'), indent=4)
        else:
            self.add_to_list(what, to, item)
            self.write(json.dumps(True))

    def initialize(self, **kwargs):
        # Shared module-level initializer stores the JSON data file path.
        _initialize(self, **kwargs)
class StatsHandler(BaseHandler):
    """Renders the activity log / reports tables and serves chart data as JSON."""

    @tornado.web.authenticated
    def get(self):
        username = str(self.current_user[1:-1], 'utf-8')
        print('\n*** %s is looking at stats.' % username)
        loglist = json.load(open(self.fp))['log']
        columns = ['ts', 'who', 'action', 'what', 'where']
        data = pd.DataFrame(loglist, columns=columns).set_index('ts')
        # Everything except chore reports goes into the "log" table.
        df = data.query('where != "reports"')
        df = df.sort_index(ascending=False)
        tpl = '''
        <style>
        table.df { display: block;
                   overflow-x: auto;
                   white-space: nowrap;}
        .df tbody tr:nth-child(even) { background-color: lightblue; }
        </style>
        '''
        log = tpl + df.to_html(classes="df")
        df = data.query('where == "reports"')
        df = df.sort_index(ascending=False)
        reports = tpl + df.to_html(classes="df")
        self.render("html/stats.html", reports=reports, log=log)

    def post(self):
        # Returns chart configuration JSON built by the tellet.stats helpers.
        loglist = json.load(open(self.fp))['log']
        columns = ['ts', 'who', 'action', 'what', 'where']
        df = pd.DataFrame(loglist, columns=columns).set_index('ts')
        # n total
        from tellet import stats
        graph1 = stats.get_doughnut(df, '# total de contributions')
        graph2 = stats.get_radar(df, 'Répartition des actions')
        graph3 = stats.get_stacked_doughnut(df)
        config = {'ntotal': graph1,
                  'radar': graph2,
                  'stacked': graph3}
        self.write(json.dumps(config))

    def initialize(self, **kwargs):
        # Delegate to the module-level shared initializer (sets self.fp).
        _initialize(self, **kwargs)
class ReportsHandler(BaseHandler):
    """Shows the chore-reporting page."""

    def initialize(self, **kwargs):
        # Shared module-level initializer stores the JSON data file path.
        _initialize(self, **kwargs)

    @tornado.web.authenticated
    def get(self):
        user = str(self.current_user[1:-1], 'utf-8')
        print('\n*** %s is reporting.' % user)
        self.render("html/reports.html")
class AuthLogoutHandler(BaseHandler):
    """Clears the login cookie and sends the user back where they came from."""

    def get(self):
        self.clear_cookie("user")
        destination = self.get_argument("next", "/")
        self.redirect(destination)
class AuthLoginHandler(BaseHandler):
def get(self):
try:
errormessage = self.get_argument("error")
except Exception:
errormessage = ""
self.render("html/login.html", errormessage=errormessage,)
def check_permission(self, username):
if username in ['Cha', 'Greg']:
print(username)
return True
def post(self):
username = str(self.get_argument("username", ""))
auth = self.check_permission(username)
if auth:
self.set_current_user(username)
self.write(json.dumps([]))
| |
<reponame>huaxz1986/lingvo
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for all optimizers."""
import dataclasses
import functools
from typing import Any, Callable, NamedTuple, Optional, Sequence, Tuple, Union
from absl import logging
import jax
from jax import lax
from jax import numpy as jnp
from lingvo.jax import base_layer
from lingvo.jax import gshard_utils
from lingvo.jax import py_utils
from lingvo.jax import pytypes
import optax
from optax_shampoo import distributed_shampoo
NestedMap = py_utils.NestedMap
InstantiableParams = py_utils.InstantiableParams
JTensor = pytypes.JTensor
NestedJTensor = pytypes.NestedJTensor
NestedParams = pytypes.NestedParams
# Initializes sharding spec for the optimizer state variables.
TransformInitPartitionSpecFn = Callable[[NestedParams],
Union[NestedParams,
Sequence[NestedParams]]]
# Extension of optax.GradientTransformation that supports spmd sharding and
# explicit annotation of sharding specs for the optimizer state variables.
@dataclasses.dataclass(frozen=True)
class ShardedGradientTransformation:
  """GradientTransformation that supports spmd."""
  # init_fn and update_fn are the same as in optax.GradientTransformation
  init: optax.TransformInitFn
  update: optax.TransformUpdateFn
  # Input is the sharding specs of the variables used in the forward
  # computation. Output is the sharding specs of the optimizer state variables.
  #
  # Constraints: output from this function should be of identical structure as
  # that of the init() function.
  init_partition_spec: TransformInitPartitionSpecFn


# Either a plain optax transformation or the sharded variant above.
GeneralGradientTransformation = Union[optax.GradientTransformation,
                                      ShardedGradientTransformation]
def count_init_fn(_):
  """Common init_fn that initializes a count for global step."""
  # The ignored argument is the params pytree, per the optax init contract.
  return NestedMap(count=jnp.zeros([], jnp.int32))
def count_init_partition_spec_fn(var_params):
  """Init partition spec for only partitioning the count/step."""
  var_spec_flattened, _ = jax.tree_flatten(var_params)
  assert var_spec_flattened
  # Borrow the device mesh from the first model variable; the scalar count
  # itself is replicated (empty tensor_split_dims_mapping).
  first_var = var_spec_flattened[0]
  assert isinstance(first_var, py_utils.Params)
  device_mesh = first_var.device_mesh
  return NestedMap(
      count=py_utils.weight_params(
          shape=[],
          init=None,
          dtype=jnp.int32,
          collections=None,
          device_mesh=device_mesh,
          tensor_split_dims_mapping=[]))
def sharded_sgd(learning_rate_fn: optax.Schedule, momentum: Optional[float],
                nesterov: bool) -> ShardedGradientTransformation:
  """A canonical Stochastic Gradient Descent optimiser supporting spmd sharding.

  This implements stochastic gradient descent. It also includes support for
  momentum, and nesterov acceleration, as these are standard practice when
  using stochastic gradient descent to train deep neural networks.

  References:
    Sutskever et al, 2013: http://proceedings.mlr.press/v28/sutskever13.pdf

  Args:
    learning_rate_fn: a callable that given the current training step, returns
      the learning rate to apply.
    momentum: (default `None`), the `decay` rate used by the momentum term,
      when it is set to `None`, then momentum is not used at all. Currently
      only `None` is accepted (see assert below).
    nesterov: (default `False`) whether nesterov momentum is used. Currently
      ignored.

  Returns:
    A `ShardedGradientTransformation`.
  """
  # TODO(yonghui): support momentum.
  assert momentum is None
  del nesterov

  def update_fn(updates, state, params=None):
    del params
    # Scale gradients by the negated learning rate for the current step.
    step_size = -1.0 * learning_rate_fn(state.count)
    updates = jax.tree_map(lambda g: jnp.array(step_size, dtype=g.dtype) * g,
                           updates)
    updated_states = NestedMap(count=state.count + jnp.array(1, jnp.int32))
    return updates, updated_states

  return ShardedGradientTransformation(
      init=count_init_fn,
      update=update_fn,
      init_partition_spec=count_init_partition_spec_fn)
class _AdamOptState:
  """Adam per-variable optimizer state: first (m) and second (v) moments."""

  def __init__(self, *, m, v):
    self.m = m
    self.v = v

  def __repr__(self):
    # Aid debugging when optimizer-state structures are printed.
    return '_AdamOptState(m=%r, v=%r)' % (self.m, self.v)
class _ShardedAdamHelper:
  """A helper class facilitates the creation of sharded_adam_optimizer."""

  def opt_state_sharding_spec(self,
                              var_params: py_utils.Params) -> _AdamOptState:
    """Returns optimizer sharding spec for one particular variable."""
    m_var_params = var_params.Copy()
    m_var_params.init = None
    v_var_params = var_params.Copy()
    v_var_params.init = None
    # m and v simply share the same sharding.
    return _AdamOptState(m=m_var_params, v=v_var_params)

  def init_opt_state(self, var_params: py_utils.Params) -> _AdamOptState:
    """Returns optimizer state (zeroed moments) for one particular variable."""
    return _AdamOptState(
        m=jnp.zeros_like(var_params), v=jnp.zeros_like(var_params))

  def sanitize_values(self, array: JTensor, replacement: float = 0.0):
    """Sanitizes NaN and Infinity values (each replaced by `replacement`)."""
    return jnp.nan_to_num(
        array, nan=replacement, posinf=replacement, neginf=replacement)

  def bias_corrected_decay(self, step: JTensor, decay: float) -> JTensor:
    """Incorporates bias correction into decay.

    Please see section 7.1 in https://arxiv.org/pdf/1804.04235.pdf for the
    derivation of the formulas below. With bias-corrected decay, we can simply
    do

      m_{t} = decay1 * m_{t-1} + (1 - decay1) * g
      v_{t} = decay2 * v_{t-1} + (1 - decay2) * g ^ 2

    without further bias correction.

    Args:
      step: current step, 0-based.
      decay: the raw decay. As t -> infinity, bias corrected decay converges
        to this value.

    Returns:
      Bias corrected decay.
    """
    t = step.astype(jnp.float32) + 1.
    return decay * (1. - jnp.power(decay, t - 1.)) / (1. - jnp.power(decay, t))

  def update_moments(self, step: JTensor, update: JTensor,
                     moments: _AdamOptState, beta1: float,
                     beta2: float) -> _AdamOptState:
    """Updates momentum values (first and second gradient moments)."""
    beta1_decay = self.bias_corrected_decay(step, beta1)
    beta2_decay = self.bias_corrected_decay(step, beta2)
    m = (1.0 - beta1_decay) * update + beta1_decay * moments.m
    v = (1.0 - beta2_decay) * (update**2) + beta2_decay * moments.v
    return _AdamOptState(m=m, v=v)

  def clip_update(self, update: JTensor, clip_threshold: float) -> JTensor:
    # Scale the update down when its RMS exceeds clip_threshold; NaN/Inf RMS
    # is replaced by 1.0 first so the division stays finite.
    # NOTE(review): `reduce_rms` is defined elsewhere in this module.
    mean_update = self.sanitize_values(reduce_rms(update), 1.0)
    clip_threshold = jnp.array(clip_threshold, dtype=update.dtype)
    denom = jnp.maximum(1.0, mean_update / clip_threshold)
    return update / denom
def sharded_chain(
    *args: GeneralGradientTransformation) -> ShardedGradientTransformation:
  """Chains (possibly sharded) gradient transformations.

  Like optax `chain`, the returned transformation keeps one state entry per
  chained transformation and threads the updates through each in order. It
  additionally chains `init_partition_spec` for any
  ShardedGradientTransformation members; plain optax members contribute a
  None spec, meaning their state variables are replicated.

  Args:
    *args: a sequence of chainable GradientTransformations or
      ShardedGradientTransformations or a combination of both.

  Returns:
    A single chained ShardedGradientTransformation.
  """

  def init_fn(params):
    # One state entry per chained transformation, in order.
    return tuple(transform.init(params) for transform in args)

  def update_fn(updates, state, params=None):
    if len(args) != len(state):
      raise ValueError('The number of updates and states has to be the same in '
                       'sharded chain.')
    new_states = []
    for sub_state, transform in zip(state, args):
      updates, updated_sub_state = transform.update(updates, sub_state, params)
      new_states.append(updated_sub_state)
    return updates, tuple(new_states)

  def init_partition_spec_fn(mdl_vars):
    specs = []
    for transform in args:
      spec_fn = getattr(transform, 'init_partition_spec', None)
      # Non-sharded members get a None spec (replicated state).
      specs.append(spec_fn(mdl_vars) if callable(spec_fn) else None)
    return tuple(specs)

  return ShardedGradientTransformation(
      init=init_fn,
      update=update_fn,
      init_partition_spec=init_partition_spec_fn)
def sharded_masked(
    inner: GeneralGradientTransformation, mask: Union[NestedParams,
                                                      Callable[[NestedParams],
                                                               NestedParams]]
) -> GeneralGradientTransformation:
  """Mask updates so only some are transformed, the rest are passed through.

  This differs from the Optax version in that it supports sharding annotations.

  Args:
    inner: Inner transformation to mask.
    mask: a PyTree with same structure as (or a prefix of) the params PyTree,
      or a Callable that returns such a pytree given the params/updates. The
      leaves should be booleans, ``True`` for leaves/subtrees you want to
      apply the transformation to, and ``False`` for those you want to skip.

  Returns:
    New ShardedGradientTransformation wrapping ``inner``.
  """

  def init_partition_spec_fn(mdl_vars):
    # Forward to the inner transformation's spec fn when it provides one.
    # NOTE(review): returns None implicitly when the attribute exists but is
    # not callable -- the hasattr guard below does not check callability.
    init_partition_spec = getattr(inner, 'init_partition_spec', None)
    if callable(init_partition_spec):
      return init_partition_spec(mdl_vars)

  grad_tx = optax.masked(inner, mask)
  if not hasattr(inner, 'init_partition_spec'):
    # Plain optax transformation: nothing to shard, return as-is.
    return grad_tx
  else:
    return ShardedGradientTransformation(
        init=grad_tx.init,
        update=grad_tx.update,
        init_partition_spec=init_partition_spec_fn)
def apply_l2_weight_decay(
    learning_rate_fn: optax.Schedule,
    l2_regularizer_weight: Optional[float] = 0.
) -> ShardedGradientTransformation:
  """Applies L2 weight decay.

  Args:
    learning_rate_fn: An optax schedule that infers the lr given the step.
    l2_regularizer_weight: Weight for L2 regularization.

  Returns:
    A ShardedGradientTransformation applying L2 weight decay.
  """

  def update_fn(updates, state, params):
    count = state.count
    lr_multiplier = learning_rate_fn(count)
    if l2_regularizer_weight:
      if params is None:
        raise ValueError('Params must not be empty when applying weight decay.')
      # jax.tree_multimap is a deprecated alias of jax.tree_map (removed in
      # recent JAX); use the canonical multi-tree form, consistent with
      # sharded_sgd above.
      updates = jax.tree_map(
          lambda g, p: g - lr_multiplier * l2_regularizer_weight * p, updates,
          params)
    updated_state = NestedMap(count=count + 1)
    return updates, updated_state

  return ShardedGradientTransformation(
      init=count_init_fn,
      update=update_fn,
      init_partition_spec=count_init_partition_spec_fn)
def apply_l1_weight_decay(
    learning_rate_fn: optax.Schedule,
    l1_regularizer_weight: Optional[float] = 0.
) -> ShardedGradientTransformation:
  """Applies L1 weight decay.

  Args:
    learning_rate_fn: An optax schedule that infers the lr given the step.
    l1_regularizer_weight: Weight for L1 regularization.

  Returns:
    A ShardedGradientTransformation applying L1 weight decay.
  """

  def update_fn(updates, state, params):
    count = state.count
    lr_multiplier = learning_rate_fn(count)
    if l1_regularizer_weight:
      if params is None:
        raise ValueError('Params must not be empty when applying weight decay.')
      # jax.tree_multimap is a deprecated alias of jax.tree_map (removed in
      # recent JAX); use the canonical multi-tree form, consistent with
      # sharded_sgd above. L1 decay uses sign(p) rather than p.
      updates = jax.tree_map(
          lambda g, p: g - lr_multiplier * l1_regularizer_weight * jnp.sign(p),
          updates, params)
    updated_state = NestedMap(count=count + 1)
    return updates, updated_state

  return ShardedGradientTransformation(
      init=count_init_fn,
      update=update_fn,
      init_partition_spec=count_init_partition_spec_fn)
def sharded_adam(learning_rate_fn: optax.Schedule, beta1: float, beta2: float,
epsilon: float, epsilon_root: float, update_capping: float,
weight_decay: float) -> ShardedGradientTransformation:
"""Standard Adam optimizer that also supports sharding.
This Adam optimizer supports optional update capping when update_capping is >
0. Update capping can help stabilizing model learning, avoiding excessive
updates when gradient variance estimate is stale (e.g. when data distribution
suddenly shifts).
Args:
learning_rate_fn: a callable that given the current training step, returns
the learning | |
# FST functionality tests
# Copyright (c) 2015, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import struct
import subprocess
import time
import os
import re
import hwsim_utils
from hwsim import HWSimRadio
import hostapd
from wpasupplicant import WpaSupplicant
import fst_test_common
import fst_module_aux
from utils import alloc_fail, HwsimSkip
#enum - bad parameter types
# Integer codes identifying which parameter (if any) a test case
# deliberately corrupts. These values index into bad_param_names below,
# so the two lists must stay in sync.
bad_param_none = 0
bad_param_session_add_no_params = 1
bad_param_group_id = 2
bad_param_session_set_no_params = 3
bad_param_session_set_unknown_param = 4
bad_param_session_id = 5
bad_param_old_iface = 6
bad_param_new_iface = 7
bad_param_negative_llt = 8
bad_param_zero_llt = 9
bad_param_llt_too_big = 10
bad_param_llt_nan = 11
bad_param_peer_addr = 12
bad_param_session_initiate_no_params = 13
bad_param_session_initiate_bad_session_id = 14
bad_param_session_initiate_with_no_new_iface_set = 15
bad_param_session_initiate_with_bad_peer_addr_set = 16
bad_param_session_initiate_request_with_bad_stie = 17
bad_param_session_initiate_response_with_reject = 18
bad_param_session_initiate_response_with_bad_stie = 19
bad_param_session_initiate_response_with_zero_llt = 20
bad_param_session_initiate_stt_no_response = 21
bad_param_session_initiate_concurrent_setup_request = 22
bad_param_session_transfer_no_params = 23
bad_param_session_transfer_bad_session_id = 24
bad_param_session_transfer_setup_skipped = 25
bad_param_session_teardown_no_params = 26
bad_param_session_teardown_bad_session_id = 27
bad_param_session_teardown_setup_skipped = 28
bad_param_session_teardown_bad_fsts_id = 29

# Human-readable description for each bad_param_* code above, used in
# success/failure log messages and exception texts (indexed by code).
bad_param_names = ("None",
                   "No params passed to session add",
                   "Group ID",
                   "No params passed to session set",
                   "Unknown param passed to session set",
                   "Session ID",
                   "Old interface name",
                   "New interface name",
                   "Negative LLT",
                   "Zero LLT",
                   "LLT too big",
                   "LLT is not a number",
                   "Peer address",
                   "No params passed to session initiate",
                   "Session ID",
                   "No new_iface was set",
                   "Peer address",
                   "Request with bad st ie",
                   "Response with reject",
                   "Response with bad st ie",
                   "Response with zero llt",
                   "No response, STT",
                   "Concurrent setup request",
                   "No params passed to session transfer",
                   "Session ID",
                   "Session setup skipped",
                   "No params passed to session teardown",
                   "Bad session",
                   "Session setup skipped",
                   "Bad fsts_id")
def fst_start_session(apdev, test_params, bad_param_type, start_on_ap,
                      peer_addr = None):
    """This function makes the necessary preparations and the adds and sets a
    session using either correct or incorrect parameters depending on the value
    of bad_param_type. If the call ends as expected (with session being
    successfully added and set in case of correct parameters or with the
    expected exception in case of incorrect parameters), the function silently
    exits. Otherwise, it throws an exception thus failing the test."""
    ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    bad_parameter_detected = False
    exception_already_raised = False
    try:
        fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
        # Select which side (AP or STA) plays the initiator role.
        if start_on_ap:
            initiator = ap1
            responder = sta1
            new_iface = ap2.ifname()
            new_peer_addr = ap2.get_actual_peer_addr()
        else:
            initiator = sta1
            responder = ap1
            new_iface = sta2.ifname()
            new_peer_addr = sta2.get_actual_peer_addr()
        initiator.add_peer(responder, peer_addr, new_peer_addr)
        # Corrupt the group id before session add, when requested.
        group_id = None
        if bad_param_type == bad_param_group_id:
            group_id = '-1'
        elif bad_param_type == bad_param_session_add_no_params:
            group_id = ''
        initiator.set_fst_parameters(group_id=group_id)
        sid = initiator.add_session()
        # Corrupt the session-set step, or fall through to configure the
        # session with (possibly corrupted) iface/llt/sid parameters.
        if bad_param_type == bad_param_session_set_no_params:
            res = initiator.set_session_param(None)
            if not res.startswith("OK"):
                raise Exception("Session set operation failed")
        elif bad_param_type == bad_param_session_set_unknown_param:
            res = initiator.set_session_param("bad_param=1")
            if not res.startswith("OK"):
                raise Exception("Session set operation failed")
        else:
            if bad_param_type == bad_param_session_initiate_with_no_new_iface_set:
                new_iface = None
            elif bad_param_type == bad_param_new_iface:
                new_iface = 'wlan12'
            old_iface = None if bad_param_type != bad_param_old_iface else 'wlan12'
            llt = None
            if bad_param_type == bad_param_negative_llt:
                llt = '-1'
            elif bad_param_type == bad_param_zero_llt:
                llt = '0'
            elif bad_param_type == bad_param_llt_too_big:
                llt = '4294967296' #0x100000000
            elif bad_param_type == bad_param_llt_nan:
                llt = 'nan'
            elif bad_param_type == bad_param_session_id:
                sid = '-1'
            initiator.set_fst_parameters(llt=llt)
            initiator.configure_session(sid, new_iface, old_iface)
    except Exception, e:
        # Match the exception text against the corruption we injected; a
        # match means the implementation rejected the bad parameter as
        # intended, anything else is an unexpected failure and re-raised.
        if e.args[0].startswith("Cannot add FST session with groupid"):
            if bad_param_type == bad_param_group_id or bad_param_type == bad_param_session_add_no_params:
                bad_parameter_detected = True
        elif e.args[0].startswith("Cannot set FST session new_ifname:"):
            if bad_param_type == bad_param_new_iface:
                bad_parameter_detected = True
        elif e.args[0].startswith("Session set operation failed"):
            if (bad_param_type == bad_param_session_set_no_params or
                bad_param_type == bad_param_session_set_unknown_param):
                bad_parameter_detected = True
        elif e.args[0].startswith("Cannot set FST session old_ifname:"):
            if (bad_param_type == bad_param_old_iface or
                bad_param_type == bad_param_session_id or
                bad_param_type == bad_param_session_set_no_params):
                bad_parameter_detected = True
        elif e.args[0].startswith("Cannot set FST session llt:"):
            if (bad_param_type == bad_param_negative_llt or
                bad_param_type == bad_param_llt_too_big or
                bad_param_type == bad_param_llt_nan):
                bad_parameter_detected = True
        elif e.args[0].startswith("Cannot set FST session peer address:"):
            if bad_param_type == bad_param_peer_addr:
                bad_parameter_detected = True
        if not bad_parameter_detected:
            # The exception was unexpected
            logger.info(e)
            exception_already_raised = True
            raise
    finally:
        # Always tear the pairs down, even on failure.
        fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
        fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
    if not exception_already_raised:
        if bad_parameter_detected:
            logger.info("Success. Bad parameter was detected (%s)" % bad_param_names[bad_param_type])
        else:
            # Note: zero LLT is a legal value at the add/set stage, so it
            # is treated as a success case here.
            if bad_param_type == bad_param_none or bad_param_type == bad_param_zero_llt:
                logger.info("Success. Session added and set")
            else:
                exception_text = ""
                if bad_param_type == bad_param_peer_addr:
                    exception_text = "Failure. Bad parameter was not detected (Peer address == %s)" % ap1.get_new_peer_addr()
                else:
                    exception_text = "Failure. Bad parameter was not detected (%s)" % bad_param_names[bad_param_type]
                raise Exception(exception_text)
    else:
        logger.info("Failure. Unexpected exception")
def fst_initiate_session(apdev, test_params, bad_param_type, init_on_ap):
    """This function makes the necessary preparations and then adds, sets and
    initiates a session using either correct or incorrect parameters at each
    stage depending on the value of bad_param_type. If the call ends as expected
    (with session being successfully added, set and initiated in case of correct
    parameters or with the expected exception in case of incorrect parameters),
    the function silently exits. Otherwise it throws an exception thus failing
    the test."""
    ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    bad_parameter_detected = False
    exception_already_raised = False
    try:
        fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
        # This call makes sure FstHostapd singleton object is created and, as a
        # result, the global control interface is registered (this is done from
        # the constructor).
        ap1.get_global_instance()
        # Select initiator/responder roles; new_iface is deliberately left
        # unset for the "no new_iface" bad-parameter case.
        if init_on_ap:
            initiator = ap1
            responder = sta1
            new_iface = ap2.ifname() if bad_param_type != bad_param_session_initiate_with_no_new_iface_set else None
            new_peer_addr = ap2.get_actual_peer_addr()
            resp_newif = sta2.ifname()
        else:
            initiator = sta1
            responder = ap1
            new_iface = sta2.ifname() if bad_param_type != bad_param_session_initiate_with_no_new_iface_set else None
            new_peer_addr = sta2.get_actual_peer_addr()
            resp_newif = ap2.ifname()
        peeraddr = None if bad_param_type != bad_param_session_initiate_with_bad_peer_addr_set else '10:DE:AD:DE:AD:11'
        initiator.add_peer(responder, peeraddr, new_peer_addr)
        if bad_param_type == bad_param_session_initiate_response_with_zero_llt:
            initiator.set_fst_parameters(llt='0')
        sid = initiator.add_session()
        initiator.configure_session(sid, new_iface)
        # Corrupt the session id after configuration, when requested.
        if bad_param_type == bad_param_session_initiate_no_params:
            sid = ''
        elif bad_param_type == bad_param_session_initiate_bad_session_id:
            sid = '-1'
        if bad_param_type == bad_param_session_initiate_request_with_bad_stie:
            actual_fsts_id = initiator.get_fsts_id_by_sid(sid)
            initiator.send_test_session_setup_request(str(actual_fsts_id), "bad_new_band")
            responder.wait_for_session_event(5)
        elif bad_param_type == bad_param_session_initiate_response_with_reject:
            initiator.send_session_setup_request(sid)
            initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
            setup_event = responder.wait_for_session_event(5, [],
                                                           ['EVENT_FST_SETUP'])
            if not 'id' in setup_event:
                raise Exception("No session id in FST setup event")
            responder.send_session_setup_response(str(setup_event['id']),
                                                  "reject")
            event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
            if event['new_state'] != "INITIAL" or event['reason'] != "REASON_REJECT":
                raise Exception("Response with reject not handled as expected")
            bad_parameter_detected = True
        elif bad_param_type == bad_param_session_initiate_response_with_bad_stie:
            initiator.send_session_setup_request(sid)
            initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
            responder.wait_for_session_event(5, [], ['EVENT_FST_SETUP'])
            actual_fsts_id = initiator.get_fsts_id_by_sid(sid)
            responder.send_test_session_setup_response(str(actual_fsts_id),
                                                       "accept", "bad_new_band")
            event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
            if event['new_state'] != "INITIAL" or event['reason'] != "REASON_ERROR_PARAMS":
                raise Exception("Response with bad STIE not handled as expected")
            bad_parameter_detected = True
        elif bad_param_type == bad_param_session_initiate_response_with_zero_llt:
            initiator.initiate_session(sid, "accept")
            event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
            if event['new_state'] != "TRANSITION_DONE":
                raise Exception("Response reception for a session with llt=0 not handled as expected")
            bad_parameter_detected = True
        elif bad_param_type == bad_param_session_initiate_stt_no_response:
            # Send a setup request and never answer it; the initiator's
            # session timeout (STT) must fire.
            initiator.send_session_setup_request(sid)
            initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
            responder.wait_for_session_event(5, [], ['EVENT_FST_SETUP'])
            event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
            if event['new_state'] != "INITIAL" or event['reason'] != "REASON_STT":
                raise Exception("No response scenario not handled as expected")
            bad_parameter_detected = True
        elif bad_param_type == bad_param_session_initiate_concurrent_setup_request:
            # Both sides issue a setup request at once; the expected outcome
            # depends on which MAC address is numerically lower.
            responder.add_peer(initiator)
            resp_sid = responder.add_session()
            responder.configure_session(resp_sid, resp_newif)
            initiator.send_session_setup_request(sid)
            actual_fsts_id = initiator.get_fsts_id_by_sid(sid)
            responder.send_test_session_setup_request(str(actual_fsts_id))
            event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
            initiator_addr = initiator.get_own_mac_address()
            responder_addr = responder.get_own_mac_address()
            if initiator_addr < responder_addr:
                event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
                if event['new_state'] != "INITIAL" or event['reason'] != "REASON_SETUP":
                    raise Exception("Concurrent setup scenario not handled as expected")
                event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SETUP"])
                # The incoming setup request received by the initiator has
                # priority over the one sent previously by the initiator itself
                # because the initiator's MAC address is numerically lower than
                # the one of the responder. Thus, the initiator should generate
                # an FST_SETUP event.
            else:
                event = initiator.wait_for_session_event(5, [], ["EVENT_FST_SESSION_STATE"])
                if event['new_state'] != "INITIAL" or event['reason'] != "REASON_STT":
                    raise Exception("Concurrent setup scenario not handled as expected")
                # The incoming setup request was dropped at the initiator
                # because its MAC address is numerically bigger than the one of
                # the responder. Thus, the initiator continue to wait for a
                # setup response until the STT event fires.
            bad_parameter_detected = True
        else:
            initiator.initiate_session(sid, "accept")
    except Exception, e:
        # Map the exception text back to the corruption we injected.
        if e.args[0].startswith("Cannot initiate fst session"):
            if bad_param_type != bad_param_none:
                bad_parameter_detected = True
        elif e.args[0].startswith("No FST-EVENT-SESSION received"):
            if bad_param_type == bad_param_session_initiate_request_with_bad_stie:
                bad_parameter_detected = True
        if not bad_parameter_detected:
            #The exception was unexpected
            logger.info(e)
            exception_already_raised = True
            raise
    finally:
        fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
        fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
    if not exception_already_raised:
        if bad_parameter_detected:
            logger.info("Success. Bad parameter was detected (%s)" % bad_param_names[bad_param_type])
        else:
            if bad_param_type == bad_param_none:
                logger.info("Success. Session initiated")
            else:
                raise Exception("Failure. Bad parameter was not detected (%s)" % bad_param_names[bad_param_type])
    else:
        logger.info("Failure. Unexpected exception")
def fst_transfer_session(apdev, test_params, bad_param_type, init_on_ap,
| |
child_node_set
# #print "parent_loop", parent_loop, "parent", node.parent
# if (parent_loop is not None and
# not parent_loop.loop_variant_set.intersection(child_node_set)):
# #print "pull up loop invariant", assign_alias
# parent_loop.parent.insert_before(parent_loop, assign_alias)
# else:
# insert_block, insert_marker = self.get_insert_block_and_point(node)
# insert_block.insert_before(insert_marker, assign_alias)
# node.parent.replace(node, alias)
def analyzeGetAttrNode(self, node):
if not self.options.alias_invariants:
return
# fixme: only handle the trivial case for now
# simplifies the protocol for making up alias names
if type(node.expression) != ast.IdentifierNode:
return
scope = self.get_parent_scope(node)
alias = scope.aliased_expression_map.get(node)
if not alias:
if node.expression.name[0] != '_':
alias_format = '_%s_%s'
else:
alias_format = '%s_%s'
alias_name = alias_format % (node.expression.name, node.name)
if alias_name in scope.alias_name_set:
print "duplicate alias_name", alias_name
print "scope", scope
print "scope.alias_name_set", scope.alias_name_set
print " ".join("scope.aliased_expression_map",
scope.aliased_expression_map)
return
alias = ast.IdentifierNode(alias_name)
scope.alias_name_set.add(alias_name)
scope.aliased_expression_map[node] = alias
assign_alias = ast.AssignNode(alias, node)
parent_loop = _get_parent_loop(node)
# fixme: check to see if this expression is loop-invariant
# must add a test case for this
child_node_set = set(node.getChildNodes())
#print "child_node_set", child_node_set
#print "parent_loop", parent_loop, "parent", node.parent
if (self.options.inline_hoist_loop_invariant_aliases and
parent_loop is not None and
not parent_loop.loop_variant_set.intersection(
child_node_set)):
# print "pull up loop invariant", assign_alias
parent_loop.parent.insert_before(parent_loop, assign_alias)
else:
insert_block, insert_marker = self.get_insert_block_and_point(
node)
insert_block.insert_before(insert_marker, assign_alias)
node.parent.replace(node, alias)
def analyzeIfNode(self, if_node):
    """Optimize both branches of an if/else, then promote identifiers and
    aliases that are common to both branches into the parent scope."""
    self.visit_ast(if_node.test_expression, if_node)
    for n in if_node.child_nodes:
        self.visit_ast(n, if_node)
    for n in if_node.else_.child_nodes:
        self.visit_ast(n, if_node.else_)
    parent_scope = self.get_parent_scope(if_node)
    if_scope_vars = if_node.scope.local_identifiers
    # once both branches are optimized, walk the scopes for any variables
    # that are defined in both places. those will be promoted to function
    # scope since it is safe to assume that those will defined fixme: this
    # feels like a bit of hack - but not sure how to do this correctly
    # without reverting to slower performance for almost all calls to
    # resolve_placeholder.
    #
    # it seems like certain optimizations need to be hoisted up to the
    # parent scope. this is particularly the case when you are aliasing
    # common functions that are likely to occur in the parent scope after
    # the conditional block. you *need* to hoist those, or you will have
    # errors when the branch fails. essentially you have to detect and hoist
    # 'branch invariant' optimizations.
    #
    # TODO: we can try to hoist up invariants if they don't depend on the
    # condition. this is somewhat hard to know, so the best way to do so
    # without multiple passes of the optimizer is to hoist only things that
    # were already defined in the parent scope - like _buffer, or things on
    # self.
    if if_node.else_.child_nodes:
        # Identifiers defined on both paths are guaranteed to exist after
        # the conditional, so they may live in the parent scope.
        common_local_identifiers = (if_scope_vars
                                    & if_node.else_.scope.local_identifiers)
        # The set of nodes that are not defined in both the if and else
        # branches.
        partial_local_identifiers = (
            (if_scope_vars ^ if_node.else_.scope.local_identifiers)
            | if_node.scope.partial_local_identifiers
            | if_node.else_.scope.partial_local_identifiers)
        common_alias_name_set = (if_node.scope.alias_name_set
                                 & if_node.else_.scope.alias_name_set)
        # Only promote aliased expressions to the parent scope when the
        # alias would be used in both the if and else branches.
        common_aliased_expression_map = _get_common_aliased_expression_map(
            if_node.scope, if_node.else_.scope)
        parent_scope.local_identifiers.update(common_local_identifiers)
        parent_scope.alias_name_set.update(common_alias_name_set)
        parent_scope.aliased_expression_map.update(
            common_aliased_expression_map)
    else:
        # No else branch: everything defined in the if is only partially
        # defined as far as the parent scope is concerned.
        partial_local_identifiers = if_scope_vars
    non_parent_scope_identifiers = (
        partial_local_identifiers - parent_scope.local_identifiers)
    parent_scope.partial_local_identifiers.update(
        non_parent_scope_identifiers)
    # Any variable considered dirty in an if or else block should be dirty
    # in the parent scope as well.
    if_dirty_identifiers = if_node.scope.dirty_local_identifiers
    else_dirty_identifiers = if_node.else_.scope.dirty_local_identifiers
    parent_scope.dirty_local_identifiers.update(if_dirty_identifiers)
    parent_scope.dirty_local_identifiers.update(else_dirty_identifiers)
def analyzeBinOpNode(self, n):
    """Optimize a binary operation.

    For `and`/`or` only the left operand is guaranteed to execute, so
    placeholder/UDN caching is disabled while visiting anything that may
    be skipped by short-circuit evaluation: a cached resolution could
    otherwise be hoisted above the enclosing ast.IfNode and run before
    the left-hand guard has been checked.
    """
    left_pending = True
    is_short_circuit = n.operator in ('and', 'or')
    if is_short_circuit:
        self.binop_count += 1
        saved_placeholders = self.options.cache_resolved_placeholders
        saved_udn = self.options.cache_resolved_udn_expressions
        # Only the outermost and/or gets its LHS visited with caching
        # still on — that operand always executes.
        if self.binop_count == _BINOP_FIRST_PASS:
            self.visit_ast(n.left, n)
            left_pending = False
            self.options.cache_resolved_placeholders = False
            self.options.cache_resolved_udn_expressions = False
    if left_pending:
        self.visit_ast(n.left, n)
    self.visit_ast(n.right, n)
    if is_short_circuit:
        # Restore the caching flags saved above.
        self.binop_count -= 1
        self.options.cache_resolved_placeholders = saved_placeholders
        self.options.cache_resolved_udn_expressions = saved_udn
analyzeBinOpExpressionNode = analyzeBinOpNode
def analyzeUnaryOpNode(self, op_node):
    # A unary op has a single operand; recurse into it.
    self.visit_ast(op_node.expression, op_node)
def analyzeGetUDNNode(self, node):
    """Optimize a unified-dotted-notation (UDN) lookup, optionally caching
    the resolved expression in a local alias."""
    if not self.options.prefer_whole_udn_expressions:
        self.visit_ast(node.expression, node)
    # If self._filter_function is created in a macro, make sure we rename
    # it.
    self_node = ast.IdentifierNode('self')
    if node.expression == self_node and node.name == '_filter_function':
        alias = ast.IdentifierNode('_self_private_filter_function',
                                   pos=node.pos)
        node.parent.replace(node, alias)
        return
    # If self.filter_function is created in a macro, make sure we rename it.
    if node.expression == self_node and node.name == 'filter_function':
        alias = ast.IdentifierNode('_self_filter_function', pos=node.pos)
        node.parent.replace(node, alias)
        return
    if self.options.cache_resolved_udn_expressions:
        cached_udn = ast.IdentifierNode(
            _generate_cached_resolved_placeholder(node),
            pos=node.pos)
        (local_identifiers, _, _) = _get_local_identifiers(node)
        if cached_udn in local_identifiers:
            # Already cached in this scope; just reuse the alias.
            node.parent.replace(node, cached_udn)
        else:
            insert_block, insert_marker = self.get_insert_block_and_point(
                node)
            # if there is a reassignment in the parent block, don't cache
            # this incase it needs to be re-resolved.
            # #set $text = $text.replace('\r\n', '\n')
            # #set $text = $text.replace('\t', ' ')
            # in this example, if you cache the udn expression text.replace,
            # you have a problem - you won't ever use the new string create
            # by the first call to replace
            for child_node in insert_block.child_nodes:
                if (isinstance(child_node, ast.AssignNode) and
                    child_node.left == node.expression):
                    return
            scope = self.get_parent_scope(node)
            scope.alias_name_set.add(cached_udn.name)
            scope.aliased_expression_map[node] = cached_udn
            # note: this is sketchy enough that it requires some explanation
            # basically, you need to visit the node for the parent function
            # to get the memo that this value is aliased. unfortunately, the
            # naive case of just calling visit_ast blows up since it tries
            # to double analyze a certain set of nodes. you only really need
            # to analyze that the assignment took place, then you can safely
            # alias the actual function call. definitely sketchy, but it
            # does seem to work
            assign_rph = ast.AssignNode(cached_udn, None, pos=node.pos)
            cached_udn.parent = assign_rph
            insert_block.insert_before(insert_marker, assign_rph)
            self.visit_ast(assign_rph, insert_block)
            # Attach the real right-hand side only after the visit above,
            # so the node is not analyzed a second time.
            assign_rph.right = node
            node.parent.replace(node, cached_udn)
    elif self.options.prefer_whole_udn_expressions:
        self.visit_ast(node.expression, node)
def analyzeSliceNode(self, pnode):
    # Visit both the sliced expression and the index expression.
    self.visit_ast(pnode.expression, pnode)
    self.visit_ast(pnode.slice_expression, pnode)
# a second pass over the optimized tree to hoist invariant aliases to their
# parent blocks
class FinalPassAnalyzer(_BaseAnalyzer):
    def analyzeTemplateNode(self, template):
        # Visit the template's main function first, then the remaining
        # top-level children.
        self.visit_ast(template.main_function, template)
        for n in template.child_nodes:
            self.visit_ast(n, template)
def analyzeFunctionNode(self, function):
    # Visit the function body first, then optionally batch its buffer
    # writes.
    for n in function.child_nodes:
        self.visit_ast(n, function)
    if self.options.batch_buffer_writes:
        self.collect_writes(function)
def analyzeForNode(self, for_node):
    # Visit the loop body, re-run the loop analysis on the optimized
    # body, then optionally batch buffer writes.
    for n in for_node.child_nodes:
        self.visit_ast(n, for_node)
    self.reanalyzeLoopNode(for_node)
    if self.options.batch_buffer_writes:
        self.collect_writes(for_node)
def analyzeIfNode(self, if_node):
    # depth-first
    for n in if_node.child_nodes:
        self.visit_ast(n, if_node)
    for n in if_node.else_.child_nodes:
        self.visit_ast(n, if_node.else_)
    # Re-check both branches after their bodies have been optimized.
    self.reanalyzeConditionalNode(if_node)
    self.reanalyzeConditionalNode(if_node.else_)
    if self.options.batch_buffer_writes:
        self.collect_writes(if_node)
        self.collect_writes(if_node.else_)
def analyzeBufferWrite(self, buffer_write):
    """Perform ast.BufferWrite optimizations.

    Do this in the final pass optimizer to make sure that the
    optimization is handled after caching placeholders.
    """
    self.visit_ast(buffer_write.expression, buffer_write)
    # All filtering is done before writing to the buffer. If the function
    # output needed filtering then it would be wrapped in a ast.FilterNode.
    if isinstance(buffer_write.expression, ast.CallFunctionNode):
        buffer_write.expression.sanitization_state = (
            ast.SanitizedState.OUTPUTTED_IMMEDIATELY)
def hoist(self, parent_node, parent_block, insertion_point, alias_node,
          assign_alias_node):
    """Move an alias assignment out of a nested block into parent_block.

    parent_node is the nested block currently containing
    assign_alias_node; insertion_point is the node in parent_block before
    which the alias must be available.
    """
    # prune the implementation in the nested block
    # print "prune", alias_node
    # print " ".join("parent_block aliases",
    #                parent_block.scope.aliased_expression_map)
    parent_node.remove(assign_alias_node)
    # if we've already hoisted an assignment, don't do it again
    if alias_node not in parent_block.scope.hoisted_aliases:
        # prune the original implementation in the current block and
        # reinsert the alias before it's first potential usage if it
        # is needed earlier in the execution path.
        # when a variable aliased in both the if and
        # else blocks is promoted to the parent scope
        # the implementation isn't actually hoisted (should it be?)
        # inline with the ast.IfNode optimization so we need to check if the
        # node is already here
        if assign_alias_node in parent_block.child_nodes:
            current_pos = parent_block.child_nodes.index(assign_alias_node)
            # an else node's parent is the ast.IfNode, which is the relevant
            # node when searching for the insertion point
            needed_pos = parent_block.child_nodes.index(insertion_point)
            if needed_pos < current_pos:
                # The assignment sits after its first use; move it up.
                parent_block.child_nodes.remove(assign_alias_node)
                if isinstance(parent_node, ast.ElseNode):
                    parent_block.insert_before(parent_node.parent,
                                               assign_alias_node)
                else:
                    parent_block.insert_before(parent_node,
                                               assign_alias_node)
                # print "insert_before", alias_node
        else:
            # still need to insert the alias
            parent_block.insert_before(insertion_point, assign_alias_node)
        parent_block.scope.hoisted_aliases.append(alias_node)
    # NOTE: once we hoist an expression, we need to make sure that we no
    # longer use this for dependencies in the current scope
    del parent_node.scope.aliased_expression_map[alias_node]
    parent_node.scope.alias_name_set.remove(assign_alias_node.left.name)
# FIXME: this is probably an indication of a bug or unnecessary
# difference between the caching of | |
<reponame>MichaelHopwood/HopML
# Source
import itertools
from sklearn.linear_model import LinearRegression, RANSACRegressor
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
import pandas as pd
from scipy import stats
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statsmodels.api as sm
def _array_from_df(df, X_parameters):
return np.vstack([df[xcol].values for xcol in X_parameters]).T
class Model:
    """Linear model kernel.

    Wraps one or more scikit-learn style regressors, fitting each on the
    training data and storing predictions and evaluation artifacts inside
    ``self.estimators[name]``.

    Subclasses are expected to supply the data attributes this class
    reads: ``train_X``/``train_y``/``test_X``/``test_y``,
    ``train_index``/``test_index``, ``variate_names`` and ``verbose``.
    """

    def __init__(self, estimators=None):
        """Initialize with a mapping of name -> {'estimator': object}.

        Parameters
        ----------
        estimators : dict or None
            Custom solvers keyed by display name. Defaults to ordinary
            least squares and RANSAC.
        """
        self.train_index = None
        self.test_index = None
        # Other solvers, like RANSAC or THEIL SEN can be added by user
        self.estimators = estimators or {
            'OLS': {'estimator': LinearRegression()},
            'RANSAC': {'estimator': RANSACRegressor()},
        }

    def train(self):
        """
        Train the model.
        """
        if self.verbose >= 1:
            print("\nBegin training.")
        for name, info in self.estimators.items():
            info['estimator'].fit(self.train_X, self.train_y)
            self._evaluate(name, info, self.train_X, self.train_y)

    def _evaluate(self, name, info, X, y, data_split='train'):
        """Evaluate the model and store results under ``info``.

        Computes MSE and R^2 for ``info['estimator']`` on (X, y). When
        the estimator exposes ``coef_``, a statsmodels OLS summary is
        also produced for p-values / confidence intervals.
        """
        pred = info['estimator'].predict(X)
        mse = mean_squared_error(y, pred)
        r2 = r2_score(y, pred)
        try:
            coeffs = info['estimator'].coef_
        except AttributeError:
            # Estimators such as RANSAC do not expose `coef_` directly.
            coeffs = None
        if coeffs is not None:
            # @dev: Compare p-value & CI calculations to one of
            # @dev: an open source package as a validation step.
            # @dev: Beware that the evaluation uses OLS and may
            # @dev: be mismatched if user inputs other regressors.
            Xnew = pd.DataFrame(X, columns=self.variate_names)
            Xnew = sm.add_constant(Xnew)
            est = sm.OLS(y, Xnew)
            est2 = est.fit()
            statsdf = est2.summary()
            # NOTE: a base-numpy implementation of these statistics
            # (std err, t, p-values, CIs, VIF) previously lived here and
            # was removed in favor of statsmodels; see version control
            # history if it is needed again.
        else:
            statsdf = None
        if self.verbose >= 1:
            print(f'[{name}] Mean squared error: %.2f'
                  % mse)
            print(f'[{name}] Coefficient of determination: %.2f'
                  % r2)
            # Display coefficients (nothing to show for RANSAC and others)
            if coeffs is not None:
                print(f'[{name}] {len(coeffs)} coefficient trained.')
        if self.verbose >= 2:
            # The full statsmodels summary is only printed for training
            # data, where the OLS fit above was computed.
            if coeffs is not None and data_split == 'train':
                print(statsdf)
        if data_split in ('train', 'test'):
            # Store all evaluation artifacts under split-prefixed keys so
            # callers can retrieve e.g. info['train_eval']['r2'].
            index = (self.train_index if data_split == 'train'
                     else self.test_index)
            info[f'{data_split}_index'] = index
            info[f'{data_split}_prediction'] = pred
            info[f'{data_split}_X'] = X
            info[f'{data_split}_y'] = y
            info[f'{data_split}_eval'] = {'mse': mse, 'r2': r2,
                                          'statsdf': statsdf}

    def predict(self):
        """Predict using the model.
        """
        if self.verbose >= 1:
            print("\nBegin testing.")
        for name, info in self.estimators.items():
            self._evaluate(name, info, self.test_X,
                           self.test_y, data_split='test')
def _map_season(df_index):
# Derived from https://stackoverflow.com/questions/44526662/group-data-by-season-according-to-the-exact-dates
spring = range(80, 172)
summer = range(172, 264)
fall = range(264, 355)
def _season(x):
if x in spring:
return 1
if x in summer:
return 2
if x in fall:
return 3
else:
return 0
return df_index.dayofyear.map(_season)
class TimeWeightedProcess:
    """Generate time-oriented dummy variables for linear regression.

    Available timeframes include "month", "season", and "hour".
    """

    def __init__(self, verbose=0):
        self.verbose = verbose
        # Set of time-bin labels observed at train time; test-time
        # binning must reuse exactly these bins.
        self.set_time_bins = None

    def time_weight(self, X, time_weighted='season', data_split='train'):
        """Expand each column of X into one column per time bin.

        For every input column and every time bin observed at train time,
        a new column is produced that carries the original values on rows
        falling in that bin and zeros elsewhere. ``self.variate_names``
        is rewritten to the expanded names (``"<name> | <bin>:<group>"``).

        Parameters
        ----------
        X : np.ndarray, shape (n_samples, n_features)
        time_weighted : str, one of 'month', 'season', 'hour'
        data_split : 'train' or 'test'

        Raises
        ------
        ValueError
            If ``time_weighted`` is not a recognized timeframe. (The
            previous implementation left ``time_bins`` unbound here and
            crashed with UnboundLocalError.)
        """
        index = self.train_index if data_split == 'train' else self.test_index
        # BUGFIX: originally a broken if/if/elif chain that never handled
        # unknown values of `time_weighted`; now a single chain with an
        # explicit error.
        if time_weighted == 'month':
            time_bins = index.month
        elif time_weighted == 'season':
            time_bins = _map_season(index)
        elif time_weighted == 'hour':
            time_bins = index.hour
        else:
            raise ValueError(
                f"Invalid time_weighted value: {time_weighted!r}. "
                "Expected 'month', 'season', or 'hour'.")
        if data_split == 'train':
            self.set_time_bins = set(time_bins)
        elif data_split == 'test' and not isinstance(self.set_time_bins, set):
            raise Exception(
                "Must construct train before constructing test " +
                "if using the TimeWeightedProcess.")
        if self.verbose >= 1:
            print(data_split, set(time_bins))
        df = pd.DataFrame()
        df['time_bins'] = time_bins
        # Row positions belonging to each train-time bin; bins unseen at
        # train time are ignored at test time.
        indices = {}
        for group in self.set_time_bins:
            indices[group] = df[df["time_bins"] == group].index
        new_variable_names = []
        for ii, param in enumerate(self.variate_names):
            df[f"col_{ii}"] = X[:, ii]
            # add groups: one zero-masked column per time bin
            for group in self.set_time_bins:
                vals = np.zeros(len(df))
                vals[indices[group]] = df.iloc[indices[group]][f"col_{ii}"]
                df[f"col_{ii}_{group}"] = vals
                new_variable_names.append(f"{param} | {time_weighted}:{group}")
            # remove original data
            del df[f"col_{ii}"]
        del df["time_bins"]
        xs = df.values
        self.variate_names = new_variable_names
        return xs
class DefaultModel(Model, TimeWeightedProcess):
    """Generate a simple model using the input data, without any data
    transposition.
    """

    def __init__(self, time_weighted=None, estimators=None, verbose=0, X_parameters=[]):
        super().__init__(estimators)
        self.verbose = verbose
        self.time_weighted = time_weighted
        self.X_parameters = X_parameters

    def construct(self, X, y, data_split='train'):
        """Register the (optionally time-weighted) design matrix and target
        for the requested data split."""
        self.variate_names = self.X_parameters
        if self.time_weighted is not None:
            X = self.time_weight(
                X, time_weighted=self.time_weighted, data_split=data_split)
        if data_split == 'train':
            self.train_X, self.train_y = X, y
        elif data_split == 'test':
            self.test_X, self.test_y = X, y
class PolynomialModel(Model, TimeWeightedProcess):
    """Add all interactions between terms up to a given degree.
    """
    def __init__(self, degree=2,
                 estimators=None,
                 time_weighted=None,
                 verbose=0,
                 X_parameters=None):
        """
        Parameters
        ----------
        degree : int
            Maximum polynomial degree of the generated interaction terms.
        estimators :
            Forwarded unchanged to ``Model.__init__``.
        time_weighted : str or None
            Optional time binning ('month', 'season', 'hour').
        verbose : int
            Verbosity level.
        X_parameters : list of str or None
            Names of the covariate columns in ``X``; defaults to an empty
            list.
        """
        super().__init__(estimators)
        self.degree = degree
        self.time_weighted = time_weighted
        self.verbose = verbose
        # Avoid the shared-mutable-default pitfall of ``X_parameters=[]``.
        self.X_parameters = [] if X_parameters is None else X_parameters
    def construct(self, X, y, data_split='train'):
        """Build the polynomial interaction matrix for ``X`` and store it
        for the requested split ('train' or 'test')."""
        num_inputs = X.shape[1]
        xs = np.array(X)
        # One-hot power vectors, one per input column (identity matrix rows).
        iden_matrix = np.eye(num_inputs, dtype=int)
        # All multisets of inputs for degrees 1..self.degree.
        all_combinations = [
            itertools.combinations_with_replacement(iden_matrix, deg)
            for deg in range(1, self.degree + 1)
        ]
        # Collect the exponent vector for each term and a printable name.
        poly_powers = []
        self.variate_names = []
        for combinations in all_combinations:
            for combination in combinations:
                sum_arr = np.array(combination).sum(axis=0)
                terms = [r"{}^{}".format(self.X_parameters[idx], p)
                         for idx, p in enumerate(sum_arr) if p != 0]
                self.variate_names.append(" * ".join(terms))
                poly_powers.append(sum_arr)
        self.powers = poly_powers
        # Raise data to each exponent pattern and stack the products
        # column-wise into the design matrix A.
        A = []
        for power in poly_powers:
            product = (xs**power).prod(1)
            A.append(product.reshape(product.shape + (1,)))
        A = np.hstack(A)
        if self.time_weighted is not None:
            A = self.time_weight(
                A, time_weighted=self.time_weighted, data_split=data_split)
        if data_split == 'train':
            self.train_X = A
            self.train_y = y
        elif data_split == 'test':
            self.test_X = A
            self.test_y = y
        return
def modeller(prod_col_dict,
kernel_type='default',
time_weighted='month',
X_parameters=[],
Y_parameter=None,
estimators=None,
prod_df=None,
test_split=0.2,
train_df=None,
test_df=None,
degree=3,
verbose=0):
"""Wrapper method to conduct the modelling of the timeseries data.
To input the data, there are two options.
Option 1: include full production data in `prod_df` parameter and `test_split` so
that the test split is conducted
Option 2: conduct the test-train split prior to calling the function and pass in data
under `test_df` and `train_df`
Parameters
----------
prod_col_dict: dict of {str : str}
A dictionary that contains the column names relevant
for the production data
- **siteid** (*string*), should be assigned to
site-ID column name in prod_df
- **timestamp** (*string*), should be assigned to
time-stamp column name in prod_df
- **irradiance** (*string*), should be assigned to
irradiance column name in prod_df, where data
should be in [W/m^2]
- **baseline** (*string*), should be assigned to
preferred column name to capture model calculations
in prod_df
- **dcsize**, (*string*), should be assigned to
preferred column name for site capacity in prod_df
kernel_type : str
Type of kernel type for the statistical model
- 'default', establishes a kernel where one component is instantiated
in the model for each feature.
- 'polynomial', a paraboiloidal polynomial with a dynamic number of
covariates (Xs) and degrees (n). For example, with 2 covariates and a
degree of 2, the formula would be:
Y(α , X) = α_0 + α_1 X_1 + | |
from typing import Dict, Iterator, Optional, Tuple, Union
import enum
import pathlib
import time
import warnings
from ctypes import CDLL, c_int
from wyzecam.api_models import WyzeAccount, WyzeCamera
try:
import av
import av.video.frame
except ImportError:
av = None
try:
import cv2
except ImportError:
cv2 = None
try:
import numpy as np
except ImportError:
np = None # type: ignore
from wyzecam.tutk import tutk
from wyzecam.tutk.tutk_ioctl_mux import TutkIOCtrlMux
from wyzecam.tutk.tutk_protocol import (
K10000ConnectRequest,
K10056SetResolvingBit,
respond_to_ioctrl_10001,
)
class WyzeIOTC:
    """Wyze IOTC singleton, used to construct iotc_sessions
    This object should generally be used inside a context manager, i.e.:
    ```python
    with WyzeIOTC() as wyze:
        with wyze.connect_and_auth(account, camera) as session:
            ...  # send commands to the camera, then start streaming
    ```
    :var tutk_platform_lib: the underlying c library used to communicate with the wyze
                            device; see [wyzecam.tutk.tutk.load_library][]
    :var udp_port: the UDP port used on this machine for communication with wyze cameras on the same network
    :vartype udp_port: int
    :var max_num_av_channels: the maximum number of simultaneous sessions this object supports.
    :vartype max_num_av_channels: int
    :var version: the version of the underyling `tutk_platform_lib`
    """
    def __init__(
        self,
        tutk_platform_lib: Optional[Union[str, CDLL]] = None,
        udp_port: Optional[int] = None,
        max_num_av_channels: Optional[int] = None,
    ) -> None:
        """Construct a WyzeIOTC session object
        You should only create one of these at a time.
        :param tutk_platform_lib: The underlying c library (from tutk.load_library()), or the path
                                  to this library.
        :param udp_port: Specify a UDP port. Random UDP port is used if it is specified as 0.
        :param max_num_av_channels: The max number of AV channels. If it is specified
                                    less than 1, AV will set max number of AV channels as 1.
        """
        if tutk_platform_lib is None:
            tutk_platform_lib = tutk.load_library()
        if isinstance(tutk_platform_lib, str):
            # A string argument is treated as a filesystem path to the lib.
            path = pathlib.Path(tutk_platform_lib)
            tutk_platform_lib = tutk.load_library(str(path.absolute()))
        self.tutk_platform_lib: CDLL = tutk_platform_lib
        self.initd = False
        self.udp_port = udp_port
        self.max_num_av_channels = max_num_av_channels
    def initialize(self):
        """Initialize the underlying TUTK library
        This is called automatically by the context manager,
        and should only be called if you intend to manually handle
        cleanup of this classes resources (by calling deinitialize
        when done with it!)
        """
        if self.initd:
            return
        self.initd = True
        errno = tutk.iotc_initialize(
            self.tutk_platform_lib, udp_port=self.udp_port or 0
        )
        if errno < 0:
            raise tutk.TutkError(errno)
        actual_num_chans = tutk.av_initialize(
            self.tutk_platform_lib, max_num_channels=self.max_num_av_channels
        )
        if actual_num_chans < 0:
            # BUG FIX: previously raised TutkError(errno), i.e. the stale
            # (non-negative) iotc_initialize result, masking the real AV
            # initialization error code.
            raise tutk.TutkError(actual_num_chans)
        self.max_num_av_channels = actual_num_chans
    def deinitialize(self):
        """Deinitialize the underlying TUTK library
        This is called automatically by the context manager
        """
        tutk.av_deinitialize(self.tutk_platform_lib)
        tutk.iotc_deinitialize(self.tutk_platform_lib)
    @property
    def version(self):
        """Get the version of the underlying TUTK library"""
        return tutk.iotc_get_version(self.tutk_platform_lib)
    def __enter__(self):
        self.initialize()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.deinitialize()
    def connect_and_auth(
        self, account: "WyzeAccount", camera: "WyzeCamera"
    ) -> "WyzeIOTCSession":
        """Initialize a new iotc session with the specified camera, and account information.
        The result of this method should be used as a context manager, i.e. using the 'with'
        keyword.  This allows us to automatically clean up after we're done with the session:
        ```python
        with WyzeIOTC() as iotc:
            with iotc.connect_and_auth(account, camera) as session:
                ...  # send configuration commands, or stream video from the session.
        ```
        See [WyzeIOTCSession](../iotc_session/) for more info.
        :param account: the account object returned from [wyzecam.api.get_user_info][]
        :param camera: the camera object returned from [wyzecam.api.get_camera_list][]
        :returns: An object representing the Wyze IOTC Session, a [WyzeIOTCSession](../iotc_session/)
        """
        return WyzeIOTCSession(self.tutk_platform_lib, account, camera)
class WyzeIOTCSessionState(enum.IntEnum):
    """An enum describing the possible states of a WyzeIOTCSession.

    States progress roughly DISCONNECTED -> IOTC_CONNECTING -> AV_CONNECTING
    -> CONNECTED -> AUTHENTICATING -> AUTHENTICATION_SUCCEEDED, with the
    *_FAILED members marking terminal failures of each phase.
    """
    DISCONNECTED = 0              # Not yet connected
    IOTC_CONNECTING = 1           # Currently attempting to connect the IOTC session
    AV_CONNECTING = 2             # Currently attempting to connect the AV session
    CONNECTED = 3                 # Fully connected, not yet authenticated
    CONNECTING_FAILED = 4         # Connection failed, no longer connected
    AUTHENTICATING = 5            # Attempting to authenticate
    AUTHENTICATION_SUCCEEDED = 6  # Fully connected and authenticated
    AUTHENTICATION_FAILED = 7     # Authentication failed, no longer connected
class WyzeIOTCSession:
"""An IOTC session object, used for communicating with Wyze cameras
This is constructed from a WyzeIOTC object:
```python
with WyzeIOTC() as wyze:
with wyze.connect_and_auth(account, camera) as session:
... # send configuration commands, or stream video
```
However, you can construct it manually, which can be helpful if you intend to set a
different frame size or bitrate than the defaults:
```python
with WyzeIOTCSession(lib, account, camera, bitrate=tutk.BITRATE_SD)
...
```
> **Note:** WyzeIOTCSession is intended to be used as a context manager. Otherwise,
> you will need to manually tell the session to connect and authenticate, by calling
> session._connect() followed by session._auth(), and session._disconnect() when you're
> ready to disconnect the session.
:var tutk_platform_lib: The underlying c library (from [tutk.load_library][wyzecam.tutk.tutk.load_library])
:var account: A [WyzeAccount][wyzecam.api_models.WyzeAccount] instance, see
[api.get_user_info][wyzecam.api.get_user_info]
:var camera: A [WyzeCamera][wyzecam.api_models.WyzeCamera] instance, see
[api.get_camera_list][wyzecam.api.get_camera_list]
:var preferred_frame_size: The preferred size of the video stream returned by the camera.
See [wyzecam.tutk.tutk.FRAME_SIZE_1080P][].
:var preferred_bitrate: The preferred bitrate of the video stream returned by the camera.
See [wyzecam.tutk.tutk.BITRATE_HD][].
:var session_id: The id of this session, once connected.
:var av_chan_id: The AV channel of this session, once connected.
:var state: The current connection state of this session. See
[WyzeIOTCSessionState](../iotc_session_state/).
"""
    def __init__(
        self,
        tutk_platform_lib: CDLL,
        account: WyzeAccount,
        camera: WyzeCamera,
        frame_size: int = tutk.FRAME_SIZE_1080P,
        bitrate: int = tutk.BITRATE_HD,
    ) -> None:
        """Construct a wyze iotc session
        :param tutk_platform_lib: The underlying c library (from
                        [tutk.load_library][wyzecam.tutk.tutk.load_library])
        :param account: A [WyzeAccount][wyzecam.api_models.WyzeAccount] instance, see
                        [api.get_user_info][wyzecam.api.get_user_info]
        :param camera: A [WyzeCamera][wyzecam.api_models.WyzeCamera] instance, see
                       [api.get_camera_list][wyzecam.api.get_camera_list]
        :param frame_size: Configures the size of the video stream returned by the camera.
                           See [wyzecam.tutk.tutk.FRAME_SIZE_1080P][].
        :param bitrate: Configures the bitrate of the video stream returned by the camera.
                        See [wyzecam.tutk.tutk.BITRATE_HD][].
        """
        self.tutk_platform_lib: CDLL = tutk_platform_lib
        self.account: WyzeAccount = account
        self.camera: WyzeCamera = camera
        # Connection identifiers; both remain None until _connect() succeeds.
        self.session_id: Optional[c_int] = None
        self.av_chan_id: Optional[c_int] = None
        # Session starts disconnected; state transitions are driven by
        # _connect()/_auth()/_disconnect().
        self.state: WyzeIOTCSessionState = WyzeIOTCSessionState.DISCONNECTED
        self.preferred_frame_size: int = frame_size
        self.preferred_bitrate: int = bitrate
def session_check(self) -> tutk.SInfoStruct:
"""Used by a device or a client to check the IOTC session info.
A device or a client may use this function to check if the IOTC session is
still alive as well as getting the IOTC session info.
:returns: A [`tutk.SInfoStruct`][wyzecam.tutk.tutk.SInfoStruct]
"""
assert (
self.session_id is not None
), "Please call _connect() before session_check()"
errcode, sess_info = tutk.iotc_session_check(
self.tutk_platform_lib, self.session_id
)
if errcode < 0:
raise tutk.TutkError(errcode)
return sess_info
    def iotctrl_mux(self) -> TutkIOCtrlMux:
        """Construct a new TutkIOCtrlMux for this session.
        Use this to send configuration messages, such as changing the camera's
        resolution. Note that you either should treat the result of this as a
        context manager (using with), or call start_listening() explicitly on
        the result. This starts a separate thread listening for the responses
        from the camera.
        ```python
        with session.iotctrl_mux() as mux:
            msg = tutk_protocol.K10056SetResolvingBit(
                tutk.FRAME_SIZE_1080P, tutk.BITRATE_SD)
            future = mux.send_ioctl(msg)
            assert future.result() == True, "Change bitrate failed!"
        ```
        """
        assert self.av_chan_id is not None, "Please call _connect() first!"
        return TutkIOCtrlMux(self.tutk_platform_lib, self.av_chan_id)
    def __enter__(self):
        """Context-manager entry: connect to the camera, then authenticate."""
        self._connect()
        self._auth()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: always disconnect, even on error."""
        self._disconnect()
def recv_video_data(
self,
) -> Iterator[Tuple[Optional[bytes], tutk.FrameInfoStruct]]:
"""A generator for returning raw video frames!
By iterating over the return value of this function, you will
get raw video frame data in the form of a bytes object. This
is convenient for accessing the raw video data without doing
the work of decoding or transcoding the actual video feed. If
you want to save the video to disk, display it, or otherwise process
the video, I highly recommend using `recv_video_frame` or
`recv_video_frame_nparray` instead of this function.
The second item in the tuple returned by this function, 'frame_info', is a useful
set of metadata about the frame as returned by the camera. See
[tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct] for more details about
the contents of this object.
Note that the format of this data is either raw h264 or HVEC H265 video. You will
have to introspect the frame_info object to determine the format!
```python
with wyzecam.WyzeIOTC() as wyze_iotc:
with wyze_iotc.connect_and_auth(account, camera) as sess:
for (frame, frame_info) in sess.recv_video_data():
# do something with the video data! :)
```
In order to use this, you will need to install [PyAV](https://pyav.org/docs/stable/).
:returns: A generator, which when iterated over, yields a tuple containing the decoded image
(as a [PyAV VideoFrame](https://pyav.org/docs/stable/api/video.html#av.video.frame.VideoFrame)),
as well as metadata about the frame (in the form of a
[tutk.FrameInfoStruct][wyzecam.tutk.tutk.FrameInfoStruct]).
"""
| |
11.0 * 10 ** (-0.4 * (25 - 23.9)))
def test_token_user_cannot_update_unowned_photometry(
    upload_data_token, manage_sources_token, public_source, ztf_camera, public_group
):
    """A token that does not own a photometry point gets 401 on PATCH."""
    post_payload = {
        'obj_id': str(public_source.id),
        'mjd': 58000.0,
        'instrument_id': ztf_camera.id,
        'flux': 12.24,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfi',
        'group_ids': [public_group.id],
    }
    status, data = api('POST', 'photometry', data=post_payload,
                       token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    photometry_id = data['data']['ids'][0]

    # The owner can read it back with the expected flux.
    status, data = api(
        'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token
    )
    assert status == 200
    assert data['status'] == 'success'
    np.testing.assert_allclose(
        data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9))
    )

    # A different (non-owner) token may not update it.
    patch_payload = {
        'obj_id': str(public_source.id),
        'flux': 11.0,
        'mjd': 58000.0,
        'instrument_id': ztf_camera.id,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfi',
    }
    status, data = api('PATCH', f'photometry/{photometry_id}',
                       data=patch_payload, token=manage_sources_token)
    assert status == 401
def test_token_user_update_photometry_groups(
    upload_data_token_two_groups,
    manage_sources_token_two_groups,
    public_source_two_groups,
    ztf_camera,
    public_group,
    public_group2,
    view_only_token,
):
    """PATCHing a point's group_ids controls which tokens can still read it."""
    upload_data_token = upload_data_token_two_groups
    public_source = public_source_two_groups

    # Upload a point visible to both groups.
    post_payload = {
        'obj_id': str(public_source.id),
        'mjd': 58000.0,
        'instrument_id': ztf_camera.id,
        'flux': 12.24,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfi',
        'group_ids': [public_group.id, public_group2.id],
    }
    status, data = api('POST', 'photometry', data=post_payload,
                       token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    photometry_id = data['data']['ids'][0]

    # The view-only token can initially read it.
    status, data = api(
        'GET', f'photometry/{photometry_id}?format=flux', token=view_only_token
    )
    assert status == 200
    assert data['status'] == 'success'

    # Restrict the point to group 2 only.
    patch_payload = {
        'obj_id': str(public_source.id),
        'flux': 11.0,
        'mjd': 58000.0,
        'instrument_id': ztf_camera.id,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfi',
        'group_ids': [public_group2.id],
    }
    status, data = api('PATCH', f'photometry/{photometry_id}',
                       data=patch_payload, token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'

    # The view-only token has now lost access.
    status, data = api(
        'GET', f'photometry/{photometry_id}?format=flux', token=view_only_token
    )
    assert status == 400
    assert data['status'] == 'error'
    assert "Insufficient permissions" in data["message"]
def test_user_can_delete_owned_photometry_data(
    upload_data_token, public_source, ztf_camera, public_group
):
    """The uploading token may delete its own photometry point."""
    payload = {
        'obj_id': str(public_source.id),
        'mjd': 58000.0,
        'instrument_id': ztf_camera.id,
        'flux': 12.24,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfi',
        'group_ids': [public_group.id],
    }
    status, data = api('POST', 'photometry', data=payload,
                       token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    photometry_id = data['data']['ids'][0]

    endpoint = f'photometry/{photometry_id}'
    status, data = api('GET', endpoint + '?format=flux', token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    np.testing.assert_allclose(
        data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9))
    )

    status, data = api('DELETE', endpoint, token=upload_data_token)
    assert status == 200

    # The point is gone now.
    status, data = api('GET', endpoint + '?format=flux', token=upload_data_token)
    assert status == 400
def test_user_cannot_delete_unowned_photometry_data(
    upload_data_token, manage_sources_token, public_source, ztf_camera, public_group
):
    """A non-owner token without admin rights gets 401 on DELETE."""
    payload = {
        'obj_id': str(public_source.id),
        'mjd': 58000.0,
        'instrument_id': ztf_camera.id,
        'flux': 12.24,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfi',
        'group_ids': [public_group.id],
    }
    status, data = api('POST', 'photometry', data=payload,
                       token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    photometry_id = data['data']['ids'][0]

    # The owner can read it back with the expected flux.
    status, data = api(
        'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token
    )
    assert status == 200
    assert data['status'] == 'success'
    np.testing.assert_allclose(
        data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9))
    )

    # Deleting with a different, non-admin token is forbidden.
    status, data = api(
        'DELETE', f'photometry/{photometry_id}', token=manage_sources_token
    )
    assert status == 401
def test_admin_can_delete_unowned_photometry_data(
    upload_data_token, super_admin_token, public_source, ztf_camera, public_group
):
    """A super-admin token may delete photometry it does not own."""
    payload = {
        'obj_id': str(public_source.id),
        'mjd': 58000.0,
        'instrument_id': ztf_camera.id,
        'flux': 12.24,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfi',
        'group_ids': [public_group.id],
    }
    status, data = api('POST', 'photometry', data=payload,
                       token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    photometry_id = data['data']['ids'][0]

    endpoint = f'photometry/{photometry_id}'
    status, data = api('GET', endpoint + '?format=flux', token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    np.testing.assert_allclose(
        data['data']['flux'], 12.24 * 10 ** (-0.4 * (25 - 23.9))
    )

    # Admin deletes the point despite not owning it.
    status, data = api('DELETE', endpoint, token=super_admin_token)
    assert status == 200

    status, data = api('GET', endpoint + '?format=flux', token=upload_data_token)
    assert status == 400
def test_token_user_retrieving_source_photometry_and_convert(
    view_only_token, public_source
):
    """Flux- and mag-format photometry must agree after AB<->Vega conversion.

    Fetches the same photometry in flux and mag format, converts fluxes to
    magnitudes by hand, and checks consistency; then repeats in the Vega
    system and verifies the AB-Vega zeropoint offset per filter.
    """
    status, data = api(
        'GET',
        f'sources/{public_source.id}/photometry?format=flux&magsys=ab',
        token=view_only_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    assert isinstance(data['data'], list)
    assert 'mjd' in data['data'][0]
    assert 'ra_unc' in data['data'][0]
    # Sort by epoch so first/last entries match between the two queries below.
    data['data'] = sorted(data['data'], key=lambda d: d['mjd'])
    # Manual flux -> magnitude conversion: m = -2.5 log10(f) + zp,
    # sigma_m = (2.5 / ln 10) * (sigma_f / f).
    mag1_ab = -2.5 * np.log10(data['data'][0]['flux']) + data['data'][0]['zp']
    magerr1_ab = 2.5 / np.log(10) * data['data'][0]['fluxerr'] / data['data'][0]['flux']
    maglast_ab = -2.5 * np.log10(data['data'][-1]['flux']) + data['data'][-1]['zp']
    magerrlast_ab = (
        2.5 / np.log(10) * data['data'][-1]['fluxerr'] / data['data'][-1]['flux']
    )
    # The mag-format endpoint must agree with the manual conversion.
    status, data = api(
        'GET',
        f'sources/{public_source.id}/photometry?format=mag&magsys=ab',
        token=view_only_token,
    )
    assert status == 200
    assert data['status'] == 'success'
    data['data'] = sorted(data['data'], key=lambda d: d['mjd'])
    assert np.allclose(mag1_ab, data['data'][0]['mag'])
    assert np.allclose(magerr1_ab, data['data'][0]['magerr'])
    assert np.allclose(maglast_ab, data['data'][-1]['mag'])
    assert np.allclose(magerrlast_ab, data['data'][-1]['magerr'])
    # Repeat in the Vega system.
    status, data = api(
        'GET',
        f'sources/{public_source.id}/photometry?format=flux&magsys=vega',
        token=view_only_token,
    )
    data['data'] = sorted(data['data'], key=lambda d: d['mjd'])
    mag1_vega = -2.5 * np.log10(data['data'][0]['flux']) + data['data'][0]['zp']
    magerr1_vega = (
        2.5 / np.log(10) * data['data'][0]['fluxerr'] / data['data'][0]['flux']
    )
    maglast_vega = -2.5 * np.log10(data['data'][-1]['flux']) + data['data'][-1]['zp']
    magerrlast_vega = (
        2.5 / np.log(10) * data['data'][-1]['fluxerr'] / data['data'][-1]['flux']
    )
    assert status == 200
    assert data['status'] == 'success'
    # Per-filter AB-Vega offset from sncosmo zeropoint band fluxes.
    # (NOTE: the comprehension variable shadows the builtin `filter`.)
    ab = sncosmo.get_magsystem('ab')
    vega = sncosmo.get_magsystem('vega')
    vega_to_ab = {
        filter: 2.5 * np.log10(ab.zpbandflux(filter) / vega.zpbandflux(filter))
        for filter in ['ztfg', 'ztfr', 'ztfi']
    }
    # AB magnitude equals Vega magnitude plus the per-filter offset;
    # magnitude errors are system-independent.
    assert np.allclose(mag1_ab, mag1_vega + vega_to_ab[data['data'][0]['filter']])
    assert np.allclose(magerr1_ab, magerr1_vega)
    assert np.allclose(
        maglast_ab, maglast_vega + vega_to_ab[data['data'][-1]['filter']]
    )
    assert np.allclose(magerrlast_ab, magerrlast_vega)
def test_token_user_retrieve_null_photometry(
    upload_data_token, public_source, ztf_camera, public_group
):
    """Non-detections (null mag) round-trip with the implied flux error."""
    payload = {
        'obj_id': str(public_source.id),
        'mjd': 58000.0,
        'instrument_id': ztf_camera.id,
        'mag': None,
        'magerr': None,
        'limiting_mag': 22.3,
        'magsys': 'ab',
        'filter': 'ztfg',
        'group_ids': [public_group.id],
    }
    status, data = api('POST', 'photometry', data=payload,
                       token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    photometry_id = data['data']['ids'][0]

    # Flux format: flux is null, fluxerr derives from the limiting mag.
    status, data = api(
        'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token
    )
    assert status == 200
    assert data['status'] == 'success'
    assert data['data']['flux'] is None
    np.testing.assert_allclose(
        data['data']['fluxerr'], 10 ** (-0.4 * (22.3 - 23.9)) / PHOT_DETECTION_THRESHOLD
    )

    # Mag format: both mag and magerr stay null.
    status, data = api(
        'GET', f'photometry/{photometry_id}?format=mag', token=upload_data_token
    )
    assert status == 200
    assert data['status'] == 'success'
    assert data['data']['mag'] is None
    assert data['data']['magerr'] is None
def test_token_user_get_range_photometry(
    upload_data_token, public_source, public_group, ztf_camera
):
    """photometry/range filters points by instrument and max date."""
    payload = {
        'obj_id': str(public_source.id),
        'mjd': [58000.0, 58500.0, 59000.0],
        'instrument_id': ztf_camera.id,
        'flux': 12.24,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfg',
        'group_ids': [public_group.id],
    }
    status, data = api('POST', 'photometry', data=payload,
                       token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'

    # Only the first of the three epochs precedes 2018-05-15.
    status, data = api(
        'GET',
        'photometry/range',
        token=upload_data_token,
        data={'instrument_ids': [ztf_camera.id], 'max_date': '2018-05-15T00:00:00'},
    )
    assert status == 200
    assert data['status'] == 'success'
    assert len(data['data']) == 1

    # Two epochs precede 2019-02-01; format/magsys options are accepted too.
    status, data = api(
        'GET',
        'photometry/range?format=flux&magsys=vega',
        token=upload_data_token,
        data={'instrument_ids': [ztf_camera.id], 'max_date': '2019-02-01T00:00:00'},
    )
    assert status == 200
    assert data['status'] == 'success'
    assert len(data['data']) == 2
def test_reject_photometry_inf(
    upload_data_token, public_source, public_group, ztf_camera
):
    """The photometry API must reject non-finite flux/mag values with a 400.

    The original test repeated the same POST/assert stanza five times; the
    payload variations are now data-driven, which keeps each rejected case
    in one place and guarantees identical assertions for all of them.
    """
    # Fields shared by every payload.
    common = {
        'obj_id': str(public_source.id),
        'instrument_id': ztf_camera.id,
        'filter': 'ztfg',
        'group_ids': [public_group.id],
    }
    bad_payloads = [
        # Infinite flux and flux error.
        {**common, 'mjd': [58000.0, 58500.0, 59000.0],
         'flux': math.inf, 'fluxerr': math.inf, 'zp': 25.0, 'magsys': 'ab'},
        # Infinite magnitude and magnitude error.
        {**common, 'mjd': 58000.0,
         'mag': math.inf, 'magerr': math.inf, 'limiting_mag': 22.3,
         'magsys': 'vega'},
        # Infinite limiting magnitude.
        {**common, 'mjd': 58000.0,
         'mag': 2.0, 'magerr': 23.0, 'limiting_mag': math.inf,
         'magsys': 'vega'},
        # Negative-infinite limiting magnitude on a non-detection.
        {**common, 'mjd': 58000.0,
         'mag': None, 'magerr': None, 'limiting_mag': -math.inf,
         'magsys': 'vega'},
        # Null flux with an infinite flux error.
        {**common, 'mjd': [58000.0, 58500.0, 59000.0],
         'flux': None, 'fluxerr': math.inf, 'zp': 25.0, 'magsys': 'ab'},
    ]
    for payload in bad_payloads:
        status, data = api('POST', 'photometry', data=payload,
                           token=upload_data_token)
        assert status == 400
        assert data['status'] == 'error'
def test_token_user_post_to_foreign_group_and_retrieve(
    upload_data_token, public_source_two_groups, public_group2, ztf_camera
):
    """An uploader may post photometry to its other group and read it back."""
    payload = {
        'obj_id': str(public_source_two_groups.id),
        'mjd': [58000.0, 58500.0, 59000.0],
        'instrument_id': ztf_camera.id,
        'flux': 12.24,
        'fluxerr': 0.031,
        'zp': 25.0,
        'magsys': 'ab',
        'filter': 'ztfg',
        'group_ids': [public_group2.id],
    }
    status, data = api('POST', 'photometry', data=payload,
                       token=upload_data_token)
    assert status == 200
    assert data['status'] == 'success'
    photometry_id = data['data']['ids'][0]

    status, data = api(
        'GET', f'photometry/{photometry_id}?format=flux', token=upload_data_token
    )
    assert status == 200
def test_problematic_photometry_1263(
upload_data_token, public_source, public_group, ztf_camera, public_group2
):
payload = {
"obj_id": public_source.id,
"group_ids": [public_group.id, public_group2.id],
"magsys": "ab",
"zp": 23.9,
"instrument_id": ztf_camera.id,
'mjd': [
59145.46447,
59149.50347,
59149.50347,
59150.50872,
59150.50872,
59152.51631,
59155.50801,
59152.51631,
59155.50801,
59156.48479,
59156.48479,
59126.48693,
59128.46834,
59130.50257,
59135.47329,
59137.4758,
59139.45454,
59141.47449,
59143.50987,
59143.50987,
59145.46447,
59145.50556,
59150.52806,
59150.52806,
59151.52116,
59151.52116,
59152.48332,
59152.48332,
59155.50022,
59155.50022,
59156.5383,
59126.53144,
59128.51928,
59130.53196,
59135.51196,
59137.51334,
59139.51507,
59141.51422,
59143.48529,
59143.48529,
59145.50556,
],
'filter': [
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfg',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
'ztfr',
],
'flux': [
105.4095462,
100.4989583,
100.4986052,
97.45052422,
97.45411937,
91.71425204,
81.08011148,
91.71489652,
81.08110854,
59.37327478,
59.37452643,
None,
None,
None,
73.17457336,
82.20150344,
89.14970986,
102.1692537,
98.6103674,
98.60984771,
105.4086204,
100.8602976,
94.84847105,
94.85063718,
104.8945366,
| |
in asynchronous
mode.
"""
    # ----------------------------------------------------------------------
    def __init__(self, ip_address: str, host: str = None, daisy: DAISY = 'auto',
                 montage: Optional[Union[list, dict]] = None,
                 streaming_package_size: int = 250,
                 capture_stream: Optional[bool] = False,
                 board_id: str = '0',
                 parallel_boards: int = 1, ) -> None:
        """Open a WiFi-based OpenBCI board connection.

        Parameters
        ----------
        ip_address :
            IP address of the WiFi shield.
        host :
            Optional hostname running the 'openbci_rpyc' daemon; when given,
            an instance of this same class is created remotely over RPyC and
            proxied via ``self.remote_host``. 'localhost' is treated the same
            as no host.
        daisy :
            Daisy-module option forwarded to the base class (presumably
            'auto'/True/False — confirm in the base class).
        montage :
            Electrode montage as list or dict, forwarded to the base class.
        streaming_package_size :
            Number of samples per streamed package.
        capture_stream :
            Whether to capture the stream, forwarded to the base class.
        board_id :
            Board identifier, forwarded to the base class.
        parallel_boards :
            Number of boards handled in parallel, forwarded to the base class.
        """
        self.remote_host = None
        self._ip_address = ip_address
        # Last response text received via write(); read() returns it.
        self._readed = None
        self._local_ip_address = self._get_local_ip_address()
        if host == 'localhost':
            host = None
        if host:
            # Remote mode: instantiate this class on the RPyC daemon and keep
            # a proxy to it; the montage must be pickled to cross the wire.
            try:
                rpyc_service = rpyc.connect(host, 18861, config={
                    'allow_public_attrs': True,
                    'allow_pickle': True,
                })
                self.remote_host = getattr(rpyc_service.root, self.__class__.__name__)(
                    self._ip_address,
                    host=None,
                    daisy=daisy,
                    capture_stream=capture_stream,
                    montage=pickle.dumps(montage),
                    streaming_package_size=streaming_package_size,
                    board_id=board_id,
                    parallel_boards=parallel_boards,
                )
            except socket.gaierror:
                # NOTE(review): this early return leaves the instance only
                # partially initialized (no TCP server, no base-class state).
                logging.error("'openbci_rpyc' daemon are running?")
                return
        super().__init__(daisy, montage, streaming_package_size,
                         capture_stream, board_id, parallel_boards)
        self._create_tcp_server()
        time.sleep(5)  # secure delay
        self._start_tcp_client()
        self._start_loop()
# ----------------------------------------------------------------------
def _get_local_ip_address(self) -> str:
"""Get the current network IP assigned."""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
local_ip_address = s.getsockname()[0]
s.close()
return local_ip_address
except Exception as e:
logging.warning('Impossible to detect a network connection, the WiFi'
'module and this machine must share the same network.')
logging.warning(f'If you are using this machine as server (access point) '
f'the address {DEFAULT_LOCAL_IP} will be used.')
logging.warning(e)
return DEFAULT_LOCAL_IP
# ----------------------------------------------------------------------
def write(self, data: Union[str, bytes]) -> None:
"""Send command to board through HTTP protocole.
Parameters
----------
data :
Commands to send, It should not be more than 31 characters long.
"""
if hasattr(data, 'decode'):
data = data.decode()
elif isinstance(data, int):
data = chr(data)
response = None
try:
logging.info(f"Sending command: '{data}'")
response = requests.post(
f"http://{self._ip_address}/command", json={'command': data})
except requests.exceptions.ConnectionError as msg:
if 'Connection aborted' in str(msg):
time.sleep(0.3)
return self.write(data)
except Exception as msg:
logging.warning(f"Error on sending command '{data}':{msg}")
return
if response and response.status_code == 200:
self._readed = response.text
elif response and response.status_code == 502:
logging.info(f"No confirmation from board, does not mean fail.")
else:
if response:
logging.warning(
f"Error code: {response.status_code} {response.text}")
self._readed = None
    # ----------------------------------------------------------------------
    def read(self, size=None) -> bytes:
        """Read the response for some command.

        Unlike serial mode, over WiFi there are no separate read and write
        operations: the response is obtained by the same write command. This
        implementation emulates the behavior of serial read/write for
        compatibility reasons. Not all commands return a response.

        Parameters
        ----------
        size :
            Ignored; present only for serial-API compatibility.
        """
        # NOTE(review): _readed is set from response.text (str) or None in
        # write(), so the `bytes` return annotation looks inaccurate — confirm.
        time.sleep(0.2)  # critical delay to wait for a response
        return self._readed
# ----------------------------------------------------------------------
def start_stream(self) -> None:
"""Initialize a TCP client on the WiFi shield and sends the command to
starts stream."""
super().start_stream()
# self._start_tcp_client()
response = requests.get(f"http://{self._ip_address}/stream/start")
if response.status_code != 200:
logging.warning(
f"Unable to start streaming.\nCheck API for status code {response.status_code} on /stream/start")
# ----------------------------------------------------------------------
def stop_stream(self) -> None:
"""Stop the data collection that runs asynchronously and sends the
command to stops stream."""
super().stop_stream()
response = requests.get(f"http://{self._ip_address}/stream/stop")
if response.status_code != 200:
logging.warning(
f"Unable to stop streaming.\nCheck API for status code {response.status_code} on /stream/stop")
self.binary_stream.close()
asyncore.close_all()
# ----------------------------------------------------------------------
def kafka_context(self) -> Dict[str, Any]:
"""Kafka contex generator."""
return {
'daisy': self.daisy,
'boardmode': self.boardmode,
'montage': self.montage,
'connection': 'wifi',
'gain': self._gain,
'parallel_boards': self.parallel_boards,
}
# ----------------------------------------------------------------------
def _create_tcp_server(self) -> None:
"""Create TCP server, this server will handle the streaming EEG data."""
# kafka_context = {
# 'daisy': self.daisy,
# 'boardmode': self.boardmode,
# 'montage': self.montage,
# 'connection': 'wifi',
# }
self.local_wifi_server = WiFiShieldTCPServer(self._local_ip_address,
lambda: getattr(
self, 'binary_stream'),
self.kafka_context,
)
self.local_wifi_server_port = self.local_wifi_server.socket.getsockname()[
1]
logging.info(
f"Open socket on {self._local_ip_address}:{self.local_wifi_server_port}")
# ----------------------------------------------------------------------
    def _start_tcp_client(self):
        """Connect the board to the TCP server.

        Sends configuration of the previously created server to the board,
        so it can connect to it: queries the shield for the attached board,
        stores the reported channel gains, then posts the local server's
        address so the shield opens a raw TCP stream back to it.

        Raises
        ------
        ValueError
            If no IP address has been configured for the shield.
        RuntimeError
            If the shield reports that no board is connected.
        RuntimeWarning
            If the shield cannot reach the local TCP server.
        """
        if self._ip_address is None:
            raise ValueError('self._ip_address cannot be None')
        logging.info(f"Init WiFi connection with IP: {self._ip_address}")
        self.requests_session = requests.Session()
        response = requests.get(f"http://{self._ip_address}/board")
        if response.status_code == 200:
            board_info = response.json()
            if not board_info['board_connected']:
                raise RuntimeError("No board connected to WiFi Shield.")
            # Keep the per-channel gains reported by the board; the local
            # server uses them (via set_gain) when handling the stream.
            self._gain = board_info['gains']
            self.local_wifi_server.set_gain(self._gain)
            # res_tcp_post = requests.post(f"http://{self._ip_address}/tcp",
            # json={
            # 'ip': self._local_ip_address,
            # 'port': self.local_wifi_server_port,
            # 'output': 'json',
            # 'delimiter': True,
            # 'latency': 1000,
            # })
            # Ask the shield to stream 'raw' packets to our local server.
            res_tcp_post = requests.post(f"http://{self._ip_address}/tcp",
                                         json={
                                             'ip': self._local_ip_address,
                                             'port': self.local_wifi_server_port,
                                             'output': 'raw',
                                             'latency': 1000,
                                         })
            if res_tcp_post.status_code == 200:
                tcp_status = res_tcp_post.json()
                if tcp_status['connected']:
                    logging.info("WiFi Shield to Python TCP Socket Established")
                else:
                    raise RuntimeWarning(
                        "WiFi Shield is not able to connect to local server.")
            else:
                logging.warning(
                    f"status_code {res_tcp_post.status_code}:{res_tcp_post.reason}")
# ----------------------------------------------------------------------
def set_latency(self, latency: int) -> None:
""""""
response = None
try:
response = requests.post(
f"http://{self._ip_address}/latency", json={'latency': latency, })
except Exception as e:
logging.warning(f"Error on setting latency '{data}': {e}")
return
if response:
if response.status_code == 200:
return
logging.warning(
f"Error code: {response.status_code} {response.text}")
# ----------------------------------------------------------------------
    def close(self) -> None:
        """Stops TCP server and data acquisition.

        Stops the stream, removes the TCP forwarding configuration from the
        WiFi shield, then delegates remaining cleanup to the parent class.
        """
        self.stop_stream()
        requests.delete(f"http://{self._ip_address}/tcp")
        super().close()
# ----------------------------------------------------------------------
    def _start_loop(self):
        """Start the TCP server on a thread asyncore loop.

        `asyncore.loop` blocks, so it runs on a separate thread; the thread
        handle is kept in `self.th_loop`.
        """
        self.th_loop = Thread(target=asyncore.loop, args=(), )
        self.th_loop.start()
# ########################################################################
# class CytonR:
# """"""
# # ----------------------------------------------------------------------
# def __init__(self, mode: MODE, endpoint: Union[str, List] = None, host: str = None,
# daisy: DAISY = 'auto',
# montage: Optional[Union[list, dict]] = None,
# streaming_package_size: int = 250,
# capture_stream: Optional[bool] = False,
# number_of_channels: List = [],
# ) -> Union[CytonRFDuino, CytonWiFi]:
# """"""
# if host == 'localhost':
# host = None
# if host:
# rpyc_service = rpyc.connect(host, 18861, config={
# 'allow_public_attrs': True,
# 'allow_pickle': True,
# })
# self.remote_host = getattr(rpyc_service.root, 'Cyton')(
# mode,
# endpoint,
# host=None,
# daisy=daisy,
# montage=pickle.dumps(montage),
# streaming_package_size=streaming_package_size,
# capture_stream=capture_stream,
# number_of_channels=number_of_channels,
# )
# # ----------------------------------------------------------------------
# def __getattribute__(self, attr: str) -> Any:
# """Some attributes must be acceded from RPyC."""
# if super().__getattribute__('remote_host'):
# return getattr(super().__getattribute__('remote_host'), attr)
# return super().__getattribute__(attr)
# ----------------------------------------------------------------------
def wifi(host, ip):
    """Return a remote ``Wifi`` object served over RPyC by *host*."""
    connection_config = {
        'allow_public_attrs': True,
        'allow_pickle': True,
    }
    service = rpyc.connect(host, 18861, config=connection_config)
    return service.root.Wifi(ip)
# ----------------------------------------------------------------------
def restart_services(host):
    """Ask the RPyC service running on *host* to restart its services."""
    connection_config = {
        'allow_public_attrs': True,
        'allow_pickle': True,
    }
    service = rpyc.connect(host, 18861, config=connection_config)
    return service.root.RestartServices()
########################################################################
class Cyton:
"""
`Cyton` is a shortcut for `CytonRFDuino` or `CytonWiFi`:
>>> Cyton('serial', ...)
    is equivalent to:
    >>> CytonRFDuino(...)
    and
    >>> Cyton('wifi', ...)
    is equivalent to:
>>> CytonWiFi(...)
Parameters
----------
mode
`serial` or `wifi`
endpoint
Serial port for RFduino or IP address for WiFi module.
host
        IP address of the server that has the OpenBCI board attached; by
        default it is assumed to be the same machine this code is running
        on, i.e. `localhost`.
daisy
Daisy board can be detected on runtime or declare it specifically.
montage
A list means consecutive channels e.g. `['Fp1', 'Fp2', 'F3', 'Fz',
'F4']` and a dictionary means specific channels `{1: 'Fp1', 2: 'Fp2',
3: 'F3', 4: 'Fz', 5: 'F4'}`.
streaming_package_size
The streamer will try to send packages of this size, this is NOT the
sampling rate for data acquisition.
capture_stream
Indicates if the data from the stream will be captured in asynchronous
mode.
"""
# ----------------------------------------------------------------------
def __init__(self, mode: MODE, endpoint: Union[str, List] = None, host: str = None,
daisy: Optional[List[DAISY]] = None,
montage: Optional[Union[list, dict]] = None,
streaming_package_size: int = 250,
capture_stream: Optional[bool] = False,
number_of_channels: List = [],
) -> Union[CytonRFDuino, CytonWiFi]:
if isinstance(endpoint, str):
endpoint = [endpoint]
if host == 'localhost':
host = None
if daisy is None:
daisy = [False for _ in endpoint]
elif isinstance(daisy, bool):
daisy = [daisy]
self.remote_host = None
self.openbci = None
if host:
self.openbci = None
rpyc_service = rpyc.connect(host, 18861, config={
'allow_public_attrs': True,
'allow_pickle': True,
})
self.remote_host = getattr(rpyc_service.root, 'Cyton')(
mode,
endpoint,
host=None,
daisy=daisy,
montage=pickle.dumps(montage),
streaming_package_size=streaming_package_size,
capture_stream=capture_stream,
number_of_channels=number_of_channels,
)
else:
openbci = []
if montage:
montage = pickle.loads(montage)
montage = self.split_montage(montage, number_of_channels)
else:
montage = [montage] * len(endpoint)
for board_id, end, mtg in zip(range(len(endpoint)), endpoint, montage):
if mode == 'serial':
openbci.append(CytonRFDuino(end, host, daisy[board_id], mtg,
streaming_package_size, capture_stream, board_id, len(
number_of_channels)))
elif mode == 'wifi':
openbci.append(CytonWiFi(end, host, daisy[board_id], mtg,
streaming_package_size, capture_stream, board_id, len(
number_of_channels)))
self.openbci = openbci
# ----------------------------------------------------------------------
    def __getattribute__(self, attr: str) -> Any:
        """Proxy attribute access.

        When a remote host is configured every attribute is resolved on the
        remote Cyton object (via RPyC); otherwise method access fans out
        across all local boards and plain attributes come from the first
        board.
        """
        if super().__getattribute__('remote_host'):
            return getattr(super().__getattribute__('remote_host'), attr)
        # return super().__getattribute__(attr)
        openbci = super().__getattribute__('openbci')
        if isinstance(openbci, list):
            if isinstance(getattr(openbci[0], attr), (types.MethodType, types.FunctionType)):
                # The methods will be applied to all boards; a list with one
                # result per board is returned.
                def wrap(*args, **kwargs):
                    return [getattr(mod, attr)(*args, **kwargs) for mod in openbci]
                return wrap
            # The attribute of the first board will be used by default.
            return getattr(openbci[0], attr)
        return super().__getattribute__(attr)
# ----------------------------------------------------------------------
def | |
# Copyright (c) 1999-2008 <NAME>; All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Comments and/or additions are welcome (send e-mail to:
# <EMAIL>).
#
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: <NAME> ###########
####### Last modified: Oct 31, 2008 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
## CHANGE LOG:
## ===========
## 09-07-21 ... added capability for getting the 'proportion' out of l/amannwhitneyu (but comment-disabled)
## 08-10-31 ... fixed import LinearAlgebra bug before glm fcns
## 07-11-26 ... conversion for numpy started
## 07-05-16 ... added Lin's Concordance Correlation Coefficient (alincc) and acov
## 05-08-21 ... added "Dice's coefficient"
## 04-10-26 ... added ap2t(), an ugly fcn for converting p-vals to T-vals
## 04-04-03 ... added amasslinregress() function to do regression on N-D arrays
## 03-01-03 ... CHANGED VERSION TO 0.6
## fixed atsem() to properly handle limits=None case
## improved histogram and median functions (estbinwidth) and
## fixed atvar() function (wrong answers for neg numbers?!?)
## 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
## 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
## 00-12-28 ... removed aanova() to separate module, fixed licensing to
## match Python License, fixed doc string & imports
## 00-04-13 ... pulled all "global" statements, except from aanova()
## added/fixed lots of documentation, removed io.py dependency
## changed to version 0.5
## 99-11-13 ... added asign() function
## 99-11-01 ... changed version to 0.4 ... enough incremental changes now
## 99-10-25 ... added acovariance and acorrelation functions
## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
## added aglm function (crude, but will be improved)
## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
## all handle lists of 'dimension's and keepdims
## REMOVED ar0, ar2, ar3, ar4 and replaced them with around
## reinserted fixes for abetai to avoid math overflows
## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
## handle multi-dimensional arrays (whew!)
## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
## added anormaltest per same reference
## re-wrote azprob to calc arrays of probs all at once
## 99-08-22 ... edited attest_ind printing section so arrays could be rounded
## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
## short/byte arrays (mean of # s btw 100-300 = -150??)
## 99-08-09 ... fixed asum so that the None case works for Byte arrays
## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
## 04/11/99 ... added asignaltonoise, athreshold functions, changed all
## max/min in array section to N.maximum/N.minimum,
## fixed square_of_sums to prevent integer overflow
## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
## 02/28/99 ... Fixed aobrientransform to return an array rather than a list
## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
## 01/13/99 ... CHANGED TO VERSION 0.3
## fixed bug in a/lmannwhitneyu p-value calculation
## 12/31/98 ... fixed variable-name bug in ldescribe
## 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
## 12/16/98 ... changed amedianscore to return float (not array) for 1 score
## 12/14/98 ... added atmin and atmax functions
## removed umath from import line (not needed)
## l/ageometricmean modified to reduce chance of overflows (take
## nth root first, then multiply)
## 12/07/98 ... added __version__variable (now 0.2)
## removed all 'stats.' from anova() fcn
## 12/06/98 ... changed those functions (except shellsort) that altered
## arguments in-place ... cumsum, ranksort, ...
## updated (and fixed some) doc-strings
## 12/01/98 ... added anova() function (requires NumPy)
## incorporated Dispatch class
## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
## added 'asum' function (added functionality to N.add.reduce)
## fixed both moment and amoment (two errors)
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
import copy
import math
from mo_math.vendor.strangman import pstat
# from types import *
__version__ = 0.6
############# DISPATCH CODE ##############
####################################
####### CENTRAL TENDENCY #########
####################################
def geometricmean(inlist):
    """
    Return the geometric mean of the values in the passed list.
    That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list.
    Usage:   lgeometricmean(inlist)
    """
    exponent = 1.0 / len(inlist)
    return math.prod(pow(value, exponent) for value in inlist)
def harmonicmean(inlist):
    """
    Return the harmonic mean of the values in the passed list.
    That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
    Usage:   lharmonicmean(inlist)
    """
    # Same left-to-right accumulation as a manual loop starting at 0.
    reciprocal_total = sum(1.0 / value for value in inlist)
    return len(inlist) / reciprocal_total
def mean(inlist):
"""
Returns the arithematic mean of the values in the passed list.
Assumes a '1D' list, but will function on the 1st dim of an array(!).
Usage: lmean(inlist)
"""
| |
= ['userId', 'fileId', 'version', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method SetAnnotationCollaborators" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/files/{fileId}/version/{version}/collaborators'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = str(self.apiClient.toPathValue(params['userId']))
resourcePath = resourcePath.replace('{' + 'userId' + '}',
replacement)
if ('fileId' in params):
replacement = str(self.apiClient.toPathValue(params['fileId']))
resourcePath = resourcePath.replace('{' + 'fileId' + '}',
replacement)
if ('version' in params):
replacement = str(self.apiClient.toPathValue(params['version']))
resourcePath = resourcePath.replace('{' + 'version' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'SetCollaboratorsResponse')
return responseObject
def GetAnnotationCollaborators(self, userId, fileId, **kwargs):
"""Get list of annotation collaborators
Args:
userId, str: User GUID (required)
fileId, str: File ID (required)
Returns: GetCollaboratorsResponse
"""
if( userId == None or fileId == None ):
raise ApiException(400, "missing required parameters")
allParams = ['userId', 'fileId']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method GetAnnotationCollaborators" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/files/{fileId}/collaborators'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = str(self.apiClient.toPathValue(params['userId']))
resourcePath = resourcePath.replace('{' + 'userId' + '}',
replacement)
if ('fileId' in params):
replacement = str(self.apiClient.toPathValue(params['fileId']))
resourcePath = resourcePath.replace('{' + 'fileId' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'GetCollaboratorsResponse')
return responseObject
def AddAnnotationCollaborator(self, userId, fileId, **kwargs):
"""Add an annotation collaborator
Args:
userId, str: User GUID (required)
fileId, str: File ID (required)
body, ReviewerInfo: Reviewer Info (optional)
Returns: AddCollaboratorResponse
"""
if( userId == None or fileId == None ):
raise ApiException(400, "missing required parameters")
allParams = ['userId', 'fileId', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method AddAnnotationCollaborator" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/files/{fileId}/collaborators'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = str(self.apiClient.toPathValue(params['userId']))
resourcePath = resourcePath.replace('{' + 'userId' + '}',
replacement)
if ('fileId' in params):
replacement = str(self.apiClient.toPathValue(params['fileId']))
resourcePath = resourcePath.replace('{' + 'fileId' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'AddCollaboratorResponse')
return responseObject
def DeleteDocumentReviewer(self, userId, fileId, reviewerId, **kwargs):
"""Delete document reviewer
Args:
userId, str: User GUID (required)
fileId, str: File ID (required)
reviewerId, str: Reviewer ID (required)
Returns: AddCollaboratorResponse
"""
if( userId == None or fileId == None or reviewerId == None ):
raise ApiException(400, "missing required parameters")
allParams = ['userId', 'fileId', 'reviewerId']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method DeleteDocumentReviewer" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/files/{fileId}/collaborators/{reviewerId}'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = str(self.apiClient.toPathValue(params['userId']))
resourcePath = resourcePath.replace('{' + 'userId' + '}',
replacement)
if ('fileId' in params):
replacement = str(self.apiClient.toPathValue(params['fileId']))
resourcePath = resourcePath.replace('{' + 'fileId' + '}',
replacement)
if ('reviewerId' in params):
replacement = str(self.apiClient.toPathValue(params['reviewerId']))
resourcePath = resourcePath.replace('{' + 'reviewerId' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'AddCollaboratorResponse')
return responseObject
def GetReviewerContacts(self, userId, **kwargs):
"""Get list of reviewer contacts
Args:
userId, str: User GUID (required)
Returns: GetReviewerContactsResponse
"""
if( userId == None ):
raise ApiException(400, "missing required parameters")
allParams = ['userId']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method GetReviewerContacts" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/contacts'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = str(self.apiClient.toPathValue(params['userId']))
resourcePath = resourcePath.replace('{' + 'userId' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'GetReviewerContactsResponse')
return responseObject
def SetReviewerContacts(self, userId, **kwargs):
"""Get list of reviewer contacts
Args:
userId, str: User GUID (required)
body, List[ReviewerContactInfo]: Reviewer Contacts Array (optional)
Returns: GetReviewerContactsResponse
"""
if( userId == None ):
raise ApiException(400, "missing required parameters")
allParams = ['userId', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method SetReviewerContacts" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/reviewerContacts'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = str(self.apiClient.toPathValue(params['userId']))
resourcePath = resourcePath.replace('{' + 'userId' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'GetReviewerContactsResponse')
return responseObject
def MoveAnnotation(self, userId, annotationId, body, **kwargs):
"""Move annotation
Args:
userId, str: User GUID (required)
annotationId, str: Annotation ID (required)
body, Point: position (required)
Returns: MoveAnnotationResponse
"""
if( userId == None or annotationId == None or body == None ):
raise ApiException(400, "missing required parameters")
allParams = ['userId', 'annotationId', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method MoveAnnotation" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/annotations/{annotationId}/position'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = str(self.apiClient.toPathValue(params['userId']))
resourcePath = resourcePath.replace('{' + 'userId' + '}',
replacement)
if ('annotationId' in params):
replacement = str(self.apiClient.toPathValue(params['annotationId']))
resourcePath = resourcePath.replace('{' + 'annotationId' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'MoveAnnotationResponse')
return responseObject
def ResizeAnnotation(self, userId, annotationId, body, **kwargs):
"""Resize annotation
Args:
userId, str: User GUID (required)
annotationId, str: Annotation ID (required)
body, AnnotationSizeInfo: position (required)
Returns: ResizeAnnotationResponse
"""
if( userId == None or annotationId == None or body == None ):
raise ApiException(400, "missing required parameters")
allParams = ['userId', 'annotationId', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method ResizeAnnotation" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/annotations/{annotationId}/size'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = str(self.apiClient.toPathValue(params['userId']))
resourcePath = resourcePath.replace('{' + 'userId' + '}',
replacement)
if ('annotationId' in params):
replacement = str(self.apiClient.toPathValue(params['annotationId']))
resourcePath = resourcePath.replace('{' + 'annotationId' + '}',
replacement)
postData = (params['body'] if 'body' in params else None)
response = self.apiClient.callAPI(self.basePath, resourcePath, method, queryParams,
postData, headerParams)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ResizeAnnotationResponse')
return responseObject
def SetAnnotationAccess(self, userId, annotationId, body, **kwargs):
"""Set Annotation Access
Args:
userId, str: User GUID (required)
annotationId, str: Annotation ID (required)
body, int: Annotation Access (required)
Returns: SetAnnotationAccessResponse
"""
if( userId == None or annotationId == None or body == None ):
raise ApiException(400, "missing required parameters")
allParams = ['userId', 'annotationId', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method SetAnnotationAccess" % key)
params[key] = val
del params['kwargs']
resourcePath = '/ant/{userId}/annotations/{annotationId}/annotationAccess'.replace('*', '')
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
if ('userId' in params):
replacement = | |
= pool_shape
rs, cs = pool_stride
assert pr > 0
assert pc > 0
assert pr <= r
assert pc <= c
# Compute index in pooled space of last needed pool
# (needed = each input pixel must appear in at least one pool)
def last_pool(im_shp, p_shp, p_strd):
rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
assert p_strd * rval + p_shp >= im_shp
assert p_strd * (rval - 1) + p_shp < im_shp
return rval
# Compute starting row of the last pool
last_pool_r = last_pool(image_shape[0],
pool_shape[0],
pool_stride[0]) * pool_stride[0]
# Compute number of rows needed in image for all indexes to work out
required_r = last_pool_r + pr
last_pool_c = last_pool(image_shape[1],
pool_shape[1],
pool_stride[1]) * pool_stride[1]
required_c = last_pool_c + pc
for c01bv in get_debug_values(c01b):
assert not contains_inf(c01bv)
assert c01bv.shape[1] == r
assert c01bv.shape[2] == c
wide_infinity = T.alloc(-np.inf,
c01b.shape[0],
required_r,
required_c,
c01b.shape[3])
name = c01b.name
if name is None:
name = 'anon_bc01'
c01b = T.set_subtensor(wide_infinity[:, 0:r, 0:c, :], c01b)
c01b.name = 'infinite_padded_' + name
for row_within_pool in xrange(pool_shape[0]):
row_stop = last_pool_r + row_within_pool + 1
for col_within_pool in xrange(pool_shape[1]):
col_stop = last_pool_c + col_within_pool + 1
cur = c01b[:,
row_within_pool:row_stop:rs,
col_within_pool:col_stop:cs,
:]
cur.name = ('max_pool_cur_' + c01b.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
if mx is None:
mx = cur
else:
mx = T.maximum(mx, cur)
mx.name = ('max_pool_mx_' + c01b.name + '_' +
str(row_within_pool) + '_' + str(col_within_pool))
mx.name = 'max_pool(' + name + ')'
for mxv in get_debug_values(mx):
assert isfinite(mxv)
return mx
def mean_pool(bc01, pool_shape, pool_stride, image_shape):
    """
    Does mean pooling (aka average pooling) via a Theano graph.

    Parameters
    ----------
    bc01 : theano tensor
        minibatch in format (batch size, channels, rows, cols)
    pool_shape : tuple
        shape of the pool region (rows, cols)
    pool_stride : tuple
        strides between pooling regions (row stride, col stride)
    image_shape : tuple
        (rows, cols) tuple to avoid doing some arithmetic in theano

    Returns
    -------
    pooled : theano tensor
        The output of pooling applied to `bc01`

    See Also
    --------
    max_pool : Same thing but with max pooling

    Examples
    --------
    >>> import theano
    >>> import theano.tensor as T
    >>> from pylearn2.models.mlp import mean_pool
    >>> import numpy as np
    >>> t = np.array([[1, 1, 3, 3],
    ...               [1, 1, 3, 3],
    ...               [5, 5, 7, 7],
    ...               [5, 5, 7, 7],
    ...               [9, 9, 11, 11],
    ...               [9, 9, 11, 11]])
    >>> X = np.zeros((3, t.shape[0], t.shape[1]))
    >>> X[:] = t
    >>> X = X[np.newaxis]
    >>> X_sym = T.tensor4('X')
    >>> pool_it = mean_pool(X_sym, pool_shape=(2, 2), pool_stride=(2, 2),
    ...                     image_shape=(6, 4))
    >>> f = theano.function(inputs=[X_sym], outputs=pool_it)

    This will pool over windows of size (2, 2) while also stepping by this
    same amount, shrinking the examples input to [[1, 3], [5, 7], [9, 11]].
    """
    mx = None
    r, c = image_shape
    pr, pc = pool_shape
    rs, cs = pool_stride

    # Compute index in pooled space of last needed pool
    # (needed = each input pixel must appear in at least one pool)
    def last_pool(im_shp, p_shp, p_strd):
        rval = int(np.ceil(float(im_shp - p_shp) / p_strd))
        assert p_strd * rval + p_shp >= im_shp
        assert p_strd * (rval - 1) + p_shp < im_shp
        return rval

    # Compute starting row of the last pool
    last_pool_r = last_pool(image_shape[0],
                            pool_shape[0],
                            pool_stride[0]) * pool_stride[0]
    # Compute number of rows needed in image for all indexes to work out
    required_r = last_pool_r + pr

    last_pool_c = last_pool(image_shape[1],
                            pool_shape[1],
                            pool_stride[1]) * pool_stride[1]
    required_c = last_pool_c + pc

    for bc01v in get_debug_values(bc01):
        assert not contains_inf(bc01v)
        assert bc01v.shape[2] == image_shape[0]
        assert bc01v.shape[3] == image_shape[1]

    # Pad the image with zeros, NOT with -inf as max_pool does.  Mean
    # pooling is computed below as a running *sum* divided by a count of
    # valid positions: a -inf pad value would poison the sum of any pool
    # window that overlaps the border (making the mean -inf, which the
    # isfinite debug assertion at the end would reject), whereas a zero
    # pad contributes nothing and the count mask already excludes padded
    # cells from the denominator.
    wide_pad = T.alloc(0.,
                       bc01.shape[0],
                       bc01.shape[1],
                       required_r,
                       required_c)

    name = bc01.name
    if name is None:
        name = 'anon_bc01'
    bc01 = T.set_subtensor(wide_pad[:, :, 0:r, 0:c], bc01)
    bc01.name = 'zero_padded_' + name

    # Create a 'mask' used to keep count of the number of elements summed for
    # each position
    wide_count = T.alloc(0, bc01.shape[0], bc01.shape[1], required_r,
                         required_c)
    bc01_count = T.set_subtensor(wide_count[:, :, 0:r, 0:c], 1)

    for row_within_pool in xrange(pool_shape[0]):
        row_stop = last_pool_r + row_within_pool + 1
        for col_within_pool in xrange(pool_shape[1]):
            col_stop = last_pool_c + col_within_pool + 1
            cur = bc01[:,
                       :,
                       row_within_pool:row_stop:rs,
                       col_within_pool:col_stop:cs]
            cur.name = ('mean_pool_cur_' + bc01.name + '_' +
                        str(row_within_pool) + '_' + str(col_within_pool))
            cur_count = bc01_count[:,
                                   :,
                                   row_within_pool:row_stop:rs,
                                   col_within_pool:col_stop:cs]
            if mx is None:
                mx = cur
                count = cur_count
            else:
                mx = mx + cur
                count = count + cur_count
                mx.name = ('mean_pool_mx_' + bc01.name + '_' +
                           str(row_within_pool) + '_' + str(col_within_pool))

    # Divide each per-window sum by the number of real (non-padded)
    # input elements that fell inside that window.
    mx /= count
    mx.name = 'mean_pool(' + name + ')'

    for mxv in get_debug_values(mx):
        assert isfinite(mxv)

    return mx
# Deprecated alias kept for backward compatibility; forwards to
# pylearn2.costs.mlp.WeightDecay.  (@wraps copies _WD's docstring, so the
# documentation is supplied by the target, not here.)
@wraps(_WD)
def WeightDecay(*args, **kwargs):
    # Fixed: the original implicit string concatenation produced
    # "This linkmay be removed" (missing space).
    warnings.warn("pylearn2.models.mlp.WeightDecay has moved to "
                  "pylearn2.costs.mlp.WeightDecay. This link "
                  "may be removed after 2015-05-13.")
    return _WD(*args, **kwargs)
# Deprecated alias kept for backward compatibility; forwards to
# pylearn2.costs.mlp.L1WeightDecay.
@wraps(_L1WD)
def L1WeightDecay(*args, **kwargs):
    # Fixed: the message pointed at WeightDecay instead of L1WeightDecay,
    # and the implicit concatenation was missing a space ("linkmay").
    warnings.warn("pylearn2.models.mlp.L1WeightDecay has moved to "
                  "pylearn2.costs.mlp.L1WeightDecay. This link "
                  "may be removed after 2015-05-13.")
    return _L1WD(*args, **kwargs)
class LinearGaussian(Linear):
"""
A Linear layer augmented with a precision vector, for modeling
conditionally Gaussian data.
    Specifically, given an input x, this layer models the distribution over
the output as
y ~ p(y | x) = N(y | Wx + b, beta^-1)
i.e., y is conditionally Gaussian with mean Wx + b and variance
beta^-1.
beta is a diagonal precision matrix so beta^-1 is a diagonal covariance
matrix.
Internally, beta is stored as the vector of diagonal values on this
matrix.
Since the output covariance is not a function of the input, this does
not provide an example-specific estimate of the error in the mean.
However, the vector-valued beta does mean that maximizing log p(y | x)
will reweight the mean squared error so that variables that can be
estimated easier will receive a higher penalty. This is one way of
    adapting the model better to heterogeneous data.
Parameters
----------
init_beta : float or ndarray
Any value > 0 that can be broadcasted to a vector of shape (dim, ).
The elements of beta are initialized to this value.
A good value is often the precision (inverse variance) of the target
variables in the training set, as provided by the
`beta_from_targets` function. This is the optimal beta for a dummy
model that just predicts the mean target value from the training set.
min_beta : float
The elements of beta are constrained to be >= this value.
This value must be > 0., otherwise the output conditional is not
constrained to be a valid probability distribution.
A good value is often the precision (inverse variance) of the target
variables in the training set, as provided by the
`beta_from_targets` function. This is the optimal beta for a dummy
model that just predicts the mean target value from the training set.
A trained model should always be able to obtain at least this much
precision, at least on the training set.
max_beta : float
The elements of beta are constrained to be <= this value.
We impose this constraint because for problems
where the training set values can be predicted
exactly, beta can grow without bound, which also makes the
gradients grow without bound, resulting in numerical problems.
kwargs : dict
Arguments to the `Linear` superclass.
"""
def __init__(self, init_beta, min_beta, max_beta, beta_lr_scale, **kwargs):
super(LinearGaussian, self).__init__(**kwargs)
self.__dict__.update(locals())
del self.self
del self.kwargs
    @wraps(Layer.set_input_space)
    def set_input_space(self, space):
        # Let Linear allocate W/b and define self.output_space first.
        super(LinearGaussian, self).set_input_space(space)
        # beta is a per-output-dimension precision vector, so the output
        # space must be a flat VectorSpace.
        assert isinstance(self.output_space, VectorSpace)
        # Initialize beta as a shared variable of shape (dim,): init_beta is
        # broadcast over the origin (zero vector) of the output space.
        self.beta = sharedX(self.output_space.get_origin() + self.init_beta,
                            'beta')
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None,
state=None, targets=None):
rval = super(LinearGaussian,
self).get_layer_monitoring_channels(state_below,
state,
targets)
assert isinstance(rval, OrderedDict)
rval['beta_min'] = self.beta.min()
rval['beta_mean'] = self.beta.mean()
rval['beta_max'] = self.beta.max()
if targets:
rval['mse'] = T.sqr(state - targets).mean()
return rval
@wraps(Linear.cost)
def cost(self, Y, Y_hat):
return (0.5 * T.dot(T.sqr(Y - Y_hat), self.beta).mean() -
0.5 * T.log(self.beta).sum())
@wraps(Linear.cost_matrix)
def cost_matrix(self, Y, Y_hat):
return 0.5 * T.sqr(Y - Y_hat) * self.beta - 0.5 * T.log(self.beta)
@wraps(Layer._modify_updates)
def _modify_updates(self, updates):
super(LinearGaussian, self)._modify_updates(updates)
if self.beta in updates:
updates[self.beta] = T.clip(updates[self.beta],
self.min_beta,
self.max_beta)
@wraps(Layer.get_lr_scalers)
def get_lr_scalers(self):
rval = super(LinearGaussian, self).get_lr_scalers()
if self.beta_lr_scale is not None:
rval[self.beta] = self.beta_lr_scale
return rval
@wraps(Layer.get_params)
def get_params(self):
return super(LinearGaussian, | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Project
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from marvin.remoteSSHClient import remoteSSHClient
import datetime
class Services:
    """Static service definitions consumed by the project tests."""

    def __init__(self):
        # Each service description is built separately, then assembled
        # into the single `services` mapping the tests read from.
        domain = {"name": "Domain"}
        project = {
            "name": "Project",
            "displaytext": "Test project",
        }
        mgmt_server = {
            "ipaddress": '192.168.100.21',
            "username": 'root',
            "password": 'password',
            "port": 22,
        }
        # Random characters are appended to usernames for uniqueness.
        account = {
            "email": "<EMAIL>",
            "firstname": "Test",
            "lastname": "User",
            "username": "test",
            "password": "password",
        }
        user = {
            "email": "<EMAIL>",
            "firstname": "User",
            "lastname": "User",
            "username": "User",
            "password": "password",
        }
        disk_offering = {
            "displaytext": "Tiny Disk Offering",
            "name": "Tiny Disk Offering",
            "disksize": 1,
        }
        service_offering = {
            "name": "Tiny Instance",
            "displaytext": "Tiny Instance",
            "cpunumber": 1,
            "cpuspeed": 100,  # in MHz
            "memory": 64,  # In MBs
        }
        # Hypervisor type should be the same as the hypervisor type of
        # the cluster.
        virtual_machine = {
            "displayname": "Test VM",
            "username": "root",
            "password": "password",
            "ssh_port": 22,
            "hypervisor": 'XenServer',
            "privateport": 22,
            "publicport": 22,
            "protocol": 'TCP',
        }
        self.services = {
            "domain": domain,
            "project": project,
            "mgmt_server": mgmt_server,
            "account": account,
            "user": user,
            "disk_offering": disk_offering,
            "volume": {"diskname": "Test Volume"},
            "service_offering": service_offering,
            "virtual_machine": virtual_machine,
            # Cent OS 5.3 (64 bit)
            "ostypeid": '01853327-513e-4508-9628-f1f55db1946f',
            "sleep": 60,
            "timeout": 10,
            "mode": 'advanced',
        }
class TestMultipleProjectCreation(cloudstackTestCase):
    """Verify an account can own multiple projects and can belong to
    multiple projects.
    """

    @classmethod
    def setUpClass(cls):
        # Shared API client and static service data for the whole class.
        cls.api_client = super(
            TestMultipleProjectCreation,
            cls
        ).getClsTestClient().getApiClient()
        cls.services = Services().services
        # Get Zone
        cls.zone = get_zone(cls.api_client, cls.services)
        # Create domains, account etc.
        cls.domain = get_domain(cls.api_client, cls.services)
        # The membership flow under test only works when the project
        # invitation workflow is disabled.
        configs = Configurations.list(
            cls.api_client,
            name='project.invite.required'
        )
        if not isinstance(configs, list):
            raise unittest.SkipTest(
                "List configurations has no config: project.invite.required")
        elif (configs[0].value).lower() != 'false':
            raise unittest.SkipTest(
                "'project.invite.required' should be set to false")
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            admin=True,
            domainid=cls.domain.id
        )
        # NOTE(review): this second account is created from
        # services["account"], not services["user"] — presumably fine
        # because usernames are randomized, but confirm it is intentional.
        cls.user = Account.create(
            cls.api_client,
            cls.services["account"],
            admin=True,
            domainid=cls.domain.id
        )
        cls._cleanup = [cls.account, cls.user]
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        # Fresh per-test clients and an empty cleanup list.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        try:
            # Clean up, terminate the created accounts, domains etc
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def _verify_project_listed(self, project):
        """List `project` by id and assert a valid, matching entry returns."""
        list_projects_response = Project.list(
            self.apiclient,
            id=project.id,
            listall=True
        )
        self.assertEqual(
            isinstance(list_projects_response, list),
            True,
            "Check for a valid list projects response"
        )
        self.assertNotEqual(
            len(list_projects_response),
            0,
            "Check list project response returns a valid project"
        )
        self.assertEqual(
            project.name,
            list_projects_response[0].name,
            "Check project name from list response"
        )

    def _add_user_and_verify_role(self, project):
        """Add `self.user` to `project` and assert it appears as Regular."""
        project.addAccount(
            self.apiclient,
            self.user.account.name,
            self.user.account.email
        )
        # listProjectAccount to verify the user is added to project or not
        accounts_response = Project.listAccounts(
            self.apiclient,
            projectid=project.id,
            account=self.user.account.name,
        )
        self.debug(accounts_response)
        self.assertEqual(
            isinstance(accounts_response, list),
            True,
            "Check for a valid list accounts response"
        )
        # Fixed from the original, which re-checked the length of the
        # (stale) project listing here instead of the accounts listing.
        self.assertNotEqual(
            len(accounts_response),
            0,
            "Check list project response returns a valid project"
        )
        self.assertEqual(
            accounts_response[0].role,
            'Regular',
            "Newly added user is not added as a regular user"
        )

    @attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
    def test_01_create_multiple_projects_by_account(self):
        """ Verify an account can own multiple projects and can belong to
        multiple projects
        """
        # Validate the following
        # 1. Create multiple project. Verify at step 1 An account is allowed
        #    to create multiple projects
        # 2. add one account to multiple project. Verify at step 2 an account
        #    is allowed to added to multiple project

        # Create project as a domain admin
        project_1 = Project.create(
            self.apiclient,
            self.services["project"],
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        # Cleanup created project at end of test
        self.cleanup.append(project_1)
        self.debug("Created project with domain admin with ID: %s" %
                   project_1.id)
        self._verify_project_listed(project_1)

        # Create another project as a domain admin
        project_2 = Project.create(
            self.apiclient,
            self.services["project"],
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        # Cleanup created project at end of test
        self.cleanup.append(project_2)
        self.debug("Created project with domain user with ID: %s" %
                   project_2.id)
        self._verify_project_listed(project_2)

        # The same user account must be addable to both projects.
        self._add_user_and_verify_role(project_1)
        self._add_user_and_verify_role(project_2)
        return
class TestCrossDomainAccountAdd(cloudstackTestCase):
    """Verify that an account from a different domain cannot be added
    to a project.
    """

    @classmethod
    def setUpClass(cls):
        # Shared API client and static service data.
        cls.api_client = super(
            TestCrossDomainAccountAdd,
            cls
        ).getClsTestClient().getApiClient()
        cls.services = Services().services
        cls.zone = get_zone(cls.api_client, cls.services)
        cls.domain = get_domain(cls.api_client, cls.services)
        # The add-account flow requires invitations to be disabled.
        configs = Configurations.list(
            cls.api_client,
            name='project.invite.required'
        )
        if not isinstance(configs, list):
            raise unittest.SkipTest(
                "List configurations has no config: project.invite.required")
        elif (configs[0].value).lower() != 'false':
            raise unittest.SkipTest(
                "'project.invite.required' should be set to false")
        # The project owner lives in the parent domain; the other account
        # lives in a freshly created domain.
        cls.new_domain = Domain.create(
            cls.api_client,
            cls.services["domain"]
        )
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            admin=True,
            domainid=cls.domain.id
        )
        cls.user = Account.create(
            cls.api_client,
            cls.services["account"],
            admin=True,
            domainid=cls.new_domain.id
        )
        cls._cleanup = [cls.account, cls.user]
        return

    @classmethod
    def tearDownClass(cls):
        # Release the class-level fixtures.
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as err:
            raise Exception("Warning: Exception during cleanup : %s" % err)
        return

    def setUp(self):
        # Per-test clients and an empty list of resources to clean up.
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        return

    def tearDown(self):
        # Remove whatever this particular test created.
        try:
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as err:
            raise Exception("Warning: Exception during cleanup : %s" % err)
        return

    @attr(tags=["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
    def test_02_cross_domain_account_add(self):
        """ Verify No cross domain projects
        """
        # 1. Create a project in a domain.
        # 2. Adding an account from a different domain to that project
        #    must fail.
        project = Project.create(
            self.apiclient,
            self.services["project"],
            account=self.account.account.name,
            domainid=self.account.account.domainid
        )
        # Make sure the project is removed once the test finishes.
        self.cleanup.append(project)
        self.debug("Created project with domain admin with ID: %s" %
                   project.id)

        listed_projects = Project.list(
            self.apiclient,
            id=project.id,
            listall=True
        )
        self.assertEqual(
            isinstance(listed_projects, list),
            True,
            "Check for a valid list projects response"
        )
        first_project = listed_projects[0]
        self.assertNotEqual(
            len(listed_projects),
            0,
            "Check list project response returns a valid project"
        )
        self.assertEqual(
            project.name,
            first_project.name,
            "Check project name from list response"
        )

        self.debug("Adding user: %s from domain: %s to project: %s" % (
            self.user.account.name,
            self.user.account.domainid,
            project.id
        ))
        with self.assertRaises(Exception):
            # Adding a user from a different domain must be rejected.
            project.addAccount(
                self.apiclient,
                self.user.account.name
            )
        self.debug("User add to project failed!")
        return
class TestDeleteAccountWithProject(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeleteAccountWithProject,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone
cls.zone = get_zone(cls.api_client, cls.services)
cls.domain = get_domain(
cls.api_client,
cls.services
)
configs = Configurations.list(
cls.api_client,
name='project.invite.required'
)
if not isinstance(configs, list):
raise unittest.SkipTest("List configurations has no config: project.invite.required")
elif (configs[0].value).lower() != 'false':
raise unittest.SkipTest("'project.invite.required' should be set to false")
# Create account
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls._cleanup = [cls.account]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created accounts, domains etc
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "basic", "sg", "eip", "advancedns", "simulator"])
def test_03_delete_account_with_project(self):
""" Test As long as the project exists, its owner can't be removed
"""
# Validate the following
# 1. Create a | |
<reponame>vaporydev/ddht
import itertools
import logging
import secrets
import time
from typing import List, Optional, Tuple
from async_service import Service
from eth_enr import ENRAPI, ENRDatabaseAPI
from eth_enr.exceptions import OldSequenceNumber
from eth_typing import NodeID
from eth_utils import encode_hex
from eth_utils.toolz import take
from mypy_extensions import TypedDict
import trio
from trio.abc import SendChannel
from ddht._utils import every
from ddht.base_message import (
AnyInboundMessage,
AnyOutboundMessage,
InboundMessage,
OutboundMessage,
)
from ddht.endpoint import Endpoint
from ddht.enr import partition_enrs
from ddht.exceptions import UnexpectedMessage
from ddht.kademlia import KademliaRoutingTable, compute_log_distance, iter_closest_nodes
from ddht.v5.abc import MessageDispatcherAPI
from ddht.v5.constants import (
FIND_NODE_RESPONSE_TIMEOUT,
LOOKUP_PARALLELIZATION_FACTOR,
LOOKUP_RETRY_THRESHOLD,
NODES_MESSAGE_PAYLOAD_SIZE,
REQUEST_RESPONSE_TIMEOUT,
ROUTING_TABLE_LOOKUP_INTERVAL,
ROUTING_TABLE_PING_INTERVAL,
)
from ddht.v5.endpoint_tracker import EndpointVote
from ddht.v5.messages import FindNodeMessage, NodesMessage, PingMessage, PongMessage
class BaseRoutingTableManagerComponent(Service):
    """Base class for services that participate in managing the routing table."""

    logger = logging.getLogger(
        "ddht.v5.routing_table_manager.BaseRoutingTableManagerComponent"
    )

    def __init__(
        self,
        local_node_id: NodeID,
        routing_table: KademliaRoutingTable,
        message_dispatcher: MessageDispatcherAPI,
        enr_db: ENRDatabaseAPI,
    ) -> None:
        self.local_node_id = local_node_id
        self.routing_table = routing_table
        self.message_dispatcher = message_dispatcher
        self.enr_db = enr_db

    def update_routing_table(self, node_id: NodeID) -> None:
        """
        Update a peer's entry in the routing table.

        This method should be called, whenever we receive a message from them.
        """
        self.logger.debug("Updating %s in routing table", encode_hex(node_id))
        self.routing_table.update(node_id)

    def get_local_enr(self) -> ENRAPI:
        """Get the local enr from the ENR DB.

        Raises ValueError when the database has no record for our own node
        id (which indicates a broken setup, not a transient condition).
        """
        try:
            local_enr = self.enr_db.get_enr(self.local_node_id)
        except KeyError:
            raise ValueError(
                f"Local ENR with node id {encode_hex(self.local_node_id)} not "
                f"present in db"
            )
        else:
            return local_enr

    async def maybe_request_remote_enr(
        self, inbound_message: AnyInboundMessage
    ) -> None:
        """Request the peers ENR if there is a newer version according to a ping or pong."""
        if not isinstance(inbound_message.message, (PingMessage, PongMessage)):
            raise TypeError(
                f"Only ping and pong messages contain an ENR sequence number, got "
                f"{inbound_message}"
            )

        try:
            remote_enr = self.enr_db.get_enr(inbound_message.sender_node_id)
        except KeyError:
            # Post-handshake we should already have the peer's ENR; if it is
            # missing, fetch it unconditionally.
            self.logger.warning(
                "No ENR of %s present in the database even though it should post handshake. "
                "Requesting it now.",
                encode_hex(inbound_message.sender_node_id),
            )
            request_update = True
        else:
            # Only fetch when the advertized sequence number is strictly
            # newer than the record we hold.
            current_sequence_number = remote_enr.sequence_number
            advertized_sequence_number = inbound_message.message.enr_seq
            if current_sequence_number < advertized_sequence_number:
                self.logger.debug(
                    "ENR advertized by %s is newer than ours (sequence number %d > %d)",
                    encode_hex(inbound_message.sender_node_id),
                    advertized_sequence_number,
                    current_sequence_number,
                )
                request_update = True
            elif current_sequence_number == advertized_sequence_number:
                self.logger.debug(
                    "ENR of %s is up to date (sequence number %d)",
                    encode_hex(inbound_message.sender_node_id),
                    advertized_sequence_number,
                )
                request_update = False
            elif current_sequence_number > advertized_sequence_number:
                self.logger.debug(
                    "Peer %s advertizes apparently outdated ENR (sequence number %d < %d)",
                    encode_hex(inbound_message.sender_node_id),
                    advertized_sequence_number,
                    current_sequence_number,
                )
                request_update = False
            else:
                raise Exception("Invariant: Unreachable")

        if request_update:
            await self.request_remote_enr(inbound_message)

    async def request_remote_enr(self, inbound_message: AnyInboundMessage) -> None:
        """Request the ENR of the sender of an inbound message and store it in the ENR db."""
        self.logger.debug(
            "Requesting ENR from %s", encode_hex(inbound_message.sender_node_id)
        )
        find_nodes_message = FindNodeMessage(
            request_id=self.message_dispatcher.get_free_request_id(
                inbound_message.sender_node_id
            ),
            distance=0,  # request enr of the peer directly
        )
        try:
            with trio.fail_after(REQUEST_RESPONSE_TIMEOUT):
                response = await self.message_dispatcher.request(
                    inbound_message.sender_node_id,
                    find_nodes_message,
                    endpoint=inbound_message.sender_endpoint,
                )
        except trio.TooSlowError:
            # Timeouts are expected on a p2p network; just give up quietly.
            self.logger.debug(
                "FindNode request to %s has timed out",
                encode_hex(inbound_message.sender_node_id),
            )
            return

        sender_node_id = response.sender_node_id
        self.update_routing_table(sender_node_id)

        if not isinstance(response.message, NodesMessage):
            self.logger.debug(
                "Peer %s responded to FindNode with %s instead of Nodes message",
                encode_hex(sender_node_id),
                response.message.__class__.__name__,
            )
            return
        self.logger.debug("Received Nodes message from %s", encode_hex(sender_node_id))

        # A distance-0 request should yield exactly one ENR (the peer's own);
        # other counts are logged but all received ENRs are still stored.
        if len(response.message.enrs) == 0:
            self.logger.debug(
                "Peer %s responded to FindNode with an empty Nodes message",
                encode_hex(sender_node_id),
            )
        elif len(response.message.enrs) > 1:
            self.logger.debug(
                "Peer %s responded to FindNode with more than one ENR",
                encode_hex(inbound_message.sender_node_id),
            )
        for enr in response.message.enrs:
            if enr.node_id != sender_node_id:
                self.logger.debug(
                    "Peer %s responded to FindNode with ENR from %s",
                    encode_hex(sender_node_id),
                    # Bug fix: log the node id of the mismatching ENR itself;
                    # the original always logged enrs[0].node_id regardless of
                    # which ENR in the response triggered the mismatch.
                    encode_hex(enr.node_id),
                )
            self.enr_db.set_enr(enr)
class PingHandlerService(BaseRoutingTableManagerComponent):
    """Responds to Pings with Pongs and requests ENR updates."""

    logger = logging.getLogger("ddht.v5.routing_table_manager.PingHandlerService")

    def __init__(
        self,
        local_node_id: NodeID,
        routing_table: KademliaRoutingTable,
        message_dispatcher: MessageDispatcherAPI,
        enr_db: ENRDatabaseAPI,
        outbound_message_send_channel: SendChannel[OutboundMessage[PongMessage]],
    ) -> None:
        super().__init__(local_node_id, routing_table, message_dispatcher, enr_db)
        # Channel on which finished Pong responses are handed to the sender.
        self.outbound_message_send_channel = outbound_message_send_channel

    async def run(self) -> None:
        # Subscribe to all inbound Ping requests and serve them forever.
        subscription = self.message_dispatcher.add_request_handler(PingMessage)
        async with subscription:
            async for ping_request in subscription:
                self.logger.debug(
                    "Handling %s from %s",
                    ping_request,
                    encode_hex(ping_request.sender_node_id),
                )
                self.update_routing_table(ping_request.sender_node_id)
                await self.respond_with_pong(ping_request)
                # ENR refresh can run in the background; it must not block
                # handling of the next Ping.
                self.manager.run_task(self.maybe_request_remote_enr, ping_request)

    async def respond_with_pong(
        self, inbound_message: InboundMessage[PingMessage]
    ) -> None:
        # Guard clause: this handler only understands Pings.
        message = inbound_message.message
        if not isinstance(message, PingMessage):
            raise TypeError(
                f"Can only respond with Pong to Ping, not "
                f"{message.__class__.__name__}"
            )

        # Echo our ENR sequence number plus the sender's observed endpoint.
        enr = self.get_local_enr()
        pong = PongMessage(
            request_id=message.request_id,
            enr_seq=enr.sequence_number,
            packet_ip=inbound_message.sender_endpoint.ip_address,
            packet_port=inbound_message.sender_endpoint.port,
        )
        response = inbound_message.to_response(pong)
        self.logger.debug(
            "Responding with Pong to %s", encode_hex(response.receiver_node_id)
        )
        await self.outbound_message_send_channel.send(response)
class FindNodeHandlerService(BaseRoutingTableManagerComponent):
    """Responds to FindNode with Nodes messages."""

    logger = logging.getLogger("ddht.v5.routing_table_manager.FindNodeHandlerService")

    def __init__(
        self,
        local_node_id: NodeID,
        routing_table: KademliaRoutingTable,
        message_dispatcher: MessageDispatcherAPI,
        enr_db: ENRDatabaseAPI,
        outbound_message_send_channel: SendChannel[OutboundMessage[NodesMessage]],
    ) -> None:
        super().__init__(local_node_id, routing_table, message_dispatcher, enr_db)
        # Channel on which finished Nodes responses are handed to the sender.
        self.outbound_message_send_channel = outbound_message_send_channel

    async def run(self) -> None:
        # Subscribe to inbound FindNode requests and serve them forever.
        handler_subscription = self.message_dispatcher.add_request_handler(
            FindNodeMessage
        )
        async with handler_subscription:
            async for inbound_message in handler_subscription:
                self.update_routing_table(inbound_message.sender_node_id)

                if not isinstance(inbound_message.message, FindNodeMessage):
                    raise TypeError(
                        f"Received {inbound_message.__class__.__name__} from message dispatcher "
                        f"even though we subscribed to FindNode messages"
                    )

                # Distance 0 is a request for our own ENR; any other
                # distance asks for peers in that routing-table bucket.
                if inbound_message.message.distance == 0:
                    await self.respond_with_local_enr(inbound_message)
                else:
                    await self.respond_with_remote_enrs(inbound_message)

    async def respond_with_local_enr(
        self, inbound_message: InboundMessage[FindNodeMessage]
    ) -> None:
        """Send a Nodes message containing the local ENR in response to an inbound message."""
        local_enr = self.get_local_enr()
        nodes_message = NodesMessage(
            request_id=inbound_message.message.request_id, total=1, enrs=(local_enr,)
        )
        outbound_message = inbound_message.to_response(nodes_message)

        self.logger.debug(
            "Responding to %s with Nodes message containing local ENR",
            inbound_message.sender_endpoint,
        )
        await self.outbound_message_send_channel.send(outbound_message)

    async def respond_with_remote_enrs(
        self, inbound_message: InboundMessage[FindNodeMessage]
    ) -> None:
        """Send a Nodes message containing ENRs of peers at a given node distance."""
        node_ids = self.routing_table.get_nodes_at_log_distance(
            inbound_message.message.distance
        )
        # Nodes can be in the routing table without a stored ENR; such
        # entries are simply skipped.
        enrs = []
        for node_id in node_ids:
            try:
                enr = self.enr_db.get_enr(node_id)
            except KeyError:
                self.logger.debug("Missing ENR for node %s", encode_hex(node_id))
            else:
                enrs.append(enr)

        # Split the ENRs over payload-sized messages; `or ((),)` guarantees
        # at least one (possibly empty) Nodes message is sent so the
        # requester always gets a response.
        enr_partitions = partition_enrs(enrs, NODES_MESSAGE_PAYLOAD_SIZE) or ((),)
        self.logger.debug(
            "Responding to %s with %d Nodes message containing %d ENRs at distance %d",
            inbound_message.sender_endpoint,
            len(enr_partitions),
            len(enrs),
            inbound_message.message.distance,
        )
        for partition in enr_partitions:
            nodes_message = NodesMessage(
                request_id=inbound_message.message.request_id,
                total=len(enr_partitions),
                enrs=partition,
            )
            outbound_message = inbound_message.to_response(nodes_message)
            await self.outbound_message_send_channel.send(outbound_message)
class PingSenderService(BaseRoutingTableManagerComponent):
    """Regularly sends pings to peers to check if they are still alive or not."""

    logger = logging.getLogger("ddht.v5.routing_table_manager.PingSenderService")

    def __init__(
        self,
        local_node_id: NodeID,
        routing_table: KademliaRoutingTable,
        message_dispatcher: MessageDispatcherAPI,
        enr_db: ENRDatabaseAPI,
        endpoint_vote_send_channel: SendChannel[EndpointVote],
    ) -> None:
        super().__init__(local_node_id, routing_table, message_dispatcher, enr_db)
        # Channel on which the external endpoint echoed back in Pong
        # messages is reported for endpoint-vote tallying.
        self.endpoint_vote_send_channel = endpoint_vote_send_channel

    async def run(self) -> None:
        # Each interval, ping one node from the least recently updated
        # log-distance bucket.
        async for _ in every(ROUTING_TABLE_PING_INTERVAL):  # noqa: F841
            if not self.routing_table.is_empty:
                log_distance = (
                    self.routing_table.get_least_recently_updated_log_distance()
                )
                candidates = self.routing_table.get_nodes_at_log_distance(log_distance)
                # NOTE(review): pings the last entry of the bucket —
                # presumably the least recently seen node; confirm the
                # ordering guarantee of get_nodes_at_log_distance.
                node_id = candidates[-1]
                self.logger.debug("Pinging %s", encode_hex(node_id))
                await self.ping(node_id)
            else:
                self.logger.debug("Routing table is empty, no one to ping")

    async def ping(self, node_id: NodeID) -> None:
        """Ping a single node; on a Pong, record liveness and cast an endpoint vote."""
        local_enr = self.get_local_enr()
        ping = PingMessage(
            request_id=self.message_dispatcher.get_free_request_id(node_id),
            enr_seq=local_enr.sequence_number,
        )
        try:
            with trio.fail_after(REQUEST_RESPONSE_TIMEOUT):
                inbound_message = await self.message_dispatcher.request(node_id, ping)
        except ValueError as value_error:
            # NOTE(review): presumably raised by the dispatcher when the
            # request cannot be sent (e.g. no known endpoint) — confirm.
            self.logger.debug(
                "Failed to send ping to %s: %s", encode_hex(node_id), value_error
            )
        except trio.TooSlowError:
            self.logger.debug("Ping to %s timed out", encode_hex(node_id))
        else:
            if not isinstance(inbound_message.message, PongMessage):
                self.logger.debug(
                    "Peer %s responded to Ping with %s instead of Pong",
                    encode_hex(node_id),
                    inbound_message.message.__class__.__name__,
                )
            else:
                self.logger.debug("Received Pong from %s", encode_hex(node_id))

                self.update_routing_table(node_id)

                # The Pong echoes the address the peer saw us at; feed it
                # into the endpoint voting mechanism.
                pong = inbound_message.message
                local_endpoint = Endpoint(
                    ip_address=pong.packet_ip, port=pong.packet_port
                )
                endpoint_vote = EndpointVote(
                    endpoint=local_endpoint, node_id=node_id, timestamp=time.monotonic()
                )
                await self.endpoint_vote_send_channel.send(endpoint_vote)

                # Also refresh our copy of the peer's ENR if it advertizes
                # a newer sequence number.
                await self.maybe_request_remote_enr(inbound_message)
class LookupService(BaseRoutingTableManagerComponent):
"""Performs recursive lookups."""
logger = logging.getLogger("ddht.v5.routing_table_manager.LookupService")
async def run(self) -> None:
async for _ in every(ROUTING_TABLE_LOOKUP_INTERVAL):
target = NodeID(secrets.token_bytes(32))
await self.lookup(target)
    async def lookup(self, target: NodeID) -> None:
        """Perform an iterative, Kademlia-style lookup converging on ``target``.

        Each round queries up to LOOKUP_PARALLELIZATION_FACTOR of the closest
        not-yet-queried candidates in parallel; the loop ends when a round
        produces no new nodes to query.
        """
        self.logger.debug("Looking up %s", encode_hex(target))
        # Shared state mutated by the concurrent per-peer tasks below.
        queried_node_ids = set()
        unresponsive_node_ids = set()
        received_enrs: List[ENRAPI] = []
        received_node_ids: List[NodeID] = []

        async def lookup_and_store_response(peer: NodeID) -> None:
            # Query one peer and fold its answer into the shared state above.
            enrs = await self.lookup_at_peer(peer, target)
            queried_node_ids.add(peer)
            if enrs is not None:
                for enr in enrs:
                    received_node_ids.append(enr.node_id)
                    try:
                        self.enr_db.set_enr(enr)
                    except OldSequenceNumber:
                        # We already hold a newer record; keep the stored one.
                        received_enrs.append(self.enr_db.get_enr(enr.node_id))
                    else:
                        received_enrs.append(enr)
            else:
                unresponsive_node_ids.add(peer)

        for lookup_round_counter in itertools.count():
            candidates = iter_closest_nodes(
                target, self.routing_table, received_node_ids
            )
            # NOTE(review): dropwhile only skips unresponsive nodes at the
            # *head* of the iterator, not ones appearing later -- confirm
            # this is intended rather than a filter().
            responsive_candidates = itertools.dropwhile(
                lambda node: node in unresponsive_node_ids, candidates
            )
            closest_k_candidates = take(
                self.routing_table.bucket_size, responsive_candidates
            )
            closest_k_unqueried_candidates = (
                candidate
                for candidate in closest_k_candidates
                if candidate not in queried_node_ids
            )
            nodes_to_query = tuple(
                take(LOOKUP_PARALLELIZATION_FACTOR, closest_k_unqueried_candidates)
            )
            if nodes_to_query:
                self.logger.debug(
                    "Starting lookup round %d for %s",
                    lookup_round_counter + 1,
                    encode_hex(target),
                )
                # Query the selected peers concurrently; the nursery waits
                # for all of them before the next round starts.
                async with trio.open_nursery() as nursery:
                    for peer in nodes_to_query:
                        nursery.start_soon(lookup_and_store_response, peer)
            else:
                self.logger.debug(
                    "Lookup for %s finished in %d rounds",
                    encode_hex(target),
                    lookup_round_counter,
                )
                break
async def lookup_at_peer(
self, peer: NodeID, target: NodeID
) -> Optional[Tuple[ENRAPI, ...]]:
self.logger.debug(
"Looking up %s at node %s", encode_hex(target), encode_hex(peer)
)
distance = compute_log_distance(peer, target)
first_attempt = await self.request_nodes(peer, target, distance)
if first_attempt is None:
self.logger.debug("Lookup with node %s failed", encode_hex(peer))
return None
elif len(first_attempt) >= LOOKUP_RETRY_THRESHOLD:
self.logger.debug(
"Node %s responded with %d nodes with single attempt",
encode_hex(peer),
len(first_attempt),
)
return first_attempt
else:
second_attempt = await self.request_nodes(peer, target, distance)
both_attempts = first_attempt + (second_attempt or ())
self.logger.debug(
"Node %s responded with %d nodes in two attempts",
encode_hex(peer),
len(both_attempts),
)
return both_attempts
async def | |
OoO0O00
if 58 - 58: O0
II1IIiiI1 = II1IIiiI1 . split ( "\n" ) [ 0 ]
OoO0o0OOOO = II1IIiiI1 . split ( ) [ - 1 ]
if 91 - 91: iII111i / I1ii11iIi11i . iII111i - o0oOOo0O0Ooo + I1ii11iIi11i
IiiIIi1 = ""
O00 = lisp_is_macos ( )
if ( O00 ) :
II1IIiiI1 = commands . getoutput ( "ifconfig {} | egrep 'inet '" . format ( OoO0o0OOOO ) )
if ( II1IIiiI1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
else :
ooO0ooooO = 'ip addr show | egrep "inet " | egrep "{}"' . format ( OoO0o0OOOO )
II1IIiiI1 = commands . getoutput ( ooO0ooooO )
if ( II1IIiiI1 == "" ) :
ooO0ooooO = 'ip addr show | egrep "inet " | egrep "global lo"'
II1IIiiI1 = commands . getoutput ( ooO0ooooO )
if 86 - 86: ooOoO0o
if ( II1IIiiI1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
if 51 - 51: OoO0O00 - i11iIiiIii * I1IiiI
if 95 - 95: OOooOOo % I1ii11iIi11i + o0oOOo0O0Ooo % ooOoO0o
if 36 - 36: O0 / i1IIi % II111iiii / iII111i
if 96 - 96: Oo0Ooo / oO0o . II111iiii . Oo0Ooo
if 91 - 91: II111iiii . OOooOOo + o0oOOo0O0Ooo
if 8 - 8: OOooOOo * Oo0Ooo / iII111i - OoO0O00 - OoooooooOO
IiiIIi1 = ""
II1IIiiI1 = II1IIiiI1 . split ( "\n" )
if 100 - 100: oO0o . iIii1I11I1II1 . iIii1I11I1II1
for oOOo0ooO0 in II1IIiiI1 :
OO0o = oOOo0ooO0 . split ( ) [ 1 ]
if ( O00 == False ) : OO0o = OO0o . split ( "/" ) [ 0 ]
ii1i1II11II1i = lisp_address ( LISP_AFI_IPV4 , OO0o , 32 , 0 )
return ( ii1i1II11II1i )
if 95 - 95: I11i + o0oOOo0O0Ooo * I1ii11iIi11i
return ( lisp_address ( LISP_AFI_IPV4 , IiiIIi1 , 32 , 0 ) )
if 85 - 85: i11iIiiIii . OoooooooOO - iIii1I11I1II1
if 38 - 38: I11i . I11i * oO0o / OoooooooOO % ooOoO0o
if 80 - 80: OoO0O00 / IiII * I1IiiI % IiII
if 95 - 95: O0 / I11i . I1Ii111
if 17 - 17: I11i
if 56 - 56: ooOoO0o * o0oOOo0O0Ooo + I11i
if 48 - 48: IiII * OoO0O00 % I1Ii111 - I11i
if 72 - 72: i1IIi % ooOoO0o % IiII % oO0o - oO0o
if 97 - 97: o0oOOo0O0Ooo * O0 / o0oOOo0O0Ooo * OoO0O00 * Oo0Ooo
if 38 - 38: I1Ii111
if 25 - 25: iIii1I11I1II1 % II111iiii / I11i / I1ii11iIi11i
def lisp_get_local_addresses ( ) :
global lisp_myrlocs
if 22 - 22: oO0o * iII111i
if 4 - 4: OoOoOO00 - oO0o + I1IiiI
if 36 - 36: IiII
if 19 - 19: OoOoOO00 . o0oOOo0O0Ooo . OoooooooOO
if 13 - 13: OOooOOo . Oo0Ooo / II111iiii
if 43 - 43: iIii1I11I1II1 % OoO0O00
if 84 - 84: Oo0Ooo
if 44 - 44: OoooooooOO * i11iIiiIii / Oo0Ooo
if 75 - 75: OoooooooOO . OOooOOo + OoO0O00 / Ii1I - I1IiiI % Ii1I
if 89 - 89: iII111i * iIii1I11I1II1 + i11iIiiIii . OoooooooOO
O0O0 = None
ooo = 1
oO0oo = os . getenv ( "LISP_ADDR_SELECT" )
if ( oO0oo != None and oO0oo != "" ) :
oO0oo = oO0oo . split ( ":" )
if ( len ( oO0oo ) == 2 ) :
O0O0 = oO0oo [ 0 ]
ooo = oO0oo [ 1 ]
else :
if ( oO0oo [ 0 ] . isdigit ( ) ) :
ooo = oO0oo [ 0 ]
else :
O0O0 = oO0oo [ 0 ]
if 52 - 52: IiII % ooOoO0o
if 25 - 25: I11i / I11i % OoooooooOO - I1ii11iIi11i * oO0o
ooo = 1 if ( ooo == "" ) else int ( ooo )
if 23 - 23: i11iIiiIii
if 100 - 100: oO0o + O0 . I1IiiI + i1IIi - OoOoOO00 + o0oOOo0O0Ooo
ooOOo = [ None , None , None ]
i1 = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
iii1IiiiI1i1 = lisp_address ( LISP_AFI_IPV6 , "" , 128 , 0 )
IIIiI1i1 = None
if 13 - 13: OOooOOo * I11i / O0 * o0oOOo0O0Ooo
for OoO0o0OOOO in netifaces . interfaces ( ) :
if ( O0O0 != None and O0O0 != OoO0o0OOOO ) : continue
IIiiI = netifaces . ifaddresses ( OoO0o0OOOO )
if ( IIiiI == { } ) : continue
if 35 - 35: i1IIi * i11iIiiIii % I1ii11iIi11i / IiII / IiII
if 91 - 91: OoO0O00 * I1Ii111 % OoO0O00 . o0oOOo0O0Ooo * I1ii11iIi11i . OOooOOo
if 13 - 13: I1ii11iIi11i
if 80 - 80: Oo0Ooo % IiII % OoooooooOO * Oo0Ooo % Ii1I
IIIiI1i1 = lisp_get_interface_instance_id ( OoO0o0OOOO , None )
if 41 - 41: OoooooooOO / i1IIi
if 70 - 70: OoOoOO00 % o0oOOo0O0Ooo % i1IIi / I1ii11iIi11i % i11iIiiIii / i1IIi
if 4 - 4: IiII
if 93 - 93: oO0o % i1IIi
if ( IIiiI . has_key ( netifaces . AF_INET ) ) :
IIi1 = IIiiI [ netifaces . AF_INET ]
OO = 0
for IiiIIi1 in IIi1 :
i1 . store_address ( IiiIIi1 [ "addr" ] )
if ( i1 . is_ipv4_loopback ( ) ) : continue
if ( i1 . is_ipv4_link_local ( ) ) : continue
if ( i1 . address == 0 ) : continue
OO += 1
i1 . instance_id = IIIiI1i1
if ( O0O0 == None and
lisp_db_for_lookups . lookup_cache ( i1 , False ) ) : continue
ooOOo [ 0 ] = i1
if ( OO == ooo ) : break
if 61 - 61: I11i . I11i - OoO0O00
if 62 - 62: iII111i . iII111i
if ( IIiiI . has_key ( netifaces . AF_INET6 ) ) :
OoO0oO = IIiiI [ netifaces . AF_INET6 ]
OO = 0
for IiiIIi1 in OoO0oO :
oo0o00OO = IiiIIi1 [ "addr" ]
iii1IiiiI1i1 . store_address ( oo0o00OO )
if ( iii1IiiiI1i1 . is_ipv6_string_link_local ( oo0o00OO ) ) : continue
if ( iii1IiiiI1i1 . is_ipv6_loopback ( ) ) : continue
OO += 1
iii1IiiiI1i1 . instance_id = IIIiI1i1
if ( O0O0 == None and
lisp_db_for_lookups . lookup_cache ( iii1IiiiI1i1 , False ) ) : continue
ooOOo [ 1 ] = iii1IiiiI1i1
if ( OO == ooo ) : break
if 22 - 22: ooOoO0o / ooOoO0o - Ii1I % I11i . OOooOOo + IiII
if 64 - 64: i1IIi % I1ii11iIi11i / Ii1I % OoooooooOO
if 24 - 24: I1Ii111 + OoooooooOO . IiII / OoOoOO00 / I11i
if 65 - 65: OoooooooOO
if 18 - 18: O0 - i1IIi . I1Ii111
if 98 - 98: o0oOOo0O0Ooo
if ( ooOOo [ 0 ] == None ) : continue
if 73 - 73: Oo0Ooo - iII111i . oO0o % i1IIi . O0
ooOOo [ 2 ] = OoO0o0OOOO
break
if 15 - 15: ooOoO0o . iIii1I11I1II1 * I1IiiI % I11i
if 21 - 21: OoO0O00 - I1IiiI . OoooooooOO
Ii1iiI1i1 = ooOOo [ 0 ] . print_address_no_iid ( ) if ooOOo [ 0 ] else "none"
iIi = ooOOo [ 1 ] . print_address_no_iid ( ) if ooOOo [ 1 ] else "none"
OoO0o0OOOO = ooOOo [ 2 ] if ooOOo [ 2 ] else "none"
if 88 - 88: iII111i * OoooooooOO . iIii1I11I1II1
O0O0 = " (user selected)" if O0O0 != None else ""
if 11 - 11: oO0o + I1Ii111 . IiII * OoooooooOO - I1ii11iIi11i - OOooOOo
Ii1iiI1i1 = red ( Ii1iiI1i1 , False )
iIi = red ( iIi , False )
OoO0o0OOOO = bold ( OoO0o0OOOO , False )
lprint ( "Local addresses are | |
[variable for key in var_dict.keys() for variable in model_name.variables() if key == variable.name.split("_")[0]]
for var in selected_variables:
# Temporal dataframe in loop
temp_df = pd.DataFrame(columns=[
'SCENARIO',
'VAR_NAME',
'VAR_VALUE',
'REGION',
'REGION2',
'DAYTYPE',
'EMISSION',
'FUEL',
'DAILYTIMEBRACKET',
'SEASON',
'TIMESLICE',
'MODE_OF_OPERATION',
'STORAGE',
'TECHNOLOGY',
'YEAR',
'FLEXIBLEDEMANDTYPE'])
# Variable name
var_name = var.name.split("_")[0]
# Variable indices
var_concrete_indices_list = var.name.split("_")[1:]
# Variable abstract indices
var_abstract_indices_list = var_dict[var_name]
# Dictionary
abstract_dict = {key: "" for key in ["r", "rr", "ld", "e", "f", "lh", "ls", "l", "m", "s", "t", "y", "fdt"]} # default value: " "
concrete_dict = {key: value for key, value in zip(var_abstract_indices_list, var_concrete_indices_list)}
data_dict = {**abstract_dict, **concrete_dict} # Merge dictionaries
# Write data to temporary dataframe
temp_df.at[0, 'SCENARIO'] = scenario
temp_df.at[0, 'VAR_NAME'] = var.name.split("_")[0]
temp_df.at[0, 'VAR_VALUE'] = var.varValue
temp_df.at[0, 'REGION'] = data_dict["r"]
temp_df.at[0, 'REGION2'] = data_dict["rr"]
temp_df.at[0, 'DAYTYPE'] = data_dict["ld"]
temp_df.at[0, 'EMISSION'] = data_dict["e"]
temp_df.at[0, 'FUEL'] = data_dict["f"]
temp_df.at[0, 'DAILYTIMEBRACKET'] = data_dict["lh"]
temp_df.at[0, 'SEASON'] = data_dict["ls"]
temp_df.at[0, 'TIMESLICE'] = data_dict["l"]
temp_df.at[0, 'MODE_OF_OPERATION'] = data_dict["m"]
temp_df.at[0, 'STORAGE'] = data_dict["s"]
temp_df.at[0, 'TECHNOLOGY'] = data_dict["t"]
temp_df.at[0, 'YEAR'] = data_dict["y"]
temp_df.at[0, 'FLEXIBLEDEMANDTYPE'] = data_dict["fdt"]
df = pd.concat([df, temp_df])
return df
def saveResults(dataframe, fileDir, fileName):
    """Write model results to an Excel workbook, one worksheet per variable.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Long-format results carrying a 'VAR_NAME' column used to split sheets.
    fileDir : str
        Output directory; created if it does not exist.
    fileName : str
        Name of the Excel file to create inside fileDir.
    """
    # Work on a copy so the caller's dataframe is not mutated (the previous
    # inplace replace silently rewrote VAR_NAME in the caller's object).
    _df = dataframe.copy()
    # Shorten abstract variable names to help keep within Excel's worksheet
    # name limit of 31 characters.
    _df['VAR_NAME'] = _df['VAR_NAME'].replace(
        regex={'Total': 'Tot', 'Annual': 'Ann', 'Technology': 'Tech', 'Discounted': 'Disc', 'Production': 'Prod'})
    name_list = _df['VAR_NAME'].unique()
    dataframe_list = [_df[_df['VAR_NAME'] == str(name)] for name in name_list]
    os.makedirs(fileDir, exist_ok=True)
    # Context manager replaces ExcelWriter.save(), which was deprecated and
    # removed in pandas 2.0; it also guarantees the file handle is closed.
    with pd.ExcelWriter(os.path.join(fileDir, fileName)) as writer:
        for d, name in zip(dataframe_list, name_list):
            # Excel hard-limits worksheet names to 31 characters; truncate
            # explicitly in case the abbreviations above were not enough.
            d.to_excel(writer, sheet_name=str(name)[:31], index=False)
    return
# ----------------------------------------------------------------------------------------------------------------------
# LOAD DATA
# ----------------------------------------------------------------------------------------------------------------------
# Build the full input path and load all model data from the Excel workbook.
inputPath = os.path.join(inputDir, inputFile)
sets_df, p_df, p_default_df, mcs_df, mcs_num = loadData(inputPath, sheetSets, sheetParams, sheetParamsDefault, sheetMcs, sheetMcsNum)
mcs_parameters = mcs_df['PARAM'].unique()  # list of parameters to be included in monte carlo simulation
logging.info("{}\tData is loaded.".format(dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
# ----------------------------------------------------------------------------------------------------------------------
# SETS
# ----------------------------------------------------------------------------------------------------------------------
# Build the model index sets from the SETS sheet.  Cell values arrive as
# strings and empty cells read as the string 'nan'.  Numeric sets (DAYTYPE,
# DAILYTIMEBRACKET, SEASON, MODE_OF_OPERATION, YEAR) are round-tripped through
# float -> int -> str to normalise entries like '1.0' to '1'.
REGION = [r for r in sets_df['REGION'] if r != 'nan']
REGION2 = [rr for rr in sets_df['REGION2'] if rr != 'nan']
DAYTYPE = [str(int(float(ld))) for ld in sets_df['DAYTYPE'] if ld != 'nan']
EMISSION = [e for e in sets_df['EMISSION'] if e != 'nan']
FUEL = [f for f in sets_df['FUEL'] if f != 'nan']
DAILYTIMEBRACKET = [str(int(float(lh))) for lh in sets_df['DAILYTIMEBRACKET'] if lh != 'nan']
SEASON = [str(int(float(ls))) for ls in sets_df['SEASON'] if ls != 'nan']
TIMESLICE = [l for l in sets_df['TIMESLICE'] if l != 'nan']
MODE_OF_OPERATION = [str(int(float(m))) for m in sets_df['MODE_OF_OPERATION'] if m != 'nan']
STORAGE = [s for s in sets_df['STORAGE'] if s != 'nan']
TECHNOLOGY = [t for t in sets_df['TECHNOLOGY'] if t != 'nan']
YEAR = [str(int(float(y))) for y in sets_df['YEAR'] if y != 'nan']
FLEXIBLEDEMANDTYPE = [fdt for fdt in sets_df['FLEXIBLEDEMANDTYPE'] if fdt != 'nan']
logging.info("{}\tSets are created.".format(dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
# ----------------------------------------------------------------------------------------------------------------------
# PARAMETERS AND DATA
# ----------------------------------------------------------------------------------------------------------------------
######## Global #########
# Every parameter below follows the same three-step pattern:
#   <P>_default_value : fallback from the defaults sheet (p_default_df)
#   <P>_specified     : index tuples that carry an explicit value in p_df
#   <P>               : nested dict over the full index sets -- the explicit
#                       value where specified, the default everywhere else
# YearSplit
YearSplit = p_df[p_df['PARAM'] == "YearSplit"][['TIMESLICE', 'YEAR', 'VALUE']].groupby('TIMESLICE')\
    .apply(lambda df: df.set_index('YEAR')['VALUE'].to_dict()).to_dict()
# DiscountRate
DiscountRate_default_value = p_default_df[p_default_df['PARAM'] == "DiscountRate"].VALUE.iat[0]
DiscountRate_specified = tuple([(str(r)) for r in p_df[p_df['PARAM'] == "DiscountRate"].REGION])
DiscountRate = {str(r): p_df[(p_df['PARAM'] == "DiscountRate") & (p_df['REGION'] == r)].VALUE.iat[0]\
    if (str(r)) in DiscountRate_specified else DiscountRate_default_value for r in REGION}
# DaySplit
DaySplit_default_value = p_default_df[p_default_df['PARAM'] == "DaySplit"].VALUE.iat[0]
DaySplit_specified = tuple([(str(lh), str(y)) for lh, y in zip(
    p_df[p_df['PARAM'] == "DaySplit"].DAILYTIMEBRACKET, p_df[p_df['PARAM'] == "DaySplit"].YEAR)])
DaySplit = {str(lh): {str(y): p_df[(p_df['PARAM'] == "DaySplit") & (p_df['DAILYTIMEBRACKET'] == lh) & (p_df['YEAR'] == y)].VALUE.iat[0] if (str(lh), str(y)) in DaySplit_specified else DaySplit_default_value for y in YEAR} for lh in DAILYTIMEBRACKET}
# Conversionls
Conversionls_default_value = p_default_df[p_default_df['PARAM'] == "Conversionls"].VALUE.iat[0]
Conversionls_specified = tuple([(str(l), str(ls)) for l, ls in zip(p_df[p_df['PARAM'] == "Conversionls"].TIMESLICE, p_df[p_df['PARAM'] == "Conversionls"].SEASON)])
Conversionls = {str(l): {str(ls): p_df[(p_df['PARAM'] == "Conversionls") & (p_df['TIMESLICE'] == l) & (p_df['SEASON'] == ls)].VALUE.iat[0] if (str(l), str(ls)) in Conversionls_specified else Conversionls_default_value for ls in SEASON} for l in TIMESLICE}
# Conversionld
Conversionld_default_value = p_default_df[p_default_df['PARAM'] == "Conversionld"].VALUE.iat[0]
Conversionld_specified = tuple([(str(l), str(ld)) for l, ld in zip(p_df[p_df['PARAM'] == "Conversionld"].TIMESLICE, p_df[p_df['PARAM'] == "Conversionld"].DAYTYPE)])
Conversionld = {str(l): {str(ld): p_df[(p_df['PARAM'] == "Conversionld") & (p_df['TIMESLICE'] == l) & (p_df['DAYTYPE'] == ld)].VALUE.iat[0] if (str(l), str(ld)) in Conversionld_specified else Conversionld_default_value for ld in DAYTYPE} for l in TIMESLICE}
# Conversionlh
Conversionlh_default_value = p_default_df[p_default_df['PARAM'] == "Conversionlh"].VALUE.iat[0]
Conversionlh_specified = tuple([(str(l), str(lh)) for l, lh in zip(p_df[p_df['PARAM'] == "Conversionlh"].TIMESLICE, p_df[p_df['PARAM'] == "Conversionlh"].DAILYTIMEBRACKET)])
Conversionlh = {str(l): {str(lh): p_df[(p_df['PARAM'] == "Conversionlh") & (p_df['TIMESLICE'] == l) & (p_df['DAILYTIMEBRACKET'] == lh)].VALUE.iat[0] if (str(l), str(lh)) in Conversionlh_specified else Conversionlh_default_value for lh in DAILYTIMEBRACKET} for l in TIMESLICE}
# DaysInDayType
# NOTE(review): the comprehension below names its loop variables r, f, y but
# actually iterates the SEASON, DAYTYPE and YEAR columns -- correct, but the
# names are misleading.
DaysInDayType_default_value = p_default_df[p_default_df['PARAM'] == "DaysInDayType"].VALUE.iat[0]
DaysInDayType_specified = tuple([(str(r),str(f),str(y)) for r, f, y in zip(p_df[p_df['PARAM'] == "DaysInDayType"].SEASON, p_df[p_df['PARAM'] == "DaysInDayType"].DAYTYPE, p_df[p_df['PARAM'] == "DaysInDayType"].YEAR)])
DaysInDayType = {str(ls): {str(ld): {str(y): p_df[(p_df['PARAM'] == "DaysInDayType") & (p_df['SEASON'] == ls) & (p_df['DAYTYPE'] == ld) & (p_df['YEAR'] == y)].VALUE.iat[0] if (str(ls),str(ld),str(y)) in DaysInDayType_specified else DaysInDayType_default_value for y in YEAR} for ld in DAYTYPE} for ls in SEASON}
# TradeRoute
TradeRoute_default_value = p_default_df[p_default_df['PARAM'] == "TradeRoute"].VALUE.iat[0]
TradeRoute_specified = tuple([(str(r),str(rr),str(f),str(y)) for r, rr, f, y in zip(p_df[p_df['PARAM'] == "TradeRoute"].REGION, p_df[p_df['PARAM'] == "TradeRoute"].REGION2, p_df[p_df['PARAM'] == "TradeRoute"].FUEL, p_df[p_df['PARAM'] == "TradeRoute"].YEAR)])
TradeRoute = {str(r): {str(rr): {str(f): {str(y): p_df[(p_df['PARAM'] == "TradeRoute") & (p_df['REGION'] == r) & (p_df['REGION2'] == rr) & (p_df['FUEL'] == f) & (p_df['YEAR'] == y)].VALUE.iat[0] if (str(r),str(rr),str(f),str(y)) in TradeRoute_specified else TradeRoute_default_value for y in YEAR} for f in FUEL} for rr in REGION2} for r in REGION}
# DepreciationMethod
DepreciationMethod_default_value = p_default_df[p_default_df['PARAM'] == "DepreciationMethod"].VALUE.iat[0]
DepreciationMethod_specified = tuple([(str(r)) for r in p_df[p_df['PARAM'] == "DepreciationMethod"].REGION])
DepreciationMethod = {str(r): p_df[(p_df['PARAM'] == "DepreciationMethod") & (p_df['REGION'] == r)].VALUE.iat[0] if (str(r)) in DepreciationMethod_specified else DepreciationMethod_default_value for r in REGION}
######## Demands #########
# SpecifiedAnnualDemand
SpecifiedAnnualDemand_default_value = p_default_df[p_default_df['PARAM'] == "SpecifiedAnnualDemand"].VALUE.iat[0]
SpecifiedAnnualDemand_specified = tuple([(str(r),str(f),str(y)) for r, f, y in zip(p_df[p_df['PARAM'] == "SpecifiedAnnualDemand"].REGION, p_df[p_df['PARAM'] == "SpecifiedAnnualDemand"].FUEL, p_df[p_df['PARAM'] == "SpecifiedAnnualDemand"].YEAR)])
SpecifiedAnnualDemand = {str(r): {str(f): {str(y): p_df[(p_df['PARAM'] == "SpecifiedAnnualDemand") & (p_df['REGION'] == r) & (p_df['FUEL'] == f) & (p_df['YEAR'] == y)].VALUE.iat[0] if (str(r),str(f),str(y)) in SpecifiedAnnualDemand_specified else SpecifiedAnnualDemand_default_value for y in YEAR} for f in FUEL} for r in REGION}
# SpecifiedDemandProfile
SpecifiedDemandProfile_default_value = p_default_df[p_default_df['PARAM'] == "SpecifiedDemandProfile"].VALUE.iat[0]
SpecifiedDemandProfile_specified = tuple([(str(r),str(f),str(l),str(y)) for r, f, l, y in zip(p_df[p_df['PARAM'] == "SpecifiedDemandProfile"].REGION, p_df[p_df['PARAM'] == "SpecifiedDemandProfile"].FUEL, p_df[p_df['PARAM'] == "SpecifiedDemandProfile"].TIMESLICE, p_df[p_df['PARAM'] == "SpecifiedDemandProfile"].YEAR)])
SpecifiedDemandProfile = {str(r): {str(f): {str(l): {str(y): p_df[(p_df['PARAM'] == "SpecifiedDemandProfile") & (p_df['REGION'] == r) & (p_df['FUEL'] == f) & (p_df['TIMESLICE'] == l) & (p_df['YEAR'] == y)].VALUE.iat[0] if (str(r),str(f),str(l),str(y)) in SpecifiedDemandProfile_specified else SpecifiedDemandProfile_default_value for y in YEAR} for l in TIMESLICE} for f in FUEL} for r in REGION}
# AccumulatedAnnualDemand
AccumulatedAnnualDemand_default_value = p_default_df[p_default_df['PARAM'] == "AccumulatedAnnualDemand"].VALUE.iat[0]
AccumulatedAnnualDemand_specified = tuple([(str(r),str(f),str(y)) for r, f, y in zip(p_df[p_df['PARAM'] == "AccumulatedAnnualDemand"].REGION, p_df[p_df['PARAM'] == "AccumulatedAnnualDemand"].FUEL, p_df[p_df['PARAM'] == "AccumulatedAnnualDemand"].YEAR)])
AccumulatedAnnualDemand = {str(r): {str(f): {str(y): p_df[(p_df['PARAM'] == "AccumulatedAnnualDemand") & (p_df['REGION'] == r) & (p_df['FUEL'] == f) & (p_df['YEAR'] == y)].VALUE.iat[0] if (str(r),str(f),str(y)) in AccumulatedAnnualDemand_specified else AccumulatedAnnualDemand_default_value for y in YEAR} for f in FUEL} for r in REGION}
######### Performance #########
# CapacityToActivityUnit
CapacityToActivityUnit_default_value = p_default_df[p_default_df['PARAM'] == "CapacityToActivityUnit"].VALUE.iat[0]
CapacityToActivityUnit_specified = tuple([(str(r), str(t)) for r, t in zip(p_df[p_df['PARAM'] == "CapacityToActivityUnit"].REGION, p_df[p_df['PARAM'] == "CapacityToActivityUnit"].TECHNOLOGY)])
CapacityToActivityUnit = {str(r): {str(t): p_df[(p_df['PARAM'] == "CapacityToActivityUnit") & (p_df['REGION'] == r) & (p_df['TECHNOLOGY'] == t)].VALUE.iat[0] if (str(r), str(t)) in CapacityToActivityUnit_specified else CapacityToActivityUnit_default_value for t in TECHNOLOGY} for r in REGION}
# TechWithCapacityNeededToMeetPeakTS
TechWithCapacityNeededToMeetPeakTS_default_value = p_default_df[p_default_df['PARAM'] == "TechWithCapacityNeededToMeetPeakTS"].VALUE.iat[0]
TechWithCapacityNeededToMeetPeakTS_specified = tuple([(str(r), str(t)) for r, t in zip(p_df[p_df['PARAM'] == "TechWithCapacityNeededToMeetPeakTS"].REGION, p_df[p_df['PARAM'] == "TechWithCapacityNeededToMeetPeakTS"].TECHNOLOGY)])
TechWithCapacityNeededToMeetPeakTS = {str(r): {str(t): p_df[(p_df['PARAM'] == "TechWithCapacityNeededToMeetPeakTS") & (p_df['REGION'] == r) & (p_df['TECHNOLOGY'] == t)].VALUE.iat[0] if (str(r), str(t)) in TechWithCapacityNeededToMeetPeakTS_specified else TechWithCapacityNeededToMeetPeakTS_default_value for t in TECHNOLOGY} for r in REGION}
# CapacityFactor
CapacityFactor_default_value = p_default_df[p_default_df['PARAM'] == "CapacityFactor"].VALUE.iat[0]
CapacityFactor_specified = tuple([(str(r),str(t),str(l),str(y)) for r, t, l, y in zip(p_df[p_df['PARAM'] == "CapacityFactor"].REGION, p_df[p_df['PARAM'] == "CapacityFactor"].TECHNOLOGY, p_df[p_df['PARAM'] == "CapacityFactor"].TIMESLICE, p_df[p_df['PARAM'] == "CapacityFactor"].YEAR)])
CapacityFactor = {str(r): {str(t): {str(l): {str(y): p_df[(p_df['PARAM'] == "CapacityFactor") & (p_df['REGION'] == r) & (p_df['TECHNOLOGY'] == t) & (p_df['YEAR'] == y) & (p_df['TIMESLICE'] == l)].VALUE.iat[0] if (str(r),str(t),str(l),str(y)) in CapacityFactor_specified else CapacityFactor_default_value for y in YEAR} for l in TIMESLICE} for t in TECHNOLOGY} for r in REGION}
# AvailabilityFactor
AvailabilityFactor_default_value = p_default_df[p_default_df['PARAM'] == "AvailabilityFactor"].VALUE.iat[0]
AvailabilityFactor_specified = tuple([(str(r),str(t),str(y)) for r, t, y in zip(p_df[p_df['PARAM'] == "AvailabilityFactor"].REGION, p_df[p_df['PARAM'] == "AvailabilityFactor"].TECHNOLOGY, p_df[p_df['PARAM'] == "AvailabilityFactor"].YEAR)])
AvailabilityFactor = {str(r): {str(t): {str(y): p_df[(p_df['PARAM'] == "AvailabilityFactor") & (p_df['REGION'] == r) & (p_df['TECHNOLOGY'] == t) & (p_df['YEAR'] == y)].VALUE.iat[0] if (str(r),str(t),str(y)) in AvailabilityFactor_specified else AvailabilityFactor_default_value for y in YEAR} for t in TECHNOLOGY} for r in REGION}
# OperationalLife
OperationalLife_default_value = p_default_df[p_default_df['PARAM'] == "OperationalLife"].VALUE.iat[0]
OperationalLife_specified = tuple([(str(r), str(t)) for r, t in zip(p_df[p_df['PARAM'] == "OperationalLife"].REGION, p_df[p_df['PARAM'] == "OperationalLife"].TECHNOLOGY)])
OperationalLife = {str(r): {str(t): p_df[(p_df['PARAM'] == "OperationalLife") & (p_df['REGION'] == r) & (p_df['TECHNOLOGY'] == t)].VALUE.iat[0] if (str(r), str(t)) in OperationalLife_specified else OperationalLife_default_value for t in TECHNOLOGY} for r in REGION}
# | |
#ATS:test(SELF, "--graphics False --testSPH False --nx1 10 --nx2 100 --testDim 1d --testCase linear", label="RK linear interpolation test -- 1D (serial)")
#ATS:test(SELF, "--graphics False --testSPH False --nx1 10 --nx2 20 --testDim 2d --testCase linear", label="RK linear interpolation test -- 2D (serial)")
#ATS:test(SELF, "--graphics False --testSPH False --nx1 5 --nx2 10 --testDim 3d --testCase linear", label="RK linear interpolation test -- 3D (serial)")
#ATS:test(SELF, "--graphics False --testSPH False --nx1 10 --nx2 100 --testDim 1d --testCase quadratic --correctionOrder QuadraticOrder", label="RK quadratic interpolation test -- 1D (serial)")
#ATS:test(SELF, "--graphics False --testSPH False --nx1 10 --nx2 20 --testDim 2d --testCase quadratic --correctionOrder QuadraticOrder", label="RK quadratic interpolation test -- 2D (serial)")
#ATS:test(SELF, "--graphics False --testSPH False --nx1 5 --nx2 5 --testDim 3d --testCase quadratic --correctionOrder QuadraticOrder", label="RK quadratic interpolation test -- 3D (serial)")
#-------------------------------------------------------------------------------
# A set of tests to compare how different meshless methods interpolate fields.
#-------------------------------------------------------------------------------
from Spheral import *
from SpheralTestUtilities import *
title("Interpolation tests")
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
# Command-line-overridable parameters for the interpolation test.
commandLine(
    # Parameters for seeding nodes.
    nx1 = 50,
    nx2 = 50,
    rho1 = 1.0,
    rho2 = 1.0,
    eps1 = 0.0,
    eps2 = 0.0,
    x0 = 0.0,
    x1 = 0.5,
    x2 = 1.0,
    nPerh = 4.01,
    hmin = 0.0001,
    hmax = 1000.0,

    # What order of reproducing kernel should we use (0,1,2)?
    correctionOrder = LinearOrder,

    # Should we randomly perturb the positions?
    ranfrac = 0.2,
    seed = 14892042,

    # What test problem are we doing?
    testDim = "1d",
    testCase = "linear",

    # Should we compare with SPH?
    testSPH = True,

    # The fields we're going to interpolate.
    # Linear coefficients: y = y0 + m0*x
    y0 = 1.0,
    m0 = 1.0,

    # Quadratic coefficients: y = y2 + m2*x^2
    y2 = 1.0,
    m2 = 0.5,

    gamma = 5.0/3.0,
    mu = 1.0,

    # Parameters for iterating H.
    iterateH = True,
    maxHIterations = 200,
    Htolerance = 1.0e-4,

    # Parameters for passing the test
    interpolationTolerance = 5.0e-7,
    derivativeTolerance = 5.0e-5,

    graphics = True,
    plotKernels = False,
    # NOTE(review): the *string* "None" (not the None object) appears to be
    # the sentinel for "no output file" -- confirm downstream comparisons.
    outputFile = "None",
)

# Sanity-check the requested configuration before building anything.
assert testCase in ("linear", "quadratic", "step")
assert testDim in ("1d", "2d", "3d")

# Dimension-appropriate faceted volume type for this test.
FacetedVolume = {"1d" : Box1d,
                 "2d" : Polygon,
                 "3d" : Polyhedron}[testDim]
#-------------------------------------------------------------------------------
# Appropriately set generic object names based on the test dimensionality.
#-------------------------------------------------------------------------------
# Pulls the dimension-specific Spheral bindings (Spheral1d/2d/3d) into globals.
exec("from Spheral%s import *" % testDim)

## import Spheral
## for name in [x for x in Spheral.__dict__ if testDim in x]:
##     exec("%s = Spheral.__dict__['%s']" % (name.replace(testDim, ""), name))

#-------------------------------------------------------------------------------
# Create a random number generator.
#-------------------------------------------------------------------------------
import random
rangen = random.Random()
rangen.seed(seed)  # fixed seed so the node jitter below is reproducible
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)

#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
# Tabulated Wendland C4 kernel sampled at 1000 points.
WT = TableKernel(WendlandC4Kernel(), 1000)
output("WT")
kernelExtent = WT.kernelExtent

#-------------------------------------------------------------------------------
# Make the NodeList.
#-------------------------------------------------------------------------------
# Two fluid node lists: nodes1 covers the left region, nodes2 the right.
nodes1 = makeFluidNodeList("nodes1", eos,
                           hmin = hmin,
                           hmax = hmax,
                           nPerh = nPerh)
nodes2 = makeFluidNodeList("nodes2", eos,
                           hmin = hmin,
                           hmax = hmax,
                           nPerh = nPerh)
nodeSet = [nodes1, nodes2]
for nodes in nodeSet:
    output("nodes")
    output("nodes.hmin")
    output("nodes.hmax")
    output("nodes.nodesPerSmoothingScale")
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
# nodes1 fills [x0, x1] along x and nodes2 fills [x1, x2]; in 2d/3d the
# transverse extent is the full [x0, x2] with (nx1 + nx2) nodes across it.
if testDim == "1d":
    from DistributeNodes import distributeNodesInRange1d
    distributeNodesInRange1d([(nodes1, [(nx1, rho1, (x0, x1))])], nPerh = nPerh)
    distributeNodesInRange1d([(nodes2, [(nx2, rho2, (x1, x2))])], nPerh = nPerh)
elif testDim == "2d":
    from DistributeNodes import distributeNodes2d
    from GenerateNodeDistribution2d import GenerateNodeDistribution2d
    gen1 = GenerateNodeDistribution2d(nx1, nx1 + nx2, rho1,
                                      distributionType = "lattice",
                                      xmin = (x0, x0),
                                      xmax = (x1, x2),
                                      nNodePerh = nPerh,
                                      SPH = True)
    gen2 = GenerateNodeDistribution2d(nx2, nx1 + nx2, rho2,
                                      distributionType = "lattice",
                                      xmin = (x1, x0),
                                      xmax = (x2, x2),
                                      nNodePerh = nPerh,
                                      SPH = True)
    distributeNodes2d((nodes1, gen1),
                      (nodes2, gen2))
elif testDim == "3d":
    from DistributeNodes import distributeNodes3d
    from GenerateNodeDistribution3d import GenerateNodeDistribution3d
    gen1 = GenerateNodeDistribution3d(nx1, nx1 + nx2, nx1 + nx2, rho1,
                                      distributionType = "lattice",
                                      xmin = (x0, x0, x0),
                                      xmax = (x1, x2, x2),
                                      nNodePerh = nPerh,
                                      SPH = True)
    gen2 = GenerateNodeDistribution3d(nx2, nx1 + nx2, nx1 + nx2, rho2,
                                      distributionType = "lattice",
                                      xmin = (x1, x0, x0),
                                      xmax = (x2, x2, x2),
                                      nNodePerh = nPerh,
                                      SPH = True)
    distributeNodes3d((nodes1, gen1),
                      (nodes2, gen2))
else:
    # NOTE: Python 2 raise syntax -- this script targets Python 2 (it also
    # uses xrange throughout).
    raise ValueError, "Only tests cases for 1d,2d and 3d."

for nodes in nodeSet:
    output("nodes.name, nodes.numNodes")

# Set node properties.
# Initialize the specific thermal energy uniformly on each node list.
for nodes, eps0 in ((nodes1, eps1),
                    (nodes2, eps2)):
    eps = nodes.specificThermalEnergy()
    for i in xrange(nodes.numInternalNodes):
        eps[i] = eps0
#-------------------------------------------------------------------------------
# Optionally randomly jitter the node positions.
#-------------------------------------------------------------------------------
# Nominal node spacings in each region/direction; jitter amplitude is
# ranfrac times the local spacing.
dx1 = (x1 - x0)/nx1
dx2 = (x2 - x1)/nx2
dy = (x2 - x0)/(nx1 + nx2)
dz = (x2 - x0)/(nx1 + nx2)
for nodes, dx in ((nodes1, dx1),
                  (nodes2, dx2)):
    pos = nodes.positions()
    for i in xrange(nodes.numInternalNodes):
        if testDim == "1d":
            pos[i].x += ranfrac * dx * rangen.uniform(-1.0, 1.0)
        elif testDim == "2d":
            pos[i].x += ranfrac * dx * rangen.uniform(-1.0, 1.0)
            pos[i].y += ranfrac * dy * rangen.uniform(-1.0, 1.0)
        elif testDim == "3d":
            pos[i].x += ranfrac * dx * rangen.uniform(-1.0, 1.0)
            pos[i].y += ranfrac * dy * rangen.uniform(-1.0, 1.0)
            pos[i].z += ranfrac * dz * rangen.uniform(-1.0, 1.0)
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
for nodes in nodeSet:
    db.appendNodeList(nodes)
output("db")
output("db.numNodeLists")
output("db.numFluidNodeLists")

#-------------------------------------------------------------------------------
# Iterate the h to convergence if requested.
#-------------------------------------------------------------------------------
# Relax the smoothing scales to be consistent with the (jittered) node
# distribution before measuring interpolation accuracy.
if iterateH:
    bounds = vector_of_Boundary()
    method = SPHSmoothingScale()
    iterateIdealH(db,
                  bounds,
                  WT,
                  method,
                  maxHIterations,
                  Htolerance)
#-------------------------------------------------------------------------------
# Initialize our field.
#-------------------------------------------------------------------------------
# The analytic field we will interpolate: linear, quadratic, or a step at x1.
f = db.newFluidScalarFieldList(name="test field")
pos = db.fluidPosition
for iNodeList, nodes in enumerate(db.nodeLists()):
    for i in xrange(nodes.numInternalNodes):
        x = pos(iNodeList, i).x
        if testCase == "linear":
            f[iNodeList][i] = y0 + m0*x
        elif testCase == "quadratic":
            f[iNodeList][i] = y2 + m2*x*x
        elif testCase == "step":
            if x < x1:
                f[iNodeList][i] = y0
            else:
                f[iNodeList][i] = 2*y0
#-------------------------------------------------------------------------------
# Prepare variables to accumulate the test values.
#-------------------------------------------------------------------------------
# SPH baseline interpolation and gradient results.
fSPH = db.newFluidScalarFieldList(name="SPH interpolated values")
dfSPH = db.newFluidVectorFieldList(name="SPH derivative values")

# RK (CRKSPH) correction coefficients and their gradients.
A = db.newFluidScalarFieldList(name="A")
B = db.newFluidVectorFieldList(name="B")
C = db.newFluidTensorFieldList(name="C")
gradA = db.newFluidVectorFieldList(name="gradA")
gradB = db.newFluidTensorFieldList(name="gradB")
# BUG FIX: this FieldList was mislabeled "gradB" (copy-paste); it holds gradC.
gradC = db.newFluidThirdRankTensorFieldList(name="gradC")

# Moments of the local node distribution and their gradients.
M0 = db.newFluidScalarFieldList(name="M0")
M1 = db.newFluidVectorFieldList(name="M1")
M2 = db.newFluidSymTensorFieldList(name="M2")
M3 = db.newFluidThirdRankTensorFieldList(name="M3")
M4 = db.newFluidFourthRankTensorFieldList(name="M4")
gradM0 = db.newFluidVectorFieldList(name="grad M0")
gradM1 = db.newFluidTensorFieldList(name="grad M1")
gradM2 = db.newFluidThirdRankTensorFieldList(name="grad M2")
gradM3 = db.newFluidFourthRankTensorFieldList(name="grad M3")
gradM4 = db.newFluidFifthRankTensorFieldList(name="grad M4")
surfacePoint = db.newFluidIntFieldList(name="surface point")

db.updateConnectivityMap(True)
cm = db.connectivityMap()
position = db.fluidPosition
# Weight each node by its volume (mass / mass density).
weight = db.fluidMass
weight /= db.fluidMassDensity
H = db.fluidHfield

# Compute the volumes to use as weighting.
#polyvol = db.newFluidFacetedVolumeFieldList(name=FacetedVolume(), "polyvols")
#weight = db.newFluidScalarFieldList(name=1.0, "volume")
#computeHullVolumes(cm, position, polyvol, weight)

# Build the RK moments, then solve for the correction coefficients.
computeCRKSPHMoments(cm, WT, weight, position, H, correctionOrder, NodeCoupling(),
                     M0, M1, M2, M3, M4, gradM0, gradM1, gradM2, gradM3, gradM4)
computeCRKSPHCorrections(M0, M1, M2, M3, M4, gradM0, gradM1, gradM2, gradM3, gradM4, H,
                         surfacePoint,
                         correctionOrder,
                         A, B, C, gradA, gradB, gradC)
#-------------------------------------------------------------------------------
# Measure the interpolated values and gradients.
#-------------------------------------------------------------------------------
# Hand-rolled uncorrected SPH interpolation and gradient, used as the baseline
# against which the corrected CRKSPH results below are compared.
if testSPH:
    for iNodeList, nodes in enumerate(db.nodeLists()):
        for i in xrange(nodes.numInternalNodes):
            ri = position(iNodeList, i)
            Hi = H(iNodeList, i)
            Hdeti = Hi.Determinant()
            wi = weight(iNodeList, i)
            fi = f(iNodeList, i)
            # Self contribution.
            W0 = WT.kernelValue(0.0, Hdeti)
            fSPH[iNodeList][i] = wi*W0 * fi
            # Loop over the neighbors of node i (all NodeLists).
            allneighbors = cm.connectivityForNode(iNodeList, i)
            for jNodeList, neighbors in enumerate(allneighbors):
                for j in neighbors:
                    rj = position(jNodeList, j)
                    Hj = H(jNodeList, j)
                    Hdetj = Hj.Determinant()
                    wj = weight(jNodeList, j)
                    fj = f(jNodeList, j)
                    # The standard SPH kernel and its gradient, evaluated with
                    # the neighbor's smoothing scale ("scatter" formalism).
                    rij = ri - rj
                    etai = Hi*rij
                    etaj = Hj*rij
                    Wj = WT.kernelValue(etaj.magnitude(), Hdetj)
                    gradWj = Hj*etaj.unitVector() * WT.gradValue(etaj.magnitude(), Hdetj)
                    # Increment our interpolated values.
                    fSPH[iNodeList][i] += fj * wj*Wj
                    # Increment the derivatives.
                    dfSPH[iNodeList][i] += fj * wj*gradWj
#-------------------------------------------------------------------------------
# Check the C++ interpolation and gradient methods.
#-------------------------------------------------------------------------------
# Reproducing-kernel (CRKSPH) interpolation and gradient, computed by the C++
# implementations using the correction terms found above.
fRK = interpolateCRKSPH(f, position, weight, H, A, B, C,
                        cm, correctionOrder, WT)
dfRK = gradientCRKSPH(f, position, weight, H,
                      A, B, C, gradA, gradB, gradC,
                      cm, correctionOrder, WT)
#-------------------------------------------------------------------------------
# Prepare the answer to check against.
#-------------------------------------------------------------------------------
# Analytic values and first derivatives for each test profile.
yans = db.newFluidScalarFieldList(name="interpolation answer")
dyans = db.newFluidScalarFieldList(name="derivative answer")
for iNodeList in xrange(db.numNodeLists):
    n = yans[iNodeList].numInternalElements
    for i in xrange(n):
        xi = position(iNodeList, i).x
        if testCase == "linear":
            yans[iNodeList][i] = y0 + m0*xi
            dyans[iNodeList][i] = m0
        elif testCase == "quadratic":
            yans[iNodeList][i] = y2 + m2*xi*xi
            dyans[iNodeList][i] = 2*m2*xi
        elif testCase == "step":
            # NOTE(review): the field was initialized with an x < x1 test, but
            # the answer here assumes NodeList 0 lies entirely below x1 and the
            # rest above it — confirm the node generators guarantee that split.
            if iNodeList == 0:
                yans[iNodeList][i] = y0
            else:
                yans[iNodeList][i] = 2*y0
            dyans[iNodeList][i] = 0.0
#-------------------------------------------------------------------------------
# Check our answers accuracy.
#-------------------------------------------------------------------------------
def flattenFieldList(fl):
    """Concatenate the internal values of every Field in *fl* into one flat list."""
    return [value
            for field in fl
            for value in field.internalValues()]
# Pointwise errors of both interpolants against the analytic answers.
errySPH = flattenFieldList(fSPH - yans)
erryRK = flattenFieldList(fRK - yans)
# Derivative errors: compare the x component of each gradient to the answer.
errdySPH = []
errdyRK = []
for iNodeList in xrange(db.numNodeLists):
    n = fSPH[iNodeList].numInternalElements
    for i in xrange(n):
        errdySPH.append(dfSPH(iNodeList, i).x - dyans(iNodeList, i))
        errdyRK.append(dfRK(iNodeList, i).x - dyans(iNodeList, i))
# Report the worst-case (L-infinity) errors.
maxySPHerror = max([abs(x) for x in errySPH])
maxdySPHerror = max([abs(x) for x in errdySPH])
maxyRKerror = max([abs(x) for x in erryRK])
maxdyRKerror = max([abs(x) for x in errdyRK])
print "Maximum errors (interpolation): SPH = %g, RK = %g" % (maxySPHerror, maxyRKerror)
print "Maximum errors (derivatives): SPH = %g, RK = %g" % (maxdySPHerror, maxdyRKerror)
# Output timing tables.
Timer.TimerSummary()
#-------------------------------------------------------------------------------
# Plot the things.
#-------------------------------------------------------------------------------
if graphics:
from SpheralMatplotlib import *
xans = [x.x for x in flattenFieldList(position)]
# Interpolated values.
p1 = plotFieldList(fRK,
plotStyle = "g*",
| |
# Copyright [2020] [Toyota Research Institute]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for maccor protcol files to biologic modulo bat protcol files"""
import os
import unittest
import xmltodict
import copy
import pandas as pd
from monty.tempfile import ScratchDir
from pydash import get
from beep.protocol import (
PROTOCOL_SCHEMA_DIR,
BIOLOGIC_TEMPLATE_DIR,
PROCEDURE_TEMPLATE_DIR,
)
from beep.protocol.maccor import Procedure
from beep.protocol.maccor_to_biologic_mb import (
MaccorToBiologicMb,
CycleAdvancementRules,
CycleAdvancementRulesSerializer
)
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
class ConversionTest(unittest.TestCase):
    """Tests for converting Maccor protocol files to Biologic Modulo Bat protocols."""

    maxDiff = None

    def maccor_values_to_biologic_value_and_unit_test(self, func, tests):
        """Helper: assert ``func`` maps each Maccor value string to the expected pair.

        Args:
            func: converter callable taking a Maccor value string and returning
                a ``(value_str, unit_str)`` tuple.
            tests: iterable of ``(input_str, expected_value_str, expected_unit)``.
        """
        for value_str, expected_value_str, expected_unit in tests:
            actual_value, actual_unit = func(value_str)
            self.assertEqual(actual_value, expected_value_str)
            self.assertEqual(actual_unit, expected_unit)

    def test_convert_volts(self):
        converter = MaccorToBiologicMb()
        tests = [
            ("0.1429", "142.900", "mV"),
            ("0.1429e3", "142.900", "V"),
            ("159.3624", "159362.400", "mV"),
            ("152.9", "152.900", "V")
        ]
        self.maccor_values_to_biologic_value_and_unit_test(
            converter._convert_volts,
            tests,
        )

    def test_convert_amps(self):
        converter = MaccorToBiologicMb()
        tests = [
            ("0.1429", "142.900", "mA"),
            ("1.23", "1.230", "A"),
            ("152.9", "152.900", "A"),
            ("1.2e-4", "120.000", "\N{Micro Sign}A")
        ]
        self.maccor_values_to_biologic_value_and_unit_test(
            converter._convert_amps,
            tests,
        )

    def test_convert_watts(self):
        converter = MaccorToBiologicMb()
        tests = [
            ("0.1429", "142.900", "mW"),
            ("1.23", "1.230", "W"),
            ("152.9", "152.900", "W"),
            ("1.2e-5", "12.000", "\N{Micro Sign}W")
        ]
        self.maccor_values_to_biologic_value_and_unit_test(
            converter._convert_watts,
            tests,
        )

    def test_convert_ohms(self):
        converter = MaccorToBiologicMb()
        tests = [
            ("0.1429", "142.900", "mOhms"),
            ("1.459e4", "14.590", "kOhms"),
            ("152.9", "152.900", "Ohms"),
            ("1.2e-4", "120.000", "\N{Micro Sign}Ohms")
        ]
        self.maccor_values_to_biologic_value_and_unit_test(
            converter._convert_ohms,
            tests,
        )

    def test_convert_time(self):
        converter = MaccorToBiologicMb()
        tests = [
            ("::.01", "10.000", "ms"),
            ("03::", "3.000", "h"),
            ("03:30:", "210.000", "mn"),
            ("00:00:50", "50.000", "s")
        ]
        self.maccor_values_to_biologic_value_and_unit_test(
            converter._convert_time,
            tests,
        )

    def single_step_to_single_seq_test(self, test_step_xml, diff_dict):
        """Test utility for proc-step-to-seq conversion.

        Parses ``test_step_xml``, converts its single TestStep, and asserts the
        resulting sequence equals the converter's blank sequence updated with
        ``diff_dict``.
        """
        proc = xmltodict.parse(test_step_xml)
        test_step = proc["MaccorTestProcedure"]["ProcSteps"]["TestStep"]
        converter = MaccorToBiologicMb()

        expected = converter._blank_seq.copy()
        expected["Ns"] = 0
        expected["lim1_seq"] = 1
        expected["lim2_seq"] = 1
        expected["lim3_seq"] = 1
        expected.update(diff_dict)

        step_num = 1
        seq_nums_by_step_num = {
            step_num: [0],
            step_num + 1: [1],
        }
        result = converter._convert_step_parts(
            step_parts=[test_step],
            step_num=step_num,
            seq_nums_by_step_num=seq_nums_by_step_num,
            goto_lowerbound=0,
            goto_upperbound=3,
            end_step_num=4,
        )[0]
        for key, value in expected.items():
            self.assertEqual(
                value,
                result[key],
                msg="Expected {0}: {1} got {0}: {2}".format(key, value, result[key]),
            )

    def test_partition_steps_into_techniques(self):
        converter = MaccorToBiologicMb()
        ast = converter.load_maccor_ast(
            os.path.join(PROCEDURE_TEMPLATE_DIR, "diagnosticV5.000")
        )
        steps = get(ast, "MaccorTestProcedure.ProcSteps.TestStep")
        # assertGreater gives a useful failure message (was assertEqual(True, ...)).
        self.assertGreater(len(steps), 71)

        # existence of looped tech 2
        nested_loop_open_idx = 36
        nested_loop_open_type = get(steps[nested_loop_open_idx], 'StepType')
        self.assertEqual(nested_loop_open_type, "Do 1")
        nested_loop_close_idx = 68
        nested_loop_close_type = get(steps[nested_loop_close_idx], 'StepType')
        self.assertEqual(nested_loop_close_type, "Loop 1")

        technique_partitions = converter._partition_steps_into_techniques(steps)
        self.assertEqual(3, len(technique_partitions))
        partition1, partition2, partition3 = technique_partitions

        self.assertEqual(partition1.technique_num, 1)
        self.assertEqual(partition2.technique_num, 2)
        self.assertEqual(partition3.technique_num, 4)

        self.assertEqual(partition1.tech_does_loop, False)
        self.assertEqual(partition2.tech_does_loop, True)
        self.assertEqual(partition3.tech_does_loop, False)

        self.assertEqual(partition2.num_loops, 1000)

        self.assertEqual(partition1.step_num_offset, 0)
        self.assertEqual(partition2.step_num_offset, nested_loop_open_idx + 1)
        self.assertEqual(partition3.step_num_offset, nested_loop_close_idx + 1)

        self.assertEqual(len(partition1.steps), 36)
        # trim opening/closing loops
        self.assertEqual(len(partition2.steps), nested_loop_close_idx - nested_loop_open_idx - 1)
        self.assertEqual(len(partition3.steps), 27)

    def test_apply_step_mappings_global_noop(self):
        xml = (step_with_bounds_template).format(
            voltage_v_lowerbound=2.2,
            voltage_v_upperbound=4.2,
            current_a_lowerbound=0.1,
            current_a_upperbound=1.0,
        )
        step = get(
            xmltodict.parse(xml, process_namespaces=False, strip_whitespace=True),
            'TestStep',
        )
        converter = MaccorToBiologicMb()

        # no limits no mappings
        unmapped_step = converter._apply_step_mappings([step])[0]
        self.assertEqual(step, unmapped_step)

        # global limits wider than the step's own bounds must not modify the step
        converter.max_voltage_v = 10.0
        converter.min_voltage_v = -10.0
        converter.max_current_a = 10.0
        converter.min_current_a = -10.0
        unmapped_step = converter._apply_step_mappings([step])[0]
        self.assertEqual(step, unmapped_step)

    def test_apply_step_mappings_global_voltage(self):
        xml = (step_with_bounds_template).format(
            voltage_v_lowerbound=2.2,
            voltage_v_upperbound=4.2,
            current_a_lowerbound=0.1,
            current_a_upperbound=1.0,
        )
        step = get(
            xmltodict.parse(xml, process_namespaces=False, strip_whitespace=True),
            'TestStep',
        )
        converter = MaccorToBiologicMb()
        converter.max_voltage_v = 3.9
        converter.min_voltage_v = 3.1

        step_without_voltage_end_entries = converter._apply_step_mappings([step])[0]
        end_entries = get(step_without_voltage_end_entries, "Ends.EndEntry")
        self.assertEqual(2, len(end_entries))
        self.assertEqual("Current", get(end_entries[0], "EndType"))
        self.assertEqual("Current", get(end_entries[1], "EndType"))

        # check there was not mutation
        original_end_entries = get(step, 'Ends.EndEntry')
        self.assertEqual(4, len(original_end_entries))

    def test_apply_step_mappings_all_global_limits(self):
        xml = (step_with_bounds_template).format(
            voltage_v_lowerbound=2.2,
            voltage_v_upperbound=4.2,
            current_a_lowerbound=0.1,
            current_a_upperbound=1.0,
        )
        step = get(
            xmltodict.parse(xml, process_namespaces=False, strip_whitespace=True),
            'TestStep',
        )
        converter = MaccorToBiologicMb()
        converter.max_voltage_v = 3.9
        converter.min_voltage_v = 3.1
        converter.max_current_a = 0.7
        converter.min_current_a = 0.3

        step_with_no_end_entries = converter._apply_step_mappings([step])[0]
        self.assertEqual(None, get(step_with_no_end_entries, "Ends.EndEntry"))

        # check there was not mutation
        original_end_entries = get(step, 'Ends.EndEntry')
        self.assertEqual(4, len(original_end_entries))

    def test_rest_step_conversion(self):
        xml = (
            '<?xml version="1.0" encoding="UTF-8"?>'
            "<MaccorTestProcedure>"
            "  <ProcSteps>"
            "    <TestStep>"
            "      <StepType> Rest </StepType>"
            "      <StepMode>        </StepMode>"
            "      <StepValue></StepValue>"
            "      <Limits/>"
            "      <Ends>"
            "        <EndEntry>"
            "          <EndType>Voltage </EndType>"
            "          <SpecialType> </SpecialType>"
            "          <Oper>>= </Oper>"
            "          <Step>002</Step>"
            "          <Value>4.4</Value>"
            "        </EndEntry>"
            "        <EndEntry>"
            "          <EndType>Voltage </EndType>"
            "          <SpecialType> </SpecialType>"
            "          <Oper><= </Oper>"
            "          <Step>002</Step>"
            "          <Value>2.5</Value>"
            "        </EndEntry>"
            "      </Ends>"
            "      <Reports>"
            "        <ReportEntry>"
            "          <ReportType>Voltage</ReportType>"
            "          <Value>2.2</Value>"
            "        </ReportEntry>"
            "      </Reports>"
            "      <Range>A</Range>"
            "      <Option1>N</Option1>"
            "      <Option2>N</Option2>"
            "      <Option3>N</Option3>"
            "      <StepNote></StepNote>"
            "    </TestStep>"
            "  </ProcSteps>"
            "</MaccorTestProcedure>"
        )
        diff_dict = {
            "ctrl_type": "Rest",
            "Apply I/C": "I",
            "N": "1.00",
            "charge/discharge": "Charge",
            "lim_nb": 2,
            "lim1_type": "Ecell",
            "lim1_comp": ">",
            "lim1_value": "4.400",
            "lim1_value_unit": "V",
            "lim2_type": "Ecell",
            "lim2_comp": "<",
            "lim2_value": "2.500",
            "lim2_value_unit": "V",
            "rec_nb": 1,
            "rec1_type": "Ecell",
            "rec1_value": "2.200",
            "rec1_value_unit": "V",
        }
        self.single_step_to_single_seq_test(xml, diff_dict)

    def test_discharge_current_step_conversion(self):
        xml = (
            '<?xml version="1.0" encoding="UTF-8"?>'
            "<MaccorTestProcedure>"
            "  <ProcSteps>"
            "    <TestStep>"
            # mispelling taken directly from sample file
            "      <StepType>Dischrge</StepType>"
            "      <StepMode>Current </StepMode>"
            "      <StepValue>1.0</StepValue>"
            "      <Limits/>"
            "      <Ends>"
            "        <EndEntry>"
            "          <EndType>StepTime</EndType>"
            "          <SpecialType> </SpecialType>"
            "          <Oper> = </Oper>"
            "          <Step>002</Step>"
            "          <Value>00:00:30</Value>"
            "        </EndEntry>"
            "        <EndEntry>"
            "          <EndType>Voltage </EndType>"
            "          <SpecialType> </SpecialType>"
            "          <Oper><= </Oper>"
            "          <Step>002</Step>"
            "          <Value>2.7</Value>"
            "        </EndEntry>"
            "        <EndEntry>"
            "          <EndType>Voltage </EndType>"
            "          <SpecialType> </SpecialType>"
            "          <Oper>>= </Oper>"
            "          <Step>002</Step>"
            "          <Value>4.4</Value>"
            "        </EndEntry>"
            "      </Ends>"
            "      <Reports>"
            "        <ReportEntry>"
            "          <ReportType>Voltage </ReportType>"
            "          <Value>0.001</Value>"
            "        </ReportEntry>"
            "        <ReportEntry>"
            "          <ReportType>StepTime</ReportType>"
            # 10ms
            "          <Value>::.01</Value>"
            "        </ReportEntry>"
            "      </Reports>"
            "      <Range>A</Range>"
            "      <Option1>N</Option1>"
            "      <Option2>N</Option2>"
            "      <Option3>N</Option3>"
            "      <StepNote></StepNote>"
            "    </TestStep>"
            "  </ProcSteps>"
            "</MaccorTestProcedure>"
        )
        diff_dict = {
            "ctrl_type": "CC",
            "Apply I/C": "I",
            "ctrl1_val": "1.000",
            "ctrl1_val_unit": "A",
            "ctrl1_val_vs": "<None>",
            "N": "15.00",
            "charge/discharge": "Discharge",
            "lim_nb": 3,
            "lim1_type": "Time",
            "lim1_comp": ">",
            "lim1_value": "30.000",
            "lim1_value_unit": "s",
            "lim2_type": "Ecell",
            "lim2_comp": "<",
            "lim2_value": "2.700",
            "lim2_value_unit": "V",
            "lim3_type": "Ecell",
            "lim3_comp": ">",
            "lim3_value": "4.400",
            "lim3_value_unit": "V",
            "rec_nb": 2,
            "rec1_type": "Ecell",
            "rec1_value": "1.000",
            "rec1_value_unit": "mV",
            "rec2_type": "Time",
            "rec2_value": "10.000",
            "rec2_value_unit": "ms",
        }
        self.single_step_to_single_seq_test(xml, diff_dict)

    def test_conversion_with_updated(self):
        """End-to-end conversion of a generated diagnostic procedure file."""
        converter = MaccorToBiologicMb()
        with ScratchDir(".") as scratch_dir:
            # Generate a protocol that can be used with the existing cells for testing purposes
            reg_params = {
                'project_name': {0: 'FormDegrade'},
                'seq_num': {0: 0},
                'template': {0: 'diagnosticV5.000'},
                'charge_constant_current_1': {0: 3.0},
                'charge_percent_limit_1': {0: 30},
                'charge_constant_current_2': {0: 3.0},
                'charge_cutoff_voltage': {0: 4.3},
                'charge_constant_voltage_time': {0: 60},
                'charge_rest_time': {0: 5},
                'discharge_constant_current': {0: 2.0},
                'discharge_cutoff_voltage': {0: 2.7},
                'discharge_rest_time': {0: 15},
                'cell_temperature_nominal': {0: 25},
                'cell_type': {0: 'LiFun240'},
                'capacity_nominal': {0: 0.240},
                'diagnostic_type': {0: 'HPPC+RPT'},
                'diagnostic_parameter_set': {0: 'LiFunForm'},
                'diagnostic_start_cycle': {0: 100},
                'diagnostic_interval': {0: 100}
            }
            protocol_params_df = pd.DataFrame.from_dict(reg_params)
            index = 0
            protocol_params = protocol_params_df.iloc[index]
            diag_params_df = pd.read_csv(
                os.path.join(PROCEDURE_TEMPLATE_DIR, "PreDiag_parameters - DP.csv")
            )
            diagnostic_params = diag_params_df[
                diag_params_df["diagnostic_parameter_set"]
                == protocol_params["diagnostic_parameter_set"]
            ].squeeze()

            procedure = Procedure.generate_procedure_regcyclev3(index, protocol_params)
            procedure.generate_procedure_diagcyclev3(
                protocol_params["capacity_nominal"], diagnostic_params
            )
            procedure.set_skip_to_end_diagnostic(4.4, 2.0, step_key="070", new_step_key="095")
            procedure.to_file(os.path.join(scratch_dir, "BioTest_000001.000"))

            # Setup the converter and run it
            def set_i_range(tech_num, seq, idx):
                seq_copy = copy.deepcopy(seq)
                seq_copy["I Range"] = "100 mA"
                return seq_copy

            converter.seq_mappers.append(set_i_range)
            converter.min_voltage_v = 2.0
            converter.max_voltage_v = 4.4
            converter.convert(os.path.join(scratch_dir, "BioTest_000001.000"),
                              TEST_FILE_DIR, "BioTest_000001")
            # Use a context manager so the file handle is closed (was leaked).
            with open(os.path.join(TEST_FILE_DIR, "BioTest_000001.mps"),
                      encoding="ISO-8859-1") as f:
                mps_lines = f.readlines()
            control_list = [
                'ctrl_type', 'Rest', 'CC', 'Rest', 'CC', 'CV', 'CC', 'Loop', 'CC', 'CV', 'Rest', 'CC',
                'Rest', 'CC', 'CC', 'Loop', 'CV', 'CC', 'CC', 'CV', 'CC', 'CC', 'CV', 'CC', 'CC', 'CV',
                'CC', 'CC', 'CC', 'CV', 'Rest', 'CC', 'Rest', 'Loop'
            ]
            self.assertListEqual(control_list, mps_lines[35].split())
            value_list = [
                'ctrl1_val', '240.000', '34.300', '4.400', '34.300', '100.000', '80.000', '4.400', '240.000',
                '180.000', '80.000', '100.000', '3.000', '80.000', '48.000', '4.400', '48.000', '48.000', '4.400',
                '240.000', '48.000', '4.400', '480.000', '720.000', '720.000', '4.300', '480.000', '100.000'
            ]
            self.assertListEqual(value_list, mps_lines[37].split())
            voltage_min = '\tEcell min = 2.00 V\n'
            self.assertEqual(voltage_min, mps_lines[9])
            voltage_max = '\tEcell max = 4.40 V\n'
            self.assertEqual(voltage_max, mps_lines[10])

    def test_cycle_transition_serialization(self):
        """CycleAdvancementRules must round-trip through JSON serialization."""
        cycle_transition_rules = CycleAdvancementRules(
            tech_num=2,
            tech_does_loop=True,
            adv_cycle_on_start=1,
            adv_cycle_on_tech_loop=1,
            adv_cycle_seq_transitions={(2, 5): 1, (14, 17): 1},
            debug_adv_cycle_on_step_transitions={(72, 71): 1, (72, 75): 1},
        )
        serializer = CycleAdvancementRulesSerializer()
        json_str = serializer.json(cycle_transition_rules)
        parsed_cycle_transition_rules = serializer.parse_json(json_str)
        # repr() instead of calling the dunder directly.
        self.assertEqual(
            repr(cycle_transition_rules),
            repr(parsed_cycle_transition_rules),
        )
step_with_bounds_template = (
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
"<TestStep>\n"
# mispelling taken directly from sample file
" <StepType>Dischrge</StepType>\n"
" <StepMode>Current </StepMode>\n"
" <StepValue>1.0</StepValue>\n"
" <Limits/>\n"
" <Ends>\n"
" <EndEntry>\n"
" | |
##########################################
# Semiconductor Calculations Package
# Author: <NAME>
# Date: 10/10/2021
##########################################
# Changelog:
# 4/25/2021 - Started Package
# 10/10/2021 - Started periodic table of elements (materials.py)
# 10/19/2021 - Added more functionality
##########################################
from semic.constants.constants import value
# Lookup tables for crystal classification.  The trailing empty string in each
# list means "unspecified" and is referenced by index in the classes below.
CRYSTAL_ORIENTATION = ['Simple Cubic','Face-centered Cubic', #1
                       'Body-centered Cubic', 'Simple Tetragonal', #3
                       'Body-centered Tetragonal', 'Simple Orthorhombic', #5
                       'Base-centered Orthorhombic', 'Body-centered Orthorhombic', #7
                       'Face-centered Orthorhombic', 'Simple Monoclinic', #9
                       'Base-centered Monoclinic', 'Triclinic', 'Trigonal', #12
                       'Hexagonal', ''] #14
CRYSTAL_STRUCTURE = ['Diamond', 'Zincblende', 'Wurtzite', 'Rock-Salt', '', 'Hexagonal']
ALLOYS = ['Binary', 'Ternary', 'Quaternary', '']
# Periodic-table group / compound family; index 13 ('') means "unspecified".
GROUP = ['I','II','III','IV','V','VI','VII','VIII',
         'II-VI','III-V','IV-IV','IV-VI',ALLOYS, '']
"""
CREATE YOUR OWN SEMICONDUCTOR
"""
class Semiconductor:
    """
    Material Properties and Object Parameters for a Custom Semiconductor

    Blank template: the class attributes give the crystal classification and
    the instance attributes hold the material constants (units noted inline).
    Populate the fields after instantiation.
    """
    group = GROUP[13] #Empty
    crystal_structure = CRYSTAL_STRUCTURE[4] #Empty
    crystal_orientation = CRYSTAL_ORIENTATION[14] #Empty
    def __init__(self):
        """
        Custom semiconductor material properties
        All properties are initialized to 0 or empty string.
        """
        self.abstemp = 0 #Kelvin
        self.density = 0 #g cm^-3
        self.bandGap = 0 #eV
        self.gapType = '' #Direct/Indirect
        self.debyeTemp = 0 #Kelvin
        self.intrinsicDebyeLength = 0 #microns
        self.electronAffinity = 0 #eV
        self.dielectricConstant = 0 #Epsilon_R a.k.a K (Kappa)
        self.latticeConstant = 0 #Angstroms
        # Thermal energy k_B * T in eV (0 until abstemp is set above).
        self.boltzmannTemp = value("Boltzmann constant in eV/K") * self.abstemp #eV
        self.intrinsicCarrierConcentration = 0 #cm^-3
        self.conductionDensityOfStates = 0 #cm^-3
        self.valenceDensityOfStates = 0 #cm^-3
        self.intrinsicResistivity = 0 #Ohm-cm
        self.opticalPhononEnergy = 0 #eV
        self.electronDriftMobility = 0 #cm^2 V^-1 s^-1
        self.holeDriftMobility = 0 #cm^2 V^-1 s^-1
        self.approxBreakdownField = 0 #V cm^-1
        self.thermalConductivity = 0 #W cm^-1 degC^-1
        self.thermalDiffusivity = 0 #cm^2 s^-1
        self.linearThermalExpansion = 0 #degC^-1
        self.refractionIndex = 0
        self.augerRecombinationCoefficientN = 0 #cm^6 s^-1
        self.augerRecombinationCoefficientP = 0 #cm^6 s^-1
"""
BEGIN ELEMENTAL SEMICONDUCTORS
"""
class Si:
    """
    Material Properties and Object Parameters for Silicon

    Reference values tabulated at 300 K; units are noted inline.
    """
    group = GROUP[3] #Group IV Semiconductor
    crystal_structure = CRYSTAL_STRUCTURE[0] #Diamond
    crystal_orientation = CRYSTAL_ORIENTATION[1] #Face-centered Cubic
    def __init__(self):
        """
        Silicon material properties at 300 Kelvin
        """
        self.abstemp = 300 #Kelvin
        self.density = 2.329 #g cm^-3
        self.bandGap = 1.12 #eV
        self.gapType = 'Indirect'
        self.debyeTemp = 640 #Kelvin
        self.intrinsicDebyeLength = 24 #microns
        self.electronAffinity = 4.05 #eV
        self.dielectricConstant = 11.7 #Epsilon_R a.k.a K (Kappa)
        self.latticeConstant = 5.43095 #Angstroms
        # Thermal energy k_B * T in eV at abstemp.
        self.boltzmannTemp = value("Boltzmann constant in eV/K") * self.abstemp #eV
        self.intrinsicCarrierConcentration = 1E10 #cm^-3
        self.conductionDensityOfStates = 2.8E19 #cm^-3
        self.valenceDensityOfStates = 1.0E19 #cm^-3
        self.intrinsicResistivity = 2.3E5 #Ohm-cm
        self.opticalPhononEnergy = 0.063 #eV
        self.electronDriftMobility = 1500 #cm^2 V^-1 s^-1
        self.holeDriftMobility = 475 #cm^2 V^-1 s^-1
        self.approxBreakdownField = 3E5 #V cm^-1
        self.thermalConductivity = 148 #W cm^-1 degC^-1
        self.thermalDiffusivity = 0.8 #cm^2 s^-1
        self.linearThermalExpansion = 2.6E-6 #degC^-1
        self.refractionIndex = 3.42
        self.augerRecombinationCoefficientN = 1.1E-30 #cm^6 s^-1
        self.augerRecombinationCoefficientP = 3.0E-31 #cm^6 s^-1
    def tempDependence(self,temp):
        """Placeholder for temperature-dependent property scaling (not yet implemented)."""
        pass
class Ge:
    """
    Material Properties and Object Parameters for Germanium

    Reference values tabulated at 300 K; units are noted inline.
    """
    group = GROUP[3] # Group IV Semiconductor
    crystal_structure = CRYSTAL_STRUCTURE[0] #Diamond
    crystal_orientation = CRYSTAL_ORIENTATION[1] #Face-centered Cubic
    def __init__(self):
        """
        Germanium material properties at 300 Kelvin
        """
        self.abstemp = 300 #Kelvin
        self.density = 5.3267 #g cm^-3
        self.bandGap = 0.661 #eV
        self.gapType = 'Indirect'
        self.debyeTemp = 374 #Kelvin
        self.intrinsicDebyeLength = 0.68 #microns
        self.electronAffinity = 4.0 #eV
        self.dielectricConstant = 16.0 #Epsilon_R a.k.a K (Kappa)
        self.latticeConstant = 5.658 #Angstroms
        # Thermal energy k_B * T in eV at abstemp.
        self.boltzmannTemp = value("Boltzmann constant in eV/K") * self.abstemp #eV
        self.intrinsicCarrierConcentration = 2.4E13 #cm^-3
        self.conductionDensityOfStates = 1.04E19 #cm^-3
        self.valenceDensityOfStates = 6.0E18 #cm^-3
        self.intrinsicResistivity = 46 #Ohm-cm
        self.opticalPhononEnergy = 0.037 #eV
        self.electronDriftMobility = 3900 #cm^2 V^-1 s^-1
        self.holeDriftMobility = 1900 #cm^2 V^-1 s^-1
        self.approxBreakdownField = 1E5 #V cm^-1
        self.thermalConductivity = 0.6 #W cm^-1 degC^-1
        self.thermalDiffusivity = 0.36 #cm^2 s^-1
        self.linearThermalExpansion = 5.8E-6 #degC^-1
        self.refractionIndex = 4.00
        self.augerRecombinationCoefficientN = 1E-30 #cm^6 s^-1
        self.augerRecombinationCoefficientP = 1E-30 #cm^6 s^-1
"""
END OF ELEMENTAL SEMICONDUCTORS
"""
"""
BEGIN COMPOUND SEMICONDUCTORS
"""
"""
IV-IV
"""
class SiC:
    """
    Material Properties and Object Parameters for Silicon Carbide [Polytypes:(3C,4H,6H)]

    Most instance attributes are dicts keyed by polytype ("3C", "4H", "6H");
    values not found in the literature are the string "unknown" or ''.
    """
    group = GROUP[10] # IV-IV Semiconductor
    crystal_structure = {"3C":CRYSTAL_STRUCTURE[1],"4H":CRYSTAL_STRUCTURE[2],"6H":CRYSTAL_STRUCTURE[2]} #Zincblende,Wurtzite,Wurtzite
    crystal_orientation = {"3C":CRYSTAL_ORIENTATION[1],"4H":CRYSTAL_ORIENTATION[13],"6H":CRYSTAL_ORIENTATION[13]} #Cubic, Hexagonal, Hexagonal
    def __init__(self):
        """
        Silicon Carbide material properties at 300 Kelvin for 3C,4H,6H
        """
        self.abstemp = 300 #Kelvin
        self.density = {"3C":3.21,"4H":3.211,"6H":3.211} #g cm^-3
        self.bandGap = {"3C":2.36,"4H":3.23,"6H":3.00} #eV
        self.gapType = {"3C":"Indirect","4H":"Indirect","6H":"Indirect"}
        self.debyeTemp = {"3C":1200,"4H":1300,"6H":1200} #Kelvin
        self.intrinsicDebyeLength = "unknown" #microns
        self.electronAffinity = "unknown" #eV
        # NOTE(review): the "high frquency" keys for 4H/6H are misspelled while
        # 3C uses "high frequency" — consumers must use these exact keys, so a
        # rename would be a breaking change; flagging rather than fixing here.
        self.dielectricConstant = {"3C":{"static":9.72,
                                         "high frequency":6.52},
                                   "4H":{"static":{"\u2225 to c axis":10.03,
                                                   "\u27c2 to c axis":9.66},
                                         "high frquency":{"\u2225 to c axis":6.70,
                                                          "\u27c2 to c axis":6.52}},
                                   "6H":{"static":{"\u2225 to c axis":10.03,
                                                   "\u27c2 to c axis":9.66},
                                         "high frquency":{"\u2225 to c axis":6.70,
                                                          "\u27c2 to c axis":6.52}}} #Epsilon_R a.k.a K (Kappa)
        self.latticeConstant = {"3C":4.3596,
                                "4H":{"a":3.0730,"c":10.053},
                                "6H":{"a":3.0806,"c":15.1173}} #Angstroms
        # Thermal energy k_B * T in eV at abstemp.
        self.boltzmannTemp = value("Boltzmann constant in eV/K") * self.abstemp #eV
        self.intrinsicCarrierConcentration = {"3C":"","4H":"","6H":""} #cm^-3
        self.conductionDensityOfStates = {"3C":1.5e19,"4H":1.7e19,"6H":8.9e19} #cm^-3
        self.valenceDensityOfStates = {"3C":1.2e19,"4H":2.5e19,"6H":2.5e19} #cm^-3
        self.intrinsicResistivity = "unknown" #Ohm-cm
        # NOTE(review): values ~100 look like meV, not eV as the unit comment
        # claims (other classes store ~0.1 eV here) — confirm against source data.
        self.opticalPhononEnergy = {"3C":102.8,"4H":104.2,"6H":104.2} #eV
        self.electronDriftMobility = {"3C":800,"4H":900,"6H":400} #cm^2 V^-1 s^-1
        self.holeDriftMobility = {"3C":320,"4H":120,"6H":90} #cm^2 V^-1 s^-1
        self.approxBreakdownField = {"3C":1e6,"4H":{"min":3e6,"max":5e6},"6H":{"min":3e6,"max":5e6}} #V cm^-1
        self.thermalConductivity = {"3C":3.6,"4H":3.7,"6H":4.9} #W cm^-1 degC^-1
        self.thermalDiffusivity = {"3C":1.6,"4H":1.7,"6H":2.2} #cm^2 s^-1
        self.linearThermalExpansion = {"3C":3.8e-6,"4H":"unknown",
                                       "6H":{"\u2225 to c axis":4.7e-6,"\u27c2 to c axis":4.3e-6}} #degC^-1
        self.refractionIndex = {"3C":2.55,"4H":{"\u2225 to c axis":2.59,"\u27c2 to c axis":2.55},
                                "6H":{"\u2225 to c axis":2.59,"\u27c2 to c axis":2.55}}
        self.augerRecombinationCoefficientN = "unknown" #cm^6 s^-1
        self.augerRecombinationCoefficientP = "unknown" #cm^6 s^-1
"""
III-V -> GROUP[9]
"""
class AlN:
    """
    Material Properties and Object Parameters for a Aluminum Nitride (Hexagonal Polytype)
    """
    group = GROUP[9] #III-V
    crystal_structure = CRYSTAL_STRUCTURE[2] #Wurtzite
    crystal_orientation = CRYSTAL_ORIENTATION[13] #Hexagonal
    def __init__(self):
        """
        Aluminum Nitride material properties at 300K
        """
        self.abstemp = 300 #Kelvin
        self.density = 3.23 #g cm^-3
        self.bandGap = 6.2 #eV
        self.gapType = 'Direct' #Direct/Indirect
        self.debyeTemp = 1150 #Kelvin
        self.intrinsicDebyeLength = "TBD" #microns
        self.electronAffinity = 0.6 #eV
        self.dielectricConstant = {"static":8.5,"high frequency":4.6} #Epsilon_R a.k.a K (Kappa)
        self.latticeConstant = {"a":3.112,"c":4.982} #Angstroms
        # Thermal energy k_B * T in eV at abstemp.
        self.boltzmannTemp = value("Boltzmann constant in eV/K") * self.abstemp #eV
        self.intrinsicCarrierConcentration = 4.598e-33 #cm^-3
        self.conductionDensityOfStates = 6.3e18 #cm^-3
        self.valenceDensityOfStates = 4.8e20 #cm^-3
        self.intrinsicResistivity = 4.525e48 #Ohm-cm
        self.opticalPhononEnergy = 0.099 #eV
        self.electronDriftMobility = 300 #cm^2 V^-1 s^-1
        self.holeDriftMobility = 14 #cm^2 V^-1 s^-1
        self.approxBreakdownField = {"min":1.2e6,"max":1.8e6} #V cm^-1
        self.thermalConductivity = 2.85 #W cm^-1 degC^-1
        self.thermalDiffusivity = 1.47 #cm^2 s^-1
        # Keys are the a-axis and c-axis expansion coefficients.
        self.linearThermalExpansion = {"\u03b1\u2090":4.2e-6,"\u03b1_c":5.3e-6} #degC^-1
        self.refractionIndex = 2.15
        self.augerRecombinationCoefficientN = "unknown" #cm^6 s^-1
        self.augerRecombinationCoefficientP = "unknown" #cm^6 s^-1
class AlP:
    """
    Material Properties and Object Parameters for Aluminum Phosphide

    Placeholder entry: every material constant is still initialized to 0 and
    the crystal classification is left unspecified (real data TBD).
    """
    group = GROUP[9] #III-V
    crystal_structure = CRYSTAL_STRUCTURE[4] #Empty
    crystal_orientation = CRYSTAL_ORIENTATION[14] #Empty
    def __init__(self):
        """
        Aluminum Phosphide material properties (placeholder)
        All properties are initialized to 0 until real data is entered.
        """
        self.abstemp = 0 #Kelvin
        self.density = 0 #g cm^-3
        self.bandGap = 0 #eV
        self.gapType = '' #Direct/Indirect
        self.debyeTemp = 0 #Kelvin
        self.intrinsicDebyeLength = 0 #microns
        self.electronAffinity = 0 #eV
        self.dielectricConstant = 0 #Epsilon_R a.k.a K (Kappa)
        self.latticeConstant = 0 #Angstroms
        # Thermal energy k_B * T in eV (0 until abstemp is set above).
        self.boltzmannTemp = value("Boltzmann constant in eV/K") * self.abstemp #eV
        self.intrinsicCarrierConcentration = 0 #cm^-3
        self.conductionDensityOfStates = 0 #cm^-3
        self.valenceDensityOfStates = 0 #cm^-3
        self.intrinsicResistivity = 0 #Ohm-cm
        self.opticalPhononEnergy = 0 #eV
        self.electronDriftMobility = 0 #cm^2 V^-1 s^-1
        self.holeDriftMobility = 0 #cm^2 V^-1 s^-1
        self.approxBreakdownField = 0 #V cm^-1
        self.thermalConductivity = 0 #W cm^-1 degC^-1
        self.thermalDiffusivity = 0 #cm^2 s^-1
        self.linearThermalExpansion = 0 #degC^-1
        self.refractionIndex = 0
        self.augerRecombinationCoefficientN = 0 #cm^6 s^-1
        self.augerRecombinationCoefficientP = 0 #cm^6 s^-1
class AlAs:
    """
    Material Properties and Object Parameters for Aluminum Arsenide

    Placeholder entry: every material constant is still initialized to 0 and
    the crystal classification is left unspecified (real data TBD).
    """
    group = GROUP[9] #III-V
    crystal_structure = CRYSTAL_STRUCTURE[4] #Empty
    crystal_orientation = CRYSTAL_ORIENTATION[14] #Empty
    def __init__(self):
        """
        Aluminum Arsenide material properties (placeholder)
        All properties are initialized to 0 until real data is entered.
        """
        self.abstemp = 0 #Kelvin
        self.density = 0 #g cm^-3
        self.bandGap = 0 #eV
        self.gapType = '' #Direct/Indirect
        self.debyeTemp = 0 #Kelvin
        self.intrinsicDebyeLength = 0 #microns
        self.electronAffinity = 0 #eV
        self.dielectricConstant = 0 #Epsilon_R a.k.a K (Kappa)
        self.latticeConstant = 0 #Angstroms
        # Thermal energy k_B * T in eV (0 until abstemp is set above).
        self.boltzmannTemp = value("Boltzmann constant in eV/K") * self.abstemp #eV
        self.intrinsicCarrierConcentration = 0 #cm^-3
        self.conductionDensityOfStates = 0 #cm^-3
        self.valenceDensityOfStates = 0 #cm^-3
        self.intrinsicResistivity = 0 #Ohm-cm
        self.opticalPhononEnergy = 0 #eV
        self.electronDriftMobility = 0 #cm^2 V^-1 s^-1
        self.holeDriftMobility = 0 #cm^2 V^-1 s^-1
        self.approxBreakdownField = 0 #V cm^-1
        self.thermalConductivity = 0 #W cm^-1 degC^-1
        self.thermalDiffusivity = 0 #cm^2 s^-1
        self.linearThermalExpansion = 0 #degC^-1
        self.refractionIndex = 0
        self.augerRecombinationCoefficientN = 0 #cm^6 s^-1
        self.augerRecombinationCoefficientP = 0 #cm^6 s^-1
class AlSb:
"""
Material Properties and Object Parameters for a Custom Semiconductor
"""
group = GROUP[9] #III-V
crystal_structure = CRYSTAL_STRUCTURE[4] #Empty
crystal_orientation = CRYSTAL_ORIENTATION[14] #Empty
def __init__(self):
"""
Custom semiconductor material properties
All properties are initialized to 0.
"""
self.abstemp = 0 #Kelvin
self.density = 0 #g cm^-3
self.bandGap = 0 #eV
self.gapType = '' #Direct/Indirect
self.debyeTemp = 0 #Kelvin
self.intrinsicDebyeLength = 0 #microns
| |
<reponame>kraupn3r/intranet<gh_stars>0
from django.test import TestCase, Client, override_settings
from django.urls import reverse
from django.http import HttpResponseForbidden
from django.contrib.auth.models import Permission, Group, User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import timezone
from accounts.models import UserProfile
from news.models import KnowledgeCategory, DocumentF, DocQuestion, DocFile, \
NewsFile, DocumentF, News, NotificationReadFlag, Notification
from datetime import date, timedelta
import json
import tempfile
import shutil
MEDIA_ROOT = tempfile.mkdtemp()
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class TestKnowledgeCategoryListView(TestCase):
    @classmethod
    def tearDownClass(cls):
        # Remove the temporary MEDIA_ROOT that collected uploaded test files,
        # then let TestCase run its normal class-level teardown.
        shutil.rmtree(MEDIA_ROOT, ignore_errors=True)
        super().tearDownClass()
@classmethod
def setUpTestData(cls):
test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
test_user2 = User.objects.create_user(username='testuser2', password='<PASSWORD>')
test_user2.save()
test_user1.save()
test_user1_userprofile = UserProfile.objects.create(
user=test_user1,
name='<NAME>',
telephone='11',
email='<EMAIL>',
employee_id='2',
departament='HR',
location='WAW'
)
test_user2_userprofile = UserProfile.objects.create(
user=test_user2,
name='<NAME>',
telephone='222222222',
email='<EMAIL>',
employee_id='3',
departament='sal',
location='PZN'
)
test_user1_userprofile.save()
test_user2_userprofile.save()
cls.test_category = KnowledgeCategory.objects.create(title='Test Category')
cls.test_category_2 = KnowledgeCategory.objects.create(title='Test Category 2')
i = 0
file_test_category = cls.test_category
while i < 11:
if i%2 == 0:
test_location = 'WAW'
elif i%3 == 0:
test_location = 'non'
else:
test_location = 'PZN'
if i%4 == 0:
test_departament = 'HR'
elif i%3 == 0:
test_departament = 'non'
else:
test_departament = 'sal'
instance = DocFile.objects.create(
file = SimpleUploadedFile(
'best_dasdasasdds.txt',
b'these are the file contents!'
),
title = 'test title',
date_created = timezone.now(),
target_departament = test_departament,
target_location = test_location,
category = file_test_category,
author = test_user1)
instance.save()
instance = DocumentF.objects.create(
title = 'test title',
body = 'test body',
author = test_user1,
date_created = timezone.now(),
target_departament = test_departament,
target_location = test_location,
category = file_test_category)
instance.save()
i +=1
def test_view_redirect_if_not_logged_in(self):
response = self.client.get(reverse('news:knowledge'))
self.assertEquals(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/knowledge/')
def test_view_if_logged_in(self):
login = self.client.login(username='testuser1', password='<PASSWORD>')
response = self.client.get(reverse('news:knowledge'))
self.assertEqual(str(response.context['user']), 'testuser1')
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'news/knowledgecategory_list.html')
def test_view_if_files_filtered(self):
login = self.client.login(username='testuser1', password='<PASSWORD>')
response = self.client.get(reverse('news:knowledge'))
self.assertEquals(response.context['files'].count(),8)
def test_view_if_files_filtered_2(self):
login = self.client.login(username='testuser2', password='<PASSWORD>')
response = self.client.get(reverse('news:knowledge'))
self.assertEquals(response.context['files'].count(),6)
def test_view_if_documents_filtered(self):
login = self.client.login(username='testuser1', password='<PASSWORD>')
response = self.client.get(reverse('news:knowledge'))
self.assertEquals(response.context['docs'].count(),8)
def test_view_if_documents_filtered_2(self):
login = self.client.login(username='testuser2', password='<PASSWORD>')
response = self.client.get(reverse('news:knowledge'))
self.assertEquals(response.context['docs'].count(),6)
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class TestKnowledgeCategoryDetailView(TestCase):
    """Tests for the knowledge category detail view (``news:knowledgedetail``).

    Checks login redirection, template usage, and that ``files``/``docs``
    are filtered both by the selected category and by the requesting user's
    ``departament``/``location``.
    """

    @classmethod
    def tearDownClass(cls):
        # Remove the temporary MEDIA_ROOT that received the uploaded test files.
        shutil.rmtree(MEDIA_ROOT, ignore_errors=True)
        super().tearDownClass()

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
        test_user2 = User.objects.create_user(username='testuser2', password='<PASSWORD>')
        test_user2.save()
        test_user1.save()
        test_user1_userprofile = UserProfile.objects.create(
            user=test_user1,
            name='<NAME>',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='HR',
            location='WAW'
        )
        test_user2_userprofile = UserProfile.objects.create(
            user=test_user2,
            name='<NAME>',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user1_userprofile.save()
        test_user2_userprofile.save()
        cls.test_category = KnowledgeCategory.objects.create(title='Test Category')
        cls.test_category_2 = KnowledgeCategory.objects.create(title='Test Category 2')
        cls.test_category.save()
        cls.test_category_2.save()
        # Create 11 DocFile/DocumentF pairs; besides cycling the target
        # location/departament, the category alternates: WAW and 'non'
        # rows go to test_category_2, the rest stay in test_category.
        i = 0
        file_test_category = cls.test_category
        while i < 11:
            if i%2 == 0:
                test_location = 'WAW'
                file_test_category = cls.test_category_2
            elif i%3 == 0:
                test_location = 'non'
                file_test_category = cls.test_category_2
            else:
                test_location = 'PZN'
            if i%4 == 0:
                test_departament = 'HR'
            elif i%3 == 0:
                test_departament = 'non'
            else:
                test_departament = 'sal'
            instance = DocFile.objects.create(
                file = SimpleUploadedFile(
                    'best_file_eva.txt',
                    b'these are the file contents!'
                ),
                title = 'test title',
                date_created = timezone.now(),
                target_departament = test_departament,
                target_location = test_location,
                category = file_test_category,
                author = test_user1)
            instance.save()
            instance = DocumentF.objects.create(
                title = 'test title',
                body = 'test body',
                author = test_user1,
                date_created = timezone.now(),
                target_departament = test_departament,
                target_location = test_location,
                category = file_test_category)
            instance.save()
            # Reset to the default category before the next iteration.
            file_test_category = cls.test_category
            i +=1

    def test_view_redirect_if_not_logged_in(self):
        response = self.client.get(reverse('news:knowledgedetail',kwargs={'pk':self.test_category.pk}))
        # NOTE: assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is used throughout instead.
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/accounts/login/?next=/knowledge/1')

    def test_view_if_logged_in(self):
        login = self.client.login(username='testuser1', password='<PASSWORD>')
        response = self.client.get(reverse('news:knowledgedetail',kwargs={'pk':self.test_category.pk}))
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'news/knowledgecategory_detail.html')

    def test_view_if_files_filtered_by_category(self):
        login = self.client.login(username='testuser1', password='<PASSWORD>')
        response = self.client.get(reverse('news:knowledgedetail',kwargs={'pk':self.test_category_2.pk}))
        self.assertEqual(response.context['files'].count(),8)

    def test_view_if_files_filtered_by_category_2(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:knowledgedetail',kwargs={'pk':self.test_category_2.pk}))
        self.assertEqual(response.context['files'].count(),3)

    def test_view_if_documents_filtered_by_category(self):
        login = self.client.login(username='testuser1', password='<PASSWORD>')
        response = self.client.get(reverse('news:knowledgedetail',kwargs={'pk':self.test_category_2.pk}))
        self.assertEqual(response.context['docs'].count(),8)

    def test_view_if_documents_filtered_by_category_2(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:knowledgedetail',kwargs={'pk':self.test_category_2.pk}))
        self.assertEqual(response.context['docs'].count(),3)

    def test_view_if_categories_queryset(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:knowledgedetail',kwargs={'pk':self.test_category_2.pk}))
        self.assertEqual(response.context['categories'].count(),2)
class TestQuestionsListView(TestCase):
    """Tests for the FAQ list view (``news:faq``).

    Checks login redirection, template usage, the ``?category=`` filter,
    and that ``object_list`` is filtered by the requesting user's
    ``departament``/``location``.
    """

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
        test_user2 = User.objects.create_user(username='testuser2', password='<PASSWORD>')
        test_user2.save()
        test_user1.save()
        test_user1_userprofile = UserProfile.objects.create(
            user=test_user1,
            name='<NAME>',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='HR',
            location='WAW'
        )
        test_user2_userprofile = UserProfile.objects.create(
            user=test_user2,
            name='<NAME>',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user1_userprofile.save()
        test_user2_userprofile.save()
        cls.test_category = KnowledgeCategory.objects.create(title='Test Category')
        cls.test_category_2 = KnowledgeCategory.objects.create(title='Test Category 2')
        cls.test_category.save()
        cls.test_category_2.save()
        # Create 11 DocQuestion rows; location/departament cycle through
        # WAW/non/PZN and HR/non/sal, and WAW/'non' rows are assigned to
        # test_category_2 while the rest stay in test_category.
        i = 0
        file_test_category = cls.test_category
        while i < 11:
            if i%2 == 0:
                test_location = 'WAW'
                file_test_category = cls.test_category_2
            elif i%3 == 0:
                test_location = 'non'
                file_test_category = cls.test_category_2
            else:
                test_location = 'PZN'
            if i%4 == 0:
                test_departament = 'HR'
            elif i%3 == 0:
                test_departament = 'non'
            else:
                test_departament = 'sal'
            instance = DocQuestion.objects.create(
                title = 'test title',
                body = 'test body',
                answer = 'test answer',
                date_created = timezone.now(),
                target_departament = test_departament,
                target_location = test_location,
                category = file_test_category)
            instance.save()
            # Reset to the default category before the next iteration.
            file_test_category = cls.test_category
            i +=1

    def test_view_redirect_if_not_logged_in(self):
        response = self.client.get(reverse('news:faq'))
        # NOTE: assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is used throughout instead.
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/accounts/login/?next=/faq/')

    def test_view_if_logged_in(self):
        login = self.client.login(username='testuser1', password='<PASSWORD>')
        response = self.client.get(reverse('news:faq'))
        self.assertEqual(str(response.context['user']), 'testuser1')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'news/docquestion_list.html')

    def test_view_if_files_filtered_by_category_2(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get('%s?category=%s' % (reverse('news:faq'),2))
        self.assertEqual(response.context['object_list'].count(),3)

    def test_view_if_files_filtered_by_category_1(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get('%s?category=%s' % (reverse('news:faq'),1))
        self.assertEqual(response.context['object_list'].count(),3)

    def test_view_if_filtered_by_location_depratament_1(self):
        login = self.client.login(username='testuser1', password='<PASSWORD>')
        response = self.client.get(reverse('news:faq'))
        self.assertEqual(response.context['object_list'].count(),8)

    def test_view_if_filtered_bylocation_depratament_2(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:faq'))
        self.assertEqual(response.context['object_list'].count(),6)

    def test_view_categories_queryset(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:faq'))
        self.assertEqual(response.context['categories'].count(),2)
#
#
# class TestUnansweredQuestionsListView(TestCase):
#
#
# @classmethod
# def setUpTestData(cls):
# test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
# test_user2 = User.objects.create_user(username='testuser2', password='<PASSWORD>')
# test_user2.save()
# test_user1.save()
#
# test_user1_userprofile = UserProfile.objects.create(
# user=test_user1,
# name='Test User1',
# telephone='11',
# email='<EMAIL>',
# employee_id='2',
# departament='HR',
# location='WAW'
# )
#
# test_user2_userprofile = UserProfile.objects.create(
# user=test_user2,
# name='Test User2',
# telephone='222222222',
# email='<EMAIL>',
# employee_id='3',
# departament='sal',
# location='PZN'
# )
#
# test_user1_userprofile.save()
# test_user2_userprofile.save()
# UserQuestion.objects.create(
# title='test title',
# body='test body',
# author= test_user1
# )
# permission = Permission.objects.get(name='Can view user question')
# test_user2.user_permissions.add(permission)
# test_user2.save()
#
# def test_view_redirect_if_not_logged_in(self):
# response = self.client.get(reverse('news:pending_faq'))
# self.assertEquals(response.status_code, 302)
# self.assertRedirects(response, '/accounts/login/?next=/faq/pending/')
#
# def test_view_if_logged_in_no_permission(self):
# login = self.client.login(username='testuser1', password='<PASSWORD>')
# response = self.client.get(reverse('news:pending_faq'))
# self.assertEquals(response.status_code, 403)
#
# def test_view_if_logged_in_with_permission(self):
# login = self.client.login(username='testuser2', password='<PASSWORD>')
# response = self.client.get(reverse('news:pending_faq'))
# self.assertEquals(response.status_code, 200)
# self.assertEqual(str(response.context['user']), 'testuser2')
# self.assertTemplateUsed(response, 'news/pending_questions.html')
#
# def test_view_queryset_length(self):
# login = self.client.login(username='testuser2', password='<PASSWORD>')
# response = self.client.get(reverse('news:pending_faq'))
# self.assertEqual(response.context['object_list'].count(), 1)
#
class TestUnpublishedNewsListView(TestCase):
    """Tests for the unpublished news list view (``news:unpublished``).

    The view requires the ``Can add news`` permission (granted to user2);
    without it the view returns 403, and anonymous users are redirected
    to the login page.
    """

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
        test_user2 = User.objects.create_user(username='testuser2', password='<PASSWORD>')
        test_user2.save()
        test_user1.save()
        test_user1_userprofile = UserProfile.objects.create(
            user=test_user1,
            name='<NAME>',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='HR',
            location='WAW'
        )
        test_user2_userprofile = UserProfile.objects.create(
            user=test_user2,
            name='<NAME>',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user1_userprofile.save()
        test_user2_userprofile.save()
        # Create 4 news items; publish() is deliberately called only on the
        # last instance (outside the loop), leaving 3 unpublished.
        i = 0
        while i < 4:
            instance = News.objects.create(
                title='test title',
                body='test body',
                author= test_user1
            )
            instance.save()
            i +=1
        instance.publish()
        permission = Permission.objects.get(name='Can add news')
        test_user2.user_permissions.add(permission)
        test_user2.save()

    def test_view_redirect_if_not_logged_in(self):
        response = self.client.get(reverse('news:unpublished'))
        # NOTE: assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is used throughout instead.
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/accounts/login/?next=/news/unpublished/')

    def test_view_if_logged_in_no_permission(self):
        login = self.client.login(username='testuser1', password='<PASSWORD>')
        response = self.client.get(reverse('news:unpublished'))
        self.assertEqual(response.status_code, 403)

    def test_view_if_logged_in_with_permission(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:unpublished'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser2')
        self.assertTemplateUsed(response, 'news/news_list.html')

    def test_view_queryset_length(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:unpublished'))
        # Compare the count as an int directly rather than via str().
        self.assertEqual(response.context['object_list'].count(), 3)
class TestNewsDetailView(TestCase):
    """Tests for the news detail view (``news:newsdetail``).

    The single news item targets departament 'sal' in location 'PZN';
    user2 (sal/PZN) may view it, while user1 (sal/WAW — wrong location is
    actually covered by user3; user1 is the wrong-departament case per the
    test names) and user3 (mar/PZN) get 403.
    """

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
        test_user2 = User.objects.create_user(username='testuser2', password='<PASSWORD>')
        test_user3 = User.objects.create_user(username='testuser3', password='<PASSWORD>')
        test_user2.save()
        test_user1.save()
        test_user3.save()
        test_user1_userprofile = UserProfile.objects.create(
            user=test_user1,
            name='Test User1',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='sal',
            location='WAW'
        )
        test_user2_userprofile = UserProfile.objects.create(
            user=test_user2,
            name='Test User2',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user3_userprofile = UserProfile.objects.create(
            user=test_user3,
            name='Test User3',
            telephone='333333333',
            email='<EMAIL>',
            employee_id='4',
            departament='mar',
            location='PZN'
        )
        test_user1_userprofile.save()
        test_user2_userprofile.save()
        test_user3_userprofile.save()
        cls.test_news = News.objects.create(
            title='test title',
            body='test body',
            author= test_user1,
            target_location = 'PZN',
            target_departament = 'sal'
        )
        cls.test_news.publish()

    def test_view_redirect_if_not_logged_in(self):
        response = self.client.get(reverse('news:newsdetail',kwargs={'pk':self.test_news.pk}))
        # NOTE: assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is used throughout instead.
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/accounts/login/?next=/news/1/')

    def test_view_if_logged_in(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:newsdetail',kwargs={'pk':self.test_news.pk}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser2')
        self.assertTemplateUsed(response, 'news/news_detail.html')

    def test_view_wrong_departament(self):
        login = self.client.login(username='testuser1', password='<PASSWORD>')
        response = self.client.get(reverse('news:newsdetail',kwargs={'pk':self.test_news.pk}))
        self.assertEqual(response.status_code, 403)

    def test_view_wrong_location(self):
        # Log in with the same password the user was created with; the
        # original used a different literal here, so login silently failed
        # and the 403 assertion exercised the anonymous path instead.
        login = self.client.login(username='testuser3', password='<PASSWORD>')
        response = self.client.get(reverse('news:newsdetail',kwargs={'pk':self.test_news.pk}))
        self.assertEqual(response.status_code, 403)
class TestDocDetailView(TestCase):
    """Tests for the document detail view (``news:docdetail``).

    The single DocumentF targets departament 'sal' in location 'PZN';
    user2 (sal/PZN) may view it, user1 (sal/WAW) and user3 (mar/PZN)
    get 403, and anonymous users are redirected to login.
    """

    @classmethod
    def setUpTestData(cls):
        test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
        test_user2 = User.objects.create_user(username='testuser2', password='<PASSWORD>')
        test_user3 = User.objects.create_user(username='testuser3', password='<PASSWORD>')
        test_user2.save()
        test_user1.save()
        test_user3.save()
        test_user1_userprofile = UserProfile.objects.create(
            user=test_user1,
            name='<NAME>',
            telephone='11',
            email='<EMAIL>',
            employee_id='2',
            departament='sal',
            location='WAW'
        )
        test_user2_userprofile = UserProfile.objects.create(
            user=test_user2,
            name='<NAME>',
            telephone='222222222',
            email='<EMAIL>',
            employee_id='3',
            departament='sal',
            location='PZN'
        )
        test_user3_userprofile = UserProfile.objects.create(
            user=test_user3,
            name='<NAME>',
            telephone='333333333',
            email='<EMAIL>',
            employee_id='4',
            departament='mar',
            location='PZN'
        )
        test_user1_userprofile.save()
        test_user2_userprofile.save()
        test_user3_userprofile.save()
        cls.test_category = KnowledgeCategory.objects.create(title='Test Category')
        cls.test_category_2 = KnowledgeCategory.objects.create(title='Test Category 2')
        cls.test_category.save()
        cls.test_category_2.save()
        cls.test_document = DocumentF.objects.create(
            title='test title',
            body='test body',
            author= test_user1,
            target_location = 'PZN',
            target_departament = 'sal'
        )

    def test_view_redirect_if_not_logged_in(self):
        response = self.client.get(reverse('news:docdetail',kwargs={'pk':self.test_document.pk}))
        # NOTE: assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is used throughout instead.
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/accounts/login/?next=/docs/1/')

    def test_view_if_logged_in(self):
        login = self.client.login(username='testuser2', password='<PASSWORD>')
        response = self.client.get(reverse('news:docdetail',kwargs={'pk':self.test_document.pk}))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(str(response.context['user']), 'testuser2')
        self.assertTemplateUsed(response, 'news/documentf_detail.html')

    def test_view_wrong_departament(self):
        login = self.client.login(username='testuser1', password='<PASSWORD>')
        response = self.client.get(reverse('news:docdetail',kwargs={'pk':self.test_document.pk}))
        self.assertEqual(response.status_code, 403)

    def test_view_wrong_location(self):
        login = self.client.login(username='testuser3', password='<PASSWORD>')
        response = self.client.get(reverse('news:docdetail',kwargs={'pk':self.test_document.pk}))
        self.assertEqual(response.status_code, 403)
class TestNewsListView(TestCase):
@classmethod
def setUpTestData(cls):
test_user1 = User.objects.create_user(username='testuser1', password='<PASSWORD>')
test_user2 = User.objects.create_user(username='testuser2', password='<PASSWORD>')
test_user2.save()
test_user1.save()
test_user1_userprofile = UserProfile.objects.create(
user=test_user1,
name='Test User1',
telephone='11',
email='<EMAIL>',
employee_id='2',
departament='HR',
location='WAW'
)
test_user2_userprofile = UserProfile.objects.create(
user=test_user2,
name='Test User2',
telephone='222222222',
email='<EMAIL>',
employee_id='3',
departament='sal',
location='PZN'
)
test_user1_userprofile.save()
test_user2_userprofile.save()
i = 0
while i < 4:
instance = News.objects.create(
title='test title',
body='test body',
author= test_user1
)
instance.save()
i +=1
instance.publish()
instance.published_date = None
instance.save()
i = | |
0.7596159994793779,
0.7597883706231141,
0.7597948488066834,
0.8688173538580035,
0.9187403328725916,
0.9608491069399443,
0.9684692578819227,
0.9813240682601707,
0.9921667766998682,
0.9927483395545365,
0.9942199403686246,
0.99670245673367,
0.9987963966947381,
0.9988238582873724,
0.9989172273011664,
0.9991565382206393,
0.9995706614552863,
1.0],
'JPN': [0.004090584579096454,
0.006686084695597842,
0.007762230881900124,
0.008063984045382363,
0.008127811942646838,
0.01960080703543676,
0.029393167239552397,
0.03357212476509902,
0.03476105747293133,
0.07253452077348113,
0.10477469483477583,
0.11853341236660793,
0.21180686787091654,
0.2914170539115903,
0.44496289873730765,
0.5713457283203401,
0.7023992333744452,
0.7363734315381336,
0.7402878501919297,
0.740541542824921,
0.7405523751745511,
0.848421870531537,
0.9043498613099947,
0.9503839154095908,
0.9600497316262563,
0.9759615031897049,
0.989058404093471,
0.989893656726669,
0.9919561337279852,
0.9953513630768761,
0.998145959652793,
0.9981908002219105,
0.9983397270669316,
0.9987128509144281,
0.9993450638352656,
1.0],
'KOR': [0.0034198360299983097,
0.0052393885999864566,
0.005854368946682373,
0.005994439836376831,
0.0060184715199030035,
0.016958245652865172,
0.024515193716159935,
0.027125277952692133,
0.027726273633729332,
0.0665239040647838,
0.09332442796801321,
0.10258102525897371,
0.20577713164813133,
0.2770626634003252,
0.460053845345606,
0.6222972372787013,
0.7487033989776701,
0.7733246139391629,
0.775456029820345,
0.7755598183696484,
0.7755631240243075,
0.8876371825514368,
0.9312964385483837,
0.9700055485006772,
0.9756748076175421,
0.9857277491755985,
0.9946408744964239,
0.9950089579440256,
0.9959880068755391,
0.997724092929836,
0.9992633391773469,
0.9992780575275833,
0.9993305984582445,
0.9994718802024913,
0.9997278945800702,
1.0],
'MEX': [0.0009692275603902136,
0.0016622392909336182,
0.001962569753903412,
0.0020502740262215583,
0.0020695661749829154,
0.006262494013691925,
0.009975812056094852,
0.01162009636224697,
0.012105497082678048,
0.03128275034326965,
0.04826640550634315,
0.055786891854900045,
0.1215703473723644,
0.17982913494544658,
0.33026649463104774,
0.5022808636783435,
0.6355103833506878,
0.6613077993026701,
0.6635278834316273,
0.6636353529208666,
0.6636387552146538,
0.8159771890505538,
0.8749721918517843,
0.9424287611840657,
0.9500442837804324,
0.9674598953960196,
0.9873734025852499,
0.9878649373056665,
0.9895510417741447,
0.9934069216818916,
0.9978158447767889,
0.9978353807200797,
0.9979253156327387,
0.9982371763957074,
0.9989658880600532,
1.0],
'NGA': [0.0019309980405509974,
0.0029722048295434734,
0.003317486390496028,
0.003394525686596309,
0.003407465306800013,
0.010820877439697148,
0.015829375948067,
0.01752124582246853,
0.01790225471061122,
0.048060999648449496,
0.06843623662808306,
0.07531898793010033,
0.16733632218055774,
0.22950319933183239,
0.4166725664040263,
0.6070300259825273,
0.7334815814860609,
0.7544815411470444,
0.7560315358958261,
0.7560958882619744,
0.756097630178258,
0.8847030598056278,
0.9274183719835433,
0.9708612624441083,
0.9755904507833136,
0.9852099338451138,
0.9949932674469013,
0.995255061417346,
0.9960538208926758,
0.9976785506966658,
0.9993309548417827,
0.9993398457486738,
0.9993762172620296,
0.9994882247313162,
0.9997203033454837,
1.0],
'PAN': [0.0071127241732578915,
0.009906992014960862,
0.010624611286568596,
0.010748642220405056,
0.010764781338714557,
0.03010834949739542,
0.04023421799427311,
0.04288453597012306,
0.04334699410397955,
0.10271861701599463,
0.13379815937971726,
0.1419328364035363,
0.27860576994493397,
0.35015059253246866,
0.5598971554709424,
0.7208420460376521,
0.8306390646933821,
0.8493650168861452,
0.8507844494184829,
0.8508449707014532,
0.8508466539491121,
0.9350972319062875,
0.9638352124881744,
0.985886735407369,
0.989154256129283,
0.9941687913630944,
0.9980165961287221,
0.9982023553827121,
0.9986299714883539,
0.9992862170674269,
0.9997897741766788,
0.9997962569052931,
0.9998162687021376,
0.9998627767596534,
0.9999355179119218,
1.0],
'PER': [0.0007740219304772036,
0.0013105651448093382,
0.0015331314654942795,
0.0015952779635993713,
0.0016083434431571968,
0.005235571597954025,
0.008303789564264167,
0.009601469374719269,
0.009967365346246992,
0.02755869093461127,
0.04243892976106956,
0.04873241533213127,
0.11271820339455332,
0.16684282311609183,
0.32200191192962524,
0.510124490164967,
0.6413712393677775,
0.6642628419307035,
0.6660373636569156,
0.6661147401373665,
0.6661169419044396,
0.8252470130710247,
0.880756842717572,
0.9480597145658518,
0.9545142691424934,
0.9701659136269203,
0.9891427440307898,
0.989518004367996,
0.9908829571274738,
0.9941928300359308,
0.9982058841852661,
0.998219284049288,
0.9982846522806476,
0.9985247480767281,
0.9991183630578332,
1.0],
'POL': [0.002039994894019024,
0.0036576069389687017,
0.004476449900989275,
0.004757081574632249,
0.004829666640360622,
0.011420509762300485,
0.018303874200770818,
0.021898308563430374,
0.023149630597682463,
0.04779058503365881,
0.07352517867463837,
0.08696356458592877,
0.15605676984232456,
0.22821653805447192,
0.357374711831282,
0.4780944974560521,
0.6129850970143369,
0.6506663142129617,
0.655344588093901,
0.655671302955425,
0.6556863693834968,
0.7817640599028143,
0.8522027836673511,
0.9180394812907737,
0.9311573553707416,
0.9556790236551348,
0.978598599334248,
0.9798200767488099,
0.983245097883428,
0.9896476011867943,
0.9956318049242975,
0.9957026588020322,
0.9959700637164899,
0.9967317035825958,
0.9982004261520945,
1.0],
'SEN': [0.002108944325457404,
0.0032474040038073023,
0.003627198664411245,
0.003712468425849852,
0.0037268815190782774,
0.011597518405419558,
0.01694957960424137,
0.018769289971368324,
0.019181759821421385,
0.05052346550939278,
0.07183593733852585,
0.07908221582747735,
0.17268669755018037,
0.2363380795029294,
0.42270953581539433,
0.6082472317076009,
0.7349804877708898,
0.7566220713250674,
0.7582645697444722,
0.7583346899873455,
0.7583366427021235,
0.8845029387012118,
0.9275924721259955,
0.9704892381162815,
0.9753946894918188,
0.9851617017107129,
0.9948850197482477,
0.9951642459464121,
0.9959981770611634,
0.9976585778609982,
0.99931155061098,
0.9993213077198341,
0.9993603848765594,
0.9994782107635988,
0.9997173074024657,
1.0],
'SRB': [0.00181808341224426,
0.0030372601273555904,
0.003546760918528122,
0.0036904075499015337,
0.003720928082861222,
0.01031845796413991,
0.01596622149409455,
0.018383583254046407,
0.019073371090415504,
0.045348983863306463,
0.06784201537909282,
0.0774695066795458,
0.15595431527624798,
0.22314061797200996,
0.3794291438560118,
0.5350395336864122,
0.6688290962229544,
0.6975862490442216,
0.7003334284295422,
0.700481050291654,
0.7004862505460339,
0.8336953000510627,
0.8909600510699712,
0.9479763302829888,
0.9561821083975199,
0.9725224548694251,
0.9887919005633733,
0.9893798258862537,
0.9911359488293894,
0.9946329550776188,
0.9981147878126093,
0.9981408013101642,
0.9982451552174852,
0.9985606300847948,
0.9992040480719506,
1.0],
'SWE': [0.0008590248642047124,
0.0015269523865696789,
0.001842054456699105,
0.001942307076116965,
0.001966341030702364,
0.005697186180811465,
0.009300009504884276,
0.0110396067519187,
0.011599574738075507,
0.02886457801137798,
0.04553714253242987,
0.0535873700159798,
0.11350951755239563,
0.17137547073097173,
0.31002449417399486,
0.47042888964414664,
0.6043202508296505,
0.632260408809054,
0.6348517383134852,
0.63498692656441,
0.6349915467543253,
0.7898917567688744,
0.8545402344679398,
0.9293326832416275,
0.9383264856998123,
0.9591365197045009,
0.9832118492044385,
0.9838374515816003,
0.9860087478941465,
0.9910327352797291,
0.996845034850416,
0.996871885848139,
0.9969970171929007,
0.9974364139334856,
0.9984769810691595,
1.0],
'SWI': [0.0007496614254825036,
0.0013032134722607616,
0.0015485969303060002,
0.0016218705235208802,
0.0016383494276239845,
0.005104499996800162,
0.00824228223823255,
0.009662543302798554,
0.01009111383155973,
0.026827485173891698,
0.041978324047134516,
0.048836080542485456,
0.10944498661526854,
0.1643120633196203,
0.31063766840256773,
0.48727163270130297,
0.6197349737861206,
0.6445695756717423,
0.6466389362989212,
0.646735928694917,
0.6467388997480401,
0.8066393153096472,
0.8665964797492073,
0.9389725456952,
0.9464665048885026,
0.9645588643201365,
0.9863986863261043,
0.9868670158820116,
0.9885630192440606,
0.9926576108520418,
0.9976003134466567,
0.9976183211194258,
0.9977058207091436,
0.9980260293695097,
0.9988153900978624,
1.0],
'TUN': [0.0032360322395607196,
0.004913648253032073,
0.0054631249371801935,
0.005584343494049476,
0.005604482885247497,
0.016240652031650222,
0.023353195566131676,
0.02573132037937816,
0.026261414627655726,
0.06464526149479367,
0.0903130351175667,
0.09889522080925124,
0.20278504806097813,
0.27225751591540054,
0.45971651385886453,
0.6288421965150733,
0.7541984470081466,
0.7774270147970277,
0.7793400208964012,
0.7794286411121565,
0.7794313234540835,
0.8925278494808453,
0.9344415204971104,
0.9722560735514879,
0.9774338212088121,
0.9867765587329999,
0.995205585234745,
0.9955253981997383,
0.9963910048649851,
0.9979529071452459,
0.9993620568381887,
0.9993742072028325,
0.9994183291892788,
0.999538993320438,
0.9997612549315237,
1.0]},
'KOR': {'BEL': [0.0003704943922598731,
0.0009849065506249996,
0.0016464436760176718,
0.0021352167479735534,
0.0024092855708786106,
0.0036308460033339057,
0.006420224247195616,
0.009604934057945718,
0.012028981209195815,
0.01732758225504895,
0.029426699161200515,
0.0432405934931101,
0.06047787396069557,
0.09983842775907249,
0.13722236304651245,
0.17776120037075258,
0.263125733299446,
0.30806475944769135,
0.3185792280155663,
0.319963028645921,
0.32008670667128786,
0.4126553163440825,
0.5101183499453398,
0.6158064759788704,
0.6500118902200348,
0.7241960397949293,
0.8046407321081043,
0.810643060992081,
0.8301696918020703,
0.8725187331114053,
0.9184416912825172,
0.9191203295718228,
0.9221171776029151,
0.9321608004470251,
0.9552588176214285,
1.0],
'COL': [0.00018069508380263356,
0.00044056769475679766,
0.0006664360549049836,
0.0007996748527761543,
0.0008590521337179357,
0.001757305208385859,
0.0033761462103444224,
0.0048348922272122416,
0.0057112146245954015,
0.010702723457336112,
0.01969847241490721,
0.027804588423041017,
0.04860760134610239,
0.08609900686851106,
0.1438992113328642,
0.22419680208274767,
0.32836492969319375,
0.36214862951697036,
0.36701827161475403,
0.3674131009417627,
0.36743434729105906,
0.5121474976606871,
0.6060139373249116,
0.7364157071310191,
0.7567108184661732,
0.8130998419762395,
0.891436982381847,
0.8936310122480042,
0.9027750272520184,
0.9281812479187965,
0.9634762495283554,
0.9636249111717716,
0.9644601063001647,
0.9680049024800399,
0.9782160606498,
1.0],
'ENG': [0.00010842420162959841,
0.0002765152985071346,
0.00043208844519704927,
0.0005297684906655907,
0.0005760928512451711,
0.0011741897860366552,
0.0023209345030221163,
0.0034202742332888653,
0.004122870504072922,
0.007752426477800412,
0.014711455852070836,
0.02138280768994092,
0.037902299514891186,
0.06957549306528216,
0.11969982638684816,
0.19574480457245952,
0.2918493172113073,
0.32221318070998217,
0.3264768917297218,
0.32681366764129544,
0.32683130550757516,
0.4726340540503795,
0.5647657268079763,
0.7045411727996134,
0.7239469458044594,
0.7828289891968857,
0.8721605255701537,
0.8742042528500568,
0.8835060342076576,
0.9117300010338354,
0.954549342447094,
0.9546840917524595,
0.9555105864209715,
0.959339591487385,
0.9713736776815195,
1.0],
'JPN': [0.003449038692848102,
0.007010113221784472,
0.009568200770309922,
0.010827598359148222,
0.011297905006981855,
0.018763148882480252,
0.03010680137230036,
0.0387253042608755,
0.043090658489127014,
0.06563204984978169,
0.09988434146901938,
0.12590801193368115,
0.17695600845076806,
0.25452490538820677,
0.3315949172230289,
0.3897733681856583,
0.5068834648175892,
0.5658175482548988,
0.5789987865008277,
0.5806571067843344,
0.5807984138050606,
0.669202241077488,
0.7581783188451536,
0.825344396208001,
0.8551950737151384,
0.9002623089783768,
0.9342825702234767,
0.9392898877519132,
0.950629628216991,
0.9677498678243345,
0.9806735588521626,
0.9812130281823251,
0.9828700725651381,
0.9867308283319971,
0.9928956941698154,
1.0],
'PAN': [0.0073275572231369145,
0.012109345166688629,
0.01425691213237776,
0.014911765585059533,
0.015062642203094894,
0.031216772433381442,
0.046271663444791916,
0.0532868892766511,
0.05546617632406024,
0.10093907443608681,
0.14331768072997628,
0.16306511652115915,
0.25906770687557346,
0.34853761419927815,
0.48365835259385354,
0.5787475261093462,
0.704673710986683,
0.7463645876961583,
0.7524991487310755,
0.7530068970362088,
0.7530348101040791,
0.8416534558146905,
0.9003321021356383,
0.9416263109148497,
0.9545776242133429,
0.9728062054381974,
0.9856342934402759,
0.9870635741639507,
0.9900810781611311,
0.9943281236491994,
0.9973169175985325,
0.9974158697040238,
0.9976975460118713,
0.9983033833165083,
0.9991880969265228,
1.0],
'POL': [0.0015156559976325613,
0.0034658768110723266,
0.00517265564210646,
0.006198546465263892,
0.006666691460055094,
0.010410599926725014,
0.01737178776578636,
0.02384338510344742,
0.02785434540400718,
0.04069149183767658,
0.06456007421298054,
0.08674994435080306,
0.11976203519647903,
0.18114263981935583,
0.23773871422131324,
0.28625299422686845,
0.3914841739880401,
0.44854779109551907,
0.4623006072170409,
0.4641650390211155,
0.46433699590058725,
0.5545413956614275,
0.6523715129526113,
0.7362317056142538,
0.7715985386977168,
0.8322315935960087,
0.8842063822012447,
0.8905991624461594,
0.9070388784914676,
0.9352232003145484,
0.9593828632441798,
0.9601290057761912,
0.9627350631289947,
0.9696454360375893,
0.9822303263762586,
1.0],
'SEN': [0.001966139501124663,
0.003737665383162308,
0.004771138710906759,
0.0051804954371334555,
0.005303010382713505,
0.011279340790197356,
0.018514434079694556,
0.022893925451371103,
0.024661232280934895,
0.046487228375708914,
0.07291031865266971,
0.08890454170560043,
0.1486871431995564,
0.22106143802702577,
0.3302266562944564,
0.4298964982013913,
0.5620546093299411,
0.605863664313129,
0.6123179982154219,
0.612852884546633,
0.6128823278192831,
0.7335451016584088,
0.8135420393930954,
0.8865807074194109,
0.9042594823039208,
0.9365415722555168,
0.9660157111155586,
0.9679691551489648,
0.9733197455954568,
0.983090119573076,
0.9920106498040082,
0.9921460683320437,
0.9926461949455406,
0.994041812182794,
0.9966860686258122,
1.0],
'TUN': [0.003045652613047465,
0.005666835490555041,
0.007165869219007599,
0.00774883963930655,
0.007920252138125142,
0.015991827590852634,
0.025601229423033497,
0.03132133971086571,
0.03359131532269406,
0.06030589192039139,
0.09211022836094376,
0.11104213642852356,
0.17735526888085187,
0.25630262435279105,
0.3660412538623715,
0.45684202258867374,
0.5874884612729641,
0.6344828099451018,
0.6419957716602279,
0.6426713865984472,
0.6427118710275495,
0.7508123377173722,
0.8285811781756803,
0.8929292616912579,
0.9115785568991547,
0.9424404393310462,
0.9679764120480358,
0.970212501783344,
0.9757631087431998,
0.984948559319808,
0.9925488544682056,
0.9927177081453616,
0.9932834034745557,
0.9947163022611646,
0.9971844404361292,
1.0]},
'KSA': {'ARG': [2.014601307518869e-05,
6.629931295371197e-05,
0.00012913508621436058,
0.00018741202295390023,
0.00022830638858484676,
0.0003573197469905418,
0.0007242843683753974,
0.001246180065891732,
0.0017410054581564675,
0.0026772583235778794,
0.0053403288964673995,
0.009127737799615574,
0.014223543994660985,
0.028718016614582332,
0.04720820712935329,
0.08075414001872988,
0.13334749814409472,
0.15396148231934803,
0.15755244140386623,
0.15790431080515924,
0.15792736976831123,
0.25334516769154114,
0.3281432428301721,
0.46384607438567205,
0.4833908279718798,
0.5543091878036579,
0.6829732238848975,
0.6855267481035193,
0.6994249854419917,
0.7498548970932076,
0.8413476499946687,
0.8415586835323088,
0.8431106156559491,
0.8517457873030466,
0.8844764480156689,
1.0],
'AUS': [0.0009041124040426584,
0.002081035137303587,
0.0030824095550721436,
0.003664925542499288,
0.003921699779227717,
0.006670084019706898,
0.011592088609608036,
0.015999428898166825,
0.018630423115307344,
0.029361613625270348,
0.0485798030498803,
0.06578846244406814,
0.0972137762880241,
0.15349249332508136,
0.21484327258076427,
0.27472999533506126,
0.38460139273393273,
0.43499538413628625,
0.44526821999340915,
0.44644616351219013,
0.4465371834823537,
0.5537866429754685,
0.6521694517420131,
0.7482044828185797,
0.7782875468157955,
0.837017888469415,
0.8943467069546288,
0.8989460406736328,
0.9124147704905714,
0.9387094027082975,
0.9643765477495451,
0.9648254238426442,
0.9666055792050677,
0.9719551825113079,
0.9829464573309026,
1.0],
'BEL': [4.0685396604624475e-05,
0.00013795082972073978,
0.0002828628959491153,
0.0004310928134695121,
0.0005461891740029669,
0.0007354652302937985,
0.0013341621321172853,
0.002281027750720109,
0.003279367641864156,
0.0043961588352244615,
0.007928668126421881,
0.013515487742562154,
0.018457565179647136,
0.034089793091286374,
0.048669736975367923,
0.07017635705256671,
0.11629400871552951,
0.1410170682580333,
0.1469076030687164,
0.14769706240592428,
0.14776899342894448,
0.2157963343469592,
0.2887334393116933,
0.39632168645988375,
0.4223887609079256,
0.49929093160697424,
0.6127279778319649,
0.6173860484436811,
0.6379991637756925,
0.6988112551770967,
0.788514107003421,
0.7890511254870778,
0.792278067436888,
0.8069973783098955,
0.8530939998563196,
1.0],
'BRA': [3.3235679823081145e-06,
1.2873113958311802e-05,
2.8779019293266393e-05,
4.680639279079831e-05,
6.225829547583632e-05,
9.06265507499854e-05,
0.00018911796268589144,
0.00036009355021115075,
0.0005579629275509121,
0.0008231542791346328,
0.0017438690146112693,
0.0033421783468373474,
0.005201469276306828,
0.011656719593020757,
0.020347206015403567,
0.040657248155815014,
0.07082964578402896,
0.08203559962131389,
0.08388531674725845,
0.08405706183842405,
0.08406770177407395,
0.15458189827842253,
0.20695949455947385,
0.329368199432628,
0.34233680588911475,
0.402953192782091,
0.5446163071763567,
0.5462218099615624,
0.5574781980298167,
0.610091528263266,
0.7330511535653149,
0.7331765295741298,
0.7343633838781667,
0.7428601922150656,
0.7842518316961203,
1.0],
'COL': [2.2773146754207897e-05,
6.959024538651749e-05,
0.0001259190376736044,
0.00017193178194764038,
0.00020033047250876535,
0.00035892220728983,
0.0007548399028245616,
0.001249035964977252,
0.001660282629964641,
0.0028590743081858246,
0.005851808067752541,
0.009587425989683854,
0.01638365731483586,
0.03335016725490119,
0.05903645401853863,
0.10757698303658689,
0.17170173377686218,
0.19287982835414585,
0.195988433480155,
0.1962450985455238,
0.1962591726996025,
0.31743859239251915,
0.39748098418196837,
0.5487406846237594,
0.5663640965298715,
0.6329716015302949,
0.7588427933862477,
0.7607829193636765,
0.7717819487881283,
0.8133526679707687,
0.891910721898698,
0.892044697543909,
0.893068775975383,
0.8989832082739979,
0.9221739512479834,
1.0],
'CRC': [0.0003377108522814102,
0.00073030191053047,
0.0010063141226121077,
0.001137695353555343,
0.0011848925147551444,
0.0027825545452022336,
0.005100062374762094,
0.006780906770421995,
0.00759362977294662,
0.015880697567131424,
0.027901603197430555,
0.036620137651436,
0.06885888307286135,
0.11562318209574818,
0.1992343163079238,
0.3076569871169507,
0.4289401211480407,
0.462857378698153,
0.4670729702885285,
0.46736769656444294,
0.467381303251038,
0.6246551094210421,
0.7126194621295906,
0.8266871655104076,
0.8430868604879466,
0.885619442940565,
0.9407735349089349,
0.942302279150636,
0.9482494727354714,
0.9636735096980573,
0.9836746171809072,
0.9837633957539077,
0.9842282511657774,
0.986065234198573,
0.9909800758879932,
1.0],
'CRO': [7.594076541102026e-05,
0.00021132034608830965,
0.00035636942187035364,
0.00046206249114464276,
0.0005202941311291063,
0.0009345235678717103,
0.0018586387645571974,
0.002889455047751133,
0.0036560133105513495,
0.006210518605535831,
0.011909430739044066,
0.018266356201131363,
0.03008136146744939,
0.05643976322996858,
0.09287065253423755,
0.14903692862060894,
0.23031154468745685,
0.25971336583835364,
0.26444064248154897,
0.26486817616032915,
0.26489394716044645,
0.3901967495899631,
0.48085557432581,
0.6206262106463138,
0.642490639169659,
0.7099083471842164,
0.8138476600522192,
0.8164842110777006,
0.8286786912818093,
0.8662796697346461,
0.9242498971222125,
0.9244501828637759,
0.9257005240370675,
0.9316023717641109,
0.9505483576824715,
1.0],
'DEN': [0.00012951949454714329,
0.00034508105502033053,
0.0005636376974823292,
0.0007144588960235577,
0.0007931780776035202,
0.0014248570732995703,
0.0027605243542797777,
0.004172639603984463,
0.005167932122734655,
0.008720974789630664,
0.016233782513861494,
0.024176591704859935,
0.03916535056671621,
0.07085865512597797,
0.11301273981365084,
0.17228939081033756,
0.2614230046563258,
0.29493030046192825,
0.30052858188216947,
0.3010547113171185,
0.3010877200403811,
0.4264265136393593,
0.52066176007052,
0.6531744182310277,
0.6767911597440452,
0.7432104097274512,
0.8366084927340875,
0.8395678444672001,
0.8520520766235765,
0.8871624806557409,
0.93653437857708,
0.9367684381545903,
0.9381018185972174,
0.9438469709491734,
0.9606952173400788,
1.0],
'EGY': [0.0003877903576862931,
0.0008842772477997832,
0.0012766519652602877,
0.0014872766473396542,
0.0015727182947706676,
0.0032096265597192507,
0.0058962681355618635,
0.008101047447758948,
0.009307275044125898,
0.017246608159015683,
0.030277358194111276,
0.040970979687249796,
0.0698514971506852,
0.11725280888610592,
0.1872909575052067,
0.2722157215472936,
0.3871686520226196,
0.4260683074232411,
0.43191875298882126,
0.43241369538720154,
0.43244152928330665,
0.5718277165118634,
0.6661632785262861,
0.7805498735863913,
0.8018317486356615,
0.8534424280315916,
0.9160229624427456,
0.9184235322593728,
0.9271559657658182,
0.9483329919913753,
0.9740111972377666,
0.9741812386466381,
0.9750154563939197,
0.9781082878890554,
0.9858966854061741,
1.0],
'ENG': [1.290269562538679e-05,
4.141252030257348e-05,
7.794027416225867e-05,
0.00010969953812406923,
0.00013055918360213418,
0.0002299810953105576,
0.0004940371687531892,
0.000844692314705452,
0.0011551289826442581,
0.001975847192422092,
0.004155604392347671,
0.007050228511820756,
0.012131440250911644,
0.02562670298625162,
0.04659915729913457,
0.08988054985773886,
0.14558159041463306,
0.16350272081102546,
0.16606534530679393,
0.16627146860039177,
0.16628246891405646,
0.28123413676843684,
0.35520273083076626,
0.5078536179829266,
0.5237192799307775,
0.5892040279457226,
0.7243465841364445,
0.7260481118742258,
0.7365825713422857,
0.7800630404211697,
0.8697948010801702,
0.8699091344339485,
0.8708632532678031,
0.8768780510018994,
0.9026091672156566,
1.0],
'ESP': [5.741311723478815e-06,
2.1944587474006293e-05,
4.892764654443027e-05,
7.958294098213581e-05,
0.00010595004055791964,
0.00014793163072015925,
0.0002944333532264729,
0.0005500543839493814,
0.000847398379774079,
0.0011970398751376223,
0.00241717186948297,
0.004546098360426549,
0.006730076649617435,
0.01435143115396972,
0.02344600608459067,
0.04238192993924616,
0.07411895770903053,
0.0874169480710432,
0.08989336211195366,
0.09015276991068688,
0.0901709989284928,
0.1562510461881278,
0.21162685755610844,
0.3269255004781979,
0.3423940148625459,
0.4068083745794453,
0.5409262877365272,
0.5430867521283603,
0.5565817388187344,
0.6127778870363108,
0.7297845631801407,
0.7299761432374121,
0.7315945264715944,
0.741944875675303,
0.7871049430020618,
1.0],
'FRA': [1.4900186789634156e-05,
5.048806976102118e-05,
0.00010078443863238514,
0.00014920258903117242,
0.0001844670051792252,
0.0002845379398236506,
0.0005799503148391496,
0.001015983372901324,
0.0014450442243367162,
0.002202697030414647,
0.004439310645306142,
0.007740585597492774,
0.012042817134033186,
0.024743132571196695,
0.041029568825833966,
0.07185636227285265,
0.11993440642839331,
0.13868026085690235,
0.14192874813468476,
0.14224539823293525,
0.14226603608906147,
0.23326764708284156,
0.304231555137434,
0.4385512860834399,
0.45699739266472833,
0.5268265816404522,
0.6589985283740141,
0.661395936136186,
0.6750092999324013,
0.7265437605478187,
0.8240876398813137,
0.8242846822931743,
0.8257963426673851,
0.8345705053630537,
0.8692597003289058,
1.0],
'GER': [9.285842288356685e-06,
3.486271677831074e-05,
7.708439257619396e-05,
0.00012473870503399733,
0.00016549623237453804,
0.00022512261203147776,
0.00043231564897509416,
0.0007922985525566173,
0.0012092613771205484,
0.0016603295959778965,
0.0032277263534451982,
0.005950964760201547,
0.008510182751052588,
0.01740309476040101,
0.027083222142296328,
0.04539054618907657,
0.07902758740800483,
0.09447837829192535,
0.09763266469482103,
0.09799488605729115,
0.09802291833983773,
0.16163821897333533,
0.22008014320255714,
0.3306071231246185,
0.34850354360628494,
0.4161959522099315,
0.5442177034662014,
0.5469578756138568,
0.5625047431721023,
0.6213100731171401,
0.7325243359578477,
0.7327921839160983,
0.7348502685556455,
0.7468335842729715,
0.79453920146012,
1.0],
'ICE': [0.0005865700748281918,
0.0014111167803510308,
0.002157903387735785,
0.0026201753000575007,
0.0028369812133332973,
0.004792308177893157,
0.008517280475676552,
0.012065387485589609,
0.014318480456918192,
0.022548639840342246,
0.03822740675004815,
0.05316173064078525,
0.07914288035548672,
0.12863795954476728,
0.18331653939157266,
0.2408533906643961,
0.3450181666344012,
0.3921631715871066,
0.40164665675137984,
0.4027197137201946,
0.4028014765924333,
0.512411368430239,
0.6116303000784362,
0.7160358082259616,
0.7459734845687038,
0.8089787651013117,
0.8752775875206905,
0.8797941875783323,
0.894052290833946,
0.9240591552678521,
0.9556346003678277,
0.9560692143020325,
0.9579268585725232,
0.9639426682745674,
0.9772577964881562,
1.0],
'IRN': [4.409475804824242e-05,
| |
None)
_ElementMap.update({
__Value.name() : __Value
})
_AttributeMap.update({
__ID.name() : __ID,
__Notes.name() : __Notes,
__XPosition.name() : __XPosition,
__YPosition.name() : __YPosition
})
Namespace.addCategoryObject('typeBinding', u'DomainModelMetric', DomainModelMetric_)
# Complex type {avm}AnalysisConstruct with content type EMPTY
class AnalysisConstruct_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {avm}AnalysisConstruct with content type EMPTY.

    PyXB-generated binding. This type is abstract (``_Abstract = True``) and
    declares no elements or attributes of its own; it exists only as a common
    base for concrete analysis-construct types derived from it in avm.xsd.
    """
    _TypeDefinition = None
    # EMPTY content model: XML instances of this type carry no child elements
    # and no character content.
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
    _Abstract = True
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'AnalysisConstruct')
    # Location of the type declaration in the originating schema (file, line, col).
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 315, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # No element/attribute declarations to register for this empty type.
    _ElementMap.update({
        
    })
    _AttributeMap.update({
        
    })
# Register the binding so pyxb can resolve {avm}AnalysisConstruct to this class.
Namespace.addCategoryObject('typeBinding', u'AnalysisConstruct', AnalysisConstruct_)
# Complex type {avm}Design with content type ELEMENT_ONLY
class Design_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {avm}Design with content type ELEMENT_ONLY.

    PyXB-generated binding for the top-level AVM ``Design`` document type:
    a ``RootContainer`` element plus repeatable ``DomainFeature`` and
    ``ResourceDependency`` children, identified by the ``SchemaVersion``,
    ``DesignID``, ``Name`` and ``DesignSpaceSrcID`` string attributes.

    NOTE: the double-underscore class attributes below (e.g. ``__RootContainer``)
    are name-mangled class-private declaration objects; only the public
    ``property`` wrappers are part of the instance API.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Design')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 324, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element RootContainer uses Python identifier RootContainer
    # NOTE(review): the boolean argument to ElementDeclaration appears to be the
    # plurality flag (False = at most one occurrence) — confirm against pyxb docs.
    __RootContainer = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'RootContainer'), 'RootContainer', '__avm_Design__RootContainer', False, pyxb.utils.utility.Location(u'avm.xsd', 326, 6), )
    RootContainer = property(__RootContainer.value, __RootContainer.set, None, None)
    # Element DomainFeature uses Python identifier DomainFeature
    __DomainFeature = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'DomainFeature'), 'DomainFeature', '__avm_Design__DomainFeature', True, pyxb.utils.utility.Location(u'avm.xsd', 327, 6), )
    DomainFeature = property(__DomainFeature.value, __DomainFeature.set, None, None)
    # Element ResourceDependency uses Python identifier ResourceDependency
    __ResourceDependency = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'ResourceDependency'), 'ResourceDependency', '__avm_Design__ResourceDependency', True, pyxb.utils.utility.Location(u'avm.xsd', 328, 6), )
    ResourceDependency = property(__ResourceDependency.value, __ResourceDependency.set, None, None)
    # Attribute SchemaVersion uses Python identifier SchemaVersion
    __SchemaVersion = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'SchemaVersion'), 'SchemaVersion', '__avm_Design__SchemaVersion', pyxb.binding.datatypes.string)
    __SchemaVersion._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 330, 4)
    __SchemaVersion._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 330, 4)
    SchemaVersion = property(__SchemaVersion.value, __SchemaVersion.set, None, None)
    # Attribute DesignID uses Python identifier DesignID
    __DesignID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'DesignID'), 'DesignID', '__avm_Design__DesignID', pyxb.binding.datatypes.string)
    __DesignID._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 331, 4)
    __DesignID._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 331, 4)
    DesignID = property(__DesignID.value, __DesignID.set, None, None)
    # Attribute Name uses Python identifier Name
    __Name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Name'), 'Name', '__avm_Design__Name', pyxb.binding.datatypes.string)
    __Name._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 332, 4)
    __Name._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 332, 4)
    Name = property(__Name.value, __Name.set, None, None)
    # Attribute DesignSpaceSrcID uses Python identifier DesignSpaceSrcID
    __DesignSpaceSrcID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'DesignSpaceSrcID'), 'DesignSpaceSrcID', '__avm_Design__DesignSpaceSrcID', pyxb.binding.datatypes.string)
    __DesignSpaceSrcID._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 333, 4)
    __DesignSpaceSrcID._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 333, 4)
    DesignSpaceSrcID = property(__DesignSpaceSrcID.value, __DesignSpaceSrcID.set, None, None)
    # Register the declarations above with pyxb's content model for this type.
    _ElementMap.update({
        __RootContainer.name() : __RootContainer,
        __DomainFeature.name() : __DomainFeature,
        __ResourceDependency.name() : __ResourceDependency
    })
    _AttributeMap.update({
        __SchemaVersion.name() : __SchemaVersion,
        __DesignID.name() : __DesignID,
        __Name.name() : __Name,
        __DesignSpaceSrcID.name() : __DesignSpaceSrcID
    })
# Register the binding so pyxb can resolve {avm}Design to this class.
Namespace.addCategoryObject('typeBinding', u'Design', Design_)
# Complex type {avm}Container with content type ELEMENT_ONLY
class Container_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {avm}Container with content type ELEMENT_ONLY.

    PyXB-generated binding for the abstract AVM ``Container`` type: a
    recursively nestable grouping of ``Container``, ``Property``,
    ``ComponentInstance``, ``Port``, ``Connector`` and related child
    elements, with positional (``XPosition``/``YPosition``) and identifying
    (``ID``, ``Name``, ``Description``) attributes.

    NOTE: the double-underscore class attributes are name-mangled
    class-private declaration objects; only the ``property`` wrappers are
    part of the instance API.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    # Abstract: only concrete subtypes derived in the schema are instantiated.
    _Abstract = True
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Container')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 335, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element Container uses Python identifier Container
    # NOTE(review): the boolean argument to ElementDeclaration appears to be the
    # plurality flag (True = element may repeat) — confirm against pyxb docs.
    __Container = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Container'), 'Container', '__avm_Container__Container', True, pyxb.utils.utility.Location(u'avm.xsd', 337, 6), )
    Container = property(__Container.value, __Container.set, None, None)
    # Element Property uses Python identifier Property
    __Property = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Property'), 'Property', '__avm_Container__Property', True, pyxb.utils.utility.Location(u'avm.xsd', 338, 6), )
    Property = property(__Property.value, __Property.set, None, None)
    # Element ComponentInstance uses Python identifier ComponentInstance
    __ComponentInstance = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'ComponentInstance'), 'ComponentInstance', '__avm_Container__ComponentInstance', True, pyxb.utils.utility.Location(u'avm.xsd', 339, 6), )
    ComponentInstance = property(__ComponentInstance.value, __ComponentInstance.set, None, None)
    # Element Port uses Python identifier Port
    __Port = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Port'), 'Port', '__avm_Container__Port', True, pyxb.utils.utility.Location(u'avm.xsd', 340, 6), )
    Port = property(__Port.value, __Port.set, None, None)
    # Element Connector uses Python identifier Connector
    __Connector = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Connector'), 'Connector', '__avm_Container__Connector', True, pyxb.utils.utility.Location(u'avm.xsd', 341, 6), )
    Connector = property(__Connector.value, __Connector.set, None, None)
    # Element JoinData uses Python identifier JoinData
    __JoinData = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'JoinData'), 'JoinData', '__avm_Container__JoinData', True, pyxb.utils.utility.Location(u'avm.xsd', 342, 6), )
    JoinData = property(__JoinData.value, __JoinData.set, None, None)
    # Element Formula uses Python identifier Formula
    __Formula = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Formula'), 'Formula', '__avm_Container__Formula', True, pyxb.utils.utility.Location(u'avm.xsd', 343, 6), )
    Formula = property(__Formula.value, __Formula.set, None, None)
    # Element ContainerFeature uses Python identifier ContainerFeature
    __ContainerFeature = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'ContainerFeature'), 'ContainerFeature', '__avm_Container__ContainerFeature', True, pyxb.utils.utility.Location(u'avm.xsd', 344, 6), )
    ContainerFeature = property(__ContainerFeature.value, __ContainerFeature.set, None, None)
    # Element ResourceDependency uses Python identifier ResourceDependency
    __ResourceDependency = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'ResourceDependency'), 'ResourceDependency', '__avm_Container__ResourceDependency', True, pyxb.utils.utility.Location(u'avm.xsd', 345, 6), )
    ResourceDependency = property(__ResourceDependency.value, __ResourceDependency.set, None, None)
    # Element DomainModel uses Python identifier DomainModel
    __DomainModel = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'DomainModel'), 'DomainModel', '__avm_Container__DomainModel', True, pyxb.utils.utility.Location(u'avm.xsd', 346, 6), )
    DomainModel = property(__DomainModel.value, __DomainModel.set, None, None)
    # Element Resource uses Python identifier Resource
    __Resource = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Resource'), 'Resource', '__avm_Container__Resource', True, pyxb.utils.utility.Location(u'avm.xsd', 347, 6), )
    Resource = property(__Resource.value, __Resource.set, None, None)
    # Element Classifications uses Python identifier Classifications
    __Classifications = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Classifications'), 'Classifications', '__avm_Container__Classifications', True, pyxb.utils.utility.Location(u'avm.xsd', 348, 6), )
    Classifications = property(__Classifications.value, __Classifications.set, None, None)
    # Attribute XPosition uses Python identifier XPosition
    __XPosition = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'XPosition'), 'XPosition', '__avm_Container__XPosition', pyxb.binding.datatypes.unsignedInt)
    __XPosition._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 350, 4)
    __XPosition._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 350, 4)
    XPosition = property(__XPosition.value, __XPosition.set, None, None)
    # Attribute Name uses Python identifier Name
    __Name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Name'), 'Name', '__avm_Container__Name', pyxb.binding.datatypes.string)
    __Name._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 351, 4)
    __Name._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 351, 4)
    Name = property(__Name.value, __Name.set, None, None)
    # Attribute YPosition uses Python identifier YPosition
    __YPosition = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'YPosition'), 'YPosition', '__avm_Container__YPosition', pyxb.binding.datatypes.unsignedInt)
    __YPosition._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 352, 4)
    __YPosition._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 352, 4)
    YPosition = property(__YPosition.value, __YPosition.set, None, None)
    # Attribute ID uses Python identifier ID (xsd:ID — document-unique identifier)
    __ID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'ID'), 'ID', '__avm_Container__ID', pyxb.binding.datatypes.ID)
    __ID._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 353, 4)
    __ID._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 353, 4)
    ID = property(__ID.value, __ID.set, None, None)
    # Attribute Description uses Python identifier Description
    __Description = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Description'), 'Description', '__avm_Container__Description', pyxb.binding.datatypes.string)
    __Description._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 354, 4)
    __Description._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 354, 4)
    Description = property(__Description.value, __Description.set, None, None)
    # Register the declarations above with pyxb's content model for this type.
    _ElementMap.update({
        __Container.name() : __Container,
        __Property.name() : __Property,
        __ComponentInstance.name() : __ComponentInstance,
        __Port.name() : __Port,
        __Connector.name() : __Connector,
        __JoinData.name() : __JoinData,
        __Formula.name() : __Formula,
        __ContainerFeature.name() : __ContainerFeature,
        __ResourceDependency.name() : __ResourceDependency,
        __DomainModel.name() : __DomainModel,
        __Resource.name() : __Resource,
        __Classifications.name() : __Classifications
    })
    _AttributeMap.update({
        __XPosition.name() : __XPosition,
        __Name.name() : __Name,
        __YPosition.name() : __YPosition,
        __ID.name() : __ID,
        __Description.name() : __Description
    })
# Register the binding so pyxb can resolve {avm}Container to this class.
Namespace.addCategoryObject('typeBinding', u'Container', Container_)
# Complex type {avm}ComponentInstance with content type ELEMENT_ONLY
class ComponentInstance_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {avm}ComponentInstance with content type ELEMENT_ONLY.

    PyXB-generated binding for an AVM ``ComponentInstance``: repeatable
    ``PortInstance``, ``PrimitivePropertyInstance`` and ``ConnectorInstance``
    children, linked back to its component definition via the ``ComponentID``
    attribute and positioned via ``XPosition``/``YPosition``.

    NOTE: the double-underscore class attributes are name-mangled
    class-private declaration objects; only the ``property`` wrappers are
    part of the instance API.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'ComponentInstance')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 375, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element PortInstance uses Python identifier PortInstance
    # NOTE(review): the boolean argument to ElementDeclaration appears to be the
    # plurality flag (True = element may repeat) — confirm against pyxb docs.
    __PortInstance = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'PortInstance'), 'PortInstance', '__avm_ComponentInstance__PortInstance', True, pyxb.utils.utility.Location(u'avm.xsd', 377, 6), )
    PortInstance = property(__PortInstance.value, __PortInstance.set, None, None)
    # Element PrimitivePropertyInstance uses Python identifier PrimitivePropertyInstance
    __PrimitivePropertyInstance = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'PrimitivePropertyInstance'), 'PrimitivePropertyInstance', '__avm_ComponentInstance__PrimitivePropertyInstance', True, pyxb.utils.utility.Location(u'avm.xsd', 378, 6), )
    PrimitivePropertyInstance = property(__PrimitivePropertyInstance.value, __PrimitivePropertyInstance.set, None, None)
    # Element ConnectorInstance uses Python identifier ConnectorInstance
    __ConnectorInstance = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'ConnectorInstance'), 'ConnectorInstance', '__avm_ComponentInstance__ConnectorInstance', True, pyxb.utils.utility.Location(u'avm.xsd', 379, 6), )
    ConnectorInstance = property(__ConnectorInstance.value, __ConnectorInstance.set, None, None)
    # Attribute ComponentID uses Python identifier ComponentID
    __ComponentID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'ComponentID'), 'ComponentID', '__avm_ComponentInstance__ComponentID', pyxb.binding.datatypes.string)
    __ComponentID._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 381, 4)
    __ComponentID._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 381, 4)
    ComponentID = property(__ComponentID.value, __ComponentID.set, None, None)
    # Attribute ID uses Python identifier ID (xsd:ID — document-unique identifier)
    __ID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'ID'), 'ID', '__avm_ComponentInstance__ID', pyxb.binding.datatypes.ID)
    __ID._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 382, 4)
    __ID._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 382, 4)
    ID = property(__ID.value, __ID.set, None, None)
    # Attribute Name uses Python identifier Name
    __Name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Name'), 'Name', '__avm_ComponentInstance__Name', pyxb.binding.datatypes.string)
    __Name._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 383, 4)
    __Name._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 383, 4)
    Name = property(__Name.value, __Name.set, None, None)
    # Attribute DesignSpaceSrcComponentID uses Python identifier DesignSpaceSrcComponentID
    __DesignSpaceSrcComponentID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'DesignSpaceSrcComponentID'), 'DesignSpaceSrcComponentID', '__avm_ComponentInstance__DesignSpaceSrcComponentID', pyxb.binding.datatypes.string)
    __DesignSpaceSrcComponentID._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 384, 4)
    __DesignSpaceSrcComponentID._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 384, 4)
    DesignSpaceSrcComponentID = property(__DesignSpaceSrcComponentID.value, __DesignSpaceSrcComponentID.set, None, None)
    # Attribute XPosition uses Python identifier XPosition
    __XPosition = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'XPosition'), 'XPosition', '__avm_ComponentInstance__XPosition', pyxb.binding.datatypes.unsignedInt)
    __XPosition._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 385, 4)
    __XPosition._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 385, 4)
    XPosition = property(__XPosition.value, __XPosition.set, None, None)
    # Attribute YPosition uses Python identifier YPosition
    __YPosition = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'YPosition'), 'YPosition', '__avm_ComponentInstance__YPosition', pyxb.binding.datatypes.unsignedInt)
    __YPosition._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 386, 4)
    __YPosition._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 386, 4)
    YPosition = property(__YPosition.value, __YPosition.set, None, None)
    # Register the declarations above with pyxb's content model for this type.
    _ElementMap.update({
        __PortInstance.name() : __PortInstance,
        __PrimitivePropertyInstance.name() : __PrimitivePropertyInstance,
        __ConnectorInstance.name() : __ConnectorInstance
    })
    _AttributeMap.update({
        __ComponentID.name() : __ComponentID,
        __ID.name() : __ID,
        __Name.name() : __Name,
        __DesignSpaceSrcComponentID.name() : __DesignSpaceSrcComponentID,
        __XPosition.name() : __XPosition,
        __YPosition.name() : __YPosition
    })
# Register the binding so pyxb can resolve {avm}ComponentInstance to this class.
Namespace.addCategoryObject('typeBinding', u'ComponentInstance', ComponentInstance_)
# Complex type {avm}ComponentPrimitivePropertyInstance with content | |
<reponame>OverLordGoldDragon/dummy<filename>keras_adamw/optimizers_225tf.py
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import array_ops, control_flow_ops, math_ops, state_ops
from tensorflow.python.util.tf_export import keras_export
import tensorflow.keras.backend as K
from .utils_225tf import _apply_weight_decays, _compute_eta_t
from .utils_225tf import _apply_lr_multiplier, _check_args
@keras_export('keras.optimizers.AdamW')
class AdamW(OptimizerV2):
    """AdamW optimizer.

    Default parameters follow those provided in the original paper.
    For extended documentation, see optimizer_v2.Adam.__doc__.

    # Arguments
        learning_rate: A Tensor or a floating point value. The learning rate.
        beta_1: A float value or a constant float tensor. The exponential decay
            rate for the 1st moment estimates.
        beta_2: A float value or a constant float tensor. The exponential decay
            rate for the 2nd moment estimates.
        epsilon: A small constant for numerical stability. This epsilon is
            "epsilon hat" in the Kingma and Ba paper (in the formula just before
            Section 2.1), not the epsilon in Algorithm 1 of the paper.
        amsgrad: boolean. Whether to apply AMSGrad variant of this algorithm from
            the paper "On the Convergence of Adam and beyond".
        name: Optional name for the operations created when applying gradients.
            Defaults to "Adam". @compatibility(eager) When eager execution is
            enabled, `learning_rate`, `beta_1`, `beta_2`, and `epsilon` can each be
            a callable that takes no arguments and returns the actual value to use.
            This can be useful for changing these values across different
            invocations of optimizer functions. @end_compatibility
        **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
            `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
            gradients by value, `decay` is included for backward compatibility to
            allow time inverse decay of learning rate. `lr` is included for backward
            compatibility, recommended to use `learning_rate` instead.
        batch_size: int >= 1. Train input batch size; used for normalization
        total_iterations: int >= 0. Total expected iterations / weight updates
            throughout training, used for normalization; <1>
        weight_decays: dict / None. Name-value pairs specifying weight decays,
            as {<weight matrix name>:<weight decay value>}; <2>
        lr_multipliers: dict / None. Name-value pairs specifying per-layer lr
            multipliers, as {<layer name>:<multiplier value>}; <2>
        use_cosine_annealing: bool. If True, multiplies lr each train iteration
            as a function of eta_min, eta_max, total_iterations,
            and t_cur (current); [2]-Appendix, 2
        eta_min, eta_max: int, int. Min & max values of cosine annealing
            lr multiplier; [2]-Appendix, 2
        t_cur: int. Value to initialize t_cur to - used for 'warm restarts'.
            To be used together with use_cosine_annealing==True
        total_iterations_wd: int / None. If not None, weight_decays will be
            applied according to total_iterations_wd instead of
            total_iterations, contrary to authors' scheme. Set to
            sum(total_iterations) over all restarts to normalize over
            all epochs. May yield improvement over `None`.
        init_verbose: bool. If True, print weight-name--weight-decay, and
            lr-multiplier--layer-name value pairs set during
            optimizer initialization (recommended)

    # <1> - if using 'warm restarts', then refers to total expected iterations
        for a given restart; can be an estimate, and training won't stop
        at iterations == total_iterations. [2]-Appendix, pg 1
    # <2> - [AdamW Keras Implementation - Github repository]
        (https://github.com/OverLordGoldDragon/keras_adamw)

    # References
        - [1][Adam - A Method for Stochastic Optimization]
          (http://arxiv.org/abs/1412.6980v8)
        - [2][Fixing Weight Decay Regularization in Adam]
          (https://arxiv.org/abs/1711.05101)
    """

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, decay=0., amsgrad=False,
                 batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, name="AdamW", **kwargs):
        # `eta_t` may arrive via kwargs when an optimizer is rebuilt from
        # get_config(); it must be popped before forwarding kwargs to the base
        # class, which rejects unknown keyword arguments.
        eta_t = kwargs.pop('eta_t', 1.)
        super(AdamW, self).__init__(name, **kwargs)
        # `lr` kwarg wins over `learning_rate` for backward compatibility.
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        # NOTE(review): `_initial_decay` comes from the OptimizerV2 base; the
        # explicit `decay` parameter is not forwarded to super(), so it appears
        # to take effect only when passed inside **kwargs — confirm intended.
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        # Cosine-annealing state: eta_t is the current lr multiplier, t_cur the
        # current iteration within a (warm-restart) cycle; both are backend
        # variables so they can be updated inside the graph.
        self.eta_min = K.constant(eta_min, name='eta_min')
        self.eta_max = K.constant(eta_max, name='eta_max')
        self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
        self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')
        self.batch_size = batch_size
        self.total_iterations = total_iterations
        # Fall back to total_iterations for weight-decay normalization.
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.init_verbose = init_verbose
        self.use_cosine_annealing = use_cosine_annealing

        self.epsilon = epsilon or backend_config.epsilon()
        self.amsgrad = amsgrad
        _check_args(total_iterations, use_cosine_annealing, self.weight_decays)
        # Counts calls to '_resource_apply_...' so t_cur is bumped exactly once
        # per optimizer step (i.e. after every variable has been updated).
        self._updates_processed = 0
        self._init_notified = False
        self._init_lr = kwargs.get('lr', learning_rate)  # to print lr_mult setup

    def _create_slots(self, var_list):
        # Create slots for the first and second moments.
        # Separate for-loops to respect the ordering of slot variables from v1.
        for var in var_list:
            self.add_slot(var, 'm')
        for var in var_list:
            self.add_slot(var, 'v')
        if self.amsgrad:
            for var in var_list:
                self.add_slot(var, 'vhat')
        # One '_resource_apply_...' call per variable per optimizer step.
        self._updates_per_iter = len(var_list)

    def _resource_apply_dense(self, grad, var):
        """Dense Adam update for `var`, with eta_t scaling and weight decay."""
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        local_step = math_ops.cast(self.iterations + 1, var_dtype)
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
        beta_1_power = math_ops.pow(beta_1_t, local_step)
        beta_2_power = math_ops.pow(beta_2_t, local_step)
        epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype)
        total_iterations = self.total_iterations

        # Bias correction (Adam paper, Algorithm 1, folded into the step size).
        lr_t = lr_t * math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)

        # Learning rate multipliers
        if self.lr_multipliers is not None:
            lr_t = _apply_lr_multiplier(self, lr_t, var)
        # Cosine annealing
        if self.use_cosine_annealing and total_iterations != 0:
            self.eta_t = _compute_eta_t(self)

        # Exponential moving averages of the gradient and its square.
        m_t = state_ops.assign(m,
                               beta_1_t * m + (1.0 - beta_1_t) * grad,
                               use_locking=self._use_locking)
        v_t = state_ops.assign(v,
                               beta_2_t * v + (1.0 - beta_2_t
                                               ) * math_ops.square(grad),
                               use_locking=self._use_locking)

        if self.amsgrad:
            # AMSGrad: use the running max of v_t as the denominator.
            vhat = self.get_slot(var, 'vhat')
            vhat_t = state_ops.assign(vhat,
                                      math_ops.maximum(vhat, v_t),
                                      use_locking=self._use_locking)
            var_delta = m_t / (math_ops.sqrt(vhat_t) + epsilon_t)
        else:
            var_delta = m_t / (math_ops.sqrt(v_t) + epsilon_t)
        # eta_t scales the whole step (cosine annealing multiplier).
        var_t = math_ops.sub(var, self.eta_t * lr_t * var_delta)

        # Weight decays
        if var.name in self.weight_decays.keys() and total_iterations != 0:
            var_t = _apply_weight_decays(self, var, var_t)

        # Track whether this call completes a full pass over all variables,
        # so t_cur advances once per optimizer iteration, not per variable.
        iteration_done = self._updates_processed == (self._updates_per_iter - 1)
        _up = self._updates_processed
        self._updates_processed = (_up + 1) if not iteration_done else 0
        if iteration_done and not self._init_notified:
            self._init_notified = True

        var_update = state_ops.assign(var, var_t, use_locking=self._use_locking)
        t_cur = state_ops.assign_add(self.t_cur, int(iteration_done),
                                     use_locking=self._use_locking)
        updates = [var_update, m_t, v_t, t_cur]
        if self.amsgrad:
            updates.append(vhat_t)
        return control_flow_ops.group(*updates)

    def _resource_apply_sparse(self, grad, var, indices):
        """Sparse Adam update for `var` at `indices`; mirrors the dense path."""
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        local_step = math_ops.cast(self.iterations + 1, var_dtype)
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
        beta_1_power = math_ops.pow(beta_1_t, local_step)
        beta_2_power = math_ops.pow(beta_2_t, local_step)
        epsilon_t = ops.convert_to_tensor(self.epsilon, var_dtype)
        total_iterations = self.total_iterations

        # Bias correction, as in the dense path.
        lr_t = lr_t * math_ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)

        # Learning rate multipliers
        if self.lr_multipliers is not None:
            lr_t = _apply_lr_multiplier(self, lr_t, var)
        # Cosine annealing
        if self.use_cosine_annealing and total_iterations != 0:
            self.eta_t = _compute_eta_t(self)

        # Decay the full moment tensors, then scatter-add the new gradient
        # contribution only at the touched indices.
        m_scaled_g_values = grad * (1 - beta_1_t)
        m_t = state_ops.assign(m, m * beta_1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)

        v_scaled_g_values = (grad * grad) * (1 - beta_2_t)
        v_t = state_ops.assign(v, v * beta_2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)

        if self.amsgrad:
            v_hat = self.get_slot(var, 'vhat')
            v_hat_t = math_ops.maximum(v_hat, v_t)
            with ops.control_dependencies([v_hat_t]):
                v_hat_t = state_ops.assign(
                    v_hat, v_hat_t, use_locking=self._use_locking)
            v_hat_sqrt = math_ops.sqrt(v_hat_t)
            var_delta = m_t / (v_hat_sqrt + epsilon_t)
        else:
            v_sqrt = math_ops.sqrt(v_t)
            var_delta = m_t / (v_sqrt + epsilon_t)
        var_t = math_ops.sub(var, self.eta_t * lr_t * var_delta)

        # Weight decays
        if var.name in self.weight_decays.keys() and total_iterations != 0:
            var_t = _apply_weight_decays(self, var, var_t)

        # Same once-per-iteration bookkeeping as the dense path.
        iteration_done = self._updates_processed == (self._updates_per_iter - 1)
        _up = self._updates_processed
        self._updates_processed = (_up + 1) if not iteration_done else 0
        if iteration_done and not self._init_notified:
            self._init_notified = True

        var_update = state_ops.assign(var, var_t, use_locking=self._use_locking)
        t_cur = state_ops.assign_add(self.t_cur, int(iteration_done),
                                     use_locking=self._use_locking)
        # NOTE(review): unlike the dense path, `v_hat_t` is not appended to
        # `updates` when amsgrad=True; it still runs as a data dependency of
        # `var_update` — confirm this asymmetry is intentional.
        updates = [var_update, m_t, v_t, t_cur]
        return control_flow_ops.group(*updates)

    def set_weights(self, weights):
        """Set optimizer weights, dropping V1-era vhat slots if present."""
        params = self.weights
        # If the weights are generated by Keras V1 optimizer, it includes vhats
        # even without amsgrad, i.e, V1 optimizer has 3x + 1 variables, while V2
        # optimizer has 2x + 1 variables. Filter vhats out for compatibility.
        num_vars = int((len(params) - 1) / 2)
        if len(weights) == 3 * num_vars + 1:
            weights = weights[:len(params)]
        super(AdamW, self).set_weights(weights)

    def get_config(self):
        """Return a serializable config, including the AdamW-specific state."""
        config = super(AdamW, self).get_config()
        config.update({
            'learning_rate': self._serialize_hyperparameter('learning_rate'),
            'decay': self._serialize_hyperparameter('decay'),
            'beta_1': self._serialize_hyperparameter('beta_1'),
            'beta_2': self._serialize_hyperparameter('beta_2'),
            'epsilon': self.epsilon,
            'amsgrad': self.amsgrad,
            'batch_size': int(K.get_value(self.batch_size)),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': int(K.get_value(self.eta_t)),
            'eta_min': int(K.get_value(self.eta_min)),
            'eta_max': int(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose
        })
        return config
@keras_export('keras.optimizers.NadamW')
class NadamW(OptimizerV2):
"""Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
Nadam is Adam RMSprop with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
learning_rate: A Tensor or a floating point value. The learning rate.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
| |
reason why the rule was denied.
:param session: The database session in use.
:raises: RuleNotFound if no Rule can be found.
"""
try:
rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
if rule.state == RuleState.WAITING_APPROVAL:
with open('%s/rule_denied_user.tmpl' % config_get('common', 'mailtemplatedir'), 'r') as templatefile:
template = Template(templatefile.read())
email = get_account(account=rule.account, session=session).email
if approver:
approver_email = get_account(account=approver, session=session).email
if approver_email:
approver = '%s (%s)' % (approver, approver_email)
else:
approver = 'AUTOMATIC'
if email:
email_body = template.safe_substitute({'rule_id': str(rule.id),
'rse_expression': rule.rse_expression,
'comment': rule.comments,
'scope': rule.scope.external,
'name': rule.name,
'did_type': rule.did_type,
'approver': approver,
'reason': reason})
add_message(event_type='email',
payload={'body': email_body,
'to': [email],
'subject': '[RUCIO] Replication rule %s has been denied' % (str(rule.id))},
session=session)
delete_rule(rule_id=rule_id, ignore_rule_lock=True, session=session)
# Also notify the other approvers
with open('%s/rule_denied_admin.tmpl' % config_get('common', 'mailtemplatedir'), 'r') as templatefile:
template = Template(templatefile.read())
email_body = template.safe_substitute({'rule_id': str(rule.id),
'approver': approver,
'reason': reason})
vo = rule.account.vo
recipents = __create_recipents_list(rse_expression=rule.rse_expression, filter_={'vo': vo}, session=session)
for recipent in recipents:
add_message(event_type='email',
payload={'body': email_body,
'to': [recipent[0]],
'subject': 'Re: [RUCIO] Request to approve replication rule %s' % (str(rule.id))},
session=session)
except NoResultFound:
raise RuleNotFound('No rule with the id %s found' % rule_id)
except StatementError:
raise RucioException('Badly formatted rule id (%s)' % rule_id)
@transactional_session
def examine_rule(rule_id, session=None):
    """
    Examine a replication rule for transfer errors.

    :param rule_id: Replication rule id
    :param session: Session of the db.
    :returns: Dictionary of informations
    """
    # Human-readable summaries for the non-error rule states; any other state
    # falls back to the error recorded on the rule itself.
    state_messages = {RuleState.OK: 'This replication rule is OK',
                      RuleState.REPLICATING: 'This replication rule is currently REPLICATING',
                      RuleState.SUSPENDED: 'This replication rule is SUSPENDED'}
    report = {'rule_error': None,
              'transfers': []}
    try:
        rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
        report['rule_error'] = state_messages.get(rule.state, rule.error)

        # Inspect every STUCK lock of the rule and collect transfer history.
        stuck_locks = session.query(models.ReplicaLock).filter_by(rule_id=rule_id, state=LockState.STUCK).all()
        for stuck_lock in stuck_locks:
            # Full request history for this file/destination, newest first.
            history = session.query(models.RequestHistory).filter_by(scope=stuck_lock.scope, name=stuck_lock.name, dest_rse_id=stuck_lock.rse_id).order_by(models.RequestHistory.created_at.desc()).all()  # pylint: disable=no-member

            # Error, source and time of the most recent transfer attempt.
            last_error = last_source = last_time = None
            if history:
                newest_request = history[0]
                last_error = newest_request.state
                last_time = newest_request.created_at
                if newest_request.source_rse_id is not None:
                    last_source = get_rse_name(rse_id=newest_request.source_rse_id, session=session)

            # Candidate sources: every AVAILABLE replica, flagged with whether
            # its RSE availability allows reading (availability >= 4).
            sources = []
            available_replicas = session.query(models.RSEFileAssociation).filter_by(scope=stuck_lock.scope, name=stuck_lock.name, state=ReplicaState.AVAILABLE).all()
            for replica in available_replicas:
                sources.append((get_rse_name(rse_id=replica.rse_id, session=session),
                                get_rse(rse_id=replica.rse_id, session=session).availability >= 4))

            report['transfers'].append({'scope': stuck_lock.scope,
                                        'name': stuck_lock.name,
                                        'rse_id': stuck_lock.rse_id,
                                        'rse': get_rse_name(rse_id=stuck_lock.rse_id, session=session),
                                        'attempts': len(history),
                                        'last_error': str(last_error),
                                        'last_source': last_source,
                                        'sources': sources,
                                        'last_time': last_time})
        return report
    except NoResultFound:
        raise RuleNotFound('No rule with the id %s found' % (rule_id))
    except StatementError:
        raise RucioException('Badly formatted rule id (%s)' % (rule_id))
@transactional_session
def get_evaluation_backlog(expiration_time=600, session=None):
    """
    Counts the number of entries in the rule evaluation backlog.
    (Number of files to be evaluated)

    :param expiration_time: Cache expiration time in seconds.
    :param session: The database session in use.
    :returns: Tuple (Count, Datetime of oldest entry)
    """
    # Serve from the cache when a fresh value exists.
    cached = REGION.get('rule_evaluation_backlog', expiration_time=expiration_time)
    if cached is not NO_VALUE:
        return cached
    # Cache miss: count pending UpdatedDID entries and find the oldest one.
    backlog = session.query(func.count(models.UpdatedDID.created_at),
                            func.min(models.UpdatedDID.created_at)).one()
    REGION.set('rule_evaluation_backlog', backlog)
    return backlog
@transactional_session
def release_parent_rule(child_rule_id, remove_parent_expiration=False, session=None):
    """
    Release a potential parent rule, because the child_rule is OK.

    :param child_rule_id: The child rule id.
    :param remove_parent_expiration: If true, removes the expiration of the parent rule.
    :param session: The Database session
    """
    session.flush()

    # All rules that reference this child rule (Oracle index hint kept for
    # query-plan stability).
    parents_query = session.query(models.ReplicationRule).filter_by(child_rule_id=child_rule_id).\
        with_hint(models.ReplicationRule, "index(RULES RULES_CHILD_RULE_ID_IDX)", 'oracle')
    for parent_rule in parents_query.all():
        if remove_parent_expiration:
            parent_rule.expires_at = None
        # Detach the child so the parent is no longer pinned by it.
        parent_rule.child_rule_id = None
        insert_rule_history(rule=parent_rule, recent=True, longterm=False, session=session)
@transactional_session
def __find_missing_locks_and_create_them(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, session=None, logger=logging.log):
    """
    Find missing locks for a rule and create them.

    :param datasetfiles:       Dict holding all datasets and files.
    :param locks:              Dict holding locks.
    :param replicas:           Dict holding replicas.
    :param source_replicas:    Dict holding source replicas.
    :param rseselector:        The RSESelector to be used.
    :param rule:               The rule.
    :param source_rses:        RSE ids for eglible source RSEs.
    :param session:            Session of the db.
    :param logger:             Optional decorated logger that can be passed from the calling daemons or servers.
    :raises:                   InsufficientAccountLimit, IntegrityError, InsufficientTargetRSEs
    :attention:                This method modifies the contents of the locks and replicas input parameters.
    """
    logger(logging.DEBUG, "Finding missing locks for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)

    # Datasets (with only their under-locked files) that need new locks, in the
    # format [{'scope':, 'name':, 'files': [{'scope':, 'name':, 'bytes':, ...}]}].
    selected_datasets = []
    for dataset in datasetfiles:
        files_needing_locks = []
        preferred_rse_ids = []
        for file in dataset['files']:
            # Locks this rule already holds on the file.
            rule_locks = [lock for lock in locks[(file['scope'], file['name'])] if lock.rule_id == rule.id]
            if len(rule_locks) < rule.copies:
                files_needing_locks.append(file)
            else:
                # NOTE: overwritten per fully-locked file; the last value wins
                # and is handed to the lock creation below (original behavior).
                preferred_rse_ids = [lock.rse_id for lock in rule_locks]
        if files_needing_locks:
            logger(logging.DEBUG, 'Found missing locks for rule %s, creating them now', str(rule.id))
            selected_datasets.append({'scope': dataset['scope'], 'name': dataset['name'], 'files': files_needing_locks})

    __create_locks_replicas_transfers(datasetfiles=selected_datasets,
                                      locks=locks,
                                      replicas=replicas,
                                      source_replicas=source_replicas,
                                      rseselector=rseselector,
                                      rule=rule,
                                      preferred_rse_ids=preferred_rse_ids,
                                      source_rses=source_rses,
                                      session=session)

    logger(logging.DEBUG, "Finished finding missing locks for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
@transactional_session
def __find_surplus_locks_and_remove_them(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, session=None, logger=logging.log):
    """
    Find surplus locks for a rule and delete them.

    :param datasetfiles:       Dict holding all datasets and files.
    :param locks:              Dict holding locks.
    :param replicas:           Dict holding replicas.
    :param source_replicas:    Dict holding all source replicas.
    :param rseselector:        The RSESelector to be used.
    :param rule:               The rule.
    :param source_rses:        RSE ids for eglible source RSEs.
    :param session:            Session of the db.
    :param logger:             Optional decorated logger that can be passed from the calling daemons or servers.
    :raises:                   InsufficientAccountLimit, IntegrityError, InsufficientTargetRSEs
    :attention:                This method modifies the contents of the locks and replicas input parameters.
    """
    logger(logging.DEBUG, "Finding surplus locks for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)

    account_counter_decreases = {}  # {'rse_id': [file_size, file_size, file_size]}

    # All (scope, name) pairs the rule should still cover.
    wanted_files = set()
    for ds in datasetfiles:
        for file in ds['files']:
            wanted_files.add((file['scope'], file['name']))

    for key in locks:
        if key in wanted_files:
            continue
        # File no longer belongs to the rule: drop this rule's locks on it.
        # Iterate a copy, since locks[key] is mutated inside the loop.
        for lock in deepcopy(locks[key]):
            if lock.rule_id != rule.id:
                continue
            __delete_lock_and_update_replica(lock=lock, purge_replicas=rule.purge_replicas, nowait=True, session=session)
            account_counter_decreases.setdefault(lock.rse_id, []).append(lock.bytes)
            # Keep the per-state lock counters on the rule in sync.
            if lock.state == LockState.OK:
                rule.locks_ok_cnt -= 1
            elif lock.state == LockState.REPLICATING:
                rule.locks_replicating_cnt -= 1
            elif lock.state == LockState.STUCK:
                rule.locks_stuck_cnt -= 1
            locks[key].remove(lock)

    logger(logging.DEBUG, "Finished finding surplus locks for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
@transactional_session
def __find_stuck_locks_and_repair_them(datasetfiles, locks, replicas, source_replicas, rseselector, rule, source_rses, session=None, logger=logging.log):
    """
    Find stuck locks for a rule and repair them.

    :param datasetfiles:       Dict holding all datasets and files.
    :param locks:              Dict holding locks.
    :param replicas:           Dict holding replicas.
    :param source_replicas:    Dict holding source replicas.
    :param rseselector:        The RSESelector to be used.
    :param rule:               The rule.
    :param source_rses:        RSE ids of eglible source RSEs.
    :param session:            Session of the db.
    :param logger:             Optional decorated logger that can be passed from the calling daemons or servers.
    :raises:                   InsufficientAccountLimit, IntegrityError, InsufficientTargetRSEs
    :attention:                This method modifies the contents of the locks and replicas input parameters.
    """
    logger(logging.DEBUG, "Finding and repairing stuck locks for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)

    # Work out what has to change; each dict maps rse_id -> list of objects.
    (replicas_to_create, locks_to_create,
     transfers_to_create, locks_to_delete) = repair_stuck_locks_and_apply_rule_grouping(datasetfiles=datasetfiles,
                                                                                       locks=locks,
                                                                                       replicas=replicas,
                                                                                       source_replicas=source_replicas,
                                                                                       rseselector=rseselector,
                                                                                       rule=rule,
                                                                                       source_rses=source_rses,
                                                                                       session=session)

    # Persist the new replicas.
    new_replicas = []
    for per_rse in replicas_to_create.values():
        new_replicas.extend(per_rse)
    session.add_all(new_replicas)
    session.flush()

    # Persist the new locks.
    new_locks = []
    for per_rse in locks_to_create.values():
        new_locks.extend(per_rse)
    session.add_all(new_locks)
    session.flush()

    # Increase rse_counters
    for rse_id, rse_replicas in replicas_to_create.items():
        rse_counter.increase(rse_id=rse_id, files=len(rse_replicas), bytes_=sum(replica.bytes for replica in rse_replicas), session=session)

    # Increase account_counters
    for rse_id, rse_locks in locks_to_create.items():
        account_counter.increase(rse_id=rse_id, account=rule.account, files=len(rse_locks), bytes_=sum(lock.bytes for lock in rse_locks), session=session)

    # Decrease account_counters
    for rse_id in locks_to_delete:
        account_counter.decrease(rse_id=rse_id, account=rule.account, files=len(locks_to_delete[rse_id]), bytes_=sum(lock.bytes for lock in locks_to_delete[rse_id]), session=session)

    # Delete the locks:
    for doomed_locks in locks_to_delete.values():
        for lock in doomed_locks:
            session.delete(lock)

    # Add the transfers
    request_core.queue_requests(requests=transfers_to_create, session=session)
    session.flush()
    logger(logging.DEBUG, "Finished finding and repairing stuck locks for rule %s [%d/%d/%d]", str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt)
@transactional_session
def __evaluate_did_detach(eval_did, session=None, logger=logging.log):
"""
Evaluate a parent did which has children removed.
:param eval_did: The did object in use.
:param session: The database session in use.
:param logger: Optional decorated logger that can be passed from the calling daemons or servers.
"""
logger(logging.INFO, "Re-Evaluating did %s:%s for DETACH", eval_did.scope, eval_did.name)
force_epoch = config_get('rules', 'force_epoch_when_detach', default=False, session=session)
with record_timer_block('rule.evaluate_did_detach'):
# Get all parent DID's
parent_dids = rucio.core.did.list_all_parent_dids(scope=eval_did.scope, name=eval_did.name, session=session)
# Get all RR from parents and eval_did
rules = session.query(models.ReplicationRule).filter_by(scope=eval_did.scope, name=eval_did.name).with_for_update(nowait=True).all()
for did in parent_dids:
rules.extend(session.query(models.ReplicationRule).filter_by(scope=did['scope'], name=did['name']).with_for_update(nowait=True).all())
# Iterate rules and delete | |
<gh_stars>0
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmpose.datasets.pipelines import Compose
from .inference import LoadImage, _box2cs, _xywh2xyxy, _xyxy2xywh
def extract_pose_sequence(pose_results, frame_idx, causal, seq_len, step=1):
    """Crop a window of 2D pose results around a target frame, padding by
    repeating the first/last frame so the window always has ``seq_len`` frames
    (sampled every ``step`` frames).

    Args:
        pose_results (List[List[Dict]]): Multi-frame pose detection results;
            one inner list of per-person dicts per frame.
        frame_idx (int): Index of the target frame in the original video.
        causal (bool): If True, the target frame is the last frame of the
            window; otherwise it sits in the middle.
        seq_len (int): Number of frames in the returned sequence.
        step (int): Stride used to sample frames from the video.

    Returns:
        List[List[Dict]]: The padded window of length ``seq_len``.
    """
    # Number of sampled frames wanted on each side of the target frame.
    span_left = seq_len - 1 if causal else (seq_len - 1) // 2
    span_right = 0 if causal else span_left

    num_frames = len(pose_results)

    # How many copies of the boundary frames are needed to fill the window.
    pad_left = max(0, span_left - frame_idx // step)
    pad_right = max(0, span_right - (num_frames - 1 - frame_idx) // step)

    # Slice bounds chosen so that frame_idx lies on the sampling grid.
    start = max(frame_idx % step, frame_idx - span_left * step)
    stop = min(num_frames - (num_frames - 1 - frame_idx) % step,
               frame_idx + span_right * step + 1)

    window = pose_results[start:stop:step]
    return [pose_results[0]] * pad_left + window + [pose_results[-1]] * pad_right
def _gather_pose_lifter_inputs(pose_results,
bbox_center,
bbox_scale,
norm_pose_2d=False):
"""Gather input data (keypoints and track_id) for pose lifter model.
Notes:
T: The temporal length of the pose detection results
N: The number of the person instances
K: The number of the keypoints
C: The channel number of each keypoint
Args:
pose_results (List[List[Dict]]): Multi-frame pose detection results
stored in a nested list. Each element of the outer list is the
pose detection results of a single frame, and each element of the
inner list is the pose information of one person, which contains:
keypoints (ndarray[K, 2 or 3]): x, y, [score]
track_id (int): unique id of each person, required when
``with_track_id==True```
bbox ((4, ) or (5, )): left, right, top, bottom, [score]
bbox_center (ndarray[1, 2]): x, y. The average center coordinate of the
bboxes in the dataset.
bbox_scale (int|float): The average scale of the bboxes in the dataset.
norm_pose_2d (bool): If True, scale the bbox (along with the 2D
pose) to bbox_scale, and move the bbox (along with the 2D pose) to
bbox_center. Default: False.
Returns:
List[List[dict]]: Multi-frame pose detection results
stored in a nested list. Each element of the outer list is the
pose detection results of a single frame, and each element of the
inner list is the pose information of one person, which contains:
keypoints (ndarray[K, 2 or 3]): x, y, [score]
track_id (int): unique id of each person, required when
``with_track_id==True```
"""
sequence_inputs = []
for frame in pose_results:
frame_inputs = []
for res in frame:
inputs = dict()
if norm_pose_2d:
bbox = res['bbox']
center = np.array([[(bbox[0] + bbox[2]) / 2,
(bbox[1] + bbox[3]) / 2]])
scale = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
inputs['keypoints'] = (res['keypoints'][:, :2] - center) \
/ scale * bbox_scale + bbox_center
else:
inputs['keypoints'] = res['keypoints'][:, :2]
if res['keypoints'].shape[1] == 3:
inputs['keypoints'] = np.concatenate(
[inputs['keypoints'], res['keypoints'][:, 2:]], axis=1)
if 'track_id' in res:
inputs['track_id'] = res['track_id']
frame_inputs.append(inputs)
sequence_inputs.append(frame_inputs)
return sequence_inputs
def _collate_pose_sequence(pose_results, with_track_id=True, target_frame=-1):
"""Reorganize multi-frame pose detection results into individual pose
sequences.
Notes:
T: The temporal length of the pose detection results
N: The number of the person instances
K: The number of the keypoints
C: The channel number of each keypoint
Args:
pose_results (List[List[Dict]]): Multi-frame pose detection results
stored in a nested list. Each element of the outer list is the
pose detection results of a single frame, and each element of the
inner list is the pose information of one person, which contains:
keypoints (ndarray[K, 2 or 3]): x, y, [score]
track_id (int): unique id of each person, required when
``with_track_id==True```
with_track_id (bool): If True, the element in pose_results is expected
to contain "track_id", which will be used to gather the pose
sequence of a person from multiple frames. Otherwise, the pose
results in each frame are expected to have a consistent number and
order of identities. Default is True.
target_frame (int): The index of the target frame. Default: -1.
"""
T = len(pose_results)
assert T > 0
target_frame = (T + target_frame) % T # convert negative index to positive
N = len(pose_results[target_frame]) # use identities in the target frame
if N == 0:
return []
K, C = pose_results[target_frame][0]['keypoints'].shape
track_ids = None
if with_track_id:
track_ids = [res['track_id'] for res in pose_results[target_frame]]
pose_sequences = []
for idx in range(N):
pose_seq = dict()
# gather static information
for k, v in pose_results[target_frame][idx].items():
if k != 'keypoints':
pose_seq[k] = v
# gather keypoints
if not with_track_id:
pose_seq['keypoints'] = np.stack(
[frame[idx]['keypoints'] for frame in pose_results])
else:
keypoints = np.zeros((T, K, C), dtype=np.float32)
keypoints[target_frame] = pose_results[target_frame][idx][
'keypoints']
# find the left most frame containing track_ids[idx]
for frame_idx in range(target_frame - 1, -1, -1):
contains_idx = False
for res in pose_results[frame_idx]:
if res['track_id'] == track_ids[idx]:
keypoints[frame_idx] = res['keypoints']
contains_idx = True
break
if not contains_idx:
# replicate the left most frame
keypoints[:frame_idx + 1] = keypoints[frame_idx + 1]
break
# find the right most frame containing track_idx[idx]
for frame_idx in range(target_frame + 1, T):
contains_idx = False
for res in pose_results[frame_idx]:
if res['track_id'] == track_ids[idx]:
keypoints[frame_idx] = res['keypoints']
contains_idx = True
break
if not contains_idx:
# replicate the right most frame
keypoints[frame_idx + 1:] = keypoints[frame_idx]
break
pose_seq['keypoints'] = keypoints
pose_sequences.append(pose_seq)
return pose_sequences
def inference_pose_lifter_model(model,
pose_results_2d,
dataset,
with_track_id=True,
image_size=None,
norm_pose_2d=False):
"""Inference 3D pose from 2D pose sequences using a pose lifter model.
Args:
model (nn.Module): The loaded pose lifter model
pose_results_2d (List[List[dict]]): The 2D pose sequences stored in a
nested list. Each element of the outer list is the 2D pose results
of a single frame, and each element of the inner list is the 2D
pose of one person, which contains:
- "keypoints" (ndarray[K, 2 or 3]): x, y, [score]
- "track_id" (int)
dataset (str): Dataset name, e.g. 'Body3DH36MDataset'
with_track_id: If True, the element in pose_results_2d is expected to
contain "track_id", which will be used to gather the pose sequence
of a person from multiple frames. Otherwise, the pose results in
each frame are expected to have a consistent number and order of
identities. Default is True.
image_size (Tuple|List): image width, image height. If None, image size
will not be contained in dict ``data``.
norm_pose_2d (bool): If True, scale the bbox (along with the 2D
pose) to the average bbox scale of the dataset, and move the bbox
(along with the 2D pose) to the average bbox center of the dataset.
Returns:
List[dict]: 3D pose inference results. Each element is the result of
an instance, which contains:
- "keypoints_3d" (ndarray[K,3]): predicted 3D keypoints
- "keypoints" (ndarray[K, 2 or 3]): from the last frame in
``pose_results_2d``.
- "track_id" (int): from the last frame in ``pose_results_2d``.
If there is no valid instance, an empty list will be returned.
"""
cfg = model.cfg
test_pipeline = Compose(cfg.test_pipeline)
flip_pairs = None
if dataset == 'Body3DH36MDataset':
flip_pairs = [[1, 4], [2, 5], [3, 6], [11, 14], [12, 15], [13, 16]]
bbox_center = np.array([[528, 427]], dtype=np.float32)
bbox_scale = 400
else:
raise NotImplementedError()
target_idx = -1 if model.causal else len(pose_results_2d) // 2
pose_lifter_inputs = _gather_pose_lifter_inputs(pose_results_2d,
bbox_center, bbox_scale,
norm_pose_2d)
pose_sequences_2d = _collate_pose_sequence(pose_lifter_inputs,
with_track_id, target_idx)
if not pose_sequences_2d:
return []
batch_data = []
for seq in | |
bool odd { _ind.size() & 1 }; \
const int bend = odd ? \
static_cast<int>( 0.5 * ( _ind.size() + 1 ) ) : \
static_cast<int>( 0.5 * _ind.size() );",
)
def hilbert(x, N=None, axis=-1):
    """
    Compute the analytic signal, using the Hilbert transform.

    The transformation is done along the last axis by default.

    Parameters
    ----------
    x : array_like
        Signal data. Must be real.
    N : int, optional
        Number of Fourier components. Default: ``x.shape[axis]``
    axis : int, optional
        Axis along which to do the transformation. Default: -1.

    Returns
    -------
    xa : ndarray
        Analytic signal of `x`, of each 1-D array along `axis`

    Notes
    -----
    The analytic signal ``x_a(t)`` of signal ``x(t)`` is::

        x_a = F^{-1}(F(x) 2U) = x + i y

    where `F` is the Fourier transform, `U` the unit step function, and `y`
    the Hilbert transform of `x`.  In other words, the negative half of the
    frequency spectrum is zeroed out, turning the real-valued signal into a
    complex signal.  The Hilbert-transformed signal can be obtained from
    ``cp.imag(hilbert(x))``, and the original signal from
    ``cp.real(hilbert(x))``.

    References
    ----------
    .. [1] Wikipedia, "Analytic signal".
           https://en.wikipedia.org/wiki/Analytic_signal
    """
    signal = cp.asarray(x)
    if cp.iscomplexobj(signal):
        raise ValueError("x must be real.")

    # default transform length: the full extent of the chosen axis
    n_fft = signal.shape[axis] if N is None else N
    if n_fft <= 0:
        raise ValueError("N must be positive.")

    spectrum = cp.fft.fft(signal, n_fft, axis=axis)
    # one-sided step multipliers (1/2/0 pattern) built on-device
    step = _hilbert_kernel(size=n_fft)
    if signal.ndim > 1:
        # broadcast the 1-D multiplier along every axis except `axis`
        expand = [cp.newaxis] * signal.ndim
        expand[axis] = slice(None)
        step = step[tuple(expand)]
    return cp.fft.ifft(spectrum * step, axis=axis)
_hilbert2_kernel = cp.ElementwiseKernel(
"",
"float64 h1, float64 h2",
"""
if ( !odd ) {
if ( ( i == 0 ) || ( i == bend ) ) {
h1 = 1.0;
h2 = 1.0;
} else if ( i > 0 && i < bend ) {
h1 = 2.0;
h2 = 2.0;
} else {
h1 = 0.0;
h2 = 0.0;
}
} else {
if ( i == 0 ) {
h1 = 1.0;
h2 = 1.0;
} else if ( i > 0 && i < bend) {
h1 = 2.0;
h2 = 2.0;
} else {
h1 = 0.0;
h2 = 0.0;
}
}
""",
"_hilbert2_kernel",
options=("-std=c++11",),
loop_prep="const bool odd { _ind.size() & 1 }; \
const int bend = odd ? \
static_cast<int>( 0.5 * ( _ind.size() + 1 ) ) : \
static_cast<int>( 0.5 * _ind.size() );",
)
def hilbert2(x, N=None):
    """
    Compute the '2-D' analytic signal of `x`

    Parameters
    ----------
    x : array_like
        2-D signal data.
    N : int or tuple of two ints, optional
        Number of Fourier components. Default is ``x.shape``

    Returns
    -------
    xa : ndarray
        Analytic signal of `x` taken along axes (0,1).

    Raises
    ------
    ValueError
        If `x` is not real-valued 2-D data, or if `N` is not a positive int
        or a tuple of two positive ints.

    References
    ----------
    .. [1] Wikipedia, "Analytic signal",
           https://en.wikipedia.org/wiki/Analytic_signal
    """
    x = cp.atleast_2d(x)
    if x.ndim > 2:
        raise ValueError("x must be 2-D.")
    if cp.iscomplexobj(x):
        raise ValueError("x must be real.")
    if N is None:
        N = x.shape
    elif isinstance(N, int):
        if N <= 0:
            raise ValueError("N must be positive.")
        N = (N, N)
    elif len(N) != 2 or cp.any(cp.asarray(N) <= 0):
        raise ValueError(
            "When given as a tuple, N must hold exactly two positive integers"
        )

    Xf = cp.fft.fft2(x, N, axes=(0, 1))
    # Build the step multipliers per axis.  Each vector must be sized to the
    # axis it multiplies: the previous code built both from N[1], which gave
    # a wrongly shaped mask whenever N[0] != N[1].
    h1, _ = _hilbert2_kernel(size=N[0])
    _, h2 = _hilbert2_kernel(size=N[1])
    h = h1[:, cp.newaxis] * h2[cp.newaxis, :]
    # (x is guaranteed exactly 2-D here, so no extra broadcasting is needed;
    # the old `while k > 2` loop was unreachable and has been removed.)
    return cp.fft.ifft2(Xf * h, axes=(0, 1))
_detrend_A_kernel = cp.ElementwiseKernel(
"",
"float64 A",
"""
if ( i & 1 ) {
const int new_i { i >> 1 };
A = new_i * den;
} else {
A = 1.0;
}
""",
"_detrend_A_kernel",
options=("-std=c++11",),
loop_prep="const double den { 1.0 / _ind.size() };",
)
def detrend(data, axis=-1, type="linear", bp=0, overwrite_data=False):
    """
    Remove linear trend along axis from data.
    Parameters
    ----------
    data : array_like
        The input data.
    axis : int, optional
        The axis along which to detrend the data. By default this is the
        last axis (-1).
    type : {'linear', 'constant'}, optional
        The type of detrending. If ``type == 'linear'`` (default),
        the result of a linear least-squares fit to `data` is subtracted
        from `data`.
        If ``type == 'constant'``, only the mean of `data` is subtracted.
    bp : array_like of ints, optional
        A sequence of break points. If given, an individual linear fit is
        performed for each part of `data` between two break points.
        Break points are specified as indices into `data`.
    overwrite_data : bool, optional
        If True, perform in place detrending and avoid a copy. Default is False
    Returns
    -------
    ret : ndarray
        The detrended input data.
    Raises
    ------
    ValueError
        If `type` is not one of 'linear'/'l'/'constant'/'c', or if a break
        point exceeds the data length along `axis`.
    Examples
    --------
    >>> import cusignal
    >>> import cupy as cp
    >>> randgen = cp.random.RandomState(9)
    >>> npoints = 1000
    >>> noise = randgen.randn(npoints)
    >>> x = 3 + 2*cp.linspace(0, 1, npoints) + noise
    >>> (cusignal.detrend(x) - noise).max() < 0.01
    True
    """
    if type not in ["linear", "l", "constant", "c"]:
        raise ValueError("Trend type must be 'linear' or 'constant'.")
    data = cp.asarray(data)
    dtype = data.dtype.char
    # compute in a float/complex dtype; anything else is promoted to double
    if dtype not in "dfDF":
        dtype = "d"
    if type in ["constant", "c"]:
        # constant detrend: subtract the mean taken along `axis`
        ret = data - cp.expand_dims(cp.mean(data, axis), axis)
        return ret
    else:
        dshape = data.shape
        N = dshape[axis]
        # normalized breakpoints: deduplicated, sorted, bracketed by 0 and N
        bp = np.sort(np.unique(np.r_[0, bp, N]))
        if np.any(bp > N):
            raise ValueError(
                "Breakpoints must be less than length of \
                data along given axis."
            )
        Nreg = len(bp) - 1  # number of independent fit segments
        # Restructure data so that axis is along first dimension and
        # all other dimensions are collapsed into second dimension
        rnk = len(dshape)
        if axis < 0:
            axis = axis + rnk
        newdims = np.r_[axis, 0:axis, axis + 1 : rnk]
        newdata = cp.reshape(
            cp.transpose(data, tuple(newdims)), (N, _prod(dshape) // N)
        )
        if not overwrite_data:
            newdata = newdata.copy()  # make sure we have a copy
        if newdata.dtype.char not in "dfDF":
            newdata = newdata.astype(dtype)
        # Find leastsq fit and remove it for each piece
        for m in range(Nreg):
            Npts = int(bp[m + 1] - bp[m])
            # [intercept, ramp] design matrix built on-device, then reshaped
            A = _detrend_A_kernel(size=Npts * 2)
            A = cp.reshape(A, (Npts, 2))
            sl = slice(bp[m], bp[m + 1])
            coef, _, _, _ = cp.linalg.lstsq(A, newdata[sl])
            newdata[sl] = newdata[sl] - cp.dot(A, coef)
        # Put data back in original shape.
        tdshape = np.take(dshape, newdims, 0)
        ret = cp.reshape(newdata, tuple(tdshape))
        # invert the axis permutation that `newdims` applied above
        vals = list(range(1, rnk))
        olddims = vals[:axis] + [0] + vals[axis:]
        ret = cp.transpose(ret, tuple(olddims))
        return ret
_freq_shift_kernel = cp.ElementwiseKernel(
"T x, float64 freq, float64 fs",
"complex128 out",
"""
thrust::complex<double> temp(0, neg2pi * freq / fs * i);
out = x * exp(temp);
""",
"_freq_shift_kernel",
options=("-std=c++11",),
loop_prep="const double neg2pi { -1 * 2 * M_PI };",
)
def freq_shift(x, freq, fs):
"""
Frequency shift signal by freq at fs sample rate
Parameters
----------
x : array_like, complex valued
The data to be | |
#!/usr/bin/env pythonw
# -*- coding: utf-8 -*-
from __future__ import print_function
from builtins import str
from builtins import range
import wx
import sys
import os
import scipy
from scipy import *
#------------------------------------------------------------------------
# def main():
#------------------------------------------------------------------------
"""
NAME
    tdt_magic.py
DESCRIPTION
converts TDT formatted files to magic_measurements format files
SYNTAX
tdt_magic.py -WD <PATH>
INPUT:
TDT formatted files with suffix .tdt
OUTPUT:
combined measurement file saved in <PATH>
Log:
Initial revision 4/24/2014
some bug fix 06/12/2015
"""
#===========================================
# GUI
#===========================================
class convert_tdt_files_to_MagIC(wx.Frame):
""""""
title = "Convert tdt files to MagIC format"
def __init__(self,WD):
wx.Frame.__init__(self, None, wx.ID_ANY, self.title)
self.panel = wx.Panel(self)
self.max_files=10
os.chdir(WD)
self.WD=os.getcwd()+"/"
self.create_menu()
self.InitUI()
def InitUI(self):
pnl = self.panel
#---sizer infor ----
TEXT1="Instructions:\n"
TEXT2="1. Put all individual tdt files from the same location in one folder.\n"
TEXT3=" Each tdt file file should end with '.tdt'\n"
TEXT4="2. If there are more than one location use multiple folders. One folder for each location.\n"
TEXT5="3. If the magnetization in in units are mA/m (as in the original TT program) volume is required to convert to moment.\n\n"
TEXT6="For more information check the help menubar option.\n"
TEXT7="(for support contact <EMAIL>)"
TEXT=TEXT1+TEXT2+TEXT3+TEXT4+TEXT5+TEXT6+TEXT7
bSizer_info = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.HORIZONTAL )
bSizer_info.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_LEFT)
#---sizer 0 ----
TEXT="output file:"
bSizer0 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer0.Add(wx.StaticText(self.panel,label=TEXT),wx.ALIGN_LEFT)
bSizer0.AddSpacer(5)
self.output_file_path = wx.TextCtrl(self.panel, id=-1, size=(1000,25))
#self.output_file_path.SetEditable(False)
bSizer0.Add(self.output_file_path,wx.ALIGN_LEFT)
self.output_file_path.SetValue(os.path.join(self.WD, "magic_measurements.txt"))
#---sizer 1 ----
TEXT="\n choose a path\n with no spaces in name"
bSizer1 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer1.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer1.AddSpacer(5)
for i in range(self.max_files):
command= "self.dir_path_%i = wx.TextCtrl(self.panel, id=-1, size=(100,25), style=wx.TE_READONLY)"%i
exec(command)
command= "self.add_dir_button_%i = wx.Button(self.panel, id=-1, label='add',name='add_%i')"%(i,i)
exec(command)
command= "self.Bind(wx.EVT_BUTTON, self.on_add_dir_button_i, self.add_dir_button_%i)"%i
#print command
exec(command)
command="bSizer1_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer1_%i.Add(wx.StaticText(pnl,label=('%i '[:2])),wx.ALIGN_LEFT)"%(i,i+1)
exec(command)
command="bSizer1_%i.Add(self.dir_path_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer1_%i.Add(self.add_dir_button_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer1.Add(bSizer1_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer1.AddSpacer(5)
#---sizer 1a ----
TEXT="\n\nexperiment:"
bSizer1a = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer1a.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
self.experiments_names=['Thellier','ATRM 6 positions','NLT']
bSizer1a.AddSpacer(5)
for i in range(self.max_files):
command="self.protocol_info_%i = wx.ComboBox(self.panel, -1, self.experiments_names[0], size=(100,25), choices=self.experiments_names, style=wx.CB_DROPDOWN|wx.CB_READONLY)"%i
exec(command)
command="bSizer1a.Add(self.protocol_info_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer1a.AddSpacer(5)
#---sizer 1b ----
TEXT="\nBlab direction\n dec, inc: "
bSizer1b = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer1b.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer1b.AddSpacer(5)
for i in range(self.max_files):
#command= "self.file_info_Blab_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
#exec command
command= "self.file_info_Blab_dec_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
exec(command)
command= "self.file_info_Blab_dec_%i.SetValue('0')"%i
exec(command)
command= "self.file_info_Blab_inc_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
exec(command)
command= "self.file_info_Blab_inc_%i.SetValue('90')"%i
exec(command)
command="bSizer_blab%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
#command="bSizer_blab%i.Add(self.file_info_Blab_%i ,wx.ALIGN_LEFT)" %(i,i)
#exec command
command="bSizer_blab%i.Add(self.file_info_Blab_dec_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer_blab%i.Add(self.file_info_Blab_inc_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer1b.Add(bSizer_blab%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer1b.AddSpacer(5)
#---sizer 1c ----
TEXT="\nmoment\nunits:"
bSizer1c = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer1c.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
self.moment_units_names=['mA/m','emu','Am^2']
bSizer1c.AddSpacer(5)
for i in range(self.max_files):
command="self.moment_units_%i = wx.ComboBox(self.panel, -1, self.moment_units_names[0], size=(80,25), choices=self.moment_units_names, style=wx.CB_DROPDOWN|wx.CB_READONLY)"%i
exec(command)
command="bSizer1c.Add(self.moment_units_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer1c.AddSpacer(5)
#---sizer 1d ----
TEXT="\nvolume\n[cubic m]:"
bSizer1d = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer1d.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer1d.AddSpacer(5)
for i in range(self.max_files):
command= "self.sample_volume_%i = wx.TextCtrl(self.panel, id=-1, size=(80,25))"%i
exec(command)
command= "self.sample_volume_%i.SetValue('1.287555e-5')"%i
exec(command)
command="bSizer1d.Add(self.sample_volume_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer1d.AddSpacer(5)
#---sizer 1e ----
TEXT="\nuser\nname:"
bSizer1e = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer1e.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer1e.AddSpacer(5)
for i in range(self.max_files):
command= "self.file_info_user_%i = wx.TextCtrl(self.panel, id=-1, size=(60,25))"%i
exec(command)
command="bSizer1e.Add(self.file_info_user_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer1e.AddSpacer(5)
#---sizer 2 ----
TEXT="\nlocation\nname:"
bSizer2 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer2.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
bSizer2.AddSpacer(5)
for i in range(self.max_files):
command= "self.file_location_%i = wx.TextCtrl(self.panel, id=-1, size=(60,25))"%i
exec(command)
command="bSizer2.Add(self.file_location_%i,wx.ALIGN_TOP)" %i
exec(command)
bSizer2.AddSpacer(5)
## #---sizer 3 ----
##
## missing
#---sizer 4 ----
TEXT="\nsample-specimen\nnaming convention:"
bSizer4 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer4.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
self.sample_naming_conventions=['sample=specimen','no. of terminate characters','charceter delimited']
bSizer4.AddSpacer(5)
for i in range(self.max_files):
command="self.sample_naming_convention_%i = wx.ComboBox(self.panel, -1, self.sample_naming_conventions[0], size=(150,25), choices=self.sample_naming_conventions, style=wx.CB_DROPDOWN|wx.CB_READONLY)"%i
exec(command)
command="self.sample_naming_convention_char_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
exec(command)
command="bSizer4_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer4_%i.Add(self.sample_naming_convention_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer4_%i.Add(self.sample_naming_convention_char_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer4.Add(bSizer4_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer4.AddSpacer(5)
#---sizer 5 ----
TEXT="\nsite-sample\nnaming convention:"
bSizer5 = wx.StaticBoxSizer( wx.StaticBox( self.panel, wx.ID_ANY, "" ), wx.VERTICAL )
bSizer5.Add(wx.StaticText(pnl,label=TEXT),wx.ALIGN_TOP)
self.site_naming_conventions=['site=sample','no. of terminate characters','charceter delimited']
bSizer5.AddSpacer(5)
for i in range(self.max_files):
command="self.site_naming_convention_char_%i = wx.TextCtrl(self.panel, id=-1, size=(40,25))"%i
exec(command)
command="self.site_naming_convention_%i = wx.ComboBox(self.panel, -1, self.site_naming_conventions[0], size=(150,25), choices=self.site_naming_conventions, style=wx.CB_DROPDOWN|wx.CB_READONLY)"%i
exec(command)
command="bSizer5_%i = wx.BoxSizer(wx.HORIZONTAL)"%i
exec(command)
command="bSizer5_%i.Add(self.site_naming_convention_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer5_%i.Add(self.site_naming_convention_char_%i,wx.ALIGN_LEFT)" %(i,i)
exec(command)
command="bSizer5.Add(bSizer5_%i,wx.ALIGN_TOP)"%i
exec(command)
bSizer5.AddSpacer(5)
#------------------
self.okButton = wx.Button(self.panel, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
#hbox1.Add(self.add_file_button)
#hbox1.Add(self.remove_file_button )
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox2.Add(self.okButton)
hbox2.Add(self.cancelButton )
#------
vbox=wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.AddSpacer(1)
hbox.Add(bSizer1, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
hbox.Add(bSizer1a, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
hbox.Add(bSizer1b, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
hbox.Add(bSizer1c, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
hbox.Add(bSizer1d, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
hbox.Add(bSizer1e, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
hbox.Add(bSizer2, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
## hbox.Add(bSizer3, flag=wx.ALIGN_LEFT)
## hbox.AddSpacer(5)
hbox.Add(bSizer4, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
hbox.Add(bSizer5, flag=wx.ALIGN_LEFT)
hbox.AddSpacer(1)
#-----
vbox.AddSpacer(5)
vbox.Add(bSizer_info,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(2)
vbox.Add(hbox)
vbox.AddSpacer(5)
vbox.Add(hbox1,flag=wx.ALIGN_CENTER_HORIZONTAL)
#vbox.AddSpacer(20)
vbox.AddSpacer(5)
vbox.Add(bSizer0, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.Add(hbox2,flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(5)
self.panel.SetSizer(vbox)
vbox.Fit(self)
self.Show()
self.Centre()
def create_menu(self):
""" Create menu
"""
self.menubar = wx.MenuBar()
menu_about = wx.Menu()
menu_help = menu_about.Append(-1, "&Some notes", "")
self.Bind(wx.EVT_MENU, self.on_menu_help, menu_help)
self.menubar.Append(menu_about, "& Instructions")
self.SetMenuBar(self.menubar)
def on_menu_help (self,event):
dia = message_box("Help")
dia.Show()
dia.Center()
def on_add_dir_button_i(self,event):
dlg = wx.DirDialog(
None,message="choose directtory with tdt files",
defaultPath ="./",
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
FILE = dlg.GetPath()
# fin=open(FILE,'r')
button = event.GetEventObject()
name=button.GetName()
i=int((name).split("_")[-1])
#print "The button's name is " + button.GetName()
command="self.dir_path_%i.SetValue(FILE)"%i
exec(command)
def read_generic_file(self,path):
Data={}
Fin=open(path,'r')
header=Fin.readline().strip('\n').split('\t')
for line in Fin.readlines():
tmp_data={}
l=line.strip('\n').split('\t')
if len(l)<len(header):
continue
else:
for i in range(len(header)):
tmp_data[header[i]]=l[i]
specimen=tmp_data['Specimen']
if specimen not in list(Data.keys()):
Data[specimen]=[]
# check dupliactes
if len(Data[specimen]) >0:
if tmp_data['Treatment (aka field)']==Data[specimen][-1]['Treatment (aka field)']:
print("-W- WARNING: duplicate measurements specimen %s, Treatment %s. keeping onlt the last one"%(tmp_data['Specimen'],tmp_data['Treatment (aka field)']))
Data[specimen].pop()
Data[specimen].append(tmp_data)
return(Data)
def on_okButton(self,event):
DIRS_data={}
for i in range(self.max_files):
# read directiory path
dirpath=""
command="dirpath=self.dir_path_%i.GetValue()"%i
exec(command)
if dirpath!="":
dir_name=str(dirpath.split("/")[-1])
DIRS_data[dir_name]={}
DIRS_data[dir_name]['path']=str(dirpath)
else:
continue
# get experiment
command="experiment=self.protocol_info_%i.GetValue()"%i
exec(command)
DIRS_data[dir_name]['experiment']=str(experiment)
# get location
user_name=""
command="location_name=self.file_location_%i.GetValue()"%i
exec(command)
DIRS_data[dir_name]['er_location_name']=str(location_name)
# get Blab direction
labfield_DI=["0.","90."]
command="labfield_DI[0]=self.file_info_Blab_dec_%i.GetValue()"%i
exec(command)
command="labfield_DI[1]=self.file_info_Blab_inc_%i.GetValue()"%i
exec(command)
DIRS_data[dir_name]['labfield_DI']=labfield_DI
# get Moment units
command="moment_units=self.moment_units_%i.GetValue()"%i
exec(command)
DIRS_data[dir_name]['moment_units']=moment_units
# get sample volume
command="sample_volume=self.sample_volume_%i.GetValue()"%i
exec(command)
DIRS_data[dir_name]['sample_volume']=sample_volume
# get User_name
user_name=""
command="user_name=self.file_info_user_%i.GetValue()"%i
exec(command)
DIRS_data[dir_name]['user_name']=user_name
# get sample-specimen naming convention
sample_naming_convenstion=["",""]
command="sample_naming_convenstion[0]=str(self.sample_naming_convention_%i.GetValue())"%i
exec(command)
command="sample_naming_convenstion[1]=str(self.sample_naming_convention_char_%i.GetValue())"%i
exec(command)
DIRS_data[dir_name]["sample_naming_convenstion"]=sample_naming_convenstion
# get site-sample naming convention
site_naming_convenstion=["",""]
command="site_naming_convenstion[0]=str(self.site_naming_convention_%i.GetValue())"%i
exec(command)
command="site_naming_convenstion[1]=str(self.site_naming_convention_char_%i.GetValue())"%i
exec(command)
DIRS_data[dir_name]["site_naming_convenstion"]=site_naming_convenstion
#print "DIRS_data",DIRS_data
self.convert_2_magic(DIRS_data)
    def on_cancelButton(self,event):
        """Handler for the Cancel button: close the window without converting."""
        self.Destroy()
def get_sample_name(self,specimen,sample_naming_convenstion):
if sample_naming_convenstion[0]=="sample=specimen":
sample=specimen
elif sample_naming_convenstion[0]=="no. of terminate characters":
n=int(sample_naming_convenstion[1])*-1
sample=specimen[:n]
elif sample_naming_convenstion[0]=="charceter delimited":
d=sample_naming_convenstion[1]
sample_splitted=specimen.split(d)
if len(sample_splitted)==1:
sample=sample_splitted[0]
else:
sample=d.join(sample_splitted[:-1])
return sample
def get_site_name(self,sample,site_naming_convenstion):
if site_naming_convenstion[0]=="site=sample":
site=sample
elif site_naming_convenstion[0]=="no. of terminate characters":
n=int(site_naming_convenstion[1])*-1
site=sample[:n]
elif site_naming_convenstion[0]=="charceter delimited":
d=site_naming_convenstion[1]
site_splitted=sample.split(d)
if len(site_splitted)==1:
site=site_splitted[0]
else:
site=d.join(site_splitted[:-1])
return site
#===========================================
# Convert to MagIC format
#===========================================
def convert_2_magic(self,DIRS_data):
#--------------------------------------
# Read the files
#
# Database structure
# Thellier_type experiment:
#
# 1) Each file contains the data one specimen
# 2) First line is the header: "Thellier-tdt"
        # 3) Second line in header includes 4 fields:
# [Blab] ,[unknown_1] , [unknown_2] , [unknown_3] , [unknown_4]
# 4) Body includes 5 fields
        # [specimen_name], [treatments], [moment],[meas_dec],[meas_inc]
# Tretment: XXX.0 (zerofield)
# XXX.1 (infield)
# XXX.2 (pTRM check)
# XXX.3 (Tail check)
# XXX.4 (Additivity check; Krasa et al., 2003)
# XXX.5 (Original Thellier-Thellier protocol. )
# (where .5 is for the second direction and .1 in the first)
# XXX = temperature in degrees
#
#
# IMPORTANT ASSUMPTION:
# (1) lab field is always in Z direction (theta=0, phi=90)
# (2) Thermal demagnetization - NO MICROWAVE
        # (3) if XXX <50 then assuming that this is NRM (273K)
#
# -------------------------------------
#
# ATRM in six positions
#
# Tretment: XXX.0 zerofield
# XXX.1 +x
# XXX.2 +y
# XXX.3 +z
# XXX.4 -x
# XXX.5 -y
# XXX.6 -z
# XXX.7 alteration check
# IMPORTANT REMARKS:
#
# (1) If the program check if the direction of the magnetization fits the coding above
# if not, an error message will appear
# (2) Alteration ckeck can be in any direction
# (3) the order of the measurements is not important
#
# For questions and support: <EMAIL>
# -------------------------------------------------------------
magic_measurements_headers=[]
er_specimens_headers=[]
MagRecs=[]
ErRecs=[]
Data={}
for dir_name in list(DIRS_data.keys()):
#-----------------------------------
# First, read all files and sort data by specimen and by Experiment type
#-----------------------------------
for files in os.listdir(DIRS_data[dir_name]["path"]):
if files.endswith(".tdt"):
print("Open file: ", DIRS_data[dir_name]["path"]+"/"+files)
fin=open(DIRS_data[dir_name]["path"]+"/"+files,'r')
header_codes=['labfield','core_azimuth','core_plunge','bedding_dip_direction','bedding_dip']
body_codes=['specimen_name','treatment','moment','dec','inc']
tmp_body=[]
tmp_header_data={}
line_number=0
continue_reading=True
line=fin.readline() # ignore first line
for line in fin.readlines():
if "END" in line:
break
if line.strip('\n') =="":
break
this_line=line.strip('\n').split()
if len(this_line)<5:
continue
#---------------------------------------------------
# fix muxworthy funky data format
| |
<reponame>bokulich-lab/q2-fondue
# ----------------------------------------------------------------------------
# Copyright (c) 2022, Bokulich Laboratories.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import dotenv
import re
import pandas as pd
from pyzotero import zotero, zotero_errors
from q2_fondue.entrezpy_clients._utils import set_up_logger
logger = set_up_logger('INFO', logger_name=__name__)
class NoAccessionIDs(Exception):
    """Signals that no accession IDs could be found."""
def _get_collection_id(zot: zotero.Zotero, col_name: str) -> str:
    """
    Returns collection ID given the name of a Zotero collection

    Args:
        zot (zotero.Zotero): Zotero instance
        col_name (str): Name of collection

    Returns:
        str: Collection ID.

    Raises:
        KeyError: If no collection with that name exists in the library.
    """
    # zot.everything() pages through all results; a bare zot.collections()
    # call returns at most 100 items
    collections = zot.everything(zot.collections())
    key_by_name = {entry['data']['name']: entry['key'] for entry in collections}
    try:
        return key_by_name[col_name]
    except KeyError:
        raise KeyError(
            f'Provided collection name {col_name} does not '
            f'exist in this library')
def _find_doi_in_extra(item: dict) -> str:
"""Finds DOI in 'extra' field of `item` or returns an empty string.
Args:
item (dict): Zotero item.
Returns:
str: DOI
"""
doi_regex = r'10\.\d+/[-;()\w.]+'
if 'extra' in item['data'].keys():
doi_id = re.findall(doi_regex, item['data']['extra'])
if len(doi_id) > 0:
return doi_id[0]
else:
return ''
else:
return ''
def _find_doi_in_arxiv_url(item: dict) -> str:
"""Finds arXiv DOI in 'url' field of `item` or returns an empty string.
Args:
item (dict): Zotero item.
Returns:
str: DOI
"""
reg_arxiv_id = r'https*://arxiv.org/abs/(.*)'
if 'url' in item['data'].keys():
arxiv_id = re.findall(reg_arxiv_id, item['data']['url'])
if len(arxiv_id) > 0:
doi_prefix = '10.48550/arXiv.'
return [doi_prefix+x for x in arxiv_id][0]
else:
return ''
else:
return ''
def _get_parent_and_doi(items: list, on_no_dois: str = 'ignore') -> dict:
    """
    Extract parent keys and DOI for all `items` containing this information.

    Three sources are tried in order, each overriding the previous one for
    the same item: the 'DOI' field (journal articles), a DOI inside the
    'extra' field (bioRxiv/medRxiv reports, books), and an arXiv URL
    converted to its auto-assigned DOI.

    Args:
        items (list): List of Zotero items.
        on_no_dois (str): 'error' raises when no item yields a DOI;
            'ignore' (default) returns an empty mapping instead.

    Returns:
        dict: Dictionary with parent keys and DOI as corresponding values.

    Raises:
        KeyError: If no DOIs were found and ``on_no_dois == 'error'``.
    """
    parent_doi = {}
    for item in items:
        item_key = item['key']
        # plain DOI field (e.g. JournalArticles); the original used the
        # `dict.update(...) if doi else False` expression-statement idiom,
        # replaced here with explicit conditionals
        doi = item['data'].get('DOI', '')
        if doi:
            parent_doi[item_key] = doi
        # DOI embedded in the "Extra" field (overrides the DOI field)
        doi = _find_doi_in_extra(item)
        if doi:
            parent_doi[item_key] = doi
        # arXiv URL -> DOI, as described in
        # https://blog.arxiv.org/2022/02/17/new-arxiv-articles-are-
        # now-automatically-assigned-dois/
        doi = _find_doi_in_arxiv_url(item)
        if doi:
            parent_doi[item_key] = doi
    if len(parent_doi) == 0 and on_no_dois == 'error':
        raise KeyError(
            'This collection has no items with associated DOI names.')
    return parent_doi
def _get_attachment_keys(items: list) -> list:
"""Retrieves attachment keys of attachments in provided list of items.
Args:
items (list): List of Zotero items.
Returns:
list: List of attachment keys.
"""
attach = [x for x in items if x['data']['itemType'] == 'attachment']
if len(attach) == 0:
raise KeyError(
'No attachments exist in this collection')
else:
attach_keys = sorted(list(set([x['key'] for x in attach])))
return attach_keys
def _link_attach_and_doi(
items: list, attach_key: str, parent_doi: dict,
on_no_dois: str = 'ignore') -> str:
"""
Matches given `attach_key` in `items` to corresponding DOI name
linked via parent ID in `parent_doi` dictionary.
Args:
items (list): List of Zotero items.
attach_key (str): Key of attachment to be matched.
parent_doi (dict): Known parent ID and DOI matches.
Returns:
str: Matching DOI name
"""
attach_item = [x for x in items if x['key'] == attach_key]
parent_key = attach_item[0]['data']['parentItem']
if parent_key not in parent_doi and on_no_dois == 'error':
raise KeyError(
f'Attachment {attach_key} does not contain a matching DOI '
f'parent in this collection')
elif parent_key not in parent_doi and on_no_dois == 'ignore':
return ''
else:
return parent_doi[parent_key]
def _expand_dict(id_dict: dict, keys: list, value2link: str) -> dict:
"""
Creates new entries with key from `keys` and associated
`value2link` in existing dictionary `id_dict`.
Args:
id_dict (dict): Existing dictionary with some keys and values.
keys (list): List of keys to be added individually to `id_dict`.
value2link (str): Value to assign to each of the `keys`.
Returns:
dict: Dictionary expanded with new keys and associated value.
"""
for key in keys:
if key in id_dict and value2link not in id_dict[key]:
# attach to already scraped accession IDs
id_dict[key].append(value2link)
elif key not in id_dict:
id_dict[key] = [value2link]
return id_dict
def _find_special_id(txt: str, pattern: str, split_str: str) -> list:
"""Creates an accession ID from starting characters in `pattern` and
digits following `split_str` in `txt`.
Args:
txt (str): Text to search for ID
pattern (str): Pattern containing at the start the character prefix and
at the end the remaining digits of the accession ID
split_str (str): String separating the digit part of the ID
Returns:
list: List with accession ID.
"""
match = re.findall(f'({pattern})', txt)
ids = []
if len(match) != 0:
for match in match:
split_match = match.split(split_str)
prefix = re.findall("[A-Z]+", split_match[0])[0]
number = split_match[-1].strip()
ids += [prefix + number]
return ids
def _find_hyphen_sequence(
txt: str, pattern: str, after_hyphen: str) -> list:
"""Return all accession IDs from a hyphenated accession ID sequence, both
'SRX100006-7' (below `after_hyphen` option '\\d+') and
'SRX100006-SRX100007' (below `after_hyphen` option 'pattern')
yield 'SRX100006, SRX100007'.
Args:
txt (str): Text to scrape through.
pattern (str): Accession ID pattern to search for.
after_hyphen (str): Pattern given after the hyphen (supported
options include '\\d+' and pattern)
Returns:
list: List of accession IDs with hyphenated IDs included.
"""
# source of hyphens: https://stackoverflow.com/a/48923796 with \u00ad added
hyphens = r'[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\
\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D\
\u00AD]'
pattern_hyphen = pattern + r'\s*' + hyphens + r'\s*' + after_hyphen
ids = []
matches = re.findall(f'({pattern_hyphen})', txt)
if len(matches) > 0:
for match in matches:
split_match = re.split(hyphens, match)
if after_hyphen == r'\d+':
# 3.a) "SRX100006-7" > "SRX100006, SRX100007"
nb_digits = len(split_match[-1])
base = split_match[0][:-nb_digits]
start = split_match[0][-nb_digits:]
end = split_match[-1][-nb_digits:]
elif after_hyphen == pattern:
# 3.b) "SRX100006-SRX100007" > "SRX100006, SRX100007"
prefix_digit_split = re.split(r'(\d+)', split_match[0])
base = prefix_digit_split[0]
start = prefix_digit_split[1]
nb_digits = len(start)
end = re.split(r'(\d+)', split_match[-1])[1]
for i in range(int(start), int(end) + 1):
filling_zeros = nb_digits - len(str(i))
ids += [base + filling_zeros * str(0) + str(i)]
return ids
def _find_accession_ids(txt: str, id_type: str) -> list:
    """Return a list of accession IDs of type `id_type` found in `txt`.

    Searches for these accession-ID patterns (all also supported by
    other q2fondue actions):
        BioProject ID: PRJ(E|D|N)[A-Z][0-9]+
        Study ID: (E|D|S)RP[0-9]{6,}
        Run ID: (E|D|S)RR[0-9]{6,}
        Experiment ID: (E|D|S)RX[0-9]{6,}
        Sample ID: (E|D|S)RS[0-9]{6,}

    Args:
        txt (str): Some text to search.
        id_type (str): Type of ID to search for: 'run', 'study',
            'bioproject', 'experiment' or 'sample'.

    Returns:
        list: Deduplicated list of matching IDs (order not guaranteed,
        since the result goes through a set).
    """
    # DEFAULT: Find plain accession ID: PREFIX12345 or PREFIX 12345
    patterns = {
        'run': r'[EDS]RR\s?\d+', 'study': r'[EDS]RP\s?\d+',
        'bioproject': r'PRJ[EDN][A-Z]\s?\d+',
        'experiment': r'[EDS]RX\s?\d+',
        'sample': r'[EDS]RS\s?\d+',
    }
    pattern = patterns[id_type]
    ids = re.findall(f'({pattern})', txt)
    # remove potential whitespace between prefix and digits
    ids = [x.replace(' ', '') for x in ids]
    # SPECIAL case 1: get IDs after comma:
    # "PREFIX12345, 56789" yields "PREFIX56789".
    # Probe ever longer comma chains (up to 10) until one stops matching;
    # the loop deliberately leaks `pattern_comma` (longest chain tried)
    # for reuse in the "and" case below.
    for nb_comma in range(1, 11):
        pattern_comma = pattern + nb_comma * r',\s\d+'
        ids_match = _find_special_id(txt, pattern_comma, ',')
        if len(ids_match) == 0:
            # NOTE(review): this rollback uses r',\s\d*' while the probe
            # above used r',\s\d+' — confirm the '*' is intentional.
            pattern_comma = pattern + (nb_comma - 1) * r',\s\d*'
            break
        else:
            ids += ids_match
    # SPECIAL case 2: get IDs after and:
    # "PREFIX12345, 56789 and 67899" yields "PREFIX67899"
    pattern_and = pattern_comma + r'\sand\s\d+'
    ids += _find_special_id(txt, pattern_and, 'and')
    # SPECIAL case 3: hyphenated sequence of IDs
    # "SRX100006-7" and "SRX100006-SRX100007" both yield "SRX100006, SRX100007"
    for after_hyphen_pattern in [r'\d+', pattern]:
        ids += _find_hyphen_sequence(txt, pattern, after_hyphen_pattern)
    return list(set(ids))
def scrape_collection(
collection_name: str, on_no_dois: str = 'ignore', log_level: str = 'INFO'
) -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame):
"""
Scrapes Zotero collection for accession IDs (run, study, BioProject,
experiment and sample) and associated DOI names.
Args:
collection_name (str): Name of the collection to be scraped.
on_no_dois (str): Behavior if no DOIs were found.
log_level (str, default='INFO'): Logging level.
Returns:
(pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame):
Dataframes with run, study, BioProject, experiment and sample IDs and
associated DOI names scraped from Zotero collection.
"""
logger.setLevel(log_level.upper())
dotenv.load_dotenv()
logger.info(
f'Scraping accession IDs for collection "{collection_name}"...'
)
# initialise Zotero instance
zot = zotero.Zotero(
os.getenv('ZOTERO_USERID'),
os.getenv('ZOTERO_TYPE'),
os.getenv('ZOTERO_APIKEY'))
# get collection id
coll_id | |
<gh_stars>0
"""
This module constrains instructions for binary circuits. Unlike
arithmetic instructions, they generally do not use the vector size in
the instruction code field. Instead the number of bits affected is
given as an extra argument. Also note that a register holds 64 values
instead of just one as is the case for arithmetic
instructions. Therefore, an instruction for 65-128 bits will affect
two registers etc. Similarly, a memory cell holds 64 bits.
"""
import Compiler.instructions_base as base
import Compiler.instructions as spdz
import Compiler.tools as tools
import collections
import itertools
class SecretBitsAF(base.RegisterArgFormat):
    """Argument format for secret bit registers ('sb')."""
    reg_type = 'sb'
class ClearBitsAF(base.RegisterArgFormat):
    """Argument format for clear bit registers ('cb')."""
    reg_type = 'cb'
# Register argument formats used by the arg_format lists below; the 'w'
# suffix marks a register the instruction writes to (results), the plain
# form a register it reads (operands).
base.ArgFormats['sb'] = SecretBitsAF
base.ArgFormats['sbw'] = SecretBitsAF
base.ArgFormats['cb'] = ClearBitsAF
base.ArgFormats['cbw'] = ClearBitsAF
# Opcode numbers for binary-circuit instructions. Based on the classes
# below: 0x20x covers secret-bit logic and memory, 0x24x secret-bit
# memory/input variants, 0x21x clear-bit operations, 0x22x printing,
# and 0x23x conversions to clear integer registers.
opcodes = dict(
    XORS = 0x200,
    XORM = 0x201,
    ANDRS = 0x202,
    BITDECS = 0x203,
    BITCOMS = 0x204,
    CONVSINT = 0x205,
    LDMSDI = 0x206,
    STMSDI = 0x207,
    LDMSD = 0x208,
    STMSD = 0x209,
    LDBITS = 0x20a,
    ANDS = 0x20b,
    TRANS = 0x20c,
    BITB = 0x20d,
    ANDM = 0x20e,
    NOTS = 0x20f,
    LDMSB = 0x240,
    STMSB = 0x241,
    LDMSBI = 0x242,
    STMSBI = 0x243,
    MOVSB = 0x244,
    INPUTB = 0x246,
    INPUTBVEC = 0x247,
    SPLIT = 0x248,
    CONVCBIT2S = 0x249,
    XORCBI = 0x210,
    BITDECC = 0x211,
    CONVCINT = 0x213,
    REVEAL = 0x214,
    STMSDCI = 0x215,
    LDMCB = 0x217,
    STMCB = 0x218,
    XORCB = 0x219,
    ADDCB = 0x21a,
    ADDCBI = 0x21b,
    MULCBI = 0x21c,
    SHRCBI = 0x21d,
    SHLCBI = 0x21e,
    CONVCINTVEC = 0x21f,
    PRINTREGSIGNED = 0x220,
    PRINTREGB = 0x221,
    PRINTREGPLAINB = 0x222,
    PRINTFLOATPLAINB = 0x223,
    CONDPRINTSTRB = 0x224,
    CONVCBIT = 0x230,
    CONVCBITVEC = 0x231,
)
class BinaryVectorInstruction(base.Instruction):
    """Base class for bit instructions that operate on register vectors;
    the number of bits is carried as an instruction argument."""
    is_vec = lambda self: True

    def copy(self, size, subs):
        # Rebuild the same instruction type with substituted arguments.
        return type(self)(*self.get_new_args(size, subs))
class NonVectorInstruction(base.Instruction):
    """Base class for bit instructions whose first argument must fit in a
    single register unit (a register holds 64 bits; see module docstring)."""
    is_vec = lambda self: False

    def __init__(self, *args, **kwargs):
        # Enforce single-register operands on the result argument.
        assert(args[0].n <= args[0].unit)
        super(NonVectorInstruction, self).__init__(*args, **kwargs)
class NonVectorInstruction1(base.Instruction):
    """Like NonVectorInstruction, but the single-register constraint is
    checked on argument 1 instead of argument 0."""
    is_vec = lambda self: False

    def __init__(self, *args, **kwargs):
        assert(args[1].n <= args[1].unit)
        super(NonVectorInstruction1, self).__init__(*args, **kwargs)
class xors(BinaryVectorInstruction):
""" Bitwise XOR of secret bit register vectors.
:param: number of arguments to follow (multiple of four)
:param: number of bits (int)
:param: result (sbit)
:param: operand (sbit)
:param: operand (sbit)
:param: (repeat from number of bits)...
"""
code = opcodes['XORS']
arg_format = tools.cycle(['int','sbw','sb','sb'])
class xorm(NonVectorInstruction):
""" Bitwise XOR of single secret and clear bit registers.
:param: number of bits (less or equal 64)
:param: result (sbit)
:param: operand (sbit)
:param: operand (cbit)
"""
code = opcodes['XORM']
arg_format = ['int','sbw','sb','cb']
class xorcb(NonVectorInstruction):
""" Bitwise XOR of two single clear bit registers.
:param: result (cbit)
:param: operand (cbit)
:param: operand (cbit)
"""
code = opcodes['XORCB']
arg_format = ['cbw','cb','cb']
class xorcbi(NonVectorInstruction):
""" Bitwise XOR of single clear bit register and immediate.
:param: result (cbit)
:param: operand (cbit)
:param: immediate (int)
"""
code = opcodes['XORCBI']
arg_format = ['cbw','cb','int']
class andrs(BinaryVectorInstruction):
    """ Constant-vector AND of secret bit registers.

    :param: number of arguments to follow (multiple of four)
    :param: number of bits (int)
    :param: result vector (sbit)
    :param: vector operand (sbit)
    :param: single operand (sbit)
    :param: (repeat from number of bits)...
    """
    code = opcodes['ANDRS']
    arg_format = tools.cycle(['int','sbw','sb','sb'])

    def add_usage(self, req_node):
        # One bit triple is consumed per AND-ed bit; args[::4] are the
        # per-group bit counts at the start of each argument quadruple.
        req_node.increment(('bit', 'triple'), sum(self.args[::4]))
class ands(BinaryVectorInstruction):
    """ Bitwise AND of secret bit register vector.

    :param: number of arguments to follow (multiple of four)
    :param: number of bits (int)
    :param: result (sbit)
    :param: operand (sbit)
    :param: operand (sbit)
    :param: (repeat from number of bits)...
    """
    code = opcodes['ANDS']
    arg_format = tools.cycle(['int','sbw','sb','sb'])

    def add_usage(self, req_node):
        # One bit triple per AND-ed bit; args[::4] hold the bit counts.
        req_node.increment(('bit', 'triple'), sum(self.args[::4]))
class andm(BinaryVectorInstruction):
""" Bitwise AND of single secret and clear bit registers.
:param: number of bits (less or equal 64)
:param: result (sbit)
:param: operand (sbit)
:param: operand (cbit)
"""
code = opcodes['ANDM']
arg_format = ['int','sbw','sb','cb']
class nots(BinaryVectorInstruction):
""" Bitwise NOT of secret register vector.
:param: number of bits (less or equal 64)
:param: result (sbit)
:param: operand (sbit)
"""
code = opcodes['NOTS']
arg_format = ['int','sbw','sb']
class addcb(NonVectorInstruction):
    """ Integer addition of two single clear bit registers.

    :param: result (cbit)
    :param: summand (cbit)
    :param: summand (cbit)
    """
    code = opcodes['ADDCB']
    arg_format = ['cbw','cb','cb']
class addcbi(NonVectorInstruction):
""" Integer addition single clear bit register and immediate.
:param: result (cbit)
:param: summand (cbit)
:param: summand (int)
"""
code = opcodes['ADDCBI']
arg_format = ['cbw','cb','int']
class mulcbi(NonVectorInstruction):
""" Integer multiplication single clear bit register and immediate.
:param: result (cbit)
:param: factor (cbit)
:param: factor (int)
"""
code = opcodes['MULCBI']
arg_format = ['cbw','cb','int']
class bitdecs(NonVectorInstruction, base.VarArgsInstruction):
""" Secret bit register decomposition.
:param: number of arguments to follow / number of bits plus one (int)
:param: source (sbit)
:param: destination for least significant bit (sbit)
:param: (destination for one bit higher)...
"""
code = opcodes['BITDECS']
arg_format = tools.chain(['sb'], itertools.repeat('sbw'))
class bitcoms(NonVectorInstruction, base.VarArgsInstruction):
    """ Secret bit register composition (inverse of bitdecs: combines
    single bits into one register; the original docstring said
    "decomposition", which contradicts the argument order below).

    :param: number of arguments to follow / number of bits plus one (int)
    :param: destination (sbit)
    :param: source for least significant bit (sbit)
    :param: (source for one bit higher)...
    """
    code = opcodes['BITCOMS']
    arg_format = tools.chain(['sbw'], itertools.repeat('sb'))
class bitdecc(NonVectorInstruction, base.VarArgsInstruction):
    """ Clear bit register decomposition (the original docstring said
    "Secret", but all operands below are clear bit registers).

    :param: number of arguments to follow / number of bits plus one (int)
    :param: source (cbit)
    :param: destination for least significant bit (cbit)
    :param: (destination for one bit higher)...
    """
    code = opcodes['BITDECC']
    arg_format = tools.chain(['cb'], itertools.repeat('cbw'))
class shrcbi(NonVectorInstruction):
""" Right shift of clear bit register by immediate.
:param: destination (cbit)
:param: source (cbit)
:param: number of bits to shift (int)
"""
code = opcodes['SHRCBI']
arg_format = ['cbw','cb','int']
class shlcbi(NonVectorInstruction):
""" Left shift of clear bit register by immediate.
:param: destination (cbit)
:param: source (cbit)
:param: number of bits to shift (int)
"""
code = opcodes['SHLCBI']
arg_format = ['cbw','cb','int']
class ldbits(NonVectorInstruction):
""" Store immediate in secret bit register.
:param: destination (sbit)
:param: number of bits (int)
:param: immediate (int)
"""
code = opcodes['LDBITS']
arg_format = ['sbw','i','i']
class ldmsb(base.DirectMemoryInstruction, base.ReadMemoryInstruction,
base.VectorInstruction):
""" Copy secret bit memory cell with compile-time address to secret bit
register.
:param: destination (sbit)
:param: memory address (int)
"""
code = opcodes['LDMSB']
arg_format = ['sbw','int']
class stmsb(base.DirectMemoryWriteInstruction, base.VectorInstruction):
    """ Copy secret bit register to secret bit memory cell with compile-time
    address.

    :param: source (sbit)
    :param: memory address (int)
    """
    code = opcodes['STMSB']
    arg_format = ['sb','int']
class ldmcb(base.DirectMemoryInstruction, base.ReadMemoryInstruction,
base.VectorInstruction):
""" Copy clear bit memory cell with compile-time address to clear bit
register.
:param: destination (cbit)
:param: memory address (int)
"""
code = opcodes['LDMCB']
arg_format = ['cbw','int']
class stmcb(base.DirectMemoryWriteInstruction, base.VectorInstruction):
""" Copy clear bit register to clear bit memory cell with compile-time
address.
:param: source (cbit)
:param: memory address (int)
"""
code = opcodes['STMCB']
arg_format = ['cb','int']
class ldmsbi(base.ReadMemoryInstruction, base.VectorInstruction):
""" Copy secret bit memory cell with run-time address to secret bit
register.
:param: destination (sbit)
:param: memory address (regint)
"""
code = opcodes['LDMSBI']
arg_format = ['sbw','ci']
class stmsbi(base.WriteMemoryInstruction, base.VectorInstruction):
""" Copy secret bit register to secret bit memory cell with run-time
address.
:param: source (sbit)
:param: memory address (regint)
"""
code = opcodes['STMSBI']
arg_format = ['sb','ci']
class ldmsdi(base.ReadMemoryInstruction):
    """ Read from secret memory with run-time (clear bit) addressing.
    Arguments repeat in groups of (destination sbit, address cbit, int).
    NOTE(review): undocumented upstream — semantics inferred from the
    LDMSD family naming; confirm against the VM implementation.
    """
    code = opcodes['LDMSDI']
    arg_format = tools.cycle(['sbw','cb','int'])
class stmsdi(base.WriteMemoryInstruction):
    """ Write to secret memory with run-time (clear bit) addressing.
    Arguments repeat in pairs of (source sbit, address cbit).
    NOTE(review): undocumented upstream — confirm semantics in the VM.
    """
    code = opcodes['STMSDI']
    arg_format = tools.cycle(['sb','cb'])
class ldmsd(base.ReadMemoryInstruction):
    """ Read from secret memory with compile-time addressing.
    Arguments repeat in groups of (destination sbit, int, int).
    NOTE(review): undocumented upstream — confirm semantics in the VM.
    """
    code = opcodes['LDMSD']
    arg_format = tools.cycle(['sbw','int','int'])
class stmsd(base.WriteMemoryInstruction):
    """ Write to secret memory with compile-time addressing.
    Arguments repeat in pairs of (source sbit, address int).
    NOTE(review): undocumented upstream — confirm semantics in the VM.
    """
    code = opcodes['STMSD']
    arg_format = tools.cycle(['sb','int'])
class stmsdci(base.WriteMemoryInstruction):
    """ Write a clear bit register to memory with run-time (clear bit)
    addressing; arguments repeat in pairs of (source cbit, address cbit).
    NOTE(review): undocumented upstream — confirm semantics in the VM.
    """
    code = opcodes['STMSDCI']
    arg_format = tools.cycle(['cb','cb'])
class convsint(NonVectorInstruction1):
""" Copy clear integer register to secret bit register.
:param: number of bits (int)
:param: destination (sbit)
:param: source (regint)
"""
code = opcodes['CONVSINT']
arg_format = ['int','sbw','ci']
class convcint(NonVectorInstruction):
""" Copy clear integer register to clear bit register.
:param: number of bits (int)
:param: destination (cbit)
:param: source (regint)
"""
code = opcodes['CONVCINT']
arg_format = ['cbw','ci']
class convcbit(NonVectorInstruction1):
""" Copy clear bit register to clear integer register.
:param: destination (regint)
:param: source (cbit)
"""
code = opcodes['CONVCBIT']
arg_format = ['ciw','cb']
@base.vectorize
class convcintvec(base.Instruction):
""" Copy clear register vector by bit to clear bit register
vectors. This means that the first destination will hold the least
significant bits of all inputs etc.
:param: number of arguments to follow / number of bits plus one (int)
:param: source (cint)
:param: destination for least significant bits (sbit)
:param: (destination for bits one step higher)...
"""
code = opcodes['CONVCINTVEC']
arg_format = tools.chain(['c'], tools.cycle(['cbw']))
class convcbitvec(BinaryVectorInstruction):
""" Copy clear bit register vector to clear register by bit. This means
that every element of the destination register vector will hold one bit.
:param: number of | |
<reponame>dev-gmmahs/block-vote-service<gh_stars>1-10
# flask
# pymysql
# flask_jwt_extended
from flask import Flask, request, render_template, send_from_directory, jsonify, redirect
from database import database_manager
from flask_jwt_extended import JWTManager, jwt_required, create_access_token, get_jwt_identity
import datetime
import hashlib
import random
import string
import base64
import datetime
import multiprocessing
import urllib.parse
import os
app = Flask(__name__)
# NOTE(review): the JWT signing key is the MD5 of the hard-coded string
# "sample" — anyone reading this source can recompute it and forge
# tokens. Load a real secret from the environment/config before deploy.
md5 = hashlib.md5()
md5.update("sample".encode())
app.config["JWT_SECRET_KEY"] = md5.hexdigest()
jwt = JWTManager(app)
# Set at startup elsewhere: database handle and daily log file name.
db = None
log_file_name = ""
# Logging helper
def log(msg, ip=None):
    """Print a timestamped log line and append it to ./log/<log_file_name>.

    Args:
        msg: Message to record.
        ip: Client IP to prefix; defaults to the placeholder "00.00.00.00".
    """
    if not ip:
        ip = "00.00.00.00"
    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    log_message = "{} - - [{}] {}".format(ip, date, msg)
    # Persist the log line; file errors are printed but otherwise ignored
    # so logging can never break request handling.
    try:
        with open("./log/" + log_file_name, "a", encoding="utf-8") as f:
            f.write(log_message + "\n")
    except Exception as e:
        print(e)
    print(log_message)
# Token verification
def accessCheck(sid, token):
    """Return True if (sid, token) matches a stored UserLogin session row.

    Args:
        sid: User's unique ID (UserIDSeq) from the JWT identity.
        token: Raw JWT string from the Authorization header.

    Returns:
        bool: True when a matching session exists, False otherwise
        (including on any database error).
    """
    try:
        result = db.execute("""
            SELECT UserIDSeq
            FROM UserLogin
            WHERE UserIDSeq = %s
            AND Token = %s
            """, (sid, token))
        if result:
            return True
        else:
            log("인증에 실패하였습니다")
            return False
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; a DB failure is still treated as auth failure.
    except Exception:
        return False
# Generate a random string of the given length
def randString(length):
    """Return `length` random characters from [A-Za-z0-9], drawn from the
    OS CSPRNG (random.SystemRandom)."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return "".join(rng.choice(alphabet) for _ in range(length))
@app.route("/", methods=["GET"])
def index():
return render_template("index.html")
"""
응답코드
0: 성공
2: 인증 실패
3: 해당 투표 없음
4: 투표 참여 가능 인원 초과
5: 참여자 명단에 없음
40: 투표 해시 불일치
50: 이미 참여한 유저
60: 투표 참여 기간 종료
61: 투표 참여 기간 아님
97: 투표 데이터 추가 실패
98: 투표 항목 없음
99: 알 수 없는 오류
"""
# 투표 제출
@app.route("/info/send/vote", methods=["POST"])
@jwt_required
def vote():
try:
req = request.get_json()
sid = get_jwt_identity()
if not accessCheck(sid, request.headers["Authorization"].split()[1]):
return jsonify({"success": False, "code": 2}), 401
vote_undecode = req["vote"]
vote = urllib.parse.unquote(base64.b64decode(req["vote"]).decode())
currentTime = req["currentTime"]
voteCode = req["voteCode"]
nonce = req["nonce"]
vote_info = db.execute("""
SELECT UniqueNumberSeq AS VoteID,
VotePermission,
VoteLimit,
VoteStart,
VoteEnd
FROM Vote_Information
WHERE UniqueNumberSeq = %s
""", (voteCode))
# 투표가 없는 경우
if len(vote_info) == 0:
log("투표를 찾을 수 없습니다")
return jsonify({"success": False, "code": 3}), 404
# 투표 참여기간 확인
now = datetime.datetime.now()
if (now <= vote_info[0]["VoteStart"]):
log("해당 투표 참여 기간이 아닙니다")
return jsonify({"success": False, "code": 61}), 401
if (now >= vote_info[0]["VoteEnd"]):
log("해당 투표 참여 기간이 끝났습니다")
return jsonify({"success": False, "code": 60}), 401
vote_limit = db.execute("""
SELECT COUNT(*) AS COUNT
FROM Vote_User
WHERE JoinAlready = 1
AND UniqueNumberSeq = %s
""", (voteCode))
# 참여 가능 인원이 초과된 경우
if vote_info[0]["VoteLimit"] != 0 and vote_info[0]["VoteLimit"] <= vote_limit[0]["COUNT"]:
log("해당 투표는 참여가능 인원이 가득 찼습니다")
return jsonify({"success": False, "code": 4}), 401
alreadyCheck = db.execute("""
SELECT UserIDSeq
FROM Vote_User
WHERE UserIDSeq = %s
AND UniqueNumberSeq = %s
AND JoinAlready = 1
""", (sid, voteCode))
# 투표 참여했는지 확인
if len(alreadyCheck) != 0:
log("User: {} 이미 참여한 유저입니다".format(sid))
return jsonify({"success": False, "code": 50}), 403
# 참여자 명단 / 오류로 인해 참여되지 않은 유저 조회
user_list = db.execute("""
SELECT UserIDSeq
FROM Vote_User
WHERE UserIDSeq = %s
AND UniqueNumberSeq = %s
AND JoinAlready = 0
""", (sid, voteCode))
# 해당 유저가 투표에 참여했는지 확인/참여자 명단에 있는지 확인
if vote_info[0]["VotePermission"] == 1:
if len(user_list) == 0:
log("User: {} 투표 참여자 명단에 존재하지 않습니다".format(sid))
return jsonify({"success": False, "code": 5}), 401
else:
if len(user_list) == 0:
db.update("""
INSERT INTO Vote_User
VALUES (%s, %s, 0)
""", (sid, voteCode))
log("User: {} 투표 참여자로 기록 됨".format(sid))
else:
log("User: {} 투표 참여기록이 존재합니다".format(sid))
hashingData = (str(vote_undecode) +\
str(currentTime) +\
str(voteCode) +\
str(nonce)).encode("utf-8")
checkHash = hashlib.sha256(hashingData).hexdigest()
log("원본 해시: " + req["hash"])
log("서버 해시: " + checkHash)
if checkHash != req["hash"]:
log("투표 데이터 해시가 일치하지 않습니다")
return jsonify({"success": False, "code": 40}), 403
# 이전 투표 데이터 해시
prev_data = db.execute("""
SELECT Hash AS hash
FROM Vote_Data
WHERE UniqueNumberSeq = %s
ORDER BY Vote_JoinDate DESC
LIMIT 1
""", (vote_info[0]["VoteID"]))
# 이전 블록이 없을 경우 해시는 빈 값으로 지정
if not prev_data:
log("Genesis block")
prev_hash = ""
else:
prev_hash = prev_data[0]["hash"]
intergrated = hashlib.sha256(str(checkHash + prev_hash).encode("utf-8")).hexdigest()
log("통합 해시: " + intergrated)
vote_item = db.execute("""
SELECT Vote_Item
FROM Vote_Item
WHERE UniqueNumberSeq = %s
""", (voteCode))[0]["Vote_Item"]
# 투표한 항목이 투표에 존재하는지 확인
if vote in vote_item.split(","):
inserted = db.update("""
INSERT INTO Vote_Data
(UniqueNumberSeq, UserIDSeq, Vote_JoinDate, Vote_Item, nonce, Hash, Prev_Hash, intergrated_hash)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
""", (voteCode, sid, currentTime, vote_undecode, nonce, req["hash"], prev_hash, intergrated))
log("User: {} 투표 데이터 추가".format(sid))
if inserted == 0:
log("User: {} 투표 데이터 추가 실패".format(sid))
db.update("""
DELETE FROM Vote_User
WHERE UserIDSeq = %s
AND UniqueNumberSeq = %s
""", (sid, voteCode))
return jsonify({"success": False, "code": 97}), 500
result = db.update("""
UPDATE Vote_User
SET JoinAlready = 1
WHERE UserIDSeq = %s
AND UniqueNumberSeq = %s
""", (sid, voteCode))
log("User: {} 투표 참여 완료 설정".format(sid))
if result and inserted:
log("User: {} 참여 완료, ({})".format(sid, vote))
return jsonify({"success": True, "code": 0}), 200
else:
log("User: {} 참여 실패".format(sid))
return jsonify({"success": False, "code": 99}), 404
else:
log("투표 항목이 존재하지 않습니다")
return jsonify({"success": False, "code": 98}), 404
except Exception as e:
log(e)
return jsonify({"success": False, "code": 99}), 500
# Registration route
@app.route("/info/login/regist", methods=["POST"])
def regist():
    """Register a new user.

    Generates a collision-free 20-character user ID (SID) and a
    16-character salt, rejects duplicate login IDs, and stores the
    password AES-encrypted in SQL with a key derived from the salt.

    Returns:
        JSON {"success": bool, "already": bool}; "already" is True when
        the requested login ID is already taken.
    """
    try:
        req = request.get_json()
        salt = randString(16)
        # Generate a SID that does not collide with an existing user
        while True:
            s_id = randString(20)
            r = db.execute("""
                SELECT COUNT(*) AS COUNT
                FROM UserTable
                WHERE UserIDSeq = %s
                """, (s_id))
            if r.pop()["COUNT"] == 0:
                break
        id_ = req["id"]
        password = req["password"]
        name = req["name"]
        sex = req["sex"]
        # Idiom fix: direct boolean instead of `True if ... else False`
        already = len(db.execute("""
            SELECT UserID
            FROM UserTable
            WHERE UserID = %s
            """, (id_))) > 0
        if already:
            log("이미 존재하는 ID 입니다")
            return jsonify({"success": False, "already": True}), 200
        result = db.update("""
            INSERT INTO UserTable
            VALUES (%s, %s, HEX(AES_ENCRYPT(%s, SHA2(%s, 256))),
            %s, %s, %s)
            """, (s_id, id_, password, salt, salt, name, sex))
        # BUG FIX: was `if result is 0:` — identity comparison against an
        # int literal is unreliable and a SyntaxWarning on Python >= 3.8.
        if result == 0:
            return jsonify({"success": False, "already": False}), 200
        log("유저 가입 됨")
        return jsonify({"success": True, "already": False})
    except Exception as e:
        log(e)
        return jsonify({"success": False, "already": False})
# Login route
@app.route("/login", methods=["POST"])
def login():
    """Authenticate a user and issue a one-hour JWT.

    The password check happens inside SQL by decrypting the stored AES
    value with a key derived from the per-user salt. On success any
    previous session row is replaced by a freshly issued token.
    """
    try:
        req = request.get_json()
        id_ = req["id"]
        password = req["password"]
        result = db.execute(
            """
            SELECT UserIDSeq
            FROM UserTable
            WHERE UserID = %s
            AND AES_DECRYPT(UNHEX(Userpw), SHA2(
            (SELECT Salt FROM UserTable
            WHERE UserID = %s),
            256)) = %s
            """, (id_, id_, password))
        if len(result) == 0:
            log("알 수 없는 유저입니다.")
            return jsonify({"msg": "아이디와 비밀번호를 확인해주세요"}), 404
        elif len(result) > 1:
            log("동일한 유저가 2명 이상 존재합니다. 관리자에게 문의하세요")
            return jsonify({"msg": "유저 데이터 에러, 관리자에게 문의하세요"}), 500
        # Remove any existing session for this user (single-session policy)
        db.update("""
            DELETE FROM UserLogin
            WHERE UserIDSeq = %s
            """, (result[0]["UserIDSeq"]))
        # Token lifetime: 1 hour
        expire = datetime.timedelta(hours=1)
        current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # Issue the token keyed by the user's unique ID
        access_token = create_access_token(identity=result[0]["UserIDSeq"], expires_delta=expire)
        db.update("""
            INSERT INTO UserLogin
            VALUES (%s, %s, %s)
            """, (result[0]["UserIDSeq"], access_token, current_time))
        log("유저 로그인: {}..".format(access_token[0:20]))
        return jsonify({"token": access_token}), 200
    except Exception as e:
        log(e)
        return jsonify({"msg": "알 수 없는 오류", "token": ""}), 500
@app.route("/logout", methods=["POST"])
@jwt_required
def logout():
try:
current_user = get_jwt_identity()
if db.update("""
DELETE FROM UserLogin
WHERE UserIDSeq = %s
""", (current_user)) == 0:
return jsonify(False), 500
log("유저 로그아웃")
return jsonify(True), 200
except Exception as e:
log(e)
return jsonify(False), 500
# Token access-check route
@app.route("/access", methods=["GET"])
@jwt_required
def access():
    """Verify that the caller's JWT matches a stored session row.

    Returns:
        JSON true with 200 when the session is valid, false with 401
        when it is not, false with 500 on unexpected errors.
    """
    try:
        sid = get_jwt_identity()
        if accessCheck(sid, request.headers["Authorization"].split()[1]):
            log("접근 가능한 유저입니다.")
            # Tell the client the token is valid
            return jsonify(True), 200
        else:
            return jsonify(False), 401
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        return jsonify(False), 500
# Expired-token handling: the JWT middleware calls this when a token's
# expiry has passed.
@jwt.expired_token_loader
def expired_token():
    """Return a 401 JSON response for expired JWT tokens."""
    log("토큰 만료 됨")
    return jsonify({"msg": "토큰 만료 됨"}), 401
"""
응답코드
0: 성공
2: 인증 실패
70: 투표 정보 추가 실패
99: 알 수 없는 오류
"""
# 투표 생성 라우팅
@app.route("/create/vote", methods=["POST"])
@jwt_required
def create():
try:
if not accessCheck(get_jwt_identity(), request.headers["Authorization"].split()[1]):
return jsonify({"code": 2}), 401
req = request.get_json()
# 중복되지않는 투표 고유 ID 생성
while True:
vote_id = randString(20)
count = db.execute("""
SELECT COUNT(*) AS COUNT
FROM Vote_Information
WHERE UniqueNumberSeq = %s
""", (vote_id))
if count.pop()["COUNT"] == 0:
break
# 중복되지않는 투표 참여 코드 생성
while True:
vote_code = randString(10)
count = db.execute("""
SELECT COUNT(*) AS COUNT
FROM Vote_Information
WHERE Vote_JoinCode = %s
""", (vote_code))
if count.pop()["COUNT"] == 0:
break
# 투표 명
namev = req["vote_name"]
# 투표 시작시간
startv = "%s %s:00:00" % (req["vote_start"], str(req["vote_start_time"]).rjust(2, "0"))
# 투표 종료시간
endv = "%s %s:00:00" % (req["vote_end"], str(req["vote_end_time"]).rjust(2, "0"))
# 투표 참여 권한 (0: 아무나, 1: 지정)
permissionv = req["vote_permission"]
# 투표 참여자ID 목록
targetv = req["vote_target"]
# 투표 참여 인원
limit = req["vote_limit"]
# 투표 항목
itemv = ",".join(req["vote_item"])
# 투표 생성
affected = | |
"""_unquote_to_bytes('abc%20def') -> b'abc def'."""
# Note: strings are encoded as UTF-8. This is only an issue if it contains
# unescaped non-ASCII characters, which URIs should not.
if not s:
# Is it a string-like object?
s.split
return b''
if isinstance(s, str):
s = s.encode('utf-8')
bits = s.split(b'%')
if len(bits) == 1:
return s
res = [bits[0]]
append = res.append
# Delay the initialization of the table to not waste memory
# if the function is never called
global _HEXTOBYTE
if _HEXTOBYTE is None:
_HEXTOBYTE = {(a + b).encode(): bytes([int(a + b, 16)])
for a in _HEXDIG for b in _HEXDIG}
for item in bits[1:]:
try:
c = chr(int(item[:2], 16)).encode('ascii')
if decodable is None or c in decodable:
append(_HEXTOBYTE[item[:2]])
append(item[2:])
#FIXME: We'll need to do our own surrogate pair decoding because:
#>>> '\ud800'.encode('utf-8') -> UnicodeEncodeError: 'utf-8' codec can't encode character '\ud800' in position 0: surrogates not allowed
else:
append(b'%')
append(item)
except (ValueError, KeyError):
append(b'%')
append(item)
return b''.join(res)
#>>> from amara3.iri import percent_decode
#>>> u0 = 'example://A/b/c/%7bfoo%7d'
#>>> u1 = percent_decode(u0)
#>>> u1
#'example://A/b/c/{foo}'
def percent_decode(s, encoding='utf-8', decodable=None, errors='replace'):
    """
    [*** Experimental API ***] Reverses the percent-encoding of the given
    string.

    Similar to urllib.parse.unquote()

    By default, all percent-encoded sequences are decoded, but if a byte
    string is given via the 'decodable' argument, only the sequences
    corresponding to those octets will be decoded.

    Percent-encoded sequences are converted to bytes, then converted back to
    string (Unicode) according to the given encoding.

    For example, by default, 'abc%E2%80%A2' will be converted to 'abc\u2022',
    because byte sequence E2 80 A2 represents character U+2022 in UTF-8.

    This function is intended for use on the portions of a URI that are
    delimited by reserved characters (see percent_encode), or on a value from
    data of media type application/x-www-form-urlencoded.

    >>> from amara3.iri import percent_decode
    >>> u0 = 'http://host/abc%E2%80%A2/x/y/z'
    >>> u1 = percent_decode(u0)
    >>> hex(ord(u1[15]))
    '0x2022'
    """
    # Most of this comes from urllib.parse.unquote().
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    # If given a string argument, does not decode
    # percent-encoded octets above %7F.
    if '%' not in s:
        # Fast path: nothing percent-encoded.
        return s
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    # _ASCII_PAT is a module-level regex defined elsewhere in this file;
    # presumably it splits s so that odd indices hold the runs to be
    # byte-decoded — confirm against its definition.
    bits = _ASCII_PAT.split(s)
    res = [bits[0]]
    append = res.append  # hoist the bound method out of the loop
    for i in range(1, len(bits), 2):
        append(_unquote_to_bytes(bits[i], decodable=decodable).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)
def absolutize(iri_ref, base_iri, limit_schemes=None):
"""
Resolves a IRI reference to absolute form, effecting the result of RFC
3986 section 5. The IRI reference is considered to be relative to the
given base IRI.
iri_ref - relative URI to be resolved into absolute form. If already
absolute, it will be returned as is.
base_iri - base IRI for resolving iri_ref. If '' or None iri_ref will be
    returned as is. base_iri should match the absolute-URI syntax rule of
RFC 3986, and its path component should not contain '.' or '..' segments
if the scheme is hierarchical. If these are violated you may get unexpected
results.
This function only conducts a minimal sanity check in order to determine
if relative resolution is possible: it raises a ValueError if the base
URI does not have a scheme component. While it is true that the base URI
is irrelevant if the URI reference has a scheme, an exception is raised
in order to signal that the given string does not even come close to
meeting the criteria to be usable as a base URI.
It is the caller's responsibility to make a determination of whether the
URI reference constitutes a "same-document reference", as defined in RFC
2396 or RFC 3986. As per the spec, dereferencing a same-document
reference "should not" involve retrieval of a new representation of the
referenced resource. Note that the two specs have different definitions
of same-document reference: RFC 2396 says it is *only* the cases where the
reference is the empty string, or \"#\" followed by a fragment; RFC 3986
requires making a comparison of the base URI to the absolute form of the
reference (as is returned by the spec), minus its fragment component,
if any.
This function is similar to urlparse.urljoin() and urllib.basejoin().
Those functions, however, are (as of Python 2.3) outdated, buggy, and/or
designed to produce results acceptable for use with other core Python
libraries, rather than being earnest implementations of the relevant
specs. Their problems are most noticeable in their handling of
same-document references and 'file:' URIs, both being situations that
come up far too often to consider the functions reliable enough for
general use.
"""
# Reasons to avoid using urllib.basejoin() and urlparse.urljoin():
# - Both are partial implementations of long-obsolete specs.
# - Both accept relative URLs as the base, which no spec allows.
# - urllib.basejoin() mishandles the '' and '..' references.
# - If the base URL uses a non-hierarchical or relative path,
# or if the URL scheme is unrecognized, the result is not
# always as expected (partly due to issues in RFC 1808).
# - If the authority component of a 'file' URI is empty,
# the authority component is removed altogether. If it was
# not present, an empty authority component is in the result.
# - '.' and '..' segments are not always collapsed as well as they
# should be (partly due to issues in RFC 1808).
# - Effective Python 2.4, urllib.basejoin() *is* urlparse.urljoin(),
# but urlparse.urljoin() is still based on RFC 1808.
# This procedure is based on the pseudocode in RFC 3986 sec. 5.2.
#
# ensure base URI is absolute
if not base_iri or is_absolute(iri_ref):
return iri_ref
if not base_iri or not is_absolute(base_iri):
raise ValueError("Invalid base URI: {base} cannot be used to resolve "
"reference {ref}; the base URI must be absolute, not "
"relative.".format(base=base_iri, ref=iri_ref))
if limit_schemes and get_scheme(base_iri) not in limit_schemes:
scheme = get_scheme(base_iri)
raise ValueError("The URI scheme {scheme} is not supported by resolver".format(scheme=scheme))
# shortcut for the simplest same-document reference cases
if iri_ref == '' or iri_ref[0] == '#':
return base_iri.split('#')[0] + iri_ref
# ensure a clean slate
tScheme = tAuth = tPath = tQuery = None
# parse the reference into its components
(rScheme, rAuth, rPath, rQuery, rFrag) = split_uri_ref(iri_ref)
# if the reference is absolute, eliminate '.' and '..' path segments
# and skip to the end
if rScheme is not None:
tScheme = rScheme
tAuth = rAuth
tPath = remove_dot_segments(rPath)
tQuery = rQuery
else:
# the base URI's scheme, and possibly more, will be inherited
(bScheme, bAuth, bPath, bQuery, bFrag) = split_uri_ref(base_iri)
# if the reference is a net-path, just eliminate '.' and '..' path
# segments; no other changes needed.
if rAuth is not None:
tAuth = rAuth
tPath = remove_dot_segments(rPath)
tQuery = rQuery
# if it's not a net-path, we need to inherit pieces of the base URI
else:
# use base URI's path if the reference's path is empty
if not rPath:
tPath = bPath
# use the reference's query, if any, or else the base URI's,
tQuery = rQuery is not None and rQuery or bQuery
# the reference's path is not empty
else:
# just use the reference's path if it's absolute
if rPath[0] == '/':
tPath = remove_dot_segments(rPath)
# merge the reference's relative path with the base URI's path
else:
if bAuth is not None and not bPath:
tPath = '/' + rPath
else:
tPath = bPath[:bPath.rfind('/')+1] + rPath
tPath = remove_dot_segments(tPath)
# use the reference's query
tQuery = rQuery
# since the reference isn't a net-path,
# use the authority from the base URI
tAuth = bAuth
# inherit the scheme from the base URI
tScheme = bScheme
# always use the reference's fragment (but no need to define another var)
#tFrag | |
<reponame>BPDanek/DeepLearningFlappyBird_stage
#!/usr/bin/env python
#*
# distinction between dqn (vanilla) and this:
# we want to formulate an addition to state, s, called "delta s", ds, which will cause the Q value of s+ds and a
# target action, a^t, to be higher than the q value for s+ds and any other action. The implication is that adding ds
# to our data will result in an action to be drawn. In cases where ds is not added/nothing is changed, we will
# behave normally.
# In order to retain the natural behaviour of a DQN controller, we will not be changing the weights or controller
# for that matter, we will instead minimize the loss between Q(s+ds, a^t) and Q(s+ds, a), where a^t is the target a,
# and a is any non-target a. The loss function will be (some) hinge loss: l(a,b) = max(b - (a + eps), 0), which
# will essentially enforce the condition: a >= b + eps
# An informal argument for why this may work rests on the fact that our controller learns how
# to behave well within a certain set of input states from the possible set of states it's been trained on,
# called (here) the game-possible pixel space. This is the set of frames the game can generate under any scenario
# within the game. The game-possible pixel space is small, relative to the pixel space, which is a space containing
# all possible combinations of pixel intensities that a screen can generate. Given that ds belongs in the pixel space,
# and s belongs in the game-possible pixel space, we can say that s+ds belongs in the pixel space, which the controller
# may not know how to handle, which, if abused properly may result in a simple adversarial attack.
from __future__ import print_function
import tensorflow as tf
import cv2
import sys
sys.path.append("game/")
import wrapped_flappy_bird as game
import random
import numpy as np
from collections import deque
# constants for adversarial optimization:
# action one-hot encodings:
# flap = [0, 1]
# noaction = [1, 0]
# one-hot action the attack tries to force (flap)
action_target = [0, 1]
LR = 0.01 # learning rate for optimizing ds
# number of time steps in which to calculate ds, first 10 frames of game
INTERVAL = 10
GAME = 'bird' # the name of the game being played for log files
ACTIONS = 2 # number of valid actions
GAMMA = 0.99 # decay rate of past observations
OBSERVE = 100000. # timesteps to observe before training
EXPLORE = 2000000. # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001 # final value of epsilon
INITIAL_EPSILON = 0.0001 # starting value of epsilon
REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH = 32 # size of minibatch
FRAME_PER_ACTION = 1 # choose a fresh action on every frame (1 = no frame skipping)
def weight_variable(shape):
    """Create a TF weight variable of the given shape, initialized from a
    truncated normal distribution with stddev 0.01."""
    initial = tf.truncated_normal(shape, stddev = 0.01)
    return tf.Variable(initial)
def bias_variable(shape):
    """Create a TF bias variable of the given shape, initialized to the
    constant 0.01 (slightly positive, as is common with ReLU units)."""
    initial = tf.constant(0.01, shape = shape)
    return tf.Variable(initial)
def conv2d(x, W, stride):
    """2-D convolution of x with kernel W, using the same stride in both
    spatial dimensions and SAME (zero) padding."""
    return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = "SAME")
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves each spatial
    dimension)."""
    return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")
def createNetwork():
    """Build the DQN graph: three conv layers plus two fully-connected
    layers mapping an 80x80x4 frame stack to per-action Q values.

    Returns a tuple (s, readout, h_fc1) where s is the input placeholder
    [None, 80, 80, 4], readout is the [None, ACTIONS] Q-value layer and
    h_fc1 is the 512-unit penultimate hidden layer.
    """
    # network weights
    W_conv1 = weight_variable([8, 8, 4, 32])
    b_conv1 = bias_variable([32])
    W_conv2 = weight_variable([4, 4, 32, 64])
    b_conv2 = bias_variable([64])
    W_conv3 = weight_variable([3, 3, 64, 64])
    b_conv3 = bias_variable([64])
    W_fc1 = weight_variable([1600, 512])
    b_fc1 = bias_variable([512])
    W_fc2 = weight_variable([512, ACTIONS])
    b_fc2 = bias_variable([ACTIONS])
    # input layer: stack of 4 preprocessed 80x80 grayscale frames
    s = tf.placeholder("float", [None, 80, 80, 4])
    # hidden layers: 8x8/stride-4 conv + 2x2 pool, then 4x4/stride-2 and
    # 3x3/stride-1 convs (their pooling layers are disabled below)
    h_conv1 = tf.nn.relu(conv2d(s, W_conv1, 4) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2, 2) + b_conv2)
    #h_pool2 = max_pool_2x2(h_conv2)
    h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 1) + b_conv3)
    #h_pool3 = max_pool_2x2(h_conv3)
    #h_pool3_flat = tf.reshape(h_pool3, [-1, 256])
    # flatten the final 5x5x64 feature map into a 1600-vector
    h_conv3_flat = tf.reshape(h_conv3, [-1, 1600])
    h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
    # readout layer: one linear Q value per action
    readout = tf.matmul(h_fc1, W_fc2) + b_fc2
    return s, readout, h_fc1
def trainNetwork(s, readout, h_fc1, delta_s, sess):
# define the cost function
a = tf.placeholder("float", [None, ACTIONS])
y = tf.placeholder("float", [None])
readout_action = tf.reduce_sum(tf.multiply(readout, a), reduction_indices=1)
cost = tf.reduce_mean(tf.square(y - readout_action))
train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)
# open up a game state to communicate with emulator
game_state = game.GameState()
# store the previous observations in replay memory
D = deque()
# printing
a_file = open("logs_" + GAME + "/readout.txt", 'w')
h_file = open("logs_" + GAME + "/hidden.txt", 'w')
# get the first state by doing nothing and preprocess the image to 80x80x4
do_nothing = np.zeros(ACTIONS)
do_nothing[0] = 1
x_t, r_0, terminal = game_state.frame_step(do_nothing)
x_t = cv2.cvtColor(cv2.resize(x_t, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, x_t = cv2.threshold(x_t,1,255,cv2.THRESH_BINARY)
s_t = np.stack((x_t, x_t, x_t, x_t), axis=2)
# saving and loading networks
saver = tf.train.Saver()
sess.run(tf.initialize_all_variables())
checkpoint = tf.train.get_checkpoint_state("saved_networks")
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print("Successfully loaded:", checkpoint.model_checkpoint_path)
else:
print("Could not find old network weights")
# start training
epsilon = INITIAL_EPSILON
t = 0
while "flappy bird" != "angry bird":
# choose an action epsilon greedily
readout_t = readout.eval(feed_dict={s : [s_t]})[0]
a_t = np.zeros([ACTIONS])
action_index = 0
if t % FRAME_PER_ACTION == 0:
if random.random() <= epsilon:
print("----------Random Action----------")
action_index = random.randrange(ACTIONS)
a_t[random.randrange(ACTIONS)] = 1
else:
action_index = np.argmax(readout_t)
a_t[action_index] = 1
else:
a_t[0] = 1 # do nothing
# scale down epsilon
if epsilon > FINAL_EPSILON and t > OBSERVE:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
# run the selected action and observe next state and reward
x_t1_colored, r_t, terminal = game_state.frame_step(a_t)
x_t1 = cv2.cvtColor(cv2.resize(x_t1_colored, (80, 80)), cv2.COLOR_BGR2GRAY)
ret, x_t1 = cv2.threshold(x_t1, 1, 255, cv2.THRESH_BINARY)
x_t1 = np.reshape(x_t1, (80, 80, 1))
#s_t1 = np.append(x_t1, s_t[:,:,1:], axis = 2)
s_t1 = np.append(x_t1, s_t[:, :, :3], axis=2)
# store the transition in D
D.append((s_t, a_t, r_t, s_t1, terminal))
if len(D) > REPLAY_MEMORY:
D.popleft()
# only train if done observing
if t > INTERVAL:
# sample a minibatch to optimize on
opt_batch = random.sample(list(D), INTERVAL)
# get the batch variables
s_opt_batch = [d[0] for d in opt_batch]
s_ds = np.ndarray((INTERVAL, 80, 80, 4), dtype=float) # forward init.
# a_batch = [d[1] for d in minibatch]
# r_batch = [d[2] for d in minibatch]
# s_j1_batch = [d[3] for d in minibatch]
# y_batch = []
# readout_j_batch = readout.eval(feed_dict = {s : s_j_batch})
# readout_j1_batch = readout.eval(feed_dict = {s : s_j1_batch})
# for i in range(0, len(minibatch)):
# terminal = minibatch[i][4]
# # if terminal, only equals reward
# if terminal:
# y_batch.append(r_batch[i])
# else:
# y_batch.append(r_batch[i] + GAMMA * np.max(readout_j1_batch[i]))
# IN ATTACK FORMULATION NOT NECESSARY, WE WANT TO RETAIN WEIGHTS, BUT TAKE THE PARAMS INTO ATTACK OPERATION
# perform gradient step
# train_step.run(feed_dict = {
# y : y_batch,
# a : a_batch,
# s : s_j_batch}
# )
# taking params into attack operation:
# intake batch of s_t, a_t, r_t, s_t1 resulted from a normal controller devised optimization, across batch by modulating ds,
# reduce the sum (expected) loss between Q(s+ds, a^t) and Q(s+ds, a), where a^t != a.
# subjects: batch of s_t, a_t, r_t, s_t1, size BATCH.
# Note: the training is complete, so in theory we shouldn't be using this as a SGD batch anymore.
#
#
# theory/idea:
# RL is less suceptible to a stationary attack since reproducing a setting is challenging. We can go through an
# interaction, record it, and then produce an optimization which will abuse that origional interaction, but when
# will that come in handy? In CV, usually repeated inputs are easy to produce, but in an RL setting there isn't
# opportunity for repeated input.
# if we find that one adversarial input transfers well to other similar images, maybe we can make a case here.
# input and noise which should result in target action to be drawn
s_ds = np.reshape(s_opt_batch,[INTERVAL, 80, 80, 4]) + delta_s
# Q values for both actions at state s + ds for entire batch
# you just need to feed into s
# ds generates automatically
# then you use s+ds as your new input
# talk after meeting have Q's
readout_s_ds = readout.eval(feed_dict={s : [s_opt_batch][0]})
# readout(s) = [Q(no flap), Q(flap)]
# a = readout[target_action]
a = tf.placeholder("float", INTERVAL) # readout_s_ds[1]
a = | |
on the PCI bus and a
SCSI bus (scsi.0) is created.
During hotplug we could query QEMU via info qtree HMP command but parsing
the result is too complicated. Instead we use the info stored in runtime
files. We parse NIC and disk entries and based on their hvinfo we reserve
the corresponding slots.
The runtime argument is a tuple as returned by _LoadKVMRuntime(). Obtain
disks and NICs from it. In case a runtime file is not available (see
_GenerateKVMRuntime()) we return the bus slots that QEMU boots with by
default.
"""
# This is by default and returned during _GenerateKVMRuntime()
bus_slots = {
_PCI_BUS: bitarray(self._DEFAULT_PCI_RESERVATIONS),
_SCSI_BUS: bitarray(self._DEFAULT_SCSI_RESERVATIONS),
}
# Adjust the empty slots depending of the corresponding hvparam
if hvp and constants.HV_KVM_PCI_RESERVATIONS in hvp:
res = hvp[constants.HV_KVM_PCI_RESERVATIONS]
pci = bitarray(constants.QEMU_PCI_SLOTS)
pci.setall(False) # pylint: disable=E1101
pci[0:res:1] = True
bus_slots[_PCI_BUS] = pci
# This is during hot-add
if runtime:
_, nics, _, disks = runtime
disks = [d for d, _, _ in disks]
for d in disks + nics:
if not d.hvinfo or "bus" not in d.hvinfo:
continue
bus = d.hvinfo["bus"]
slots = bus_slots[bus]
if bus == _PCI_BUS:
slot = d.hvinfo["addr"]
slots[int(slot, 16)] = True
elif bus == _SCSI_BUS:
slot = d.hvinfo["scsi-id"]
slots[slot] = True
return bus_slots
@_with_qmp
def _VerifyHotplugCommand(self, _instance, kvm_devid, should_exist):
"""Checks if a previous hotplug command has succeeded.
Depending on the should_exist value, verifies that an entry identified by
device ID is present or not.
@raise errors.HypervisorError: if result is not the expected one
"""
for i in range(5):
found = self.qmp.HasDevice(kvm_devid)
logging.info("Verifying hotplug command (retry %s): %s", i, found)
if found and should_exist:
break
if not found and not should_exist:
break
time.sleep(1)
if found and not should_exist:
msg = "Device %s should have been removed but is still there" % kvm_devid
raise errors.HypervisorError(msg)
if not found and should_exist:
msg = "Device %s should have been added but is missing" % kvm_devid
raise errors.HypervisorError(msg)
logging.info("Device %s has been correctly hot-plugged", kvm_devid)
  @_with_qmp
  def HotAddDevice(self, instance, dev_type, device, extra, seq):
    """Helper method to hot-add a new device.

    It generates the device ID and hvinfo, invokes the device-specific
    hot-add path (disk or NIC), verifies the result and finally records
    the new device in the instance's KVM runtime file.

    @param instance: instance the device is added to
    @param dev_type: one of the HOTPLUG_TARGET_* constants
    @param device: device object to add; its hvinfo is generated here
      unless already set (hot-mod case)
    @param extra: device-specific payload; for disks a 2-tuple forwarded
      to _GetDriveURI; also stored in the runtime entry
    @param seq: device index, used e.g. for the NIC runtime file name

    """
    kvm_devid = _GenerateDeviceKVMId(dev_type, device)
    runtime = self._LoadKVMRuntime(instance)
    # runtime[2] holds the hvparams the instance is currently running with
    up_hvp = runtime[2]
    device_type = _DEVICE_TYPE[dev_type](up_hvp)
    bus_state = self._GetBusSlots(up_hvp, runtime)
    # in case of hot-mod this is given
    if not device.hvinfo:
      device.hvinfo = _GenerateDeviceHVInfo(dev_type, kvm_devid,
                                            device_type, bus_state)
    if dev_type == constants.HOTPLUG_TARGET_DISK:
      uri = _GetDriveURI(device, extra[0], extra[1])
      disable_auto_ro = self.qmp.HasDynamicAutoReadOnly()
      def drive_add_fn(filename):
        """Helper function that uses HMP to hot-add a drive."""
        cmd = "drive_add dummy file=%s,if=none,id=%s,format=raw" % \
               (filename, kvm_devid)
        if disable_auto_ro:
          # This is necessary for the drive_add/device_add combination to work
          # after QEMU 4.0. auto-read-only first appeared in 3.1, but 4.0
          # changed its behavior in a way that breaks hotplugging. See #1547.
          cmd += ",auto-read-only=off"
        # When hot plugging a disk, parameters should match the current runtime.
        # I.e. for live migration, the cache mode is critical.
        cmd += self._GenerateDiskAioCacheParameters(
          up_hvp[constants.HV_KVM_DISK_AIO], up_hvp[constants.HV_DISK_CACHE],
          device_type)
        if up_hvp[constants.HV_DISK_DISCARD] != constants.HT_DISCARD_DEFAULT:
          cmd += ",discard=%s" % up_hvp[constants.HV_DISK_DISCARD]
        self._CallMonitorCommand(instance.name, cmd)
      # This must be done indirectly due to the fact that we pass the drive's
      # file descriptor via QMP first, then we add the corresponding drive that
      # refers to this fd. Note that if the QMP connection terminates before
      # a drive which keeps a reference to the fd passed via the add-fd QMP
      # command has been created, then the fd gets closed and cannot be used
      # later (e.g., via an drive_add HMP command).
      self.qmp.HotAddDisk(device, kvm_devid, uri, drive_add_fn)
    elif dev_type == constants.HOTPLUG_TARGET_NIC:
      kvmpath = instance.hvparams[constants.HV_KVM_PATH]
      is_chrooted = instance.hvparams[constants.HV_KVM_USE_CHROOT]
      kvmhelp = self._GetKVMOutput(kvmpath, self._KVMOPT_HELP)
      devlist = self._GetKVMOutput(kvmpath, self._KVMOPT_DEVICELIST)
      features, _, _ = self._GetNetworkDeviceFeatures(up_hvp, devlist, kvmhelp)
      (tap, tapfds, vhostfds) = OpenTap(features=features)
      self._ConfigureNIC(instance, seq, device, tap)
      self.qmp.HotAddNic(device, kvm_devid, tapfds, vhostfds, features,
                         is_chrooted)
      # remember the tap name so the NIC can be cleaned up on hot-del
      utils.WriteFile(self._InstanceNICFile(instance.name, seq), data=tap)
    self._VerifyHotplugCommand(instance, kvm_devid, True)
    # update relevant entries in runtime file
    index = _DEVICE_RUNTIME_INDEX[dev_type]
    entry = _RUNTIME_ENTRY[dev_type](device, extra)
    runtime[index].append(entry)
    self._SaveKVMRuntime(instance, runtime)
  @_with_qmp
  def HotDelDevice(self, instance, dev_type, device, _, seq):
    """Helper method for hot-del device.

    It gets device info from the runtime file, generates the device name,
    invokes the device-specific removal, verifies it and removes the
    device from the runtime file.

    @param seq: device index, used to locate the NIC runtime file
    @return: the removed device's hvinfo (reused by HotModDevice to re-add
      the device on the same bus/slot)

    """
    runtime = self._LoadKVMRuntime(instance)
    entry = _GetExistingDeviceInfo(dev_type, device, runtime)
    kvm_device = _RUNTIME_DEVICE[dev_type](entry)
    kvm_devid = _GenerateDeviceKVMId(dev_type, kvm_device)
    if dev_type == constants.HOTPLUG_TARGET_DISK:
      self.qmp.HotDelDisk(kvm_devid)
      # drive_del is not implemented yet in qmp
      command = "drive_del %s\n" % kvm_devid
      self._CallMonitorCommand(instance.name, command)
    elif dev_type == constants.HOTPLUG_TARGET_NIC:
      self.qmp.HotDelNic(kvm_devid)
      utils.RemoveFile(self._InstanceNICFile(instance.name, seq))
    self._VerifyHotplugCommand(instance, kvm_devid, False)
    index = _DEVICE_RUNTIME_INDEX[dev_type]
    runtime[index].remove(entry)
    self._SaveKVMRuntime(instance, runtime)
    return kvm_device.hvinfo
def HotModDevice(self, instance, dev_type, device, _, seq):
""" Helper method for hot-mod device
It gets device info from runtime file, generates the device name and
invokes the device-specific method. Currently only NICs support hot-mod
"""
if dev_type == constants.HOTPLUG_TARGET_NIC:
# putting it back in the same bus and slot
device.hvinfo = self.HotDelDevice(instance, dev_type, device, _, seq)
self.HotAddDevice(instance, dev_type, device, _, seq)
@classmethod
def _ParseKVMVersion(cls, text):
"""Parse the KVM version from the --help output.
@type text: string
@param text: output of kvm --help
@return: (version, v_maj, v_min, v_rev)
@raise errors.HypervisorError: when the KVM version cannot be retrieved
"""
match = cls._VERSION_RE.search(text.splitlines()[0])
if not match:
raise errors.HypervisorError("Unable to get KVM version")
v_all = match.group(0)
v_maj = int(match.group(1))
v_min = int(match.group(2))
if match.group(4):
v_rev = int(match.group(4))
else:
v_rev = 0
return (v_all, v_maj, v_min, v_rev)
  @classmethod
  def _GetKVMOutput(cls, kvm_path, option):
    """Return the output of a kvm invocation.

    @type kvm_path: string
    @param kvm_path: path to the kvm executable
    @type option: a key of _KVMOPTS_CMDS
    @param option: kvm option to fetch the output from
    @return: output of a supported kvm invocation
    @raise errors.HypervisorError: when the KVM help output cannot be retrieved

    """
    assert option in cls._KVMOPTS_CMDS, "Invalid output option"
    # each entry maps to (command-line arguments, whether a non-zero exit
    # status is acceptable for this query)
    optlist, can_fail = cls._KVMOPTS_CMDS[option]
    result = utils.RunCmd([kvm_path] + optlist)
    if result.failed and not can_fail:
      raise errors.HypervisorError("Unable to get KVM %s output" %
                                    " ".join(optlist))
    return result.output
  @classmethod
  def _GetKVMVersion(cls, kvm_path):
    """Return the installed KVM version.

    @type kvm_path: string
    @param kvm_path: path to the kvm executable
    @return: (version, v_maj, v_min, v_rev)
    @raise errors.HypervisorError: when the KVM version cannot be retrieved

    """
    return cls._ParseKVMVersion(cls._GetKVMOutput(kvm_path, cls._KVMOPT_HELP))
@classmethod
def _GetDefaultMachineVersion(cls, kvm_path):
"""Return the default hardware revision (e.g. pc-1.1)
"""
output = cls._GetKVMOutput(kvm_path, cls._KVMOPT_MLIST)
match = cls._DEFAULT_MACHINE_VERSION_RE.search(output)
if match:
return match.group(1)
else:
return "pc"
  @classmethod
  def _StopInstance(cls, instance, force=False, name=None, timeout=None):
    """Stop an instance.

    Either kills the process outright (force, or ACPI unavailable) or asks
    the guest to power down via the monitor's system_powerdown command.

    @param force: kill instead of a clean ACPI shutdown
    @param name: stop by name only; requires force since hvparams (and thus
      ACPI support) cannot be consulted
    @param timeout: optional timeout for the monitor command

    """
    # NOTE(review): force defaults to False, so "force is not None" is
    # always true unless a caller explicitly passes force=None -- confirm
    # this assert guards what was intended.
    assert(timeout is None or force is not None)
    if name is not None and not force:
      raise errors.HypervisorError("Cannot shutdown cleanly by name only")
    if name is None:
      name = instance.name
      acpi = instance.hvparams[constants.HV_ACPI]
    else:
      # name-only stop: no hvparams available, assume no ACPI
      acpi = False
    _, pid, alive = cls._InstancePidAlive(name)
    if pid > 0 and alive:
      if force or not acpi:
        utils.KillProcess(pid)
      else:
        cls._CallMonitorCommand(name, "system_powerdown", timeout)
    # NOTE(review): this reads instance.name even on the name-only path,
    # where instance may be None -- verify callers always pass an instance.
    cls._ClearUserShutdown(instance.name)
  def StopInstance(self, instance, force=False, retry=False, name=None,
                   timeout=None):
    """Stop an instance.

    Thin wrapper around _StopInstance; the retry argument is accepted for
    interface compatibility but not used here.

    NOTE(review): this returns None; RebootInstance tests its return value
    as a success flag -- confirm that is intended.

    """
    self._StopInstance(instance, force, name=name, timeout=timeout)
  def CleanupInstance(self, instance_name):
    """Cleanup after a stopped instance.

    Removes the instance's runtime files (pidfile etc.) and clears any
    recorded user-initiated shutdown state.

    @raise errors.HypervisorError: if the instance is still running

    """
    pidfile, pid, alive = self._InstancePidAlive(instance_name)
    if pid > 0 and alive:
      raise errors.HypervisorError("Cannot cleanup a live instance")
    self._RemoveInstanceRuntimeFiles(pidfile, instance_name)
    self._ClearUserShutdown(instance_name)
  def RebootInstance(self, instance):
    """Reboot an instance.

    Implemented as stop + start, since the control-socket reset does not
    work reliably (see comment below).

    @raise errors.HypervisorError: if the instance is not running

    """
    # For some reason if we do a 'send-key ctrl-alt-delete' to the control
    # socket the instance will stop, but now power up again. So we'll resort
    # to shutdown and restart.
    _, _, alive = self._InstancePidAlive(instance.name)
    if not alive:
      raise errors.HypervisorError("Failed to reboot instance %s:"
                                   " not running" % instance.name)
    # StopInstance will delete the saved KVM runtime so:
    # ...first load it...
    kvm_runtime = self._LoadKVMRuntime(instance)
    # ...now we can safely call StopInstance...
    # NOTE(review): StopInstance returns None, so "not self.StopInstance(...)"
    # is always true and the force-kill below always runs right after the
    # clean shutdown attempt -- confirm whether a graceful-only path was
    # intended here.
    if not self.StopInstance(instance):
      self.StopInstance(instance, force=True)
    # ...and finally we can save it again, and execute it...
    self._SaveKVMRuntime(instance, kvm_runtime)
    kvmpath = instance.hvparams[constants.HV_KVM_PATH]
    kvmhelp = self._GetKVMOutput(kvmpath, self._KVMOPT_HELP)
    self._ExecuteKVMRuntime(instance, kvm_runtime, kvmhelp)
  def MigrationInfo(self, instance):
    """Get instance information to perform a migration.

    @type instance: L{objects.Instance}
    @param instance: instance to be migrated
    @rtype: string
    @return: content of the KVM runtime file (raw, as stored on disk)

    """
    return self._ReadKVMRuntime(instance.name)
def AcceptInstance(self, instance, info, target):
"""Prepare to accept an instance.
@type instance: L{objects.Instance}
@param instance: instance to be accepted
@type info: string
@param info: content of the KVM | |
from phi.api import *
class TestExamples(object):
"""docstring for TestExamples."""
    def test_example_1(self):
        """Average word length computed four equivalent ways with the phi DSL."""
        text = "a bb ccc"
        avg_word_length = P.Pipe(
            text,
            Obj.split(" "), #['a', 'bb', 'ccc']
            P.map(len), #[1, 2, 3]
            list, # python 3 only
            P.sum() / len #6 / 3 == 2
        )
        assert 2 == avg_word_length
        text = "a bb ccc"
        avg_word_length = P.Pipe(
            text,
            Obj.split(" "), #['a', 'bb', 'ccc']
            P.map(len), #[1, 2, 3]
            list, # python 3 only
            Seq(sum) / len #6 / 3 == 2
        )
        assert 2 == avg_word_length
        text = "a bb ccc"
        avg_word_length = P.Pipe(
            text,
            Obj.split(" "), #['a', 'bb', 'ccc']
            P.map(len), #[1, 2, 3]
            list, # python 3 only
            P.sum() / P.len() #6 / 3 == 2
        )
        assert 2 == avg_word_length
        avg_word_length = P.Pipe(
            "1 22 333",
            Obj.split(' '), # ['1', '22', '333']
            P.map(len), # [1, 2, 3]
            list, # python 3 only
            List(
                sum # 1 + 2 + 3 == 6
            ,
                len # len([1, 2, 3]) == 3
            ),
            P[0] / P[1] # sum / len == 6 / 3 == 2
        )
        assert avg_word_length == 2
    def test_getting_started(self):
        """Walk through the getting-started examples: plain pipes, operator
        lambdas, List/Dict branching, Write/Read state and If branching."""
        from phi import P
        def add1(x): return x + 1
        def mul3(x): return x * 3
        x = P.Pipe(
            1.0, #input 1
            add1, #1 + 1 == 2
            mul3 #2 * 3 == 6
        )
        assert x == 6
        ################
        ################
        from phi import P
        x = P.Pipe(
            1.0, #input 1
            P + 1, #1 + 1 == 2
            P * 3 #2 * 3 == 6
        )
        assert x == 6
        ################
        ################
        from phi import P, List
        [x, y] = P.Pipe(
            1.0, #input 1
            List(
                P + 1 #1 + 1 == 2
            ,
                P * 3 #1 * 3 == 3
            )
        )
        assert x == 2
        assert y == 3
        ################
        ################
        from phi import P, List
        [x, y] = P.Pipe(
            1.0, #input 1
            P * 2, #1 * 2 == 2
            List(
                P + 1 #2 + 1 == 3
            ,
                P * 3 #2 * 3 == 6
            )
        )
        assert x == 3
        assert y == 6
        ################
        ################
        from phi import P, Rec
        result = P.Pipe(
            1.0, #input 1
            P * 2, #1 * 2 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            )
        )
        assert result.x == 3
        assert result.y == 6
        ################
        ################
        from phi import P, Rec
        result = P.Pipe(
            1.0, #input 1
            P * 2, #1 * 2 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            ),
            Rec.x / Rec.y #3 / 6 == 0.5
        )
        assert result == 0.5
        ################
        ################
        from phi import P, Rec, List, Write, Read
        [result, s] = P.Pipe(
            1.0, #input 1
            Write(s = P * 2), #s = 2 * 1 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            ),
            List(
                Rec.x / Rec.y #3 / 6 == 0.5
            ,
                Read('s') #load 's' == 2
            )
        )
        assert result == 0.5
        assert s == 2
        ################
        ################
        from phi import P, Rec, List, Write, Read
        [result, s] = P.Pipe(
            1.0, #input 1
            P * 2, Write('s'), #s = 2 * 1 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            ),
            List(
                Rec.x / Rec.y #3 / 6 == 0.5
            ,
                Read('s') #load 's' == 2
            )
        )
        assert result == 0.5
        assert s == 2
        ################
        ################
        from phi import P, Rec, Write, Read, List
        [result, s] = P.Pipe(
            1.0, #input 1
            Write(s = P * 2), #s = 2 * 1 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            ),
            List(
                Rec.x / Rec.y #3 / 6 == 0.5
            ,
                Read.s + 3 # 2 + 3 == 5
            )
        )
        assert result == 0.5
        assert s == 5
        ################
        ################
        from phi import P, Rec, Read, Write
        [result, s] = P.Pipe(
            1.0, #input 1
            Write(s = P * 2), #s = 2 * 1 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            ),
            List(
                Rec.x / Rec.y #3 / 6 == 0.5
            ,
                Read.s + 3 # 2 + 3 == 5
            )
        )
        assert result == 0.5
        assert s == 5
        ################
        ################
        from phi import P, Rec, Val
        [result, s, val] = P.Pipe(
            1.0, #input 1
            Write(s = P * 2), #s = 2 * 1 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            ),
            List(
                Rec.x / Rec.y #3 / 6 == 0.5
            ,
                Read.s + 3 # 2 + 3 == 5
            ,
                Val(9) + 1 #input 9 and add 1, gives 10
            )
        )
        assert result == 0.5
        assert s == 5
        assert val == 10
        #########################
        #########################
        from phi import P, Rec, Read, Write, Val, If
        [result, s, val] = P.Pipe(
            1.0, #input 1
            Write(s = (P + 3) / (P + 1)), #s = 4 / 2 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            ),
            List(
                Rec.x / Rec.y #3 / 6 == 0.5
            ,
                Read.s + 3 # 2 + 3 == 5
            ,
                If( Rec.y > 7,
                    Val(9) + 1 #input 9 and add 1, gives 10
                ).Elif( Rec.y < 4,
                    "Yes"
                ).Else(
                    "Sorry, come back latter."
                )
            )
        )
        assert result == 0.5
        assert s == 5
        assert val == "Sorry, come back latter."
        ######################################
        #######################################
        from phi import P, Rec, Read, Write, Val, If
        f = P.Seq(
            Write(s = (P + 3) / (P + 1)), #s = 4 / 2 == 2
            Dict(
                x = P + 1 #2 + 1 == 3
            ,
                y = P * 3 #2 * 3 == 6
            ),
            List(
                Rec.x / Rec.y #3 / 6 == 0.5
            ,
                Read.s + 3 # 2 + 3 == 5
            ,
                If( Rec.y > 7,
                    Val(9) + 1 #input 9 and add 1, gives 10
                ).Else(
                    "Sorry, come back latter."
                )
            )
        )
        [result, s, val] = f(1.0)
        assert result == 0.5
        assert s == 5
        assert val == "Sorry, come back latter."
    def test_builder_MakeRefContext(self):
        """A nested P.Seq shares the outer reference context, so writes made
        inside it remain visible after it finishes."""
        from phi import P
        assert 2 == P.Pipe(
            Write(s = 1), #s = 1
            P.Seq(
                Write(s = P + 1), #s = 2
            ),
            Read('s') # s == 2
        )
        ################################
        ################################
    def test_builder_NPipe(self):
        """A lambda-wrapped inner P.Pipe gets its own reference context, so
        writes inside it do not leak into the outer context."""
        from phi import P
        assert 1 == P.Pipe(
            Write(s = 1), # write s == 1, outer context
            lambda x: P.Pipe(
                x,
                Write(s = P + 1) # write s == 2, inner context
            ),
            Read('s') # read s == 1, outer context
        )
        #############################
        #############################
    def test_not(self):
        """Three equivalent spellings of logical negation with the phi DSL."""
        from phi import P
        assert True == P.Pipe(
            1,
            P + 1, # 1 + 1 == 2
            P > 5, # 2 > 5 == False
            P.Not() # not False == True
        )
        ################################
        ################################
        from phi import P
        assert True == P.Pipe(
            1,
            (P + 1 > 5).Not() # not 1 + 1 > 5 == not 2 > 5 == not False == True
        )
        ############################
        #############################
        from phi import P
        f = (P + 1 > 5).Not() #lambda x: not x + 1 > 5
        assert f(1) == True
def test_contains(self):
from phi import P
| |
<filename>src/pyrin/core/structs.py
# -*- coding: utf-8 -*-
"""
core structs module.
"""
from collections import deque
from threading import Lock
from abc import abstractmethod
from werkzeug.datastructures import MultiDict, ImmutableMultiDict, ImmutableDict, Headers
import pyrin.utils.misc as misc_utils
from pyrin.core.exceptions import CoreAttributeError, ContextAttributeError, \
CoreNotImplementedError, PackageClassIsNotSetError, CoreKeyError
class SingletonMetaBase(type):
    """
    singleton meta base class.
    this is a thread-safe implementation of singleton.
    subclasses decide where the single instance is stored by implementing
    the three abstract hooks below.
    """
    # guards instance creation; subclasses shadow this with their own lock
    # (cls._lock resolves to the nearest definition).
    _lock = Lock()
    def __call__(cls, *args, **kwargs):
        # double-checked locking: the first (unlocked) check keeps the hot
        # path cheap; the second check under the lock guarantees only one
        # thread ever creates and registers the instance.
        if cls._has_instance() is False:
            with cls._lock:
                if cls._has_instance() is False:
                    instance = super().__call__(*args, **kwargs)
                    cls._register_instance(instance)
        return cls._get_instance()
    @abstractmethod
    def _has_instance(cls):
        """
        gets a value indicating there is a registered instance.
        :raises CoreNotImplementedError: core not implemented error.
        :rtype: bool
        """
        raise CoreNotImplementedError()
    @abstractmethod
    def _register_instance(cls, instance):
        """
        registers the given instance.
        :param object instance: instance to be registered.
        :raises CoreNotImplementedError: core not implemented error.
        """
        raise CoreNotImplementedError()
    @abstractmethod
    def _get_instance(cls):
        """
        gets the registered instance.
        :raises CoreNotImplementedError: core not implemented error.
        :rtype: object
        """
        raise CoreNotImplementedError()
class UniqueSingletonMeta(SingletonMetaBase):
    """
    unique singleton meta class.

    this is a thread-safe implementation of singleton.
    this class only allows a single unique object for all descendant types.

    for example: {Base -> UniqueSingletonMeta, A -> Base, B -> A}
    if some_object = Base() then always Base() = A() = B() = some_object.
    or if some_object = A() then always A() = B() = some_object != Base().
    """

    # single instance shared by the whole hierarchy, guarded by its own lock.
    _instance = None
    _lock = Lock()

    def _get_instance(cls):
        """
        gets the registered instance.

        :rtype: object
        """
        return cls._instance

    def _has_instance(cls):
        """
        gets a value indicating that there is a registered instance.

        :rtype: bool
        """
        return cls._instance is not None

    def _register_instance(cls, instance):
        """
        registers the given instance as the hierarchy-wide singleton.

        :param object instance: instance to be registered.
        """
        cls._instance = instance
class MultiSingletonMeta(SingletonMetaBase):
    """
    multi singleton meta class.

    this is a thread-safe implementation of singleton.
    this class allows a unique object per each type of descendants.

    for example: {Base -> UniqueSingletonMeta, A -> Base, B -> A}
    if some_object = Base() then always Base() != A() != B() but always Base() = some_object.
    or if some_object = A() then always Base() != A() != B() but always A() = some_object.
    """

    # registry of one instance per concrete type, in the form of {type: instance}.
    _instances = {}
    _lock = Lock()

    def _get_instance(cls):
        """
        gets the registered instance.

        :rtype: object
        """
        return cls._instances.get(cls)

    def _has_instance(cls):
        """
        gets a value indicating that there is a registered instance.

        :rtype: bool
        """
        return cls in cls._instances

    def _register_instance(cls, instance):
        """
        registers the given instance for this exact type.

        :param object instance: instance to be registered.
        """
        cls._instances[cls] = instance
class DTO(dict):
    """
    context class for storing objects in every layer.

    it's actually a dictionary with the capability to treat
    keys as instance attributes.
    """

    def __getattr__(self, name):
        # missing keys surface as an attribute error, not a key error.
        if name not in self:
            raise CoreAttributeError('Property [{name}] not found.'.format(name=name))

        return self.get(name)

    def __getitem__(self, item):
        if item not in self:
            raise CoreKeyError('Key [{name}] not found.'.format(name=item))

        return self.get(item)

    def __setattr__(self, name, value):
        # attribute assignment writes straight into the dict.
        self[name] = value
class CoreObject(object):
    """
    core object class.

    this should be used as the base object for all application objects.
    """

    def __init__(self):
        """
        initializes an instance of CoreObject.
        """
        super().__init__()

        # optional explicit name; when None, the class name is used instead.
        self.__name = None

    def __setattr__(self, name, value):
        return self._setattr(name, value)

    def __repr__(self):
        """
        gets the string representation of current object.

        :rtype: str
        """
        return str(self)

    def __str__(self):
        """
        gets the string representation of current object.

        :rtype: str
        """
        return self.get_fully_qualified_name()

    def get_name(self):
        """
        gets the name of the object.

        if no explicit name has been set, returns its class name.

        :rtype: str
        """
        return self.__name if self.__name is not None else self.get_class_name()

    def _set_name(self, name):
        """
        sets new name to current object.

        :param str name: object new name.
        """
        self.__name = name

    def get_class_name(self):
        """
        gets the object's class name.

        :rtype: str
        """
        return type(self).__name__

    def get_module_name(self):
        """
        gets the object's module name.

        :rtype: str
        """
        return type(self).__module__

    def get_doc(self):
        """
        gets the docstring of the object.

        :rtype: str
        """
        return self.__doc__

    def get_fully_qualified_name(self):
        """
        gets fully qualified name of this object.

        it gets `module_name.class_name` as fully qualified name.

        :rtype: str
        """
        return '{}.{}'.format(self.__module__, type(self).__name__)

    def _setattr(self, name, value):
        """
        sets the given value to specified attribute.

        :param str name: attribute name.
        :param object value: attribute value.
        """
        return super().__setattr__(name, value)
class Context(DTO):
    """
    context class for storing objects in every layer.

    it's actually a dictionary with the capability to add keys directly.
    """

    # error type and message template raised on missing keys;
    # subclasses may override either of them.
    attribute_error = ContextAttributeError
    attribute_error_message = 'Property [{name}] not found.'

    def __getattr__(self, name):
        if name not in self:
            self._raise_key_error(name)

        return self.get(name)

    def __getitem__(self, item):
        if item not in self:
            self._raise_key_error(item)

        return self.get(item)

    def _raise_key_error(self, key):
        """
        raises the configured error for the given missing key.

        :param object key: key object that caused the error.

        :raises ContextAttributeError: context attribute error.
        """
        raise self.attribute_error(self.attribute_error_message.format(name=key))
class HookSingletonMeta(MultiSingletonMeta):
    """
    hook singleton meta class.

    this is a thread-safe implementation of singleton.
    """

    # dedicated registry and lock so hook singletons are tracked
    # independently of other singleton hierarchies.
    _instances = {}
    _lock = Lock()
class Hook(CoreObject, metaclass=HookSingletonMeta):
    """
    base hook class.

    all application hook classes must be subclassed from this one.
    """
    # no behavior of its own; subclasses add the actual hook methods.
    pass
class ManagerSingletonMeta(MultiSingletonMeta):
    """
    manager singleton meta class.

    this is a thread-safe implementation of singleton.
    """

    # dedicated registry and lock so manager singletons are tracked
    # independently of other singleton hierarchies.
    _instances = {}
    _lock = Lock()
class Manager(CoreObject, metaclass=ManagerSingletonMeta):
    """
    base manager class.

    all application manager classes must be subclassed from this one.
    """

    # this attribute should be set with the package class of current manager.
    # this is useful if you want to extend pyrin packages in your application
    # and let pyrin use your custom package's package class in its code.
    package_class = None

    def get_package_class(self):
        """
        gets the package class of current manager.

        this method is useful if you want to access the correct package
        class of a manager using services module of that package.
        each package that needs to expose this method, could implement
        a service method and return the result of this method.

        :raises PackageClassIsNotSetError: package class is not set error.

        :returns: type[Package]
        """
        if self.package_class is not None:
            return self.package_class

        raise PackageClassIsNotSetError('Package class for current manager '
                                        '[{manager}] is not set. you must set '
                                        '"package_class" attribute for this manager '
                                        'to be able to use this method.'
                                        .format(manager=self))
class CLISingletonMeta(MultiSingletonMeta):
    """
    cli singleton meta class.

    this is a thread-safe implementation of singleton.
    """

    # dedicated registry and lock so cli singletons are tracked
    # independently of other singleton hierarchies.
    _instances = {}
    _lock = Lock()
class CLI(CoreObject, metaclass=CLISingletonMeta):
    """
    base cli class.

    all application cli classes must be subclassed from this one.
    """

    # this value must be set in each subclass with the relevant callable
    # execute service with the param signature of: `(str handler_name, **inputs)`
    _execute_service = None

    @classmethod
    def execute(cls, handler_name, **options):
        """
        executes the handler with the given name with given inputs.

        :param str handler_name: handler name to be executed.

        :keyword options: inputs forwarded to the configured execute service.

        :raises CLIHandlerNotFoundError: cli handler not found error.

        :rtype: int
        """
        # `_execute_service` is looked up through `cls`, so each subclass
        # may provide its own service callable.
        return cls._execute_service(handler_name, **options)
class Stack(deque):
    """
    stack class.

    this class extends `deque` and provides a `peek()` method to
    just get the top most item without removing it.
    it also provides some other useful methods for convenient of usage.

    note that `Stack` is not guaranteed to be thread-safe on all python
    implementations, because it extends `deque`. so do not use it when
    there is a multi-thread access to the same stack.
    """

    def peek(self):
        """
        gets the top most item of stack, without removing it.

        the return value of `peek()` is the same as `pop()`
        but the item stays on the stack.

        :raises IndexError: index error (when the stack is empty).

        :rtype: object
        """
        return self[-1]

    def peek_all(self):
        """
        gets all items of stack, without removing them.

        items are returned in `pop()` order: the most recently
        pushed item comes first.

        :rtype: list[object]
        """
        items = list(self)
        items.reverse()
        return items

    def push(self, value):
        """
        adds the given value into top of stack.

        convenience alias for `append()`.

        :param object value: value to be added into stack.
        """
        self.append(value)

    def dispose(self):
        """
        deletes the top most item of stack, without returning it.

        :raises IndexError: index error (when the stack is empty).
        """
        # `pop()` removes the same element `del self[-1]` would;
        # the returned value is simply discarded.
        self.pop()
class CoreMultiDict(MultiDict):
"""
core multi dict | |
170:
return '~'
if table2Version == 174 and indicatorOfParameter == 168:
return '~'
if table2Version == 174 and indicatorOfParameter == 167:
return '~'
if table2Version == 174 and indicatorOfParameter == 164:
return '~'
if table2Version == 174 and indicatorOfParameter == 139:
return '~'
if table2Version == 174 and indicatorOfParameter == 111:
return '~'
if table2Version == 174 and indicatorOfParameter == 110:
return '~'
if table2Version == 174 and indicatorOfParameter == 99:
return '~'
if table2Version == 174 and indicatorOfParameter == 98:
return 'sithick'
if table2Version == 174 and indicatorOfParameter == 95:
return '~'
if table2Version == 174 and indicatorOfParameter == 94:
return '~'
if table2Version == 174 and indicatorOfParameter == 90:
return '~'
if table2Version == 174 and indicatorOfParameter == 89:
return '~'
if table2Version == 174 and indicatorOfParameter == 88:
return '~'
if table2Version == 174 and indicatorOfParameter == 87:
return '~'
if table2Version == 174 and indicatorOfParameter == 86:
return '~'
if table2Version == 174 and indicatorOfParameter == 85:
return '~'
if table2Version == 174 and indicatorOfParameter == 83:
return '~'
if table2Version == 174 and indicatorOfParameter == 55:
return '~'
if table2Version == 174 and indicatorOfParameter == 49:
return '~'
if table2Version == 174 and indicatorOfParameter == 42:
return '~'
if table2Version == 174 and indicatorOfParameter == 41:
return '~'
if table2Version == 174 and indicatorOfParameter == 40:
return '~'
if table2Version == 174 and indicatorOfParameter == 39:
return '~'
if table2Version == 174 and indicatorOfParameter == 34:
return '~'
if table2Version == 174 and indicatorOfParameter == 31:
return '~'
if table2Version == 174 and indicatorOfParameter == 9:
return 'ssro'
if table2Version == 174 and indicatorOfParameter == 8:
return 'sro'
if table2Version == 174 and indicatorOfParameter == 6:
return '~'
if table2Version == 173 and indicatorOfParameter == 255:
return '~'
if table2Version == 173 and indicatorOfParameter == 240:
return '~'
if table2Version == 173 and indicatorOfParameter == 239:
return '~'
if table2Version == 173 and indicatorOfParameter == 228:
return 'tpara'
if table2Version == 173 and indicatorOfParameter == 212:
return 'soiara'
if table2Version == 173 and indicatorOfParameter == 211:
return '~'
if table2Version == 173 and indicatorOfParameter == 210:
return '~'
if table2Version == 173 and indicatorOfParameter == 209:
return '~'
if table2Version == 173 and indicatorOfParameter == 208:
return '~'
if table2Version == 173 and indicatorOfParameter == 205:
return 'roara'
if table2Version == 173 and indicatorOfParameter == 197:
return '~'
if table2Version == 173 and indicatorOfParameter == 196:
return '~'
if table2Version == 173 and indicatorOfParameter == 195:
return '~'
if table2Version == 173 and indicatorOfParameter == 189:
return 'sundara'
if table2Version == 173 and indicatorOfParameter == 182:
return 'evara'
if table2Version == 173 and indicatorOfParameter == 181:
return 'nsssara'
if table2Version == 173 and indicatorOfParameter == 180:
return 'ewssara'
if table2Version == 173 and indicatorOfParameter == 179:
return 'ttrara'
if table2Version == 173 and indicatorOfParameter == 178:
return 'tsrara'
if table2Version == 173 and indicatorOfParameter == 177:
return 'strara'
if table2Version == 173 and indicatorOfParameter == 176:
return 'ssrara'
if table2Version == 173 and indicatorOfParameter == 175:
return 'strdara'
if table2Version == 173 and indicatorOfParameter == 169:
return 'ssrdara'
if table2Version == 173 and indicatorOfParameter == 154:
return '~'
if table2Version == 173 and indicatorOfParameter == 153:
return '~'
if table2Version == 173 and indicatorOfParameter == 149:
return '~'
if table2Version == 173 and indicatorOfParameter == 147:
return 'slhfara'
if table2Version == 173 and indicatorOfParameter == 146:
return 'sshfara'
if table2Version == 173 and indicatorOfParameter == 145:
return '~'
if table2Version == 173 and indicatorOfParameter == 144:
return 'sfara'
if table2Version == 173 and indicatorOfParameter == 143:
return 'mcpra'
if table2Version == 173 and indicatorOfParameter == 142:
return 'lspara'
if table2Version == 173 and indicatorOfParameter == 50:
return '~'
if table2Version == 173 and indicatorOfParameter == 48:
return '~'
if table2Version == 173 and indicatorOfParameter == 45:
return '~'
if table2Version == 173 and indicatorOfParameter == 44:
return '~'
if table2Version == 172 and indicatorOfParameter == 255:
return '~'
if table2Version == 172 and indicatorOfParameter == 240:
return '~'
if table2Version == 172 and indicatorOfParameter == 239:
return '~'
if table2Version == 172 and indicatorOfParameter == 228:
return 'tprate'
if table2Version == 172 and indicatorOfParameter == 212:
return 'soira'
if table2Version == 172 and indicatorOfParameter == 211:
return '~'
if table2Version == 172 and indicatorOfParameter == 210:
return '~'
if table2Version == 172 and indicatorOfParameter == 209:
return '~'
if table2Version == 172 and indicatorOfParameter == 208:
return '~'
if table2Version == 172 and indicatorOfParameter == 205:
return 'mrort'
if table2Version == 172 and indicatorOfParameter == 197:
return 'gwdrate'
if table2Version == 172 and indicatorOfParameter == 196:
return '~'
if table2Version == 172 and indicatorOfParameter == 195:
return '~'
if table2Version == 172 and indicatorOfParameter == 189:
return 'msdr'
if table2Version == 172 and indicatorOfParameter == 182:
return 'erate'
if table2Version == 172 and indicatorOfParameter == 181:
return 'nsssra'
if table2Version == 172 and indicatorOfParameter == 180:
return 'ewssra'
if table2Version == 172 and indicatorOfParameter == 179:
return 'mtntrf'
if table2Version == 172 and indicatorOfParameter == 178:
return 'mtnsrf'
if table2Version == 172 and indicatorOfParameter == 177:
return 'msntrf'
if table2Version == 172 and indicatorOfParameter == 176:
return 'msnsrf'
if table2Version == 172 and indicatorOfParameter == 175:
return 'msdtrf'
if table2Version == 172 and indicatorOfParameter == 169:
return 'msdsrf'
if table2Version == 172 and indicatorOfParameter == 154:
return 'mlwhr'
if table2Version == 172 and indicatorOfParameter == 153:
return 'mswhr'
if table2Version == 172 and indicatorOfParameter == 149:
return 'msnrf'
if table2Version == 172 and indicatorOfParameter == 147:
return 'mslhfl'
if table2Version == 172 and indicatorOfParameter == 146:
return 'msshfl'
if table2Version == 172 and indicatorOfParameter == 145:
return 'bldrate'
if table2Version == 172 and indicatorOfParameter == 144:
return 'mtsfr'
if table2Version == 172 and indicatorOfParameter == 143:
return 'cprate'
if table2Version == 172 and indicatorOfParameter == 142:
return 'mlsprt'
if table2Version == 172 and indicatorOfParameter == 50:
return 'mlspfr'
if table2Version == 172 and indicatorOfParameter == 48:
return '~'
if table2Version == 172 and indicatorOfParameter == 45:
return '~'
if table2Version == 172 and indicatorOfParameter == 44:
return 'esrate'
if table2Version == 171 and indicatorOfParameter == 255:
return '~'
if table2Version == 171 and indicatorOfParameter == 254:
return 'atmwa'
if table2Version == 171 and indicatorOfParameter == 253:
return 'atzea'
if table2Version == 171 and indicatorOfParameter == 252:
return 'athea'
if table2Version == 171 and indicatorOfParameter == 251:
return 'attea'
if table2Version == 171 and indicatorOfParameter == 250:
return 'iaa'
if table2Version == 171 and indicatorOfParameter == 249:
return 'aiwa'
if table2Version == 171 and indicatorOfParameter == 248:
return 'cca'
if table2Version == 171 and indicatorOfParameter == 247:
return 'ciwca'
if table2Version == 171 and indicatorOfParameter == 246:
return 'clwca'
if table2Version == 171 and indicatorOfParameter == 245:
return 'flsra'
if table2Version == 171 and indicatorOfParameter == 244:
return 'fsra'
if table2Version == 171 and indicatorOfParameter == 243:
return 'fala'
if table2Version == 171 and indicatorOfParameter == 242:
return 'alwa'
if table2Version == 171 and indicatorOfParameter == 241:
return 'acfa'
if table2Version == 171 and indicatorOfParameter == 240:
return 'lsfa'
if table2Version == 171 and indicatorOfParameter == 239:
return 'csfa'
if table2Version == 171 and indicatorOfParameter == 238:
return 'tsna'
if table2Version == 171 and indicatorOfParameter == 237:
return 'swal4'
if table2Version == 171 and indicatorOfParameter == 236:
return 'stal4'
if table2Version == 171 and indicatorOfParameter == 235:
return 'skta'
if table2Version == 171 and indicatorOfParameter == 234:
return 'lsrha'
if table2Version == 171 and indicatorOfParameter == 233:
return 'asqa'
if table2Version == 171 and indicatorOfParameter == 232:
return 'iea'
if table2Version == 171 and indicatorOfParameter == 231:
return 'ishfa'
if table2Version == 171 and indicatorOfParameter == 230:
return 'inssa'
if table2Version == 171 and indicatorOfParameter == 229:
return 'iewsa'
if table2Version == 171 and indicatorOfParameter == | |
import py, pytest, sys, os, textwrap, types
from pypy.interpreter.gateway import app2interp_temp
from pypy.interpreter.error import OperationError
from pypy.interpreter.function import Method
from pypy.tool.pytest import appsupport
from pypy.tool.option import make_config, make_objspace
from pypy.config.config import ConflictConfigError
from inspect import isclass, getmro
from pypy.tool.udir import udir
from pypy.tool.autopath import pypydir
from pypy.tool import leakfinder
# pytest settings
# directories synced (and ignored) when tests are rsynced to remote hosts.
rsyncdirs = ['.', '../lib-python', '../lib_pypy', '../demo']
rsyncignore = ['_cache']
# PyPy's command line extra options (these are added
# to py.test's standard options)
#
# populated with the parsed option namespace by pytest_configure().
option = None
def pytest_report_header():
    """Add the pytest version and its install location to the report header."""
    return "pytest-{} from {}".format(pytest.__version__, pytest.__file__)
def pytest_configure(config):
    """Expose the parsed py.test options via the module-level ``option``."""
    global option
    option = config.option
def _set_platform(opt, opt_str, value, parser):
    """optparse callback for -P/--platform: validate and apply the target platform."""
    from pypy.config.translationoption import PLATFORMS
    from pypy.translator.platform import set_platform
    if value not in PLATFORMS:
        raise ValueError("%s not in %s" % (value, PLATFORMS))
    set_platform(value, None)
def pytest_addoption(parser):
    """Register the PyPy- and JIT-specific command line options with py.test."""
    group = parser.getgroup("pypy options")
    group.addoption('--view', action="store_true", dest="view", default=False,
           help="view translation tests' flow graphs with Pygame")
    group.addoption('-A', '--runappdirect', action="store_true",
           default=False, dest="runappdirect",
           help="run applevel tests directly on python interpreter (not through PyPy)")
    group.addoption('--direct', action="store_true",
           default=False, dest="rundirect",
           help="run pexpect tests directly")
    # -P delegates validation/side effects to the _set_platform callback.
    group.addoption('-P', '--platform', action="callback", type="string",
           default="host", callback=_set_platform,
           help="set up tests to use specified platform as compile/run target")
    group = parser.getgroup("JIT options")
    group.addoption('--viewloops', action="store_true",
           default=False, dest="viewloops",
           help="show only the compiled loops")
def pytest_sessionstart():
    """Drop PYTHONSTARTUP so spawned python subprocesses skip user customizations.

    NOTE(review): a second ``pytest_sessionstart(session)`` defined later in
    this module rebinds this name, so this hook never actually runs --
    confirm and consolidate the two definitions.
    """
    # have python subprocesses avoid startup customizations by default
    try:
        del os.environ['PYTHONSTARTUP']
    except KeyError:
        pass
def pytest_funcarg__space(request):
    """py.test funcarg: provide the (cached) objspace requested by the test
    class through its optional ``spaceconfig`` attribute."""
    requested = getattr(request.cls, 'spaceconfig', {})
    return gettestobjspace(**requested)
# cache of constructed object spaces, keyed by their config key.
_SPACECACHE={}
def gettestobjspace(name=None, **kwds):
    """ helper for instantiating and caching space's for testing.
    """
    try:
        config = make_config(option, objspace=name, **kwds)
    except ConflictConfigError, e:
        # this exception is typically only raised if a module is not available.
        # in this case the test should be skipped
        py.test.skip(str(e))
    key = config.getkey()
    try:
        return _SPACECACHE[key]
    except KeyError:
        if getattr(option, 'runappdirect', None):
            # -A runs: make sure the host interpreter matches the requested
            # objspace kind, then hand out a minimal stand-in space.
            if name not in (None, 'std'):
                myname = getattr(sys, 'pypy_objspaceclass', '')
                if not myname.lower().startswith(name):
                    py.test.skip("cannot runappdirect test: "
                                 "%s objspace required" % (name,))
            return TinyObjSpace(**kwds)
        space = maketestobjspace(config)
        _SPACECACHE[key] = space
        return space
def maketestobjspace(config=None):
    """Build a fresh object space from ``config`` and prime it for testing."""
    if config is None:
        config = make_config(option)
    try:
        space = make_objspace(config)
    except OperationError, e:
        check_keyboard_interrupt(e)
        if option.verbose:
            import traceback
            traceback.print_exc()
        py.test.fail("fatal: cannot initialize objspace: %r" %
                     (config.objspace.name,))
    space.startup() # Initialize all builtin modules
    # expose py.test-flavoured helpers (AssertionError, raises, skip)
    # inside the space's builtins so app-level test code can use them.
    space.setitem(space.builtin.w_dict, space.wrap('AssertionError'),
                  appsupport.build_pytest_assertion(space))
    space.setitem(space.builtin.w_dict, space.wrap('raises'),
                  space.wrap(appsupport.app_raises))
    space.setitem(space.builtin.w_dict, space.wrap('skip'),
                  space.wrap(appsupport.app_skip))
    space.raises_w = appsupport.raises_w.__get__(space)
    space.eq_w = appsupport.eq_w.__get__(space)
    return space
class TinyObjSpace(object):
    """A minimal stand-in object space for -A/--runappdirect runs.

    "Wrapping" is the identity; app-level objects are plain host objects.
    The constructor cross-checks the requested space keywords against
    ``sys.pypy_translation_info`` and skips the test on any mismatch.
    """
    def __init__(self, **kwds):
        import sys
        info = getattr(sys, 'pypy_translation_info', None)
        for key, value in kwds.iteritems():
            if key == 'usemodules':
                if info is not None:
                    # on a translated pypy: every requested module must be built in.
                    for modname in value:
                        ok = info.get('objspace.usemodules.%s' % modname,
                                      False)
                        if not ok:
                            py.test.skip("cannot runappdirect test: "
                                         "module %r required" % (modname,))
                else:
                    if '__pypy__' in value:
                        py.test.skip("no module __pypy__ on top of CPython")
                continue
            if info is None:
                py.test.skip("cannot runappdirect this test on top of CPython")
            has = info.get(key, None)
            if has != value:
                #print sys.pypy_translation_info
                py.test.skip("cannot runappdirect test: space needs %s = %s, "\
                             "while pypy-c was built with %s" % (key, value, has))
        # pre-wrap a few common host objects under their conventional w_ names.
        for name in ('int', 'long', 'str', 'unicode', 'None'):
            setattr(self, 'w_' + name, eval(name))

    def appexec(self, args, body):
        # body looks like "(arg1, arg2): <code>"; compile it as a function.
        body = body.lstrip()
        assert body.startswith('(')
        src = py.code.Source("def anonymous" + body)
        d = {}
        exec src.compile() in d
        return d['anonymous'](*args)

    def wrap(self, obj):
        # identity wrapping: app-level objects ARE host objects here.
        return obj

    def unpackiterable(self, itr):
        return list(itr)

    def is_true(self, obj):
        return bool(obj)

    def str_w(self, w_str):
        return w_str

    def newdict(self, module=None):
        return {}

    def newtuple(self, iterable):
        return tuple(iterable)

    def newlist(self, iterable):
        return list(iterable)

    def call_function(self, func, *args, **kwds):
        return func(*args, **kwds)

    def call_method(self, obj, name, *args, **kwds):
        return getattr(obj, name)(*args, **kwds)

    def getattr(self, obj, name):
        return getattr(obj, name)

    def setattr(self, obj, name, value):
        setattr(obj, name, value)

    def getbuiltinmodule(self, name):
        return __import__(name)

    def delslice(self, obj, *args):
        obj.__delslice__(*args)
def translation_test_so_skip_if_appdirect():
    """Skip translation tests when running app-level tests directly (-A)."""
    if option.runappdirect:
        py.test.skip("translation test, skipped for appdirect")
class OpErrKeyboardInterrupt(KeyboardInterrupt):
    """KeyboardInterrupt raised when an OperationError wraps an app-level one."""
    pass
def check_keyboard_interrupt(e):
    """Re-raise ``e`` as OpErrKeyboardInterrupt if it wraps a KeyboardInterrupt."""
    # we cannot easily convert w_KeyboardInterrupt to KeyboardInterrupt
    # in general without a space -- here is an approximation
    try:
        if e.w_type.name == 'KeyboardInterrupt':
            tb = sys.exc_info()[2]
            raise OpErrKeyboardInterrupt, OpErrKeyboardInterrupt(), tb
    except AttributeError:
        # e.w_type has no ``name``; not something we can inspect -- ignore.
        pass
#
# Interfacing/Integrating with py.test's collection process
#
#
def ensure_pytest_builtin_helpers(helpers='skip raises'.split()):
    """ hack (py.test.) raises and skip into builtins, needed
    for applevel tests to run directly on cpython but
    apparently earlier on "raises" was already added
    to module's globals.
    """
    import __builtin__
    for helper in helpers:
        # only install the helper if nothing else claimed the name already.
        if not hasattr(__builtin__, helper):
            setattr(__builtin__, helper, getattr(py.test, helper))
def pytest_sessionstart(session):
    """ before session.main() is called. """
    # NOTE: an earlier zero-argument pytest_sessionstart in this module is
    # shadowed by this definition, so its cleanup never ran; fold that
    # cleanup in here so both behaviors take effect.
    # have python subprocesses avoid startup customizations by default
    os.environ.pop('PYTHONSTARTUP', None)
    # stick py.test raise in module globals -- carefully
    ensure_pytest_builtin_helpers()
def pytest_pycollect_makemodule(path, parent):
    """Collect every test module through the PyPy-aware module collector."""
    return PyPyModule(path, parent)
class PyPyModule(py.test.collect.Module):
    """ we take care of collecting classes both at app level
    and at interp-level (because we need to stick a space
    at the class) ourselves.
    """
    def accept_regular_test(self):
        # "regular" means plain test_* / Test* names (interp-level by default).
        if self.config.option.runappdirect:
            # only collect regular tests if we are in an 'app_test' directory,
            # or in test_lib_pypy
            names = self.listnames()
            return "app_test" in names or "test_lib_pypy" in names
        else:
            return True

    def funcnamefilter(self, name):
        # test_* runs at interp-level; app_test_* runs at app-level.
        if name.startswith('test_'):
            return self.accept_regular_test()
        if name.startswith('app_test_'):
            return True
        return False

    def classnamefilter(self, name):
        # Test* -> interp-level, AppTest* -> app-level, ExpectTest* -> pexpect.
        if name.startswith('Test'):
            return self.accept_regular_test()
        if name.startswith('AppTest'):
            return True
        if name.startswith('ExpectTest'):
            return True
        #XXX todo
        #if name.startswith('AppExpectTest'):
        #    return True
        return False

    def makeitem(self, name, obj):
        """Map each collected name onto the matching PyPy-aware collector."""
        if isclass(obj) and self.classnamefilter(name):
            if name.startswith('AppTest'):
                return AppClassCollector(name, parent=self)
            elif name.startswith('ExpectTest'):
                if self.config.option.rundirect:
                    return py.test.collect.Class(name, parent=self)
                return ExpectClassCollector(name, parent=self)
            # XXX todo
            #elif name.startswith('AppExpectTest'):
            #    if option.rundirect:
            #        return AppClassCollector(name, parent=self)
            #    return AppExpectClassCollector(name, parent=self)
            else:
                return IntClassCollector(name, parent=self)
        elif hasattr(obj, 'func_code') and self.funcnamefilter(name):
            if name.startswith('app_test_'):
                # co_flags & 32 tests the CO_GENERATOR flag.
                assert not obj.func_code.co_flags & 32, \
                    "generator app level functions? you must be joking"
                return AppTestFunction(name, parent=self)
            elif obj.func_code.co_flags & 32: # generator function
                return pytest.Generator(name, parent=self)
            else:
                return IntTestFunction(name, parent=self)
def skip_on_missing_buildoption(**ropts):
    """Skip the calling test unless running on a translated pypy built with
    all of the given translation options.

    :param ropts: required option name -> required value pairs, checked
        against ``sys.pypy_translation_info``.
    """
    __tracebackhide__ = True
    import sys
    options = getattr(sys, 'pypy_translation_info', None)
    if options is None:
        py.test.skip("not running on translated pypy "
                     "(btw, i would need options: %s)" %
                     (ropts,))
    for opt in ropts:
        # dict.has_key() is deprecated (and gone in py3); `in` is identical.
        if opt not in options or options[opt] != ropts[opt]:
            break
    else:
        # every required option matched.
        return
    py.test.skip("need translated pypy with: %s, got %s"
                 %(ropts,options))
class LazyObjSpaceGetter(object):
    """Descriptor that builds the default test objspace on first access.

    The space is cached as a plain class attribute, which then shadows
    this descriptor for subsequent lookups.
    """
    def __get__(self, obj, cls=None):
        space = gettestobjspace()
        if cls:
            cls.space = space
        return space
class AppError(Exception):
    """Interp-level wrapper carrying an app-level failure's exception info."""

    def __init__(self, excinfo):
        # excinfo: the app-level exception info used later by repr_failure.
        self.excinfo = excinfo
def pytest_runtest_setup(__multicall__, item):
    """Build the class-requested objspace before setup runs, then start
    tracking allocations for leak detection."""
    if isinstance(item, py.test.collect.Function):
        appclass = item.getparent(PyPyClassCollector)
        if appclass is not None:
            # build/fetch the space requested via `spaceconfig` *before*
            # the remaining setup hooks (run through __multicall__) execute.
            spaceconfig = getattr(appclass.obj, 'spaceconfig', None)
            if spaceconfig:
                appclass.obj.space = gettestobjspace(**spaceconfig)

    __multicall__.execute()

    if isinstance(item, py.test.collect.Function):
        # leak tracking can be opted out per-test via dont_track_allocations.
        if not getattr(item.obj, 'dont_track_allocations', False):
            leakfinder.start_tracking_allocations()
def pytest_runtest_call(__multicall__, item):
    """Run the test; mark success only if no exception escaped the call."""
    __multicall__.execute()
    # only reached when execute() did not raise; teardown uses this flag
    # to decide whether leak failures should be reported.
    item._success = True
def pytest_runtest_teardown(__multicall__, item):
    """Stop allocation tracking and fail passing tests that leaked."""
    __multicall__.execute()

    if isinstance(item, py.test.collect.Function):
        if (not getattr(item.obj, 'dont_track_allocations', False)
            and leakfinder.TRACK_ALLOCATIONS):
            item._pypytest_leaks = leakfinder.stop_tracking_allocations(False)
        else:            # stop_tracking_allocations() already called
            item._pypytest_leaks = None

        # check for leaks, but only if the test passed so far
        if getattr(item, '_success', False) and item._pypytest_leaks:
            raise leakfinder.MallocMismatch(item._pypytest_leaks)

    if 'pygame' in sys.modules:
        assert option.view, ("should not invoke Pygame "
                             "if conftest.option.view is False")
_pygame_imported = False
class IntTestFunction(py.test.collect.Function):
    """Collector/runner for interp-level test functions."""
    def __init__(self, *args, **kwargs):
        super(IntTestFunction, self).__init__(*args, **kwargs)
        # lets -k/markers select interp-level tests.
        self.keywords['interplevel'] = True

    def runtest(self):
        try:
            super(IntTestFunction, self).runtest()
        except OperationError, e:
            check_keyboard_interrupt(e)
            raise
        except Exception, e:
            # turn DistutilsPlatformError into a skip; matched by class
            # name first so distutils is only imported when relevant.
            cls = e.__class__
            while cls is not Exception:
                if cls.__name__ == 'DistutilsPlatformError':
                    from distutils.errors import DistutilsPlatformError
                    if isinstance(e, DistutilsPlatformError):
                        py.test.skip('%s: %s' % (e.__class__.__name__, e))
                cls = cls.__bases__[0]
            raise
class AppTestFunction(py.test.collect.Function):
    """Runs a test function at app level inside an objspace (or directly
    on the host interpreter when -A/--runappdirect is given)."""
    def __init__(self, *args, **kwargs):
        super(AppTestFunction, self).__init__(*args, **kwargs)
        # lets -k/markers select app-level tests.
        self.keywords['applevel'] = True

    def _prunetraceback(self, traceback):
        # keep the full traceback; app-level frames are rebuilt for reporting.
        return traceback

    def execute_appex(self, space, target, *args):
        try:
            target(*args)
        except OperationError, e:
            tb = sys.exc_info()[2]
            if e.match(space, space.w_KeyboardInterrupt):
                raise OpErrKeyboardInterrupt, OpErrKeyboardInterrupt(), tb
            appexcinfo = appsupport.AppExceptionInfo(space, e)
            if appexcinfo.traceback:
                # re-raise with the app-level traceback attached so
                # repr_failure can show it instead of interp-level frames.
                raise AppError, AppError(appexcinfo), tb
            raise

    def runtest(self):
        target = self.obj
        if self.config.option.runappdirect:
            return target()
        space = gettestobjspace()
        filename = self._getdynfilename(target)
        func = app2interp_temp(target, filename=filename)
        print "executing", func
        self.execute_appex(space, func, space)

    def repr_failure(self, excinfo):
        if excinfo.errisinstance(AppError):
            # report the wrapped app-level failure, not the wrapper.
            excinfo = excinfo.value.excinfo
        return super(AppTestFunction, self).repr_failure(excinfo)

    def _getdynfilename(self, func):
        """Synthesize a pseudo-filename pointing back at the real source."""
        code = getattr(func, 'im_func', func).func_code
        return "[%s:%s]" % (code.co_filename, code.co_firstlineno)
class AppTestMethod(AppTestFunction):
    """App-level test method: mirrors the instance's ``w_*`` attributes
    into the app-level instance before running."""
    def setup(self):
        super(AppTestMethod, self).setup()
        instance = self.parent.obj
        w_instance = self.parent.w_instance
        space = instance.space
        for name in dir(instance):
            if name.startswith('w_'):
                if self.config.option.runappdirect:
                    # direct runs: expose the value under the unprefixed name.
                    setattr(instance, name[2:], getattr(instance, name))
                else:
                    obj = getattr(instance, name)
                    if isinstance(obj, types.MethodType):
                        # re-create the method from its source at app level
                        # and bind it to the app-level instance.
                        source = py.std.inspect.getsource(obj).lstrip()
                        w_func = space.appexec([], textwrap.dedent("""
                        ():
                            %s
                            return %s
                        """) % (source, name))
                        w_obj = Method(space, w_func, w_instance, space.w_None)
                    else:
                        w_obj = obj
                    space.setattr(w_instance, space.wrap(name[2:]), w_obj)

    def runtest(self):
        target = self.obj
        if self.config.option.runappdirect:
            return target()
        space = target.im_self.space
        filename = self._getdynfilename(target)
        func = app2interp_temp(target.im_func, filename=filename)
        w_instance = self.parent.w_instance
        self.execute_appex(space, func, space, w_instance)
class PyPyClassCollector(py.test.collect.Class):
    """Base collector for PyPy test classes: guarantees the class has a
    ``space`` attribute before its tests run."""

    def setup(self):
        cls = self.obj
        if hasattr(cls, 'spaceconfig'):
            # the configured space must already have been built.
            assert hasattr(cls, 'space')  # set by pytest_runtest_setup
        else:
            # no explicit config: fall back to the lazily-built default space.
            cls.space = LazyObjSpaceGetter()

        super(PyPyClassCollector, self).setup()
class IntInstanceCollector(py.test.collect.Instance):
Function | |
<reponame>arunnthevapalan/great_expectations<gh_stars>0
import copy
import json
import logging
import uuid
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Set, Union
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import (
Batch,
BatchRequest,
RuntimeBatchRequest,
batch_request_contains_batch_data,
)
from great_expectations.core.config_peer import ConfigPeer
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.usage_statistics.usage_statistics import (
UsageStatisticsHandler,
get_profiler_run_usage_statistics,
usage_statistics_enabled_method,
)
from great_expectations.core.util import convert_to_json_serializable, nested_update
from great_expectations.data_context.store import ProfilerStore
from great_expectations.data_context.types.resource_identifiers import (
ConfigurationIdentifier,
GeCloudIdentifier,
)
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.rule_based_profiler.config.base import (
DomainBuilderConfig,
ExpectationConfigurationBuilderConfig,
ParameterBuilderConfig,
RuleBasedProfilerConfig,
domainBuilderConfigSchema,
expectationConfigurationBuilderConfigSchema,
parameterBuilderConfigSchema,
)
from great_expectations.rule_based_profiler.domain_builder.domain_builder import (
DomainBuilder,
)
from great_expectations.rule_based_profiler.expectation_configuration_builder import (
ExpectationConfigurationBuilder,
init_rule_expectation_configuration_builders,
)
from great_expectations.rule_based_profiler.helpers.util import (
convert_variables_to_dict,
)
from great_expectations.rule_based_profiler.parameter_builder import (
ParameterBuilder,
init_rule_parameter_builders,
)
from great_expectations.rule_based_profiler.rule.rule import Rule
from great_expectations.rule_based_profiler.types import (
ParameterContainer,
build_parameter_container_for_variables,
)
from great_expectations.types import SerializableDictDot
from great_expectations.util import filter_properties_dict
# Module-level logger; pinned to INFO independently of the root logger's level.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ReconciliationStrategy(Enum):
    """How an override value is combined with an existing configuration value.

    Semantics (see ``_reconcile_profiler_variables_as_dict``):
    NESTED_UPDATE merges dicts recursively, REPLACE discards the existing value,
    UPDATE performs a shallow ``dict.update``-style merge.
    """

    NESTED_UPDATE = "nested_update"
    REPLACE = "replace"
    UPDATE = "update"
@dataclass
class ReconciliationDirectives(SerializableDictDot):
    """Per-component ReconciliationStrategy selections used when reconciling a profiler.

    Each field names a profiler component; its value chooses how run-time
    overrides for that component are combined with the existing configuration.
    All components default to the shallow UPDATE strategy.
    """

    variables: ReconciliationStrategy = ReconciliationStrategy.UPDATE
    domain_builder: ReconciliationStrategy = ReconciliationStrategy.UPDATE
    parameter_builder: ReconciliationStrategy = ReconciliationStrategy.UPDATE
    expectation_configuration_builder: ReconciliationStrategy = (
        ReconciliationStrategy.UPDATE
    )

    def to_dict(self) -> dict:
        """Return the directives as a plain dict (dataclasses.asdict)."""
        return asdict(self)

    def to_json_dict(self) -> dict:
        """Return a JSON-serializable dict representation of the directives."""
        return convert_to_json_serializable(data=self.to_dict())
class BaseRuleBasedProfiler(ConfigPeer):
    """
    BaseRuleBasedProfiler class is initialized from RuleBasedProfilerConfig typed object and contains all functionality
    in the form of interface methods (which can be overwritten by subclasses) and their reference implementation.
    """

    # Default reconciliation behavior: shallow UPDATE for every component.
    DEFAULT_RECONCILATION_DIRECTIVES: ReconciliationDirectives = (
        ReconciliationDirectives(
            variables=ReconciliationStrategy.UPDATE,
            domain_builder=ReconciliationStrategy.UPDATE,
            parameter_builder=ReconciliationStrategy.UPDATE,
            expectation_configuration_builder=ReconciliationStrategy.UPDATE,
        )
    )

    # NOTE(review): not referenced within this chunk; presumably consumed by
    # expectation machinery elsewhere in the package -- confirm before changing.
    EXPECTATION_SUCCESS_KEYS: Set[str] = {
        "auto",
        "profiler_config",
    }
    def __init__(
        self,
        profiler_config: RuleBasedProfilerConfig,
        data_context: Optional["DataContext"] = None,  # noqa: F821
        usage_statistics_handler: Optional[UsageStatisticsHandler] = None,
    ):
        """
        Create a new RuleBasedProfilerBase using configured rules (as captured in the RuleBasedProfilerConfig object).
        For a rule or an item in a rule configuration, instantiates the following if
        available: a domain builder, a parameter builder, and a configuration builder.
        These will be used to define profiler computation patterns.
        Args:
            profiler_config: RuleBasedProfilerConfig -- formal typed object containing configuration
            data_context: DataContext object that defines a full runtime environment (data access, etc.)
        """
        # Unpack the typed config into locals for readability.
        name: str = profiler_config.name
        config_version: float = profiler_config.config_version
        variables: Optional[Dict[str, Any]] = profiler_config.variables
        rules: Optional[Dict[str, Dict[str, Any]]] = profiler_config.rules
        self._name = name
        self._config_version = config_version
        self._profiler_config = profiler_config
        if variables is None:
            variables = {}
        self._usage_statistics_handler = usage_statistics_handler
        # Necessary to annotate ExpectationSuite during `run()`
        # (see run(): passed as profiler_config= to expectation_suite.add_citation).
        self._citation = {
            "name": name,
            "config_version": config_version,
            "variables": variables,
            "rules": rules,
        }
        # Convert variables argument to ParameterContainer
        _variables: ParameterContainer = build_parameter_container_for_variables(
            variables_configs=variables
        )
        self._variables = _variables
        self._data_context = data_context
        # Instantiate one Rule object per entry of the rules config dict.
        self._rules = self._init_profiler_rules(rules=rules)
def _init_profiler_rules(
self,
rules: Dict[str, Dict[str, Any]],
) -> List[Rule]:
if rules is None:
rules = {}
rule_object_list: List[Rule] = []
rule_name: str
rule_config: Dict[str, Any]
for rule_name, rule_config in rules.items():
rule_object_list.append(
self._init_rule(rule_name=rule_name, rule_config=rule_config)
)
return rule_object_list
def _init_rule(
self,
rule_name: str,
rule_config: Dict[str, Any],
) -> Rule:
# Config is validated through schema but do a sanity check
attr: str
for attr in (
"domain_builder",
"expectation_configuration_builders",
):
if attr not in rule_config:
raise ge_exceptions.ProfilerConfigurationError(
message=f'Invalid rule "{rule_name}": missing mandatory {attr}.'
)
# Instantiate builder attributes
domain_builder: DomainBuilder = RuleBasedProfiler._init_rule_domain_builder(
domain_builder_config=rule_config["domain_builder"],
data_context=self._data_context,
)
parameter_builders: Optional[
List[ParameterBuilder]
] = init_rule_parameter_builders(
parameter_builder_configs=rule_config.get("parameter_builders"),
data_context=self._data_context,
)
expectation_configuration_builders: List[
ExpectationConfigurationBuilder
] = init_rule_expectation_configuration_builders(
expectation_configuration_builder_configs=rule_config[
"expectation_configuration_builders"
],
data_context=self._data_context,
)
# Compile previous steps and package into a Rule object
return Rule(
name=rule_name,
domain_builder=domain_builder,
parameter_builders=parameter_builders,
expectation_configuration_builders=expectation_configuration_builders,
)
@staticmethod
def _init_rule_domain_builder(
domain_builder_config: dict,
data_context: Optional["DataContext"] = None, # noqa: F821
) -> DomainBuilder:
domain_builder: DomainBuilder = instantiate_class_from_config(
config=domain_builder_config,
runtime_environment={"data_context": data_context},
config_defaults={
"module_name": "great_expectations.rule_based_profiler.domain_builder"
},
)
return domain_builder
    @usage_statistics_enabled_method(
        event_name="profiler.run",
        args_payload_fn=get_profiler_run_usage_statistics,
    )
    def run(
        self,
        variables: Optional[Dict[str, Any]] = None,
        rules: Optional[Dict[str, Dict[str, Any]]] = None,
        batch_list: Optional[List[Batch]] = None,
        batch_request: Optional[Union[BatchRequest, RuntimeBatchRequest, dict]] = None,
        force_batch_data: bool = False,
        reconciliation_directives: ReconciliationDirectives = DEFAULT_RECONCILATION_DIRECTIVES,
        expectation_suite: Optional[ExpectationSuite] = None,
        expectation_suite_name: Optional[str] = None,
        include_citation: bool = True,
    ) -> ExpectationSuite:
        """
        Args:
            :param variables attribute name/value pairs (overrides)
            :param rules name/(configuration-dictionary) (overrides)
            :param batch_list: List of Batch objects used to supply arguments at runtime.
            :param batch_request: batch_request used to supply arguments at runtime.
            :param force_batch_data: Whether or not to overwrite any existing batch_request value in Builder components.
            :param reconciliation_directives directives for how each rule component should be overwritten
            :param expectation_suite: An existing ExpectationSuite to update.
            :param expectation_suite_name: A name for returned ExpectationSuite.
            :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler
        :return: Set of rule evaluation results in the form of an ExpectationSuite
        """
        # Callers may supply an existing suite OR a name for a new one, not both.
        assert not (
            expectation_suite and expectation_suite_name
        ), "Ambiguous arguments provided; you may pass in an ExpectationSuite or provide a name to instantiate a new one (but you may not do both)."
        # 1. Merge run-time variable overrides into the configured variables.
        effective_variables: Optional[
            ParameterContainer
        ] = self.reconcile_profiler_variables(
            variables=variables,
            reconciliation_strategy=reconciliation_directives.variables,
        )
        # 2. Merge run-time rule overrides into the configured rules.
        effective_rules: List[Rule] = self.reconcile_profiler_rules(
            rules=rules, reconciliation_directives=reconciliation_directives
        )
        # 3. Push the supplied batch data/request into each rule's builders.
        effective_rules = self.generate_rule_overrides_from_batch_request(
            rules=effective_rules,
            batch_list=batch_list,
            batch_request=batch_request,
            force_batch_data=force_batch_data,
        )
        if expectation_suite is None:
            if expectation_suite_name is None:
                # Auto-generate a unique, clearly temporary suite name.
                expectation_suite_name = f"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}"
            expectation_suite = ExpectationSuite(
                expectation_suite_name=expectation_suite_name,
                data_context=self._data_context,
            )
        if include_citation:
            # Record the profiler configuration (self._citation) in the suite metadata.
            expectation_suite.add_citation(
                comment="Suite created by Rule-Based Profiler with the configuration included.",
                profiler_config=self._citation,
            )
        # 4. Run every rule and fold its expectation configurations into the suite.
        rule: Rule
        for rule in effective_rules:
            expectation_configurations: List[ExpectationConfiguration] = rule.generate(
                variables=effective_variables,
            )
            expectation_configuration: ExpectationConfiguration
            for expectation_configuration in expectation_configurations:
                # Match on domain so repeat runs overwrite rather than duplicate.
                expectation_suite._add_expectation(
                    expectation_configuration=expectation_configuration,
                    send_usage_event=False,
                    match_type="domain",
                    overwrite_existing=True,
                )
        return expectation_suite
    def add_rule(self, rule: Rule) -> None:
        """
        Add Rule object to existing profiler object by reconciling profiler rules and updating _profiler_config.
        """
        # Wrap the single rule as a {name: config} mapping so it can flow through
        # the same reconciliation path as bulk rule overrides.
        rules_dict: Dict[str, Dict[str, Any]] = {
            rule.name: rule.to_json_dict(),
        }
        effective_rules: List[Rule] = self.reconcile_profiler_rules(
            rules=rules_dict,
            reconciliation_directives=ReconciliationDirectives(
                domain_builder=ReconciliationStrategy.UPDATE,
                parameter_builder=ReconciliationStrategy.UPDATE,
                expectation_configuration_builder=ReconciliationStrategy.UPDATE,
            ),
        )
        # Persist the reconciled rules on both the live object and its config.
        updated_rules: Optional[Dict[str, Dict[str, Any]]] = {
            rule.name: rule.to_json_dict() for rule in effective_rules
        }
        self.rules: List[Rule] = effective_rules
        self._profiler_config.rules = updated_rules
def reconcile_profiler_variables(
self,
variables: Optional[Dict[str, Any]] = None,
reconciliation_strategy: ReconciliationStrategy = DEFAULT_RECONCILATION_DIRECTIVES.variables,
) -> Optional[ParameterContainer]:
"""
Profiler "variables" reconciliation involves combining the variables, instantiated from Profiler configuration
(e.g., stored in a YAML file managed by the Profiler store), with the variables overrides, provided at run time.
The reconciliation logic for "variables" is of the "replace" nature: An override value complements the original
on key "miss", and replaces the original on key "hit" (or "collision"), because "variables" is a unique member.
:param variables: variables overrides, supplied in dictionary (configuration) form
:param reconciliation_strategy: one of update, nested_update, or overwrite ways of reconciling overwrites
:return: reconciled variables in their canonical ParameterContainer object form
"""
effective_variables: ParameterContainer
if variables and isinstance(variables, dict):
variables_configs: dict = self._reconcile_profiler_variables_as_dict(
variables=variables, reconciliation_strategy=reconciliation_strategy
)
effective_variables = build_parameter_container_for_variables(
variables_configs=variables_configs
)
else:
effective_variables = self.variables
return effective_variables
def _reconcile_profiler_variables_as_dict(
self,
variables: Optional[Dict[str, Any]],
reconciliation_strategy: ReconciliationStrategy = DEFAULT_RECONCILATION_DIRECTIVES.variables,
) -> dict:
if variables is None:
variables = {}
variables_configs: Optional[Dict[str, Any]] = convert_variables_to_dict(
variables=self.variables
)
if reconciliation_strategy == ReconciliationStrategy.NESTED_UPDATE:
variables_configs = nested_update(
variables_configs,
variables,
)
elif reconciliation_strategy == ReconciliationStrategy.REPLACE:
variables_configs = variables
elif reconciliation_strategy == ReconciliationStrategy.UPDATE:
variables_configs.update(variables)
return variables_configs
def reconcile_profiler_rules(
self,
rules: Optional[Dict[str, Dict[str, Any]]] = None,
reconciliation_directives: ReconciliationDirectives = DEFAULT_RECONCILATION_DIRECTIVES,
) -> List[Rule]:
"""
Profiler "rules" reconciliation involves combining the rules, instantiated from Profiler configuration (e.g.,
stored in a YAML file managed by the Profiler store), with the rules overrides, provided at run time.
The reconciliation logic for "rules" is of the "procedural" nature:
(1) Combine every rule override configuration with any instantiated rule into a reconciled configuration
(2) Re-instantiate Rule objects from the reconciled rule configurations
:param rules: rules overrides, supplied in dictionary (configuration) form for each rule name as the key
:param reconciliation_directives directives for how each rule component should be overwritten
:return: reconciled rules in their canonical List[Rule] object form
"""
effective_rules: Dict[str, Rule] = self._reconcile_profiler_rules_as_dict(
rules, reconciliation_directives
)
return list(effective_rules.values())
def _reconcile_profiler_rules_as_dict(
self,
rules: Optional[Dict[str, Dict[str, Any]]] = None,
reconciliation_directives: ReconciliationDirectives = DEFAULT_RECONCILATION_DIRECTIVES,
) -> Dict[str, Rule]:
if rules is None:
rules = {}
effective_rules: Dict[str, Rule] = self._get_rules_as_dict()
rule_name: str
rule_config: dict
override_rule_configs: Dict[str, Dict[str, Any]] = {
rule_name: RuleBasedProfiler._reconcile_rule_config(
existing_rules=effective_rules,
rule_name=rule_name,
rule_config=rule_config,
reconciliation_directives=reconciliation_directives,
)
for rule_name, rule_config in rules.items()
}
override_rules: Dict[str, Rule] = {
rule_name: self._init_rule(rule_name=rule_name, rule_config=rule_config)
for rule_name, rule_config in override_rule_configs.items()
}
effective_rules.update(override_rules)
return effective_rules
@staticmethod
def _reconcile_rule_config(
existing_rules: Dict[str, Rule],
rule_name: str,
rule_config: dict,
reconciliation_directives: ReconciliationDirectives = DEFAULT_RECONCILATION_DIRECTIVES,
) -> Dict[str, Any]:
"""
A "rule configuration" reconciliation is the process of combining the configuration of a single candidate
override rule with at most one configuration corresponding to the list of rules instantiated from | |
from civicboom.lib.base import *
from cbutils.misc import make_username
from civicboom.controllers.account import AccountController
set_persona = AccountController().set_persona
from civicboom.model.member import Group, GroupMembership, group_member_roles, group_join_mode, group_member_visibility, group_content_visibility, Member
#from civicboom.controllers.contents import _normalize_member # now part of base
from civicboom.controllers.contents import ContentsController
create_content = ContentsController().create
from civicboom.lib.form_validators.dict_overlay import validate_dict
import formencode
from civicboom.lib.form_validators.base import DefaultSchema
from civicboom.lib.form_validators.registration import UniqueUsernameValidator
from civicboom.controllers.settings import SettingsController
import re
from civicboom.lib.communication.email_lib import send_email
settings_update = SettingsController().update
log = logging.getLogger(__name__)
#-------------------------------------------------------------------------------
# Constants
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Form Schema
#-------------------------------------------------------------------------------
# This also appears in Setting Controller
class PrivateGroupValidator(formencode.validators.FancyValidator):
    """Validates a group visibility value: 'public' or 'private'.

    'private' is only accepted when the logged-in persona has a paid ("plus")
    account.
    """
    messages = {
        'invalid' : x_('Value must be one of: public; private'),
        'require_upgrade' : x_('You require a paid account to use this feature, please contact us!'),
    }

    def _to_python(self, value, state):
        # Reject anything other than the two recognised visibility values.
        if value not in ('public', 'private'):
            raise formencode.Invalid(self.message('invalid', state), value, state)
        # Private visibility is a paid feature.
        if value == "private" and not c.logged_in_persona.has_account_required('plus'):
            raise formencode.Invalid(self.message('require_upgrade', state), value, state)
        return value
class GroupSchema(DefaultSchema):
    # Validation schema for group settings; every field is optional
    # (not_empty=False) so partial updates validate.
    name = formencode.validators.String(max=255, min=4 , not_empty=False)
    description = formencode.validators.String(max=4096, min=0 , not_empty=False)
    default_role = formencode.validators.OneOf(group_member_roles.enums , not_empty=False)
    join_mode = formencode.validators.OneOf(group_join_mode.enums , not_empty=False)
    member_visibility = PrivateGroupValidator()
    default_content_visibility = PrivateGroupValidator()
class CreateGroupSchema(GroupSchema):
    # Creation additionally requires a username not held by any existing member.
    username = UniqueUsernameValidator()
#-------------------------------------------------------------------------------
# Global Functions
#-------------------------------------------------------------------------------
def _gen_username(base):
    """Return a username derived from *base* that no existing Member holds.

    If *base* is free it is returned unchanged.  Otherwise a numeric suffix is
    appended (starting at 2) and incremented until an unused name is found.
    """
    if Session.query(Member).filter(Member.id == base).count() == 0:
        return base
    # BUG FIX: the original called re.search(base, "[0-9]$") -- pattern and
    # subject swapped -- so the "already ends with a digit" test never matched
    # and a spurious "2" was always appended.  Arguments are now in the
    # documented (pattern, string) order.
    if not re.search("[0-9]$", base):
        base = base + "2"
    while Session.query(Member).filter(Member.id == base).count() > 0:
        # Split trailing digits from the stem and increment them.
        name, num = re.match("(.*?)([0-9]+)", base).groups()
        base = name + str(int(num) + 1)
    return base
#-------------------------------------------------------------------------------
# Member Search
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Group Controler
#-------------------------------------------------------------------------------
class GroupsController(BaseController):
"""
@doc groups
@title Groups
@desc REST Controller styled on the Atom Publishing Protocol
"""
    @web
    def index(self, **kwargs):
        """
        GET /groups: All groups the current user is a member of
        @api groups 1.0 (WIP)
        @param * (see common list return controls)
        @return 200 - data.list = array of group objects that logged in user is a member including the additional field 'members "role" in the group'
        """
        # url('groups')
        # member searching?
        # NOTE(review): not implemented -- body is a bare pass, so this action
        # currently returns None regardless of input.
        pass
    def members(self, **kwargs):
        # List (member, role) pairings, filterable by group and/or member.
        # NOTE(review): author-flagged as unfinished and broken below; the
        # join() call in particular looks suspect -- do not rely on this action.
        # AllanC: this was created for two calls
        # member -> whats groups they were members of and there roles
        # group -> list members and there roles
        #
        # this had complications because of:
        # permissions of the viewing user
        # permissions of the group
        # the roles need returning (so cant be part of members/index neatly)
        #
        # UNFINISHED!!!!!!!!!!!! AND BROKEN!
        # Setup search criteria
        if 'include_fields' not in kwargs:
            kwargs['include_fields'] = ""
        #if 'status' not in kwargs:
        # kwargs['status']
        results = Session.query(Member).join(Group, Member, Group.members_roles)
        if 'group' in kwargs:
            group = normalize_member(kwargs['group'])
            results = results.filter(Group.id==group)
        if 'member' in kwargs:
            member = normalize_member(kwargs['member'])
            results = results.filter(Member.id==member)
        #results = results.filter(Member.status=='active')
        if 'limit' not in kwargs: #Set default limit and offset (can be overfidden by user)
            kwargs['limit'] = config['search.default.limit']
        if 'offset' not in kwargs:
            kwargs['offset'] = 0
        results = results.order_by(Member.name.asc())
        results = results.limit(kwargs['limit']).offset(kwargs['offset']) # Apply limit and offset (must be done at end)
        # Return search results
        return action_ok(
            data = {'list': [member.to_dict(**kwargs) for member in results.all()]} ,
        )
    @web
    @auth
    @role_required('admin')
    def create(self, **kwargs):
        """
        POST /groups: Create a new group
        Creates a new group with the specified username with the currently
        logged in user as as administrator of the new group
        @api groups 1.0 (WIP)
        @param username a unique username, cannot clash with existing usernames
        @param name display name
        @param description description of groups purpose
        @param default_role
        admin
        editor
        contributor
        observer
        @param join_mode
        public
        invite
        invite_and_request
        @param member_visibility
        public
        private
        @param default_content_visibility (plus account required)
        public
        private
        @return 400 data invalid (ie, username that already exisits)
        @return 201 group created, data.id = new group id
        @return 301 if format redirect specifyed will redirect to show group
        """
        # Pop the optional flag before validation so it is not treated as a setting.
        create_push_assignment = kwargs.get('create_push_assignment')
        if create_push_assignment:
            del kwargs['create_push_assignment']
        # url('groups') + POST
        # if only display name is specified, generate a user name
        if not kwargs.get('username') and kwargs.get("name"):
            kwargs["username"] = _gen_username(make_username(kwargs.get("name")))
        # if only user name is specified, generate a display name
        if not kwargs.get('name') and kwargs.get("username"):
            kwargs["name"] = kwargs.get("username")
        # Non-paid accounts are forced to public visibility defaults.
        if not c.logged_in_persona.has_account_required('plus'):
            if not kwargs.get('member_visibility'):
                kwargs['member_visibility'] = 'public'
            if not kwargs.get('default_content_visibility'):
                kwargs['default_content_visibility'] = 'public'
        # Need to validate before creating group, not sure how we could do this via settings controller :S GregM
        data = {'settings':kwargs, 'action':'create'}
        data = validate_dict(data, CreateGroupSchema(), dict_to_validate_key='settings', template_error='groups/edit')
        group_dict = data['settings']
        # Create and set group admin here!
        group = Group()
        group.id = group_dict['username']
        group.name = group_dict['name']
        group.status = 'active'
        group_admin = GroupMembership()
        group_admin.member = c.logged_in_persona
        group_admin.role = "admin"
        group.members_roles.append(group_admin)
        group.payment_account = c.logged_in_persona.payment_account # The group is allocated the same payment account as the creator. All groups are free but if they want the plus features like approval and private content then this is needed
        # GregM: Dirty hack, again... In demo mode users & groups don't have payment accounts, we need to override the account_type manually
        if config['demo_mode']:
            group.account_type = c.logged_in_persona.account_type
        #AllanC - TODO - limit number of groups a payment account can support - the could be the differnece between plus and corporate
        # GregM: Create current user as admin of group too to allow them to admin group (until permission tree is sorted!)
        #if isinstance(c.logged_in_persona, Group):
        # group_admin_user = GroupMembership()
        # group_admin_user.member = c.logged_in_user
        # group_admin_user.role = "admin"
        # group.members_roles.append(group_admin_user)
        Session.add(group)
        Session.commit()
        # AllanC - Hack
        # The group has been created, but the additional feilds handled by the settings controller need to be updated (e.g. description and logo image)
        # However, we have not set c.logged_in_persona so the call to the settings controller will not have the permissions for the newly created group
        # We fake the login here
        # We cant use set_persona as this called the set_persona controller action and calls a redirect
        logged_in_persona = c.logged_in_persona # have to remeber previous persona to return to or set_persona below thinks were already swiched and will perform no action
        logged_in_persona_role = c.logged_in_persona_role
        c.logged_in_persona = group
        c.logged_in_persona_role = 'admin'
        # AllanC - old call? to be removed?
        # self.update(group.username, **kwargs) # Overlay any additional form fields over the new group object using the update method - also intercepts if format is redirect
        # Call settings controller to update group settings!
        kwargs['panel'] = 'general'
        settings_update(group, private=True, **kwargs)
        # GregM: Create new request for group (Arrgh, have to fudge the format otherwise we cause a redirect):
        format = c.format
        if create_push_assignment:
            c.format = 'python'
            assignment = create_content(type='assignment', private=False, title=_("Send us your stories"), content=_("Join us in making the news by telling us your stories, sending in videos, pictures or audio: Get recognition and get published - share your news with us now!"), format="python")
            group.config['push_assignment'] = assignment.get('data', {}).get('id')
        c.format = format
        # Restore the persona that was faked above.
        c.logged_in_persona = logged_in_persona
        c.logged_in_persona_role = logged_in_persona_role
        user_log.info("Created Group #%s (%s)" % (group.id, group.name))
        # AllanC - Temp email alert for new group
        send_email(config['email.event_alert'], subject='new group', content_text='%s - %s by %s' % (c.logged_in_persona.username, c.logged_in_persona.name, c.logged_in_user.username))
        # GregM: prompt_aggregate for new group :)
        set_persona(group, prompt_aggregate=True) # Will redirect if in html or redirect mode
        return action_ok(message=_('group created'), data={'id':group.id}, code=201)
    @web
    #@auth ? need token?
    @authorize
    @role_required('admin')
    def new(self, **kwargs):
        """
        GET /groups/new: Form to create a new item
        @return 200 - ???
        """
        #url_for('new_group')
        ##print settings_base
        # Renders the group-creation form; actual creation happens in create().
        return action_ok( action="create", template='groups/create')
@web
@auth
@role_required('admin')
def update(self, id, **kwargs):
"""
PUT /groups/{id}: Depricated!
"""
# h.form(h.url_for('message', id=ID), method='delete')
# Rather than delete the setting this simple blanks the required fields - or removes the config dict entry
raise action_error(_('operation not supported'), code=501)
group = get_group(id, is_current_persona_admin=True)
group_dict = group.to_dict()
group_dict.update(kwargs)
data = {'group':group_dict, 'action':'edit'}
data = validate_dict(data, GroupSchema(), dict_to_validate_key='group', template_error='groups/edit')
group_dict = data['group']
group.name = group_dict['name']
#group.description = group_dict['description'] GregM: Broke description saving, ARRGHHHHHH!!!!!!!!!
group.default_role = group_dict['default_role']
group.join_mode = group_dict['join_mode']
group.member_visibility = group_dict['member_visibility']
group.default_content_visibility = group_dict.get('default_content_visibility', "public") # Shish: hack
# GregM: call settings_update with logo_file as avatar
# ARRRGHHH: Hacked c.format as settings_update redirects on html
# old_persona = c.logged_in_persona
## GregM DIRTIEST HACK IN HISTORY! OMFG! Works... do not try this at | |
# gh_stars: 1-10
from collections import namedtuple

H4 = 'html4'  # only in html4
H5 = 'html5'  # only in html5
HB = 'both'   # allowed in both
N = 'normal'  # has a closing tag
E = 'empty'   # doesn't have a closing tag

# ES records a full HTML element description; ER a shorter tag/info pairing.
ES = namedtuple('ES', 'tag, standard, type_, info')
# BUG FIX: the typename was 'ES', so repr() of ER instances misreported the
# type and pickling resolved to the wrong class; it now matches the bound name.
ER = namedtuple('ER', 'tag, info')
HTML_ELEMENTS = [
ES("html", HB, N, "Defines an HTML document"),
ES("head", HB, N, "Defines information about the document"),
ES("title", HB, N, "Defines a title for the document"),
ES("body", HB, N, "Defines the document's body"),
ES("h1", HB, N, "Defines HTML heading, rank 1."),
ES("h2", HB, N, "Defines HTML heading, rank 2."),
ES("h3", HB, N, "Defines HTML heading, rank 3."),
ES("h4", HB, N, "Defines HTML heading, rank 4."),
ES("h5", HB, N, "Defines HTML heading, rank 5."),
ES("h6", HB, N, "Defines HTML heading, rank 6."),
ES("p", HB, N, "Defines a paragraph"),
ES("br", HB, E, "Inserts a single line break"),
ES("hr", HB, E, "Defines a thematic change in the content"),
# Formatting
ES("acronym", H4, N, "Not supported in HTML5. Use <abbr> instead. Defines an acronym."),
ES("abbr", HB, N, "Defines an abbreviation or an acronym"),
ES("address", HB, N, "Defines contact information for the author/owner of a document/article"),
ES("b", HB, N, "Defines bold text"),
ES("bdi", H5, N, "Isolates a part of text that might be formatted in a different direction from "
"other text outside it"), # noqa: E501
ES("bdo", HB, N, "Overrides the current text direction"),
ES("big", H4, N, "Not supported in HTML5. Use CSS instead. Defines big text"),
ES("blockquote", HB, N, "Defines a section that is quoted from another source"),
ES("center", H4, N, "Not supported in HTML5. Use CSS instead. Defines centered text."),
ES("cite", HB, N, "Defines the title of a work"),
ES("code", HB, N, "Defines a piece of computer code"),
ES("del", HB, N, "Defines text that has been deleted from a document"),
ES("dfn", HB, N, "Represents the defining instance of a term"),
ES("em", HB, N, "Defines emphasized text "),
ES("font", H4, N, "Not supported in HTML5. Use CSS instead. Defines font, color, and size for text."),
ES("i", HB, N, "Defines a part of text in an alternate voice or mood"),
ES("ins", HB, N, "Defines a text that has been inserted into a document"),
ES("kbd", HB, N, "Defines keyboard input"),
ES("mark", H5, N, "Defines marked/highlighted text"),
ES("meter", HB, N, "Defines a scalar measurement within a known range (a gauge)"),
ES("pre", HB, N, "Defines preformatted text"),
ES("progress", H5, N, "Represents the progress of a task"),
ES("q", HB, N, "Defines a short quotation"),
ES("rp", H5, N, "Defines what to show in browsers that do not support ruby annotations"),
ES("rt", H5, N, "Defines an explanation/pronunciation of characters (for East Asian typography)"),
ES("ruby", H5, N, "Defines a ruby annotation (for East Asian typography)"),
ES("s", HB, N, "Defines text that is no longer correct"),
ES("samp", HB, N, "Defines sample output from a computer program"),
ES("small", HB, N, "Defines smaller text"),
ES("strike", H4, N, "Not supported in HTML5. Use <del> or <s> instead. Defines strikethrough text"),
ES("strong", HB, N, "Defines important text"),
ES("sub", HB, N, "Defines subscripted text"),
ES("sup", HB, N, "Defines superscripted text"),
ES("time", H5, N, "Defines a date/time"),
ES("tt", H4, N, "Not supported in HTML5. Use CSS instead. Defines teletype text."),
ES("u", HB, N, "Defines text that should be stylistically different from normal text"),
ES("var", HB, N, "Defines a variable"),
ES("wbr", H5, E, "Defines a possible line-break"),
# Forms and Input
ES("form", HB, N, "Defines an HTML form for user input"),
ES("input", HB, E, "Defines an input control"),
ES("textarea", HB, N, "Defines a multiline input control (text area)"),
ES("button", HB, N, "Defines a clickable button"),
ES("select", HB, N, "Defines a drop-down list"),
ES("optgroup", HB, N, "Defines a group of related options in a drop-down list"),
ES("option", HB, N, "Defines an option in a drop-down list"),
ES("label", HB, N, "Defines a label for an <input> element"),
ES("fieldset", HB, N, "Groups related elements in a form"),
ES("legend", HB, N, "Defines a caption for a <fieldset> element"),
ES("datalist", H5, N, "Specifies a list of pre-defined options for input controls"),
ES("keygen", H5, E, "Defines a key-pair generator field (for forms)"),
ES("output", H5, N, "Defines the result of a calculation"),
# Frames
ES("frame", H4, N, "Not supported in HTML5. Defines a window (a frame) in a frameset"),
ES("frameset", H4, N, "Not supported in HTML5. Defines a set of frames"),
ES("noframes", H4, N, "Not supported in HTML5. Defines an alternate content for users "
"that do not support frames"),
ES("iframe", HB, N, "Defines an inline frame"),
# Images
ES("img", HB, E, "Defines an image"),
ES("map", HB, N, "Defines a client-side image-map"),
ES("area", HB, E, "Defines an area inside an image-map"),
ES("canvas", H5, N, "Used to draw graphics, on the fly, via scripting (usually JavaScript)"),
ES("figcaption", H5, N, "Defines a caption for a <figure> element"),
ES("figure", H5, N, "Specifies self-contained content"),
ES("picture", H5, N, "Defines a container for multiple image resources"),
# Audio / Video
ES("audio", H5, N, "Defines sound content"),
ES("source", H5, E, "Defines multiple media resources for media elements (<video>, <audio> and <picture>"),
ES("track", H5, E, "Defines text tracks for media elements (<video> and <audio>"),
ES("video", H5, N, "Defines a video or movie"),
# Links
ES("a", HB, N, "Defines a hyperlink"),
ES("link", HB, E, "Defines the relationship between a document and an external resource "
"(most used to link to style sheets)"),
ES("nav", H5, N, "Defines navigation links"),
# Lists
ES("ul", HB, N, "Defines an unordered list"),
ES("ol", HB, N, "Defines an ordered list"),
ES("li", HB, N, "Defines a list item"),
ES("dir", H4, N, "Not supported in HTML5. Use <ul> instead. Defines a directory list"),
ES("dl", HB, N, "Defines a description list"),
ES("dt", HB, N, "Defines a term/name in a description list"),
ES("dd", HB, N, "Defines a description of a term/name in a description list"),
ES("menu", HB, N, "Defines a list/menu of commands"),
ES("menuitem", H5, N, "Defines a command/menu item that the user can invoke from a popup menu"),
# Tables
ES("table", HB, N, "Defines a table"),
ES("caption", HB, N, "Defines a table caption"),
ES("th", HB, N, "Defines a header cell in a table"),
ES("tr", HB, N, "Defines a row in a table"),
ES("td", HB, N, "Defines a cell in a table"),
ES("thead", HB, N, "Groups the header content in a table"),
ES("tbody", HB, N, "Groups the body content in a table"),
ES("tfoot", HB, N, "Groups the footer content in a table"),
ES("col", HB, E, "Specifies column properties for each column within a <colgroup> element"),
ES("colgroup", HB, N, "Specifies a group of one or more columns in a table for formatting"),
# Styles and Semantics
ES("style", HB, N, "Defines style information for a document"),
ES("div", HB, N, "Defines a section in a document"),
ES("span", HB, N, "Defines a section in a document"),
ES("header", H5, N, "Defines a header for a document or section"),
ES("footer", H5, N, "Defines a footer for a document or section"),
ES("main", H5, N, "Specifies the main content of a document"),
ES("section", H5, N, "Defines a section in a document"),
ES("article", H5, N, "Defines an article"),
ES("aside", H5, N, "Defines content aside from the page content"),
ES("details", H5, N, "Defines additional details that the user can view or hide"),
ES("dialog", H5, N, "Defines a dialog box or window"),
ES("summary", H5, N, "Defines a visible heading for a <details> element"),
ES("data", H5, N, "Links the given content with a machine-readable translation"),
# Meta Info
ES("head", HB, N, "Defines information about the document"),
ES("meta", HB, E, "Defines metadata about an HTML document"),
ES("base", HB, E, "Specifies the base URL/target for all relative URLs in a document"),
ES("basefont", H4, N, "Not supported in HTML5. Use CSS instead. Specifies a default color, size, "
"and font for all text in a document"),
# Programming
ES("script", HB, N, "Defines a client-side script"),
ES("noscript", HB, N, "Defines an alternate content for users that do not support client-side scripts"),
ES("applet", H4, | |
type checking removes much of the power of using a
## dynamic language like Python.
## src = EventProcessorParameter(default=None,constant=True,precedence=0.10,doc=
## """The EventProcessor from which messages originate.""")
## dest = EventProcessorParameter(default=None,constant=True,precedence=0.11,doc=
## """The EventProcessor to which messages are delivered.""")
src = param.Parameter(default=None,constant=True,precedence=0.10,doc=
"""The EventProcessor from which messages originate.""")
dest = param.Parameter(default=None,constant=True,precedence=0.11,doc=
"""The EventProcessor to which messages are delivered.""")
src_port = param.Parameter(default=None,precedence=0.20,doc=
"""
Identifier that can be used to distinguish different types of outgoing connections.
EventProcessors that generate only a single type of
outgoing event will typically use a src_port of None. However,
if multiple types of communication are meaningful, the
EventProcessor can accept other values for src_port. It is up
to the src EventProcessor to deliver appropriate data to each
port, and to declare what will be sent over that port.
""")
dest_port = param.Parameter(default=None,precedence=0.21,doc=
"""
Identifier that can be used to distinguish different types of incoming connections.
EventProcessors that accept only a single type of incoming
event will typically use a src_port of None. However, if
multiple types of communication are meaningful, the
EventProcessor can accept other values for dest_port. It is up
to the dest EventProcessor to process the data appropriately
for each port, and to define what is expected to be sent to
that port.
""")
# Should the lower bound be exclusive?
delay = param.Number(default=0.05,bounds=(0,None),doc="""
Simulation time between generation of an Event by the src and delivery to the dest.
Should normally be nonzero, to represent a causal with a well-defined ordering
of events.""")
private = param.Boolean(default=False,doc=
"""Set to true if this connection is for internal use only, not to be manipulated by a user.""")
# CEBALERT: should be reimplemented. It's difficult to understand,
# and contains the same code twice. But it does work.
def remove(self):
    """
    Remove this connection from its src's list of out_connections and its
    dest's list of in_connections.
    """
    # Delete in place (rather than rebinding the lists) so that any other
    # references to these lists observe the removal.  Iterate indices in
    # reverse so that deletions do not shift positions still to be
    # examined: the previous implementation collected indices in ascending
    # order and deleted them one by one, which skips entries (or raises
    # IndexError) if the connection appears more than once.
    for conns in (self.dest.in_connections, self.src.out_connections):
        for i in reversed(range(len(conns))):
            # Identity comparison on purpose: remove this exact object.
            if conns[i] is self:
                del conns[i]
def script_repr(self, imports=None, prefix="    "):
    """
    Generate a runnable command for creating this connection.

    Import statements needed by the generated command are appended to
    `imports` (a list supplied by the caller); the connect(...) command
    itself is returned as a string.  Private connections produce "".

    BUG FIX: `imports` previously defaulted to a mutable list (`[]`),
    which was mutated by the appends below and therefore accumulated
    entries across successive no-argument calls.
    """
    if imports is None:
        imports = []
    if self.private:
        # Internal-use connections are not part of the user-visible script.
        return ""
    settings = []
    for name, val in self.get_param_values():
        try:  # There may be a better way to figure out which parameters specify classes
            if issubclass(val, object):
                rep = val.__name__
                # Class-valued parameter: emit an import for the class.
                cls = val.__name__
                mod = val.__module__
                imports.append("from %s import %s" % (mod, cls))
        except TypeError:
            # val is not a class; src and dest are emitted positionally in
            # the connect(...) call below, so suppress them here.
            if name == "src" or name == "dest":
                rep = None
            else:
                rep = parameterized.script_repr(val, imports, prefix, settings)
        if rep is not None:
            settings.append('%s=%s' % (name, rep))
    # Import for this connection class itself.
    cls = self.__class__.__name__
    mod = self.__module__
    imports.append("from %s import %s" % (mod, cls))
    return _simulation_path+".connect('"+self.src.name+"','"+self.dest.name+ \
        "',connection_type="+self.__class__.__name__+ \
        ",\n"+prefix+(",\n"+prefix).join(settings) + ")"
# CB: event is not a Parameterized because of a (small) performance hit.
class Event(object):
    """Hierarchy of classes for storing simulation events of various types."""

    def __init__(self, time):
        self.time = time

    def __call__(self, sim):
        """
        Cause some computation to be performed, deliver a message, etc.,
        as appropriate for each subtype of Event. Should be passed the
        simulation object, to allow access to .time() etc.
        """
        # Subclasses must override; the base class has no behavior.
        raise NotImplementedError

    def __cmp__(self, ev):
        """
        Implements event comparison by time, allowing sorting,
        and queue maintenance using bisect module or minheap
        implementations, if needed.

        NOTE: identity comparisons should always be done using the
        'is' operator, not '=='.
        """
        # Equivalent to cmp(self.time, ev.time): yields 1, -1 or 0.
        return (self.time > ev.time) - (self.time < ev.time)
class EPConnectionEvent(Event):
    """
    An Event for delivery to an EPConnection.

    Provides access to a data field, which can be used for anything
    the src wants to provide, and a link to the connection over which
    it has arrived, so that the dest can determine what to do with the
    data.

    By default, the data is deepcopied before being added to this
    instance for safety (e.g. so that future changes to data
    structures do not affect messages arriving from the past).
    However, if you can ensure that the copying is not
    necessary (e.g. if you deepcopy before sending a set of
    identical messages), then you can pass deep_copy=False
    to avoid the copy.
    """

    def __init__(self, time, conn, data=None, deep_copy=True):
        super(EPConnectionEvent, self).__init__(time)
        assert isinstance(conn, EPConnection)
        # Copy defensively unless the caller explicitly opts out.
        self.data = deepcopy(data) if deep_copy else data
        self.conn = conn

    def __call__(self, sim):
        # Deliver the data to the destination EventProcessor.
        self.conn.dest.input_event(self.conn, self.data)

    def __repr__(self):
        # %r replaces the Python-2-only backtick repr syntax (removed in
        # Python 3); the output is identical.
        return "EPConnectionEvent(time=%r,conn=%r)" % (self.time, self.conn)
class CommandEvent(Event):
    """An Event consisting of a command string to execute."""

    def __init__(self,time,command_string):
        """
        Add the event to the simulation.

        Raises an exception if the command_string contains a syntax
        error.
        """
        self.command_string = command_string
        # Validate syntax now, so a bad command fails at scheduling time
        # rather than when the event eventually fires.
        self.__test()
        super(CommandEvent,self).__init__(time)

    def __repr__(self):
        # Python-2 backtick syntax: `x` is equivalent to repr(x).
        return "CommandEvent(time="+`self.time`+", command_string='"+self.command_string+"')"

    def script_repr(self,imports=[],prefix="    "):
        """Generate a runnable command for creating this CommandEvent."""
        # NOTE(review): the 'imports' argument is accepted but unused here.
        return _simulation_path+'.schedule_command('\
            +`self.time`+',"'+self.command_string+'")'

    def __call__(self,sim):
        """
        exec's the command_string in __main__.__dict__.

        Be sure that any required items will be present in
        __main__.__dict__; in particular, consider what will be present
        after the network is saved and restored. For instance, results of
        scripts you have run, or imports they make---all currently
        available in __main__.__dict__---will not be saved with the
        network.
        """
        # Presumably here to avoid importing __main__ into the rest of the file
        import __main__
        param.Parameterized(name='CommandEvent').message("Running command %s",
            self.command_string)
        try:
            # Python-2 statement form of exec(); runs in __main__'s namespace.
            exec self.command_string in __main__.__dict__
        except:
            print "Error in scheduled command:"
            raise

    def __test(self):
        """
        Check for SyntaxErrors in the command.
        """
        try:
            # Compile only (mode 'single', as for interactive input); the
            # result is discarded -- we just want the SyntaxError, if any.
            compile(self.command_string,"CommandString","single")
        except SyntaxError:
            print "Error in scheduled command:"
            raise
class FunctionEvent(Event):
    """
    Event that executes a given function fn(*args,**kw) when called.
    """

    def __init__(self, time, fn, *args, **kw):
        super(FunctionEvent, self).__init__(time)
        self.fn = fn
        self.args = args
        self.kw = kw

    def __call__(self, sim):
        # The simulation object is accepted for interface compatibility
        # with other Events but is not passed on to fn.
        self.fn(*self.args, **self.kw)

    def __repr__(self):
        # %r replaces the Python-2-only backtick repr syntax (removed in
        # Python 3); the output is identical.
        return 'FunctionEvent(%r,%r,*%r,**%r)' % (self.time, self.fn, self.args, self.kw)
class EventSequence(Event):
    """
    Event that contains a sequence of other events to be scheduled and
    executed.

    The .time attribute of each event in the sequence is interpreted as
    an offset relative to the scheduling time of the *previous* event in
    the sequence (offsets accumulate; see __call__).
    """

    def __init__(self, time, sequence):
        super(EventSequence, self).__init__(time)
        self.sequence = sequence

    def __call__(self, sim):
        # Enqueue copies of all the events in the sequence, each offset
        # from the previously scheduled one (cumulative offsets).
        sched_time = sim.time()
        for ev in self.sequence:
            new_ev = copy(ev)
            sched_time += ev.time
            new_ev.time = sched_time
            sim.enqueue_event(new_ev)

    def __repr__(self):
        # %r replaces the Python-2-only backtick repr syntax (removed in
        # Python 3); the output is identical.
        return 'EventSequence(%r,%r)' % (self.time, self.sequence)
class PeriodicEventSequence(EventSequence):
    """
    An EventSequence that reschedules itself periodically.

    Takes a period argument that determines how often the sequence
    will be scheduled. If the length of the sequence is longer than
    the period, then the length of the sequence will be used as the period.
    """
    ## JPHACKALERT: This should really be refactored into a
    ## PeriodicEvent class that periodically executes a single event,
    ## then the user can construct a periodic sequence using a
    ## combination of PeriodicEvent and EventSequence. This would
    ## change the behavior if the sequence length is longer than the
    ## period, but I'm not sure how important that is, and it might
    ## actually be useful the other way.

    def __init__(self, time, period, sequence):
        super(PeriodicEventSequence, self).__init__(time, sequence)
        self.period = period

    def __call__(self, sim):
        # Schedule this occurrence of the sequence.
        super(PeriodicEventSequence, self).__call__(sim)
        # Find the timed length of the sequence (offsets accumulate).
        seq_length = sum(e.time for e in self.sequence)
        if seq_length < self.period:
            # If the sequence is shorter than the period, then reschedule
            # the sequence to occur again after the period.
            self.time += self.period
        else:
            # If the sequence is longer than the period, then
            # reschedule to start after the sequence ends.
            self.time += seq_length
        sim.enqueue_event(self)

    def __repr__(self):
        # %r replaces the Python-2-only backtick repr syntax (removed in
        # Python 3); the output is identical.
        return 'PeriodicEventSequence(%r,%r,%r)' % (self.time, self.period, self.sequence)
# CB: code that previously existed in various places now collected
# together. The original timing code was not properly tested, and the
# current code has not been tested either: needs writing cleanly and
# testing. This whole class is pretty difficult to follow.
#
### JP: Is it possible that some or all of this can be more cleanly
### implemented using PeriodicEvents?
from math import floor
class SomeTimer(param.Parameterized):
"""
Provides a countdown timer for functions that run repeatedly.
There are two distinct ways to use the timer.
The first, via call_and_time(), is for calling some function every
specified number of steps for a specified duration. Currently
call_and_time() is | |
only one of new_left and new_right will necessarily be a
bracket, but index_of_bracket_char will definitely be a bracket.
"""
expanded = False
orig_left = left
orig_right = right
while (s[left] not in self.brackets or expand and not expanded) and \
(s[right] not in self.brackets or expand and not expanded) and \
(left > 0 or right < max_right):
expanded = False
if left > 0:
left -= 1
if s[left] in self.brackets:
other = self.find_matching_bracket(s[left], s, left)
if other is not None and other >= orig_right:
expanded = 'left'
if right < max_right:
right += 1
if s[right] in self.brackets:
other = self.find_matching_bracket(s[right], s, right)
if other is not None and other <= orig_left:
expanded = 'right'
if s[left] in self.brackets and (not expand or expanded == 'left'):
return left, right, s[left], left
if s[right] in self.brackets and (not expand or expanded == 'right'):
return left, right, s[right], right
return None, None, None, None
#@+node:ekr.20061113221414: *4* mb.find_matching_bracket
def find_matching_bracket(self, ch1, s, i):
    '''Find the bracket matching s[i] for self.language.'''
    # Record scan direction for the helper scanners.
    self.forward = ch1 in self.open_brackets
    # Look up the partner character for the starting bracket.
    target = None
    for bracket, partner in zip(self.brackets, self.matching_brackets):
        if ch1 == bracket:
            target = partner
            break
    if target is None:
        # ch1 is not a bracket character at all.
        return None
    scanner = self.scan if self.forward else self.scan_back
    return scanner(ch1, target, s, i)
#@+node:ekr.20160121164556.1: *4* mb.scan & helpers
def scan(self, ch1, target, s, i):
    '''
    Scan forward from s[i] for the bracket character `target` that
    matches ch1, skipping strings, comments and (where applicable)
    regex literals.  Return the index of the match, or None.
    '''
    level = 0  # Nesting depth of ch1/target pairs seen so far.
    while 0 <= i < len(s):
        progress = i
        ch = s[i]
        if ch in '"\'':
            # Scan to the end/beginning of the string.
            i = self.scan_string(s, i)
        elif self.starts_comment(s, i):
            i = self.scan_comment(s, i)
        elif ch == '/' and self.is_regex(s, i):
            i = self.scan_regex(s, i)
        elif ch == ch1:
            level += 1
            i += 1
        elif ch == target:
            level -= 1
            if level <= 0:
                # Matching close bracket at the same nesting level.
                return i
            i += 1
        else:
            i += 1
        # Termination guarantee: every branch must advance the scan.
        assert i > progress
    # Not found
    return None
#@+node:ekr.20160119090634.1: *5* mb.scan_comment
def scan_comment(self, s, i):
    '''
    Return the index of the character just past the comment at s[i],
    honoring the current scan direction (self.forward).
    '''
    i1 = i  # Retained to mirror back_scan_comment's trace variable.
    # Direction-dependent delimiters: when scanning backward, the roles
    # of the start and end delimiters are swapped.
    start = self.start_comment if self.forward else self.end_comment
    end = self.end_comment if self.forward else self.start_comment
    offset = 1 if self.forward else - 1
    if g.match(s, i, start):
        # Multiline comment: scan for the opposite delimiter.
        if not self.forward:
            i1 += len(end)
        i += offset
        while 0 <= i < len(s):
            if g.match(s, i, end):
                i = i + len(end) if self.forward else i - 1
                return i
            i += offset
        self.oops('unmatched multiline comment')
    elif self.forward:
        # Single-line comment: scan to the newline.
        # (Removed an unused local, `target = '\n'`, from the original.)
        while 0 <= i < len(s):
            if s[i] == '\n':
                i += 1
                return i
            i += 1
    else:
        # Careful: scan to the *first* target on the line
        target = self.single_comment
        found = None
        i -= 1
        while 0 <= i < len(s) and s[i] != '\n':
            if g.match(s, i, target):
                found = i
            i -= 1
        if found is None:
            self.oops('can not happen: unterminated single-line comment')
            found = 0
        return found
    return i
#@+node:ekr.20160119101851.1: *5* mb.starts_comment
def starts_comment(self, s, i):
    '''Return True if s[i] starts a comment.'''
    assert 0 <= i < len(s)
    if self.forward:
        # Forward scan: check for a single-line or multiline start delim.
        if self.single_comment and g.match(s, i, self.single_comment):
            return True
        return (
            self.start_comment and self.end_comment and
            g.match(s, i, self.start_comment)
        )
    # Backward scan: a newline "starts" (seen in reverse) any single-line
    # comment appearing earlier on that line.
    if s[i] == '\n':
        if self.single_comment:
            # Scan backward for any single-comment delim.
            i -= 1
            while i >= 0 and s[i] != '\n':
                if g.match(s, i, self.single_comment):
                    return True
                i -= 1
            return False
    # Otherwise, check for a multiline end delimiter at s[i].
    return (
        self.start_comment and self.end_comment and
        g.match(s, i, self.end_comment)
    )
#@+node:ekr.20160119230141.1: *4* mb.scan_back & helpers
def scan_back(self, ch1, target, s, i):
    '''
    Scan backwards from s[i] for the bracket character `target` that
    matches ch1, skipping comments, strings and regex literals.
    Return the index of the match, or None.
    '''
    level = 0  # Nesting depth of ch1/target pairs seen so far.
    while i >= 0:
        progress = i
        ch = s[i]
        if self.ends_comment(s, i):
            i = self.back_scan_comment(s, i)
        elif ch in '"\'':
            # Scan to the beginning of the string.
            i = self.scan_string(s, i)
        elif ch == '/' and self.is_regex(s, i):
            i = self.scan_regex(s, i)
        elif ch == ch1:
            level += 1
            i -= 1
        elif ch == target:
            level -= 1
            if level <= 0:
                # Matching open bracket at the same nesting level.
                return i
            i -= 1
        else:
            i -= 1
        # Termination guarantee: every branch must move left.
        assert i < progress
    # Not found
    return None
#@+node:ekr.20160119230141.2: *5* mb.back_scan_comment
def back_scan_comment(self, s, i):
    '''
    Scanning backward, return the index of the next character to examine
    before the comment whose end is at s[i].
    '''
    i1 = i
    if g.match(s, i, self.end_comment):
        # Multiline comment: scan back to its start delimiter.
        i1 += len(self.end_comment)  # For traces.
        i -= 1
        while i >= 0:
            if g.match(s, i, self.start_comment):
                # Step just before the start delimiter.
                i -= 1
                return i
            i -= 1
        self.oops('unmatched multiline comment')
        return i
    # Careful: scan to the *first* target on the line
    found = None
    i -= 1
    while i >= 0 and s[i] != '\n':
        if g.match(s, i, self.single_comment):
            # NOTE(review): scan_comment's backward branch records the delim
            # index itself (`found = i`); the extra -1 here (position before
            # the delim) is an asymmetry worth confirming.
            found = i-1
        i -= 1
    if found is None:
        self.oops('can not happen: unterminated single-line comment')
        found = 0
    return found
#@+node:ekr.20160119230141.4: *5* mb.ends_comment
def ends_comment(self, s, i):
    '''
    Return True if s[i] ends a comment. This is called while scanning
    backward, so this is a bit of a guess.
    '''
    if s[i] == '\n':
        # This is the hard (dubious) case.
        # Let w, x, y and z stand for any strings not containing // or quotes.
        # Case 1: w"x//y"z Assume // is inside a string.
        # Case 2: x//y"z Assume " is inside the comment.
        # Case 3: w//x"y"z Assume both quotes are inside the comment.
        #
        # That is, we assume (perhaps wrongly) that a quote terminates a
        # string if and *only* if the string starts *and* ends on the line.
        if self.single_comment:
            # Scan backward for single-line comment delims or quotes.
            quote = None
            i -= 1
            while i >= 0 and s[i] != '\n':
                progress = i
                if quote and s[i] == quote:
                    quote = None
                    i -= 1
                elif s[i] in '"\'':
                    if not quote:
                        quote = s[i]
                    i -= 1
                elif g.match(s, i, self.single_comment):
                    # Assume that there is a comment only if the comment delim
                    # isn't inside a string that begins and ends on *this* line.
                    if quote:
                        # BUG FIX: the original compared s[i] against the
                        # letter 'n' instead of the newline '\n', so this
                        # back-scan could run past the start of the line.
                        while i >= 0 and s[i] != '\n':
                            if s[i] == quote:
                                return False
                            i -= 1
                    return True
                else:
                    i -= 1
                assert progress > i
        return False
    return (
        self.start_comment and
        self.end_comment and
        g.match(s, i, self.end_comment))
#@+node:ekr.20160119104148.1: *4* mb.oops
def oops(self, s):
    '''Report an error in the match-brackets command.'''
    # Display the message in red via the global error-reporting helper.
    g.es(s, color='red')
#@+node:ekr.20160119094053.1: *4* mb.run
#@@nobeautify
def run(self):
'''The driver for the MatchBrackets class.
With no selected range, find the nearest bracket and select from
it to it's match, moving cursor to mathc. With selected range, the
first time, move cursor back to other end of range. The second time,
select enclosing range.
'''
#
# A partial fix for bug 127: Bracket matching is buggy.
w = self.c.frame.body.wrapper
s = w.getAllText()
_mb = self.c.user_dict['_match_brackets']
sel_range = w.getSelectionRange()
if not w.hasSelection():
_mb['count'] = 1
if _mb['range'] == sel_range and _mb['count'] == 1:
# haven't been to other end yet
_mb['count'] += 1
# move insert point to other end of selection
insert = 1 if w.getInsertPoint() == sel_range[0] else 0
w.setSelectionRange(sel_range[0], sel_range[1], insert=sel_range[insert])
return
# find bracket nearest cursor
max_right = len(s) - 1 # insert point can be past last char.
left = right = min(max_right, w.getInsertPoint())
left, right, ch, index = self.expand_range(s, left, right, max_right)
if left is None:
g.es("Bracket not found")
return
index2 = self.find_matching_bracket(ch, s, index)
# if this is the first time we've selected the range index-index2, do
# nothing extra. The second time, move cursor to other end (requires
# no special action here), and the third time, try to expand the range
# to any enclosing brackets
minmax = (min(index, index2), max(index, index2)+1)
# the range, +1 to match w.getSelectionRange()
if _mb['range'] == minmax: # count how many times this has been the answer
_mb['count'] += 1
else:
_mb['count'] = 1
_mb['range'] = minmax
if _mb['count'] >= 3: # try to expand range
left, right, ch, | |
#!/usr/bin/python
# Author: <NAME> <<EMAIL>>
# Imports
import os
import optparse
import rospkg
from yamlUtils import *
from termcolor import colored
from collections import namedtuple
# Setup
rospack = rospkg.RosPack()
# Types
ConfigVar = namedtuple("ConfigVar", "path name value")
ConfigDiff = namedtuple("ConfigDiff", "path name srcValue dstValue diff")
class DiffResult:
    """Enumeration-style namespace for the outcome of comparing a single
    config value between the source and destination files."""
    # Plain ints (rather than enum members) so existing ==/!= comparisons
    # on these values keep working unchanged.  Spelled out explicitly
    # instead of the original `range(4)` unpacking for readability; the
    # pointless no-op __init__ has been dropped (the implicit default is
    # identical).
    Equal = 0    # present in both files with the same value
    SrcOnly = 1  # present only in the source file
    DstOnly = 2  # present only in the destination file
    Changed = 3  # present in both files with different values
# Main function
def main():
    """Diff two YAML config files, print the result, and interactively
    apply merge actions, saving any modified file back to disk."""
    # Default config directory
    defaultDir = os.path.join(rospack.get_path('launch'), 'config')
    # Parse the arguments
    parser = optparse.OptionParser()
    parser.add_option('-b', '--basepath', type = "string", dest = "basepath", help = "Base path", default = defaultDir)
    parser.add_option('-s', '--src', type = "string", dest = "src", help = "Source config file")
    parser.add_option('-d', '--dst', type = "string", dest = "dst", help = "Destination config file")
    parser.add_option('-e', '--element', type = "string", dest = "element", help = "Element in the config file to compare", default = "/")
    options, args = parser.parse_args()
    # Process the arguments
    if not options.src:
        parser.error("Source config file not specified!")
    if not options.dst:
        parser.error("Destination config file not specified!")
    basepath = options.basepath.rstrip('/')
    srcFilename = options.src
    dstFilename = options.dst
    srcPath = basepath + "/" + srcFilename
    dstPath = basepath + "/" + dstFilename
    element = options.element
    # Display which configs are being compared
    print
    print "COMPARE CONFIG FILES:"
    print colored("Src: " + srcPath, "yellow")
    print colored("Dst: " + dstPath, "yellow")
    # Check whether the files exist
    if not os.path.isfile(srcPath):
        print
        error("Source config file not found!", True)
    if not os.path.isfile(dstPath):
        print
        error("Destination config file not found!", True)
    # Load the source and destination config files
    srcData = readYAML(srcPath)
    dstData = readYAML(dstPath)
    # Retrieve the yaml nodes to compare
    elementComponents = pathToComponents(element)
    element = componentsToPath(elementComponents)
    elementPrefix = "/" + element if element else element
    if elementComponents:
        srcNode = getNodeByComponents(srcData, elementComponents)
        if (srcNode is None) or (not type(srcNode) is dict):
            error("Source config file does not contain the specified element to compare!", True)
        dstNode = getNodeByComponents(dstData, elementComponents)
        if (dstNode is None) or (not type(dstNode) is dict):
            error("Destination config file does not contain the specified element to compare!", True)
    else:
        srcNode = srcData
        dstNode = dstData
    # Display which element of the config files is being compared
    if element:
        print "Element: /" + element
    else:
        print "Element: <all>"
    print
    # Parse the source and destination node trees into lists and sort them (important!)
    # Sorting by path is what makes the two-pointer merge walk below valid.
    srcConfigs = listOfLeafNodes(srcNode)
    srcConfigs.sort(key = lambda tmp: tmp.path)
    dstConfigs = listOfLeafNodes(dstNode)
    dstConfigs.sort(key = lambda tmp: tmp.path)
    # Merge and diff the source and destination configs
    # Two-pointer walk over both sorted lists, classifying every path as
    # SrcOnly, DstOnly, Equal or Changed.
    configs = []
    srcIndex = 0
    dstIndex = 0
    while True:
        haveSrc = (srcIndex < len(srcConfigs))
        haveDst = (dstIndex < len(dstConfigs))
        srcConfig = srcConfigs[srcIndex] if haveSrc else None
        dstConfig = dstConfigs[dstIndex] if haveDst else None
        if not haveSrc and not haveDst:
            break
        elif haveSrc and not haveDst:
            configs.append(ConfigDiff(srcConfig.path, srcConfig.name, srcConfig.value, None, DiffResult.SrcOnly))
            srcIndex += 1
        elif haveDst and not haveSrc:
            configs.append(ConfigDiff(dstConfig.path, dstConfig.name, None, dstConfig.value, DiffResult.DstOnly))
            dstIndex += 1
        else:
            if srcConfig.path == dstConfig.path:
                diffResult = DiffResult.Equal if srcConfig.value == dstConfig.value else DiffResult.Changed
                configs.append(ConfigDiff(srcConfig.path, srcConfig.name, srcConfig.value, dstConfig.value, diffResult))
                srcIndex += 1
                dstIndex += 1
            elif srcConfig.path < dstConfig.path:
                configs.append(ConfigDiff(srcConfig.path, srcConfig.name, srcConfig.value, None, DiffResult.SrcOnly))
                srcIndex += 1
            else:
                configs.append(ConfigDiff(dstConfig.path, dstConfig.name, None, dstConfig.value, DiffResult.DstOnly))
                dstIndex += 1
    # Print a guide for interpreting the coming diff
    print "GUIDE:"
    print colored("Config in " + srcFilename + " only (source)", "green")
    print colored("Config in " + dstFilename + " only (destination)", "red")
    print colored("Config value different between " + srcFilename + " and " + dstFilename, "cyan")
    print
    # Print the calculated diff between source and destination
    identical = True
    printSeparator()
    print "DIFF RESULTS: (" + colored(srcFilename, "yellow") + " ==> " + colored(dstFilename, "yellow") + ")"
    for config in configs:
        printConfigDiff(config, elementPrefix)
        if config.diff != DiffResult.Equal:
            identical = False
    if identical:
        # Nothing differs, so there is nothing to merge.
        print colored("Everything is equal!", "green")
        print
        print "Nothing more to do..."
        print
        return
    print
    # Give some options to the user to take action
    printSeparator()
    print "PERFORM AN ACTION:"
    print " (a) Add all " + colored("new", "green") + " configs to " + colored(dstFilename, "yellow")
    print " (r) Remove all " + colored("old", "red") + " configs from " + colored(dstFilename, "yellow")
    print " (u) Update all " + colored("new", "green") + "/" + colored("old", "red") + " configs in " + colored(dstFilename, "yellow")
    print " (m) Mirror all " + colored("new", "green") + "/" + colored("old", "red") + "/" + colored("changed", "cyan") + " configs to " + colored(dstFilename, "yellow")
    print " (c) Perform a custom merge"
    print " (q) Quit (default)"
    while True:
        # Query the user for a selection
        userChoice = raw_input("Choice? ")
        # Perform the selected action; the perform* helpers modify the
        # in-memory YAML trees only -- writing to disk happens below.
        touchedSrc = False
        touchedDst = False
        if userChoice == "a":
            print
            print colored("Writing to " + dstFilename + ":", "yellow")
            performAdd(configs, dstNode, elementPrefix)
            touchedDst = True
            print
        elif userChoice == "r":
            print
            print colored("Writing to " + dstFilename + ":", "yellow")
            performRemove(configs, dstNode, elementPrefix)
            touchedDst = True
            print
        elif userChoice == "u":
            print
            print colored("Writing to " + dstFilename + ":", "yellow")
            performAdd(configs, dstNode, elementPrefix)
            performRemove(configs, dstNode, elementPrefix)
            touchedDst = True
            print
        elif userChoice == "m":
            print
            print colored("Writing to " + dstFilename + ":", "yellow")
            performAdd(configs, dstNode, elementPrefix)
            performRemove(configs, dstNode, elementPrefix)
            performChange(configs, dstNode, elementPrefix)
            touchedDst = True
            print
        elif userChoice == "c":
            print
            print colored("Writing to " + srcFilename + " and/or " + dstFilename + " as required:", "yellow")
            touchedSrc, touchedDst = performCustomMerge(configs, srcNode, dstNode, elementPrefix, srcFilename, dstFilename)
            print
        elif userChoice == "q" or not userChoice:
            print
        else:
            # Unrecognized choice: prompt again.
            continue
        # Write the source and destination data to disk if they have been modified
        if touchedSrc:
            writeYAML(srcPath, srcData)
            print colored("Saved all changes to the source file " + srcFilename + "!", "yellow")
        else:
            print "The source file " + srcFilename + " was not modified!"
        if touchedDst:
            writeYAML(dstPath, dstData)
            print colored("Saved all changes to the destination file " + dstFilename + "!", "yellow")
        else:
            print "The destination file " + dstFilename + " was not modified!"
        print
        break
# Add all new configs from the source to the destination
def performAdd(configs, dstNode, elementPrefix):
    """Add every SrcOnly diff entry to dstNode (in-memory; caller saves).

    configs       -- list of ConfigDiff entries from the diff step
    dstNode       -- destination YAML subtree, modified in place
    elementPrefix -- path prefix of the compared element (display only)
    """
    # Add the appropriate configs
    numAdded = 0
    for config in configs:
        if config.diff != DiffResult.SrcOnly:
            continue
        if writeNodeByPath(dstNode, config.path, config.srcValue):
            numAdded += 1
        else:
            # NOTE(review): this concatenation assumes srcValue is a string;
            # a non-string value would raise TypeError here -- confirm.
            warning("Failed to add " + elementPrefix + config.path + " with value " + config.srcValue + "!")
    if elementPrefix:
        print colored("Added " + str(numAdded) + " new config(s) to " + elementPrefix, "green")
    else:
        print colored("Added " + str(numAdded) + " new config(s)", "green")
# Remove all old configs in the destination that do not exist in the source
def performRemove(configs, dstNode, elementPrefix):
    """Remove every DstOnly diff entry from dstNode (in-memory; caller saves).

    configs       -- list of ConfigDiff entries from the diff step
    dstNode       -- destination YAML subtree, modified in place
    elementPrefix -- path prefix of the compared element (display only)
    """
    # Remove the appropriate configs
    numRemoved = 0
    for config in configs:
        if config.diff != DiffResult.DstOnly:
            continue
        if removeNodeByPath(dstNode, config.path):
            numRemoved += 1
        else:
            warning("Failed to remove " + elementPrefix + config.path + "!")
    if elementPrefix:
        print colored("Removed " + str(numRemoved) + " old config(s) from " + elementPrefix, "red")
    else:
        print colored("Removed " + str(numRemoved) + " old config(s)", "red")
# Update all configs in the destination that have a different value than the source
def performChange(configs, dstNode, elementPrefix):
    """Overwrite every Changed diff entry in dstNode with the source value
    (in-memory; caller saves).

    configs       -- list of ConfigDiff entries from the diff step
    dstNode       -- destination YAML subtree, modified in place
    elementPrefix -- path prefix of the compared element (display only)
    """
    # Change the appropriate configs
    numChanged = 0
    for config in configs:
        if config.diff != DiffResult.Changed:
            continue
        if writeNodeByPath(dstNode, config.path, config.srcValue):
            numChanged += 1
        else:
            # NOTE(review): assumes srcValue is a string (see performAdd).
            warning("Failed to change " + elementPrefix + config.path + " to value " + config.srcValue + "!")
    if elementPrefix:
        print colored("Changed " + str(numChanged) + " config(s) in " + elementPrefix, "cyan")
    else:
        print colored("Changed " + str(numChanged) + " config(s)", "cyan")
# Perform a custom merge based on the calculated diff
def performCustomMerge(configs, srcNode, dstNode, elementPrefix, srcFilename, dstFilename):
# Initialise how many actions of each type were performed
numAddedSrc = 0
numAddedDst = 0
numRemovedSrc = 0
numRemovedDst = 0
numUpdatedSrc = 0
numUpdatedDst = 0
numNothing = 0
# Print a guide for the response options for each diff
print "For each diff item you will have, where applicable, the following options:"
print " (A) Add an " + colored("old", "red") + " config back into the source file " + colored(srcFilename, "yellow")
print " (R) Remove a " + colored("new", "green") + " config from the source file " + colored(srcFilename, "yellow")
print " (U) Update a " + colored("changed", "cyan") + " config in the source file " + colored(srcFilename, "yellow")
print " (a) Add a " + colored("new", "green") + " config to the destination file " + colored(dstFilename, "yellow")
print " (r) Remove an " + colored("old", "red") + " config from the destination file " + colored(dstFilename, "yellow")
print " (u) Update a " + colored("changed", "cyan") + " config in the destination file " + colored(dstFilename, "yellow")
print " (n) Do nothing (default)"
print " (q) Quit"
print
# Print that the custom merge is starting
printSeparator()
# Count how many diff items there are
diffCount = 0
for config in configs:
if config.diff != DiffResult.SrcOnly and config.diff != DiffResult.DstOnly and config.diff != DiffResult.Changed:
continue
diffCount += 1
# Go through each diff item and prompt the user for an action
index = 0
for config in configs:
# Ignore diff items that are equal
if config.diff != DiffResult.SrcOnly and config.diff != DiffResult.DstOnly and config.diff != DiffResult.Changed:
continue
index += 1
# Print the diff item
print "Diff item (" + str(index) + " / " + str(diffCount) + ")"
printConfigDiff(config, elementPrefix)
fullPath = elementPrefix + "/" + config.path
# Query and perform the required action
shouldQuit = False
if config.diff == DiffResult.SrcOnly:
while not shouldQuit:
userChoice = raw_input("Action (R / a / n)? ")
if userChoice == "a":
if writeNodeByPath(dstNode, config.path, config.srcValue):
print colored("Added " | |
<reponame>isi-vista/adam
from itertools import chain
from typing import Sequence, Optional, Iterable
from immutablecollections import immutableset
from more_itertools import flatten
from adam.language.language_generator import LanguageGenerator
from adam.language.dependency import LinearizedDependencyTree
from adam.ontology import OntologyNode
from adam.curriculum.curriculum_utils import (
Phase1InstanceGroup,
CHOOSER_FACTORY,
phase1_instances,
standard_object,
learner_template_factory,
make_noise_objects,
)
from adam.language_specific import MASS_NOUN
from adam.language.dependency.universal_dependencies import NOUN
from adam.ontology.phase2_ontology import gravitationally_aligned_axis_is_largest
from adam.ontology import IS_SPEAKER, IS_ADDRESSEE
from adam.curriculum.phase1_curriculum import (
make_pass_template,
throw_on_ground_template,
throw_template,
throw_up_down_template,
throw_to_template,
bare_move_template,
transitive_move_template,
make_jump_template,
intransitive_roll,
transitive_roll_with_surface,
transitive_roll,
bare_fly,
fall_on_ground_template,
falling_template,
make_take_template,
make_push_templates,
make_walk_run_template,
)
from adam.language_specific.english.english_language_generator import (
USE_ADVERBIAL_PATH_MODIFIER,
)
from adam.ontology import THING
from adam.ontology.phase1_ontology import (
GAILA_PHASE_1_ONTOLOGY,
ANIMATE,
INANIMATE,
BOX,
FAST,
HARD_FORCE,
SOFT_FORCE,
SLOW,
SELF_MOVING,
CAN_JUMP,
ROLLABLE,
CAN_HAVE_THINGS_RESTING_ON_THEM,
BIRD,
bigger_than,
EAT,
AGENT,
PATIENT,
COOKIE,
PERSON,
WATERMELON,
TOWARD,
AWAY_FROM,
MOM,
LEARNER,
DOG,
BABY,
DAD,
CHAIR,
TABLE,
THEME,
SPIN,
HEAD,
HAND,
GROUND,
NONHUMAN_ANIMAL,
)
from adam.situation import Action, SituationObject
from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation
from adam.situation.templates.phase1_templates import (
sampled,
TemplateObjectVariable,
Phase1SituationTemplate,
)
from adam.language_specific.english.english_phase_1_lexicon import (
GAILA_PHASE_1_ENGLISH_LEXICON,
)
# Both truth values, used for boolean template parameters.
BOOL_SET = immutableset([True, False])

# easy hack to get all nouns that aren't recognized particulars, body parts, or mass nouns -- i.e. the ones that can be big or small
NODES_TO_CHOOSE_FROM = [
    x[0]
    for x in GAILA_PHASE_1_ENGLISH_LEXICON._ontology_node_to_word.items()  # pylint:disable=protected-access
    if x[1].part_of_speech in [NOUN]
    and MASS_NOUN not in x[1].properties
    and x[0] not in [BABY, HEAD, HAND, GROUND, NONHUMAN_ANIMAL, PERSON]
]
# differentiate between the nodes that can be modified with tall and those that can't
# (eligibility is decided by the gravitationally_aligned_axis_is_largest predicate)
TALL_ELIGIBLE_NODES = [
    node
    for node in NODES_TO_CHOOSE_FROM
    if gravitationally_aligned_axis_is_largest(node, GAILA_PHASE_1_ONTOLOGY)
]
# The remaining nodes take "big" rather than "tall".
BIG_ELIGIBLE_NODES = [
    node for node in NODES_TO_CHOOSE_FROM if node not in TALL_ELIGIBLE_NODES
]
# Module-level chooser instance created via CHOOSER_FACTORY.
CHOOSER = CHOOSER_FACTORY()
def make_eat_big_small_curriculum(  # pylint: disable=unused-argument
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Curriculum for "big"/"small" via eating, e.g. "Mom eats a big cookie".

    Situations are constructed directly (not from templates) because the
    template machinery fails to generate plurals.
    """

    def _instantiate(node, suffix=""):
        # Shorthand for building a SituationObject from an ontology node.
        return SituationObject.instantiate_ontology_node(
            ontology_node=node,
            debug_handle=node.handle + suffix,
            ontology=GAILA_PHASE_1_ONTOLOGY,
        )

    observer = _instantiate(LEARNER)
    situations = []
    for agent_node in [MOM, DAD, BABY, DOG]:
        agent = _instantiate(agent_node)
        for food_node in [COOKIE, WATERMELON]:
            target = _instantiate(food_node, "_salient")
            contrast = _instantiate(food_node, "_non_salient")
            distractors = [_instantiate(food_node, f"_{k}") for k in range(3)]
            backdrop = [observer] + distractors + [contrast]
            # One situation where the salient food is the biggest, and one
            # where it is the smallest.
            for size_relations in (
                [
                    bigger_than(target, contrast),
                    bigger_than(target, distractors),
                ],
                [
                    bigger_than(contrast, target),
                    bigger_than(distractors, target),
                ],
            ):
                situations.append(
                    HighLevelSemanticsSituation(
                        ontology=GAILA_PHASE_1_ONTOLOGY,
                        salient_objects=[agent, target],
                        other_objects=backdrop,
                        actions=[
                            Action(
                                EAT,
                                argument_roles_to_fillers=[
                                    (AGENT, agent),
                                    (PATIENT, target),
                                ],
                            )
                        ],
                        always_relations=size_relations,
                    )
                )
    return phase1_instances(
        "Big - Small Curriculum", situations, language_generator=language_generator
    )
def _tall_x_template(
    background: Iterable[TemplateObjectVariable],
    random_node: Optional[OntologyNode] = None,
) -> Phase1SituationTemplate:
    """Template for "tall X": a salient theme asserted bigger than a same-type
    background object.

    `random_node` is the object type; if omitted, one is drawn from
    TALL_ELIGIBLE_NODES on each call.
    """
    # BUG FIX: the default used to be `CHOOSER.choice(TALL_ELIGIBLE_NODES)`,
    # which Python evaluates once at import time, so every default call
    # reused the same "random" node. Draw per call instead.
    if random_node is None:
        random_node = CHOOSER.choice(TALL_ELIGIBLE_NODES)
    theme1 = standard_object("theme1", random_node)
    theme2 = standard_object("theme2", random_node)
    computed_background = [theme2]
    computed_background.extend(background)
    return Phase1SituationTemplate(
        f"tall-{theme1.handle}",
        salient_object_variables=[theme1],
        background_object_variables=computed_background,
        asserted_always_relations=[bigger_than(theme1, theme2)],
        gazed_objects=[theme1],
    )
def _big_x_template(
    background: Iterable[TemplateObjectVariable],
    random_node: Optional[OntologyNode] = None,
) -> Phase1SituationTemplate:
    """Template for "big X": a salient theme asserted bigger than a same-type
    background object (the learner is included for scale).

    `random_node` is the object type; if omitted, one is drawn from
    BIG_ELIGIBLE_NODES on each call.
    """
    # BUG FIX: the default used to be `CHOOSER.choice(BIG_ELIGIBLE_NODES)`,
    # evaluated once at import time, so every default call reused the same
    # "random" node. Draw per call instead.
    if random_node is None:
        random_node = CHOOSER.choice(BIG_ELIGIBLE_NODES)
    theme1 = standard_object("theme1", random_node)
    theme2 = standard_object("theme2", random_node)
    computed_background = [theme2, learner_template_factory()]
    computed_background.extend(background)
    return Phase1SituationTemplate(
        f"big-{theme1.handle}",
        salient_object_variables=[theme1],
        background_object_variables=computed_background,
        asserted_always_relations=[bigger_than(theme1, theme2)],
        gazed_objects=[theme1],
    )
def _little_x_template(
    background: Iterable[TemplateObjectVariable],
    random_node: Optional[OntologyNode] = None,
) -> Phase1SituationTemplate:
    """Template for "little X": a salient theme asserted smaller than a
    same-type background object.

    `random_node` is the object type; if omitted, one is drawn from
    BIG_ELIGIBLE_NODES on each call.
    """
    # BUG FIX: the default used to be `CHOOSER.choice(BIG_ELIGIBLE_NODES)`,
    # evaluated once at import time, so every default call reused the same
    # "random" node. Draw per call instead.
    if random_node is None:
        random_node = CHOOSER.choice(BIG_ELIGIBLE_NODES)
    theme1 = standard_object("theme1", random_node)
    theme2 = standard_object("theme2", random_node)
    computed_background = [theme2]
    computed_background.extend(background)
    return Phase1SituationTemplate(
        f"little-{theme1.handle}",
        salient_object_variables=[theme1],
        background_object_variables=computed_background,
        asserted_always_relations=[bigger_than(theme2, theme1)],
        gazed_objects=[theme1],
    )
def _short_x_template(
    background: Iterable[TemplateObjectVariable],
    random_node: Optional[OntologyNode] = None,
) -> Phase1SituationTemplate:
    """Template for "short X": a salient theme asserted smaller than a
    same-type background object.

    `random_node` is the object type; if omitted, one is drawn from
    TALL_ELIGIBLE_NODES on each call.
    """
    # BUG FIX: the default used to be `CHOOSER.choice(TALL_ELIGIBLE_NODES)`,
    # evaluated once at import time, so every default call reused the same
    # "random" node. Draw per call instead.
    if random_node is None:
        random_node = CHOOSER.choice(TALL_ELIGIBLE_NODES)
    theme1 = standard_object("theme1", random_node)
    theme2 = standard_object("theme2", random_node)
    computed_background = [theme2]
    computed_background.extend(background)
    return Phase1SituationTemplate(
        f"short-{theme1.handle}",
        salient_object_variables=[theme1],
        background_object_variables=computed_background,
        asserted_always_relations=[bigger_than(theme2, theme1)],
        gazed_objects=[theme1],
    )
def make_spin_tall_short_curriculum(
    # TODO: Refactor this curriculum
    # See: https://github.com/isi-vista/adam/issues/898
    # pylint: disable=unused-argument
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Curriculum for "tall"/"short" via spinning, e.g. "Mom spins a tall chair".

    Situations are constructed directly (not from templates) because the
    template machinery fails to generate plurals.
    """

    def _instantiate(node, suffix=""):
        # Shorthand for building a SituationObject from an ontology node.
        return SituationObject.instantiate_ontology_node(
            ontology_node=node,
            debug_handle=node.handle + suffix,
            ontology=GAILA_PHASE_1_ONTOLOGY,
        )

    observer = _instantiate(LEARNER)
    situations = []
    for spinner_node in [MOM, DAD, BABY, DOG]:
        spinner = _instantiate(spinner_node)
        for furniture_node in [CHAIR, TABLE]:
            spun_object = _instantiate(furniture_node, "_salient")
            contrast_object = _instantiate(furniture_node, "_non_salient")
            extras = [_instantiate(furniture_node, f"_{k}") for k in range(3)]
            backdrop = [observer] + extras + [contrast_object]
            # One situation where the salient object is the smallest, and
            # one where it is the biggest.
            for size_relations in (
                [
                    bigger_than(contrast_object, spun_object),
                    bigger_than(extras, spun_object),
                ],
                [
                    bigger_than(spun_object, contrast_object),
                    bigger_than(spun_object, extras),
                ],
            ):
                situations.append(
                    HighLevelSemanticsSituation(
                        ontology=GAILA_PHASE_1_ONTOLOGY,
                        salient_objects=[spinner, spun_object],
                        other_objects=backdrop,
                        actions=[
                            Action(
                                SPIN,
                                argument_roles_to_fillers=[
                                    (AGENT, spinner),
                                    (THEME, spun_object),
                                ],
                            )
                        ],
                        always_relations=size_relations,
                    )
                )
    return phase1_instances(
        "Tall - Short Curriculum", situations, language_generator=language_generator
    )
def make_imprecise_size_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Curriculum for imprecise size adjectives (big/little, tall/short).

    Draws random eligible ontology nodes and instantiates the four
    ``_big/_little/_tall/_short_x_template`` templates over them.
    """
    # we choose random tall and short nodes here
    random_tall_nodes = (
        [CHOOSER.choice(TALL_ELIGIBLE_NODES) for i in range(num_samples)]
        if num_samples
        else [CHOOSER.choice(TALL_ELIGIBLE_NODES) for i in range(5)]
    )
    random_big_nodes = (
        [CHOOSER.choice(BIG_ELIGIBLE_NODES) for i in range(num_samples)]
        if num_samples
        else [CHOOSER.choice(BIG_ELIGIBLE_NODES) for i in range(5)]
    )
    background = make_noise_objects(noise_objects)
    return phase1_instances(
        "Imprecise Size",
        chain(
            flatten(
                # generate big and small for all eligible nodes
                [
                    sampled(
                        template(random_node=node, background=background),
                        ontology=GAILA_PHASE_1_ONTOLOGY,
                        chooser=CHOOSER_FACTORY(),
                        block_multiple_of_the_same_type=False,
                        max_to_sample=num_samples if num_samples else 5,
                    )
                    for node in random_big_nodes
                    for template in [_big_x_template, _little_x_template]
                ]
            ),
            flatten(
                # generate tall and short for all eligible nodes
                [
                    sampled(
                        template(random_node=node, background=background),
                        ontology=GAILA_PHASE_1_ONTOLOGY,
                        chooser=CHOOSER_FACTORY(),
                        # NOTE(review): big/little above samples up to
                        # num_samples per node, but tall/short samples only
                        # one per node -- confirm this asymmetry is intended.
                        max_to_sample=1,
                        block_multiple_of_the_same_type=False,
                    )
                    for node in random_tall_nodes
                    for template in [_tall_x_template, _short_x_template]
                ]
            ),
        ),
        language_generator=language_generator,
    )
def make_throw_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Throwing curriculum paired with imprecise temporal modifiers.

    Each throwing template (onto the ground, bare throw, up/down, and to a
    catcher) is sampled once per speed: described as fast and as slow.
    """
    # Animate thrower/catcher; speaker and addressee are excluded so the
    # generated language stays third-person.
    thrower = standard_object(
        "thrower_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    catcher = standard_object(
        "catcher_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    object_thrown = standard_object("object_0", required_properties=[INANIMATE])
    # Landing-spot object used when no explicit goal is mentioned.
    implicit_goal_reference = standard_object("implicit_throw_goal_object", BOX)
    background = make_noise_objects(noise_objects)
    return phase1_instances(
        "throwing-with-temporal-descriptions",
        chain(
            # Throw on Ground
            flatten(
                sampled(
                    throw_on_ground_template(
                        thrower,
                        object_thrown,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                )
                for is_fast in BOOL_SET
            ),
            # Throw
            flatten(
                sampled(
                    throw_template(
                        thrower,
                        object_thrown,
                        implicit_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                )
                for is_fast in BOOL_SET
            ),
            # Throw up, down
            flatten(
                sampled(
                    throw_up_down_template(
                        thrower,
                        object_thrown,
                        implicit_goal_reference,
                        is_up=is_up,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                )
                for is_fast in BOOL_SET
                for is_up in BOOL_SET
            ),
            # Throw To
            flatten(
                sampled(
                    throw_to_template(
                        thrower,
                        object_thrown,
                        catcher,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                )
                for is_fast in BOOL_SET
            ),
        ),
        language_generator=language_generator,
    )
def make_move_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Moving curriculum paired with fast/slow descriptions.

    Covers intransitive movement ("a box moves") and transitive movement
    (an agent moves a theme toward a goal), each sampled fast and slow.
    """
    self_mover_0 = standard_object(
        "self-mover_0",
        THING,
        required_properties=[SELF_MOVING],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    other_mover_0 = standard_object("mover_0", THING, required_properties=[ANIMATE])
    movee_0 = standard_object("movee_0", THING, required_properties=[INANIMATE])
    move_goal_reference = standard_object(
        "move-goal-reference", THING, required_properties=[INANIMATE]
    )
    background = make_noise_objects(noise_objects)
    return phase1_instances(
        "move-with-temporal-descriptions",
        chain(
            # bare move (e.g. "a box moves") is about half of uses in child speed
            flatten(
                sampled(
                    bare_move_template(
                        self_mover_0,
                        move_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                )
                for is_fast in BOOL_SET
            ),
            # Transitive Move
            flatten(
                sampled(
                    transitive_move_template(
                        other_mover_0,
                        movee_0,
                        move_goal_reference,
                        spatial_properties=[FAST] if is_fast else [SLOW],
                        background=background,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=CHOOSER_FACTORY(),
                    max_to_sample=num_samples if num_samples else 5,
                    block_multiple_of_the_same_type=True,
                )
                for is_fast in BOOL_SET
            ),
        ),
        language_generator=language_generator,
    )
def make_jump_imprecise_temporal_descriptions(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Jumping curriculum paired with fast/slow descriptions.

    Samples "a person jumps" situations across the adverbial-path-modifier
    and fast/slow axes.
    """
    jumper = standard_object(
        "jumper_0",
        THING,
        required_properties=[CAN_JUMP],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    distractors = make_noise_objects(noise_objects)
    sample_count = num_samples if num_samples else 5
    sampled_groups = []
    for adverbial in (True, False):
        for speedy in BOOL_SET:
            sampled_groups.append(
                sampled(
                    # "A person jumps"
                    make_jump_template(
                        jumper,
                        use_adverbial_path_modifier=adverbial,
                        spatial_properties=[FAST] if speedy else [SLOW],
                        background=distractors,
                    ),
                    ontology=GAILA_PHASE_1_ONTOLOGY,
                    chooser=CHOOSER_FACTORY(),
                    max_to_sample=sample_count,
                    block_multiple_of_the_same_type=True,
                )
            )
    return phase1_instances(
        "jumping",
        chain(flatten(sampled_groups)),
        language_generator=language_generator,
    )
def make_take_grab_subtle_verb_distinction(
    num_samples: Optional[int],
    noise_objects: Optional[int],
    language_generator: LanguageGenerator[
        HighLevelSemanticsSituation, LinearizedDependencyTree
    ],
) -> Phase1InstanceGroup:
    """Taking/grabbing curriculum distinguishing the force of the action.

    Samples the take template across adverbial path modifiers, hard vs.
    soft force, and the toward/away-from direction operators.
    """
    taker = standard_object(
        "tosser_passer_0",
        THING,
        required_properties=[ANIMATE],
        banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
    )
    takee = standard_object("tossee_passee_0", THING, required_properties=[INANIMATE])
    background = make_noise_objects(noise_objects)
    return phase1_instances(
        "taking-grabbing",
        chain(
            flatten(
                [
                    sampled(
                        make_take_template(
                            taker,
                            takee,
                            use_adverbial_path_modifier=use_adverbial_path_modifier,
                            operator=operator,
                            # Hard force corresponds to "grab", soft to "take".
                            spatial_properties=[HARD_FORCE]
                            if hard_force
                            else [SOFT_FORCE],
                            background=background,
                        ),
                        ontology=GAILA_PHASE_1_ONTOLOGY,
                        chooser=CHOOSER_FACTORY(),
                        max_to_sample=num_samples if num_samples else 5,
                        block_multiple_of_the_same_type=True,
                    )
                    for use_adverbial_path_modifier in BOOL_SET
                    for hard_force in BOOL_SET
                    for operator in [TOWARD, AWAY_FROM]
                ]
            )
        ),
        language_generator=language_generator,
    )
def make_push_shove_subtle_verb_distinctions(
num_samples: Optional[int],
noise_objects: Optional[int],
language_generator: LanguageGenerator[
HighLevelSemanticsSituation, LinearizedDependencyTree
],
) -> Phase1InstanceGroup:
pusher = standard_object(
"pusher_0",
THING,
required_properties=[ANIMATE],
banned_properties=[IS_SPEAKER, IS_ADDRESSEE],
)
pushee = standard_object("pushee_0", THING, required_properties=[INANIMATE])
push_surface = | |
return as two integer arrays of some_length, and an ntheta*some_length array of theta power
if not os.path.isfile(xyt_filename):
# Fast Failure Case - This file does not exist.
if match_only:
return False
else:
raise ValueError('Input xyt_filename in getXYT matches no existing file')
else:
# Attempts to extract header information for Matching, or else the data itself
if xyt_filename.endswith('.npz'):
# Allows very large files to be read in.
data = np.load(xyt_filename, mmap_mode='r')
if match_only:
try:
return all([ match_only[x] == data[str.lower(x)] for x in list(match_only.keys()) ])
except KeyError:
return False
Hi = data['hi']
Hj = data['hj']
Hthets = data['hthets']
elif xyt_filename.endswith('.fits'):
hdu_list = fits.open(xyt_filename, mode='readonly', memmap=True, save_backup=False, checksum=True) #Allows for reading in very large files!
header = hdu_list[0].header
if match_only:
try:
return all([ match_only[x] == header[str.upper(x)] for x in list(match_only.keys()) ])
except KeyError:
return False
data = hdu_list[1].data
Hi = data['hi']
Hj = data['hj']
Hthets = data['hthets']
else:
raise ValueError('Supported input types in getXYT include .npz and .fits only')
rebuild = None
# Formats output properly
if rebuild and filepath is not None:
# Can recreate an entire 3D array of mostly 0s.
data = getData(filepath)
datay, datax = data.shape
ntheta = Hthets[0].shape
if BUFFER:
xyt = np.memmap(tempfile.TemporaryFile(), dtype=DTYPE, mode='w+', shape=(datay, datax, ntheta))
xyt.fill(0.0)
else:
print('Warning: Reconstructing very large array in memory! Set BUFFER to True!')
xyt = np.zeros((datay, datax, ntheta))
coords = list(zip(Hj, Hi))
for c in range(len(coords)):
j,i = coords[c]
xyt[j,i,:] = Hthets[c]
return xyt
else:
# Returns the sparse, memory mapped form only.
return Hi, Hj, Hthets
def bad_pixels(data):
    """Return a boolean mask of "bad" pixels in *data* (same shape).

    NaN values are always bad. The module flags BAD_INF, BAD_0 and BAD_Neg
    additionally mark non-finite, zero, and negative values as bad.
    Bad pixels become True/1, all others False/0.
    """
    # np.float was removed from modern NumPy; the builtin float is the
    # documented replacement.
    data = np.array(data, float)
    try:
        if BAD_INF:
            if BAD_0:
                if BAD_Neg:
                    return np.logical_or(np.logical_not(np.isfinite(data)), np.less_equal(data, 0.0))
                return np.logical_or(np.logical_not(np.isfinite(data)), np.equal(data, 0.0))
            if BAD_Neg:
                return np.logical_or(np.logical_not(np.isfinite(data)), np.less(data, 0.0))
            return np.logical_not(np.isfinite(data))
        if BAD_0:
            if BAD_Neg:
                return np.logical_or(np.isnan(data), np.less_equal(data, 0.0))
            return np.logical_not(np.nan_to_num(data))  # (NaNs or 0) ---> (0) ---> (1)
        if BAD_Neg:
            return np.logical_or(np.isnan(data), np.less(data, 0.0))
        return np.isnan(data)
    except Exception:
        # Narrowed from a bare except so Ctrl-C still propagates. Fallback:
        # treat truthiness as "good" rather than crashing the pipeline
        # (np.bool was also removed from modern NumPy; builtin bool works).
        print('Unable to properly mask data in bad_pixels()...')
        return data.astype(bool)
def all_within_diameter_are_good(data, diameter):
    """Mask of pixels whose circular neighborhood of *diameter* is all good.

    A pixel is 1 iff it lies at least radius pixels from every edge and no
    bad pixel (per bad_pixels) falls inside the circular window centered
    on it. *diameter* must be odd.
    """
    assert diameter % 2
    # np.int was removed from modern NumPy; plain integer division suffices.
    r = diameter // 2
    # Base case, 'assume all pixels are bad'
    mask = np.zeros_like(data)
    # Edge case, 'any pixel not within r of the edge might be ok'
    datay, datax = data.shape
    mask[r:datay - r, r:datax - r] = 1
    # Identifiably bad case, 'all pixels within r of me are not bad':
    # offsets of every cell inside the circular window, relative to center.
    circle = circ_kern(diameter)
    y_arr, x_arr = np.nonzero(circle)
    y_arr = y_arr - r
    x_arr = x_arr - r
    # Zero any mask pixel within r of a bad pixel.
    update_progress(0.0)
    coords = list(zip(*np.nonzero(bad_pixels(data))))
    N = len(coords)
    for c, (j, i) in enumerate(coords):
        # Clip window coordinates to the image bounds.
        x = (x_arr + i).clip(0, datax - 1)
        y = (y_arr + j).clip(0, datay - 1)
        mask[y, x] = 0
        update_progress((c + 1) / float(N), message='Masking:', final_message='Finished Masking:')
    return mask
def getData(filepath):
    """Read 2D image data from .fits, .npy, .npz, or a PIL-readable image.

    Returns the array on success, or None (after printing a message) on
    any read failure.
    """
    try:
        if filepath.endswith('.fits'):
            # TODO: assumes all data is in the first HDU
            hdu = fits.open(filepath, memmap=True)[0]
            data = hdu.data
        elif filepath.endswith('.npy'):
            data = np.load(filepath, mmap_mode='r')
        elif filepath.endswith('.npz'):
            # TODO: assumes the data in the first ndarray is 2D
            data = np.load(filepath, mmap_mode='r')[0]
        else:
            # Makes B/W array, reversing y-coords.
            # NOTE(review): scipy.ndimage.imread was removed from SciPy;
            # imageio.imread is the usual replacement if this branch is needed.
            data = scipy.ndimage.imread(filepath, flatten=True)[::-1]
    except Exception:
        # Narrowed from a bare except so Ctrl-C still propagates.
        print('Failure in getData({})... Returning'.format(filepath))
        return None
    return data
def getMask(data, smr=SMR, wlen=WLEN):
    """Build the two bad-pixel masks used downstream.

    smr_mask: pixels at least smr away from any bad pixel and the edge.
    wlen_mask: the same test with window wlen, computed on the smr-masked
    data (pixels rejected by smr_mask are treated as NaN, i.e. bad).
    """
    smr_mask = all_within_diameter_are_good(data, 2 * smr + 1)
    # BUG FIX: the old code used np.empty(...).fill(np.nan), but ndarray.fill
    # works in place and returns None, so np.where received None instead of
    # a NaN array. np.full builds the intended NaN-filled array.
    nans = np.full(data.shape, np.nan, dtype=float)
    wlen_mask = all_within_diameter_are_good(np.where(smr_mask, data, nans), wlen)
    return smr_mask, wlen_mask
def circ_kern(diameter):
    """Binary circular kernel: 1 inside a circle of *diameter*, 0 outside.

    *diameter* must be odd so the circle has a center pixel.
    """
    assert diameter % 2
    r = diameter // 2
    # Offsets of every cell from the center, then their radial distance.
    mnvals = np.indices((diameter, diameter)) - r
    rads = np.hypot(mnvals[0], mnvals[1])
    # np.int was removed from modern NumPy; builtin int is equivalent here.
    return np.less_equal(rads, r).astype(int)
def umask(data, radius, smr_mask=None):
    """Unsharp mask: binary map of pixels brighter than their local mean.

    *data* is correlated with a circular kernel of the given radius
    (correlation equals convolution here because the kernel is symmetric);
    pixels above their neighborhood mean become True. If *smr_mask* is
    given, pixels masked out there are forced False.
    """
    assert data.ndim == 2
    kernel = circ_kern(2 * radius + 1)
    # scipy.ndimage.filters is a deprecated alias namespace; the function
    # lives directly on scipy.ndimage.
    outdata = scipy.ndimage.correlate(data, kernel)
    # The correlation scales outdata by sum(kernel); divide the weights out.
    kernweight = np.sum(kernel)
    subtr_data = data - outdata / kernweight
    # Convert to binary data.
    bindata = np.greater(subtr_data, 0.0)
    if smr_mask is None:
        return bindata
    return np.logical_and(smr_mask, bindata)
def fast_hough(in_arr, xyt):
    """Project a 2D array through the precomputed (y, x, theta) prism.

    Returns a length-ntheta array where entry t is
    sum over (y, x) of in_arr[y, x] * xyt[y, x, t].
    """
    assert in_arr.ndim == 2
    assert xyt.ndim == 3
    assert in_arr.shape[0] == xyt.shape[0]
    assert in_arr.shape[1] == xyt.shape[1]
    # einsum contracts the shared (i, j) axes in C; the module's comments
    # recorded this as the fastest of several tried implementations, so the
    # dead alternative code paths have been removed.
    return np.einsum('ijk,ij', xyt, in_arr)
def houghnew(image, cos_theta, sin_theta):
    """Hough transform of a square window; returns the zero-distance row.

    For each theta (given via its cos/sin), counts the nonzero pixels of
    *image* whose rounded signed distance from the window center along
    that orientation is zero.
    """
    assert image.ndim == 2
    assert cos_theta.ndim == 1
    assert sin_theta.ndim == 1
    assert len(cos_theta) == len(sin_theta)
    assert image.shape[0] == image.shape[1]
    # Midpoint is wlen/2.
    wmid = image.shape[0] // 2
    # Number of distance bins: the window diagonal, as a true int
    # (np.ceil yields a float, and np.int was removed from modern NumPy).
    nr_bins = int(np.ceil(np.hypot(*image.shape)))
    out = np.zeros((nr_bins, len(cos_theta)), dtype=int)
    # Indices of the non-zero values in the input data.
    y, x = np.nonzero(image)
    # x and y can be large, so broadcasting everything to a 2D array could
    # run out of memory; process one theta at a time instead.
    for i, (cT, sT) in enumerate(zip(cos_theta, sin_theta)):
        # Signed distance of each pixel from the center along theta.
        distances = (x - wmid) * cT + (y - wmid) * sT
        # Round and shift so every bin index is non-negative.
        indices = (np.round(distances) + nr_bins / 2).astype(int)
        bincount = np.bincount(indices)
        out[:len(bincount), i] = bincount
    return out[nr_bins // 2, :]
def all_thetas(wlen, theta, original):
assert theta.ndim == 1
assert wlen%2
# Initialize a circular window of ones
window = circ_kern(wlen)
assert window.shape[0] == window.shape[1]
if not original:
window[:,:wlen//2] = 0
# Precompute the sin and cos of the angles.
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
# Makes prism; output has dimensions (y, x, theta).
ntheta = len(theta)
#outshape = (wlen, wlen, ntheta)
out = np.zeros(window.shape+(ntheta,), np.int)
coords = list(zip( *np.nonzero(window)))
for (j, | |
a list of data dictionary with given pair, (length=1)
Otherwise, a list of data dictionaries of all pairs.
Example of Response Format
.. code-block:: python
[
{
'pairSymbol': '<Requested pair symbol>',
'pairSymbolNormalized': '<Requested pair symbol with "_" in between.>',
'timestamp': '<Current Unix time in milliseconds>'
'last': '<Last price>',
'high': '<Highest trade price in last 24 hours>',
'low': '<Lowest trade price in last 24 hours>',
'bid': '<Highest current bid>',
'ask': '<Lowest current ask>',
'open': '<Price of the opening trade in last 24 hours>',
'volume': '<Total volume in last 24 hours>',
'average': '<Average Price in last 24 hours>',
'daily': '<Price change in last 24 hours>',
'dailyPercent': '<Price change percent in last 24 hours>',
'denominatorSymbol': '<Denominator currency symbol of the pair>',
'numeratorSymbol': '<Numerator currency symbol of the pair>',
},
...
]
"""
request_url = self._create_public_endpoint_url("ticker")
params = kwargs if kwargs else {}
if pair:
return self._get(request_url, {"pairSymbol": pair})
return self._get(request_url, params)
def get_ohlc_data(self, pair=None, last=10, **kwargs):
    """Gets daily OHLC data for the given pair.

    If you specify kwargs, the other parameters will be **overridden**.
    Only the keyword arguments you specified will be used to construct
    the query, so prefer the named parameters for ordinary use.

    Parameters
    ----------
    pair : str, optional
        pair symbol like 'BTC_TRY', 'ETH_BTC', ...
    last : int, optional
        number of days
    kwargs

    Returns
    -------
    list
        a list of data dictionaries for the given pair; each entry holds
        pairSymbol(Normalized), time, open/high/low/close, volume,
        average, dailyChangeAmount and dailyChangePercentage fields
    """
    request_url = self._create_public_endpoint_url("ohlc")
    # BUG FIX: previously `pair` took precedence over kwargs, contradicting
    # the documented "kwargs override" contract and the sibling endpoints
    # (get_order_book, get_trades). Explicit kwargs now win.
    if kwargs:
        params = kwargs
    elif pair:
        params = {"pairSymbol": pair, "last": last}
    else:
        params = {}
    return self._get(request_url, params)
def get_order_book(self, pair=None, limit=100, **kwargs):
    """Gets the order book of the given pair.

    If keyword arguments are supplied they form the query verbatim and
    ``pair``/``limit`` are ignored, so prefer the named parameters to
    avoid sending an invalid request.

    Parameters
    ----------
    pair : str, mandatory
        pair symbol like 'BTC_TRY', 'ETH_BTC', ...
    limit : int, optional
        default 100, max 1000
    kwargs

    Returns
    -------
    dict
        data dictionary with 'timestamp', 'bids' (open bids) and
        'asks' (open asks) entries
    """
    url = self._create_public_endpoint_url("orderbook")
    if kwargs:
        query = kwargs
    else:
        query = {"pairSymbol": pair, "limit": limit}
    return self._get(url, query)
def get_trades(self, pair=None, last=50, **kwargs):
    """Gets a list of recent trades for the given pair.

    If keyword arguments are supplied they form the query verbatim and
    ``pair``/``last`` are ignored, so prefer the named parameters to
    avoid sending an invalid request.

    Parameters
    ----------
    pair : str, mandatory
        pair symbol like 'BTC_TRY', 'ETH_BTC', ...
    last : int, optional
        default 50, max 1000

    Returns
    -------
    dict
        data dictionary with pair, pairNormalized, numerator,
        denominator, date, tid, price and amount fields
    """
    url = self._create_public_endpoint_url("trades")
    if kwargs:
        query = kwargs
    else:
        query = {"pairSymbol": pair, "last": last}
    return self._get(url, params=query)
# AUTHENTICATION REQUIRED GET ENDPOINT IMPLEMENTATIONS
@authentication_required
def get_account_balance(self, assets=None):
    """Gets the user's balances, optionally filtered by asset.

    Parameters
    ----------
    assets : optional
        list of asset symbols such as ['BTC', 'TRY', ...]; matching is
        case-insensitive. When omitted, every balance is returned.

    Returns
    -------
    list
        balance dictionaries with asset, assetname, balance, locked,
        free, orderFund, requestFund and precision fields
    """
    url = self._create_auth_endpoint_url("users/balances")
    self._update_session_headers()
    balances = self._get(url)
    if not assets:
        return balances
    wanted = {asset.upper() for asset in assets}
    return [entry for entry in balances if entry["asset"].upper() in wanted]
@authentication_required
def get_trade_history(
    self, trade_type=None, symbol=None, start_date=None, end_date=None, **kwargs
):
    """Gets the history of the user's trades.

    Defaults: both 'buy' and 'sell' types, every crypto symbol, and the
    last 30 days. If keyword arguments are supplied they form the query
    verbatim and the named parameters are ignored, so prefer the named
    parameters to avoid sending an invalid request.

    Parameters
    ----------
    trade_type : list, optional
        ["buy", "sell"], ["buy"] or ["sell"]
    symbol : list of str, optional
        ["btc", "try", ...]
    start_date : timestamp, optional
    end_date : timestamp, optional
    kwargs

    Returns
    -------
    list
        trade dictionaries with price, numeratorSymbol,
        denominatorSymbol, orderType, id, timestamp, amount, fee and
        tax fields
    """
    # Default window: the last 30 days, as millisecond timestamps.
    if not start_date:
        thirty_days_ago = dt.datetime.today() - dt.timedelta(days=30)
        start_date = int(dt.datetime.timestamp(thirty_days_ago) * 1000)
    if not end_date:
        end_date = int(time.time() * 1000)
    symbol = symbol if symbol else CRYPTO_SYMBOLS
    trade_type = trade_type if trade_type else TRADE_TYPES
    request_url = self.API_BASE + self.API_ENDPOINT_TRANSACTIONS + "trade"
    if kwargs:
        params = kwargs
    else:
        params = {
            "type": trade_type,
            "symbol": symbol,
            "startDate": start_date,
            "endDate": end_date,
        }
    self._update_session_headers()
    return self._get(request_url, params)
@authentication_required
def get_crypto_history(
    self,
    symbol=None,
    transaction_type=None,
    start_date=None,
    end_date=None,
    **kwargs,
):
    """ Gets the history of user's crypto transactions.

    If symbol not specified, all crypto symbols will be used.
    If transaction_type not specified, both 'withdrawal' and 'deposit' types will be used.
    If start_date not specified, it will get transactions for the last 30 days.
    If you specify kwargs, the other parameters will be **overridden**. Only keyword arguments you specified
    will be used to construct the query. Therefore, it is your choice to use kwargs,
    but I strongly discourage using them, to avoid any invalid requests.

    Parameters
    ----------
    symbol : list, optional
        ["btc", "try", ...etc.]
    transaction_type : list, optional
        ["deposit", "withdrawal"], ["deposit"] or ["withdrawal"]
    start_date : timestamp, optional
    end_date : timestamp, optional
    kwargs

    Returns
    -------
    list
        List of transaction data dictionaries.

    Example of Response Format

    .. code-block:: python

        [
            {
                'balanceType': '<Type of transaction (deposit, withdrawal)>',
                'currencySymbol': '<Transaction currency symbol>',
                'id': '<Transaction id>',
                'timestamp': '<Unix timestamp>',
                'funds': '<Funds>',
                'orderFund': '<Transaction Amount>',
                'fee': '<Transaction fee>',
                'tax': <Transaction tax>
            },
            ...
        ]
    """
    # Default window: the last 30 days, as millisecond timestamps.
    if not start_date:
        last_30_days_timestamp = dt.datetime.timestamp(
            dt.datetime.today() - dt.timedelta(days=30)
        )
        start_date = int(last_30_days_timestamp * 1000)
    if not end_date:
        end_date = int(time.time() * 1000)
    # Fall back to every crypto symbol / both transaction types.
    if not symbol:
        symbol = CRYPTO_SYMBOLS
    if not transaction_type:
        transaction_type = DEPOSIT_OR_WITHDRAWAL
    request_url = self.API_BASE + self.API_ENDPOINT_TRANSACTIONS + "crypto"
    # Explicit kwargs replace the constructed query entirely.
    params = (
        kwargs
        if kwargs
        else {
            "type": transaction_type,
            "symbol": symbol,
            "startDate": start_date,
            "endDate": end_date,
        }
    )
    self._update_session_headers()
    history = self._get(request_url, params)
    return history
@authentication_required
def get_fiat_history(
self,
balance_types=None,
currency_symbols=None,
start_date=None,
end_date=None,
**kwargs,
):
""" Gets the history of user's fiat transactions.
If balance_types not specified, both 'withdrawal' and 'deposit' types will be used
If currency_symbols not specified, all currency symbols will be used
If start_date not specified, it will get trades for last 30 days.
If you specify kwargs, the | |
"""All assumptions are either loaded in this file or definied here
"""
import os
from datetime import date
from energy_demand.read_write import read_data
from energy_demand.technologies import technologies_related
from energy_demand.technologies import dummy_technologies
from energy_demand.basic import testing_functions as testing
from energy_demand.assumptions import assumptions_fuel_shares
from energy_demand.initalisations import helpers
from energy_demand.read_write import write_data
from energy_demand.basic import date_handling
from energy_demand.read_write import data_loader
# pylint: disable=I0011,C0321,C0301,C0103, C0325
#TODO: Write a function which inserts zeros if a fueltype is not provided
#TODO: Make it possible to improve the HLC (heat loss coefficient)
# Assumption: share of the existing dwelling stock which is assigned new HLC coefficients
def load_assumptions(data):
"""All assumptions of the energy demand model are loaded and added to the data dictionary
Returns
-------
data : dict
Data dictionary with added ssumption dict
"""
print("... load assumptions")
assumptions = {}
data['sim_param'] = {}
data['sim_param']['base_yr'] = 2015
data['sim_param']['end_yr'] = 2020
data['sim_param']['sim_years_intervall'] = 5 # Make calculation only every X year
data['sim_param']['sim_period'] = range(data['sim_param']['base_yr'], data['sim_param']['end_yr'] + 1, data['sim_param']['sim_years_intervall'])
data['sim_param']['sim_period_yrs'] = int(data['sim_param']['end_yr'] + 1 - data['sim_param']['base_yr'])
data['sim_param']['curr_yr'] = data['sim_param']['base_yr']
data['sim_param']['list_dates'] = date_handling.fullyear_dates(
start=date(data['sim_param']['base_yr'], 1, 1),
end=date(data['sim_param']['base_yr'], 12, 31))
# ============================================================
# If unconstrained mode (False), heat demand is provided per technology. If True, heat is delievered with fueltype
assumptions['mode_constrained'] = False # True --> Technologies are defined in ED model, False: heat is delievered
# ============================================================
# Residential dwelling stock assumptions
# ============================================================
# Dwelling types lookup table
data['dwtype_lu'] = {
0: 'detached',
1: 'semi_detached',
2: 'terraced',
3: 'flat',
4: 'bungalow'
}
# Change in floor area per person up to end_yr 1.0 = 100%
# ASSUMPTION (if minus, check if new dwellings are needed)
assumptions['assump_diff_floorarea_pp'] = 1
# Specific Energy Demand factors per dwelling type could be defined
# (e.g. per dwelling type or GVA class or residents....) #TODO
# Dwelling type distribution base year (fixed)
assumptions['assump_dwtype_distr_by'] = {
'semi_detached': 0.26,
'terraced': 0.283,
'flat': 0.203,
'detached': 0.166,
'bungalow': 0.088
}
# Dwelling type distribution end year
assumptions['assump_dwtype_distr_ey'] = {
'semi_detached': 0.26,
'terraced': 0.283,
'flat': 0.203,
'detached': 0.166,
'bungalow': 0.088
}
# Floor area per dwelling type
assumptions['assump_dwtype_floorarea_by'] = {
'semi_detached': 96,
'terraced': 82.5,
'flat': 61,
'detached': 147,
'bungalow': 77
} # SOURCE?
# Floor area per dwelling type #TODO
assumptions['assump_dwtype_floorarea_ey'] = {
'semi_detached': 96,
'terraced': 82.5,
'flat': 61,
'detached': 147,
'bungalow': 77
} # SOURCE?
# Assumption about age distribution
assumptions['dwtype_age_distr'] = {
2015.0: {
'1918':0.21, #Average builing age within age class, fraction
'1941': 0.36,
'1977.5': 0.3,
'1996.5': 0.08,
'2002': 0.05}
}
# TODO: Get assumptions for heat loss coefficient
# TODO: INCLUDE HAT LOSS COEFFICIEN ASSUMPTIONS
# TODO: Include refurbishment of houses --> Change percentage of age distribution of houses -->
# Which then again influences HLC
# ============================================================
# Scenario drivers
# ============================================================
assumptions['scenario_drivers'] = {}
# --Residential SubModel
assumptions['scenario_drivers']['rs_submodule'] = {
'rs_space_heating': ['floorarea', 'hlc'], #Do not use also pop because otherwise problems that e.g. existing stock + new has smaller scen value than... floorarea already contains pop, Do not use HDD because otherweise double count
'rs_water_heating': ['population'],
'rs_lighting': ['population', 'floorarea'],
'rs_cooking': ['population'],
'rs_cold': ['population'],
'rs_wet': ['population'],
'rs_consumer_electronics': ['population'],
'rs_home_computing': ['population']
}
# --Servicse Submodel
assumptions['scenario_drivers']['ss_submodule'] = {
'ss_space_heating': ['floorarea'],
'ss_water_heating': [],
'ss_lighting': ['floorarea'],
'ss_catering': [],
'ss_computing': [],
'ss_space_cooling': ['floorarea'],
'ss_other_gas': ['floorarea'],
'ss_other_electricity': ['floorarea']
}
# --Industry Submodel
assumptions['scenario_drivers']['is_submodule'] = {
'is_high_temp_process': ['GVA'],
'is_low_temp_process': [],
'is_drying_separation': [],
'is_motors': [],
'is_compressed_air': [],
'is_lighting': [],
'is_space_heating': [],
'is_other': [],
'is_refrigeration': []
}
# Change in floor depending on sector (if no change set to 1, if e.g. 10% decrease change to 0.9)
# TODO: READ IN FROM READL BUILDING SCENARIOS...
# TODO: Project future demand based on seperate methodology
assumptions['ss_floorarea_change_ey_p'] = {
'community_arts_leisure': 1,
'education': 1,
'emergency_services': 1,
'health': 1,
'hospitality': 1,
'military': 1,
'offices': 1,
'retail': 1,
'storage': 1,
'other': 1
}
# ========================================================================================================================
# Climate Change assumptions
# Temperature changes for every month until end year for every month
# ========================================================================================================================
assumptions['climate_change_temp_diff_month'] = [
0, # January (can be plus or minus)
0, # February
0, # March
0, # April
0, # May
0, # June
0, # July
0, # August
0, # September
0, # October
0, # November
0 # December
]
#assumptions['climate_change_temp_diff_month'] = [0] * 12 # No change
# ============================================================
# Base temperature assumptions for heating and cooling demand
# The diffusion is asumed to be linear
# ============================================================
assumptions['rs_t_base_heating'] = {
'base_yr': 15.5,
'end_yr': 15.5 #replaced by rs_t_base_heating_ey
}
assumptions['ss_t_base_heating'] = {
'base_yr': 15.5,
'end_yr': 15.5
}
# Cooling base temperature
assumptions['rs_t_base_cooling'] = {
'base_yr': 21.0,
'end_yr': 21.0
}
assumptions['ss_t_base_cooling'] = {
'base_yr': 15.5,
'end_yr': 15.5
}
# Sigmoid parameters for diffusion of penetration of smart meters
assumptions['base_temp_diff_params'] = {}
assumptions['base_temp_diff_params']['sig_midpoint'] = 0
assumptions['base_temp_diff_params']['sig_steeppness'] = 1
# Penetration of cooling devices
# COLING_OENETRATION ()
# Or Assumkp Peneetration curve in relation to HDD from PAPER #Residential
# Assumption on recovered heat (lower heat demand based on heat recovery)
# ============================================================
# Smart meter assumptions (Residential)
#
# DECC 2015: Smart Metering Early Learning Project: Synthesis report
# https://www.gov.uk/government/publications/smart-metering-early-learning-project-and-small-scale-behaviour-trials
# Reasonable assumption is between 0.03 and 0.01 (DECC 2015)
# NTH: saturation year
# ============================================================
# Fraction of population with smart meters (Across all sectors. If wants to be spedified, needs some extra code. Easily possible)
assumptions['smart_meter_p_by'] = 0.1
assumptions['smart_meter_p_ey'] = 0.1
# Long term smart meter induced general savings, purley as a result of having a smart meter
assumptions['savings_smart_meter'] = {
# Residential
'rs_cold': -0.03,
'rs_cooking': -0.03,
'rs_lighting': -0.03,
'rs_wet': -0.03,
'rs_consumer_electronics': -0.03,
'rs_home_computing': -0.03,
'rs_space_heating': -0.03,
# Service
'ss_space_heating': -0.03,
# Industry
'is_space_heating': -0.03
}
# Sigmoid parameters for diffusion of penetration of smart meters
assumptions['smart_meter_diff_params'] = {}
assumptions['smart_meter_diff_params']['sig_midpoint'] = 0
assumptions['smart_meter_diff_params']['sig_steeppness'] = 1
# ============================================================
# Heat recycling & Reuse
# ============================================================
assumptions['heat_recovered'] = {
'rs_space_heating': 0.0, # e.g. 0.2 = 20% reduction
'ss_space_heating': 0.0,
'is_space_heating': 0.0
}
# ---------------------------------------------------------------------------------------------------------------------
# General change in fuel consumption for specific enduses
# ---------------------------------------------------------------------------------------------------------------------
# With these assumptions, general efficiency gain (across all fueltypes) can be defined
# for specific enduses. This may be e.g. due to general efficiency gains or anticipated increases in demand.
# NTH: Specific hanges per fueltype (not across al fueltesp)
#
# Change in fuel until the simulation end year (if no change set to 1, if e.g. 10% decrease change to 0.9)
# ---------------------------------------------------------------------------------------------------------------------
assumptions['enduse_overall_change_ey'] = {
# Lighting: E.g. how much floor area / % (social change - how much floor area is lighted (smart monitoring)) (smart-lighting)
# Submodel Residential
'rs_model': {
'rs_space_heating': 1,
'rs_water_heating': 1,
'rs_lighting': 1,
'rs_cooking': 1,
'rs_cold': 1,
'rs_wet': 1,
'rs_consumer_electronics': 1,
'rs_home_computing': 1
},
# Submodel Service
'ss_model': {
'ss_catering': 1,
'ss_computing': 1,
'ss_cooling_ventilation': 1,
'ss_space_heating': 1,
'ss_water_heating': 1,
'ss_lighting': 1,
'ss_other_gas': 1,
'ss_other_electricity': 1
},
# Submodel Industry
'is_model': {
'is_high_temp_process': 1,
'is_low_temp_process': 1,
'is_drying_separation': 1,
'is_motors': 1,
'is_compressed_air': 1,
'is_lighting': 1,
'is_space_heating': 1,
'is_other': 1,
'is_refrigeration': 1
}
}
# Specific diffusion information for the diffusion of enduses
assumptions['other_enduse_mode_info'] = {
'diff_method': 'linear', # sigmoid or linear
'sigmoid': {
'sig_midpoint': 0,
'sig_steeppness': 1
}
}
# ============================================================
# Technologies & efficiencies
# ============================================================
assumptions['technology_list'] = {}
# Load all technologies
assumptions['technologies'], assumptions['technology_list'] = read_data.read_technologies(
data['paths']['path_technologies'],
data['lu_fueltype'])
# Share of installed heat pumps for every fueltype (ASHP to GSHP) (0.7 e.g. 0.7 ASHP and 0.3 GSHP)
split_heat_pump_ASHP_GSHP = 0.5
# --Assumption how much of technological efficiency is reached
efficiency_achieving_factor = 1.0
# --Heat pumps
assumptions['installed_heat_pump'] = technologies_related.generate_ashp_gshp_split(
split_heat_pump_ASHP_GSHP,
data)
# Add heat pumps to technologies #SHARK
assumptions['technologies'], assumptions['technology_list']['tech_heating_temp_dep'], assumptions['heat_pumps'] = technologies_related.generate_heat_pump_from_split(
data,
[],
assumptions['technologies'],
assumptions['installed_heat_pump']
)
# --Hybrid technologies
assumptions['technologies'], assumptions['technology_list']['tech_heating_hybrid'], assumptions['hybrid_technologies'] = technologies_related.get_all_defined_hybrid_technologies(
assumptions,
assumptions['technologies'],
hybrid_cutoff_temp_low=2, #TODO :DEFINE PARAMETER
hybrid_cutoff_temp_high=7)
# ----------
# Enduse definition list
# ----------
assumptions['enduse_space_heating'] = ['rs_space_heating', 'rs_space_heating', 'is_space_heating']
assumptions['enduse_space_cooling'] = ['rs_space_cooling', 'ss_space_cooling', 'is_space_cooling']
assumptions['technology_list']['enduse_water_heating'] = ['rs_water_heating', 'ss_water_heating']
# Helper function
assumptions['technologies'] = helpers.helper_set_same_eff_all_tech(
assumptions['technologies'],
efficiency_achieving_factor
)
# ============================================================
# Fuel Stock Definition (necessary to define before model run)
# Provide for every fueltype of an enduse the share of fuel which is used by technologies
| |
<filename>ivadomed/evaluation.py
import nibabel as nib
import numpy as np
import pandas as pd
from loguru import logger
from scipy.ndimage import label, generate_binary_structure
from tqdm import tqdm
from pathlib import Path
from ivadomed import inference as imed_inference
from ivadomed import metrics as imed_metrics
from ivadomed import postprocessing as imed_postpro
from ivadomed.loader import utils as imed_loader_utils
# labels of paint_objects method
TP_COLOUR = 1
FP_COLOUR = 2
FN_COLOUR = 3
def evaluate(bids_df, path_output, target_suffix, eval_params):
    """Evaluate predictions from inference step.

    Args:
        bids_df (BidsDataframe): Object containing dataframe with all BIDS image files and their metadata.
        path_output (str): Folder where the output folder "results_eval" is created.
        target_suffix (list): List of suffixes that indicates the target mask(s).
        eval_params (dict): Evaluation parameters.

    Returns:
        pd.DataFrame: results for each image, indexed by 'image_id'.
    """
    path_preds = Path(path_output, 'pred_masks')
    logger.info('\nRun Evaluation on {}\n'.format(path_preds))

    # OUTPUT RESULT FOLDER
    path_results = Path(path_output, 'results_eval')
    if not path_results.is_dir():
        path_results.mkdir(parents=True)

    # Collect one metrics dict per prediction and build the frame once at the
    # end: DataFrame.append was removed in pandas 2.0 and was O(n^2) anyway.
    results_rows = []

    # LIST PREDS
    subj_acq_lst = [f.name.split('_pred')[0] for f in path_preds.iterdir() if f.name.endswith('_pred.nii.gz')]

    # Get all derivatives filenames
    all_deriv = bids_df.get_deriv_fnames()

    # LOOP ACROSS PREDS
    for subj_acq in tqdm(subj_acq_lst, desc="Evaluation"):
        # Fnames of pred and ground-truth
        fname_pred = path_preds.joinpath(subj_acq + '_pred.nii.gz')
        derivatives = bids_df.df[bids_df.df['filename']
                      .str.contains('|'.join(bids_df.get_derivatives(subj_acq, all_deriv)))]['path'].to_list()

        # Ordering ground-truth the same as target_suffix
        fname_gt = [None] * len(target_suffix)
        for deriv in derivatives:
            for idx, suffix in enumerate(target_suffix):
                if suffix in deriv:
                    fname_gt[idx] = deriv

        # Get filename extension of first ground-truth before updating path to NifTI
        extension = imed_loader_utils.get_file_extension(fname_gt[0])

        # Check fname_gt extensions and update paths if not NifTI
        fname_gt = [imed_loader_utils.update_filename_to_nifti(fname) for fname in fname_gt]

        # 3D evaluation
        nib_pred = nib.load(fname_pred)
        data_pred = nib_pred.get_fdata()

        h, w, d = data_pred.shape[:3]
        n_classes = len(fname_gt)
        data_gt = np.zeros((h, w, d, n_classes))
        for idx, file in enumerate(fname_gt):
            # Missing ground-truth files are treated as empty masks.
            if Path(file).exists():
                data_gt[..., idx] = nib.load(file).get_fdata()
            else:
                data_gt[..., idx] = np.zeros((h, w, d), dtype='u1')

        # `evaluator` (not `eval`) to avoid shadowing the builtin.
        evaluator = Evaluation3DMetrics(data_pred=data_pred,
                                        data_gt=data_gt,
                                        dim_lst=nib_pred.header['pixdim'][1:4],
                                        params=eval_params)
        results_pred, data_painted = evaluator.run_eval()

        # SAVE PAINTED DATA, TP FP FN
        fname_paint = str(fname_pred).split('.nii.gz')[0] + '_painted.nii.gz'
        nib_painted = nib.Nifti1Image(data_painted, nib_pred.affine)
        nib.save(nib_painted, fname_paint)

        # For Microscopy PNG/TIF files (TODO: implement OMETIFF behavior)
        if "nii" not in extension:
            painted_list = imed_inference.split_classes(nib_painted)
            imed_inference.pred_to_png(painted_list,
                                       target_suffix,
                                       str(path_preds.joinpath(subj_acq)),
                                       suffix="_painted")

        # SAVE RESULTS FOR THIS PRED
        results_pred['image_id'] = subj_acq
        results_rows.append(results_pred)

    df_results = pd.DataFrame(results_rows)
    df_results = df_results.set_index('image_id')
    df_results.to_csv(str(path_results.joinpath('evaluation_3Dmetrics.csv')))

    logger.info(df_results.head(5))
    return df_results
class Evaluation3DMetrics(object):
"""Computes 3D evaluation metrics.
Args:
data_pred (ndarray): Network prediction mask.
data_gt (ndarray): Ground-truth mask.
dim_lst (list): Resolution (mm) along each dimension.
params (dict): Evaluation parameters.
Attributes:
data_pred (ndarray): Network prediction mask.
data_gt (ndarray): Ground-truth mask.
n_classes (int): Number of classes.
px (float): Resolution (mm) along the first axis.
py (float): Resolution (mm) along the second axis.
pz (float): Resolution (mm) along the third axis.
bin_struct (ndarray): Binary structure.
size_min (int): Minimum size of objects. Objects that are smaller than this limit can be removed if
"removeSmall" is in params.
overlap_vox (int): A prediction and ground-truth are considered as overlapping if they overlap for at least this
amount of voxels.
overlap_ratio (float): A prediction and ground-truth are considered as overlapping if they overlap for at least
this portion of their volumes.
data_pred_label (ndarray): Network prediction mask that is labeled, ie each object is filled with a different
value.
data_gt_label (ndarray): Ground-truth mask that is labeled, ie each object is filled with a different
value.
n_pred (int): Number of objects in the network prediction mask.
n_gt (int): Number of objects in the ground-truth mask.
data_painted (ndarray): Mask where each predicted object is labeled depending on whether it is a TP or FP.
"""
    def __init__(self, data_pred, data_gt, dim_lst, params=None):
        # Avoid the mutable-default pitfall: params defaults to a fresh dict.
        if params is None:
            params = {}

        # Promote 3D (h, w, d) inputs to 4D (h, w, d, 1) single-class volumes.
        self.data_pred = data_pred
        if len(self.data_pred.shape) == 3:
            self.data_pred = np.expand_dims(self.data_pred, -1)

        self.data_gt = data_gt
        if len(self.data_gt.shape) == 3:
            self.data_gt = np.expand_dims(self.data_gt, -1)

        h, w, d, self.n_classes = self.data_gt.shape

        # Voxel resolution (mm) along each axis, used for mm3 conversions.
        self.px, self.py, self.pz = dim_lst

        self.bin_struct = generate_binary_structure(3, 2)  # 18-connectivity

        self.postprocessing_dict = {}
        # Lower bound of the smallest size bin (voxels).
        self.size_min = 0

        if "target_size" in params:
            # Build the size bins, then relabel every object in GT and
            # prediction by the index of the size bin it falls into.
            self.size_rng_lst, self.size_suffix_lst = \
                self._get_size_ranges(thr_lst=params["target_size"]["thr"],
                                      unit=params["target_size"]["unit"])
            self.label_size_lst = []
            self.data_gt_per_size = np.zeros(self.data_gt.shape)
            self.data_pred_per_size = np.zeros(self.data_gt.shape)
            for idx in range(self.n_classes):
                self.data_gt_per_size[..., idx] = self.label_per_size(self.data_gt[..., idx])
                label_gt_size_lst = list(set(self.data_gt_per_size[np.nonzero(self.data_gt_per_size)]))
                self.data_pred_per_size[..., idx] = self.label_per_size(self.data_pred[..., idx])
                label_pred_size_lst = list(set(self.data_pred_per_size[np.nonzero(self.data_pred_per_size)]))
                # Per class: [size labels present, 'gt'/'pred' origin tags].
                self.label_size_lst.append([label_gt_size_lst + label_pred_size_lst,
                                            ['gt'] * len(label_gt_size_lst) + ['pred'] * len(label_pred_size_lst)])

        else:
            # NOTE: the same [[], []] list object is shared across classes
            # here; harmless as long as it is never mutated in place.
            self.label_size_lst = [[[], []]] * self.n_classes

        # 18-connected components
        self.data_pred_label = np.zeros((h, w, d, self.n_classes), dtype='u1')
        self.data_gt_label = np.zeros((h, w, d, self.n_classes), dtype='u1')
        self.n_pred = [None] * self.n_classes
        self.n_gt = [None] * self.n_classes
        for idx in range(self.n_classes):
            self.data_pred_label[..., idx], self.n_pred[idx] = label(self.data_pred[..., idx],
                                                                     structure=self.bin_struct)
            self.data_gt_label[..., idx], self.n_gt[idx] = label(self.data_gt[..., idx],
                                                                 structure=self.bin_struct)

        # painted data, object wise
        self.data_painted = np.copy(self.data_pred)

        # overlap_vox is used to define the object-wise TP, FP, FN
        if "overlap" in params:
            if params["overlap"]["unit"] == 'vox':
                self.overlap_vox = params["overlap"]["thr"]
            elif params["overlap"]["unit"] == 'mm3':
                # Convert the mm3 threshold into an (approximate) voxel count.
                self.overlap_vox = np.round(params["overlap"]["thr"] / (self.px * self.py * self.pz))
            elif params["overlap"]["unit"] == 'ratio':  # The ratio of the GT object
                self.overlap_ratio = params["overlap"]["thr"]
                self.overlap_vox = None
            # NOTE(review): an unrecognized "overlap" unit leaves overlap_vox
            # unset, which would raise AttributeError later -- confirm that
            # the unit is validated upstream.
        else:
            self.overlap_vox = 3
def _get_size_ranges(self, thr_lst, unit):
"""Get size ranges of objects in image.
Args:
thr_lst (list): Bins ranging each size category.
unit (str): Choice between 'vox' for voxel of 'mm3'.
Returns:
list, list: range list, suffix related to range
"""
assert unit in ['vox', 'mm3']
rng_lst, suffix_lst = [], []
for i, thr in enumerate(thr_lst):
if i == 0:
thr_low = self.size_min
else:
thr_low = thr_lst[i - 1] + 1
thr_high = thr
if unit == 'mm3':
thr_low = np.round(thr_low / (self.px * self.py * self.pz))
thr_high = np.round(thr_high / (self.px * self.py * self.pz))
rng_lst.append([thr_low, thr_high])
suffix_lst.append('_' + str(thr_low) + '-' + str(thr_high) + unit)
# last subgroup
thr_low = thr_lst[i] + 1
if unit == 'mm3':
thr_low = np.round(thr_low / (self.px * self.py * self.pz))
thr_high = np.inf
rng_lst.append([thr_low, thr_high])
suffix_lst.append('_' + str(thr_low) + '-INF' + unit)
return rng_lst, suffix_lst
def label_per_size(self, data):
"""Get data with labels corresponding to label size.
Args:
data (ndarray): Input data.
Returns:
ndarray
"""
data_label, n = label(data,
structure=self.bin_struct)
data_out = np.zeros(data.shape)
for idx in range(1, n + 1):
data_idx = (data_label == idx).astype(np.int)
n_nonzero = np.count_nonzero(data_idx)
for idx_size, rng in enumerate(self.size_rng_lst):
if n_nonzero >= rng[0] and n_nonzero <= rng[1]:
data_out[np.nonzero(data_idx)] = idx_size + 1
return data_out.astype(np.int)
def get_vol(self, data):
"""Get volume."""
vol = np.sum(data)
vol *= self.px * self.py * self.pz
return vol
def get_rvd(self):
"""Relative volume difference.
The volume is here defined by the physical volume, in mm3, of the non-zero voxels of a given mask.
Relative volume difference equals the difference between the ground-truth and prediction volumes, divided by the
ground-truth volume.
Optimal value is zero. Negative value indicates over-segmentation, while positive value indicates
under-segmentation.
"""
vol_gt = self.get_vol(self.data_gt)
vol_pred = self.get_vol(self.data_pred)
if vol_gt == 0.0:
return np.nan
rvd = (vol_gt - vol_pred)
rvd /= vol_gt
return rvd
def get_avd(self):
"""Absolute volume difference.
The volume is here defined by the physical volume, in mm3, of the non-zero voxels of a given mask.
Absolute volume difference equals the absolute value of the Relative Volume Difference.
Optimal value is zero.
"""
return abs(self.get_rvd())
def _get_ltp_lfn(self, label_size, class_idx=0):
"""Number of true positive and false negative lesion.
Args:
label_size (int): Size of label.
class_idx (int): Label index. If monolabel 0, else ranges from 0 to number of output channels - 1.
Note1: if two lesion_pred overlap with the current lesion_gt,
then only one detection is counted.
"""
ltp, lfn, n_obj = 0, 0, 0
for idx in range(1, self.n_gt[class_idx] + 1):
data_gt_idx = (self.data_gt_label[..., class_idx] == idx).astype(np.int)
overlap = (data_gt_idx * self.data_pred).astype(np.int)
# if label_size is None, then we look at all object sizes
# we check if the currrent object belongs to the current size range
if label_size is None or \
np.max(self.data_gt_per_size[..., class_idx][np.nonzero(data_gt_idx)]) == label_size:
if self.overlap_vox is None:
overlap_vox = np.round(np.count_nonzero(data_gt_idx) * self.overlap_ratio)
else:
overlap_vox = self.overlap_vox
if np.count_nonzero(overlap) >= overlap_vox:
ltp += 1
else:
lfn += 1
if label_size is None: # painting is done while considering all objects
self.data_painted[..., class_idx][self.data_gt_label[..., class_idx] == idx] = FN_COLOUR
n_obj += 1
return ltp, | |
import calendar
import datetime
import numpy as np
import pandas as pd
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_index_equal)
from numpy.testing import assert_allclose
import pytest
from pvlib.location import Location
from pvlib import solarposition, spa
from conftest import (requires_ephem, needs_pandas_0_17,
requires_spa_c, requires_numba)
# setup times and locations to be tested.
# 15-minute grid over two days, used by several tests below.
times = pd.date_range(start=datetime.datetime(2014,6,24),
                      end=datetime.datetime(2014,6,26), freq='15Min')

tus = Location(32.2, -111, 'US/Arizona', 700) # no DST issues possible
# In 2003, DST in US was from April 6 to October 26
golden_mst = Location(39.742476, -105.1786, 'MST', 1830.14) # no DST issues possible
golden = Location(39.742476, -105.1786, 'America/Denver', 1830.14) # DST issues possible
times_localized = times.tz_localize(tus.tz)

# NOTE(review): `tol` is also used as the *hour* argument in test_calc_time
# below -- confirm that dual use is intentional.
tol = 5
@pytest.fixture()
def expected_solpos():
    """Reference solar position for Golden, CO at 2003-10-17 12:30:30."""
    values = {
        'elevation': 39.872046,
        'apparent_zenith': 50.111622,
        'azimuth': 194.340241,
        'apparent_elevation': 39.888378,
    }
    return pd.DataFrame(values, index=['2003-10-17T12:30:30Z'])
@pytest.fixture()
def expected_solpos_multi():
    """Reference solar positions for two consecutive days at Golden, CO."""
    values = {
        'elevation': [39.872046, 39.505196],
        'apparent_zenith': [50.111622, 50.478260],
        'azimuth': [194.340241, 194.311132],
        'apparent_elevation': [39.888378, 39.521740],
    }
    return pd.DataFrame(values,
                        index=[['2003-10-17T12:30:30Z', '2003-10-18T12:30:30Z']])
# the physical tests are run at the same time as the NREL SPA test.
# pyephem reproduces the NREL result to 2 decimal places.
# this doesn't mean that one code is better than the other.
@requires_spa_c
def test_spa_c_physical(expected_solpos):
    """spa_c against the NREL SPA reference values (no-DST timezone)."""
    times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
                          periods=1, freq='D', tz=golden_mst.tz)
    ephem_data = solarposition.spa_c(times, golden_mst.latitude,
                                     golden_mst.longitude,
                                     pressure=82000,
                                     temperature=11)
    expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_spa_c
def test_spa_c_physical_dst(expected_solpos):
    """spa_c against the NREL SPA reference values (DST-capable timezone)."""
    times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
                          periods=1, freq='D', tz=golden.tz)
    ephem_data = solarposition.spa_c(times, golden.latitude,
                                     golden.longitude,
                                     pressure=82000,
                                     temperature=11)
    expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical(expected_solpos):
    """spa_python (numpy backend) against the NREL SPA reference values."""
    times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
                          periods=1, freq='D', tz=golden_mst.tz)
    ephem_data = solarposition.spa_python(times, golden_mst.latitude,
                                          golden_mst.longitude,
                                          pressure=82000,
                                          temperature=11, delta_t=67,
                                          atmos_refract=0.5667,
                                          how='numpy')
    expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_spa_python_numpy_physical_dst(expected_solpos):
    """spa_python (numpy backend), DST-capable timezone."""
    times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
                          periods=1, freq='D', tz=golden.tz)
    ephem_data = solarposition.spa_python(times, golden.latitude,
                                          golden.longitude,
                                          pressure=82000,
                                          temperature=11, delta_t=67,
                                          atmos_refract=0.5667,
                                          how='numpy')
    expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_numba
def test_spa_python_numba_physical(expected_solpos):
    """spa_python (numba backend, single thread) against NREL SPA values."""
    times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
                          periods=1, freq='D', tz=golden_mst.tz)
    ephem_data = solarposition.spa_python(times, golden_mst.latitude,
                                          golden_mst.longitude,
                                          pressure=82000,
                                          temperature=11, delta_t=67,
                                          atmos_refract=0.5667,
                                          how='numba', numthreads=1)
    expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@requires_numba
def test_spa_python_numba_physical_dst(expected_solpos):
    """spa_python (numba backend, single thread), DST-capable timezone."""
    times = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
                          periods=1, freq='D', tz=golden.tz)
    ephem_data = solarposition.spa_python(times, golden.latitude,
                                          golden.longitude, pressure=82000,
                                          temperature=11, delta_t=67,
                                          atmos_refract=0.5667,
                                          how='numba', numthreads=1)
    expected_solpos.index = times
    assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
@needs_pandas_0_17
def test_get_sun_rise_set_transit():
    """Compare sunrise/sunset against USNO reference values (two sites)."""
    south = Location(-35.0, 0.0, tz='UTC')
    times = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 0),
                              datetime.datetime(2004, 12, 4, 0)]
                             ).tz_localize('UTC')
    sunrise = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 7, 8, 15),
                                datetime.datetime(2004, 12, 4, 4, 38, 57)]
                               ).tz_localize('UTC').tolist()
    sunset = pd.DatetimeIndex([datetime.datetime(1996, 7, 5, 17, 1, 4),
                               datetime.datetime(2004, 12, 4, 19, 2, 2)]
                              ).tz_localize('UTC').tolist()
    result = solarposition.get_sun_rise_set_transit(times, south.latitude,
                                                    south.longitude,
                                                    delta_t=64.0)
    frame = pd.DataFrame({'sunrise':sunrise, 'sunset':sunset}, index=times)
    result_rounded = pd.DataFrame(index=result.index)
    # need to iterate because to_datetime does not accept 2D data
    # the rounding fails on pandas < 0.17
    # DataFrame.iteritems was removed in pandas 2.0; .items() is the
    # long-supported equivalent.
    for col, data in result.items():
        result_rounded[col] = pd.to_datetime(
            np.floor(data.values.astype(np.int64) / 1e9)*1e9, utc=True)

    del result_rounded['transit']
    assert_frame_equal(frame, result_rounded)

    # tests from USNO
    # Golden
    golden = Location(39.0, -105.0, tz='MST')
    times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
                              datetime.datetime(2015, 8, 2),]
                             ).tz_localize('MST')
    sunrise = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 7, 19, 2),
                                datetime.datetime(2015, 8, 2, 5, 1, 26)
                                ]).tz_localize('MST').tolist()
    sunset = pd.DatetimeIndex([datetime.datetime(2015, 1, 2, 16, 49, 10),
                               datetime.datetime(2015, 8, 2, 19, 11, 31)
                               ]).tz_localize('MST').tolist()
    result = solarposition.get_sun_rise_set_transit(times, golden.latitude,
                                                    golden.longitude,
                                                    delta_t=64.0)
    frame = pd.DataFrame({'sunrise':sunrise, 'sunset':sunset}, index=times)
    result_rounded = pd.DataFrame(index=result.index)
    # need to iterate because to_datetime does not accept 2D data
    # the rounding fails on pandas < 0.17
    for col, data in result.items():
        result_rounded[col] = (pd.to_datetime(
            np.floor(data.values.astype(np.int64) / 1e9)*1e9, utc=True)
            .tz_convert('MST'))

    del result_rounded['transit']
    assert_frame_equal(frame, result_rounded)
@requires_ephem
def test_pyephem_physical(expected_solpos):
    """pyephem against NREL SPA values, to 2 decimal places."""
    times = pd.date_range(datetime.datetime(2003,10,17,12,30,30),
                          periods=1, freq='D', tz=golden_mst.tz)
    ephem_data = solarposition.pyephem(times, golden_mst.latitude,
                                       golden_mst.longitude, pressure=82000,
                                       temperature=11)
    expected_solpos.index = times
    assert_frame_equal(expected_solpos.round(2),
                       ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_pyephem_physical_dst(expected_solpos):
    """pyephem against NREL SPA values, DST-capable timezone."""
    times = pd.date_range(datetime.datetime(2003,10,17,13,30,30), periods=1,
                          freq='D', tz=golden.tz)
    ephem_data = solarposition.pyephem(times, golden.latitude,
                                       golden.longitude, pressure=82000,
                                       temperature=11)
    expected_solpos.index = times
    assert_frame_equal(expected_solpos.round(2),
                       ephem_data[expected_solpos.columns].round(2))
@requires_ephem
def test_calc_time():
    """calc_time should recover the USNO-validated time for given alt/az."""
    import pytz
    import math
    # validation from USNO solar position calculator online

    epoch = datetime.datetime(1970,1,1)
    epoch_dt = pytz.utc.localize(epoch)

    loc = tus
    loc.pressure = 0
    actual_time = pytz.timezone(loc.tz).localize(
        datetime.datetime(2014, 10, 10, 8, 30))
    # NOTE(review): module-level `tol` (= 5) is used here as the *hour* of the
    # search lower bound, i.e. 05:00 local time -- confirm this dual use of
    # `tol` is intentional.
    lb = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, tol))
    ub = pytz.timezone(loc.tz).localize(datetime.datetime(2014, 10, 10, 10))
    alt = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
                                  'alt', math.radians(24.7))
    az = solarposition.calc_time(lb, ub, loc.latitude, loc.longitude,
                                 'az', math.radians(116.3))
    actual_timestamp = (actual_time - epoch_dt).total_seconds()
    # compare seconds-since-epoch after truncating to whole minutes
    assert_allclose((alt.replace(second=0, microsecond=0) -
                     epoch_dt).total_seconds(), actual_timestamp)
    assert_allclose((az.replace(second=0, microsecond=0) -
                     epoch_dt).total_seconds(), actual_timestamp)
@requires_ephem
def test_earthsun_distance():
    """The earth-sun distance should be roughly 1 AU."""
    when = pd.date_range(datetime.datetime(2003,10,17,13,30,30),
                         periods=1, freq='D')
    earthsun = solarposition.pyephem_earthsun_distance(when)
    assert_allclose(1, earthsun.values[0], atol=0.1)
def test_ephemeris_physical(expected_solpos):
    # Built-in ephemeris algorithm in golden_mst's fixed timezone.
    times = pd.date_range(datetime.datetime(2003, 10, 17, 12, 30, 30),
                          periods=1, freq='D', tz=golden_mst.tz)
    actual = solarposition.ephemeris(
        times, golden_mst.latitude, golden_mst.longitude,
        pressure=82000, temperature=11)
    expected_solpos.index = times
    # Compare at 2-decimal precision.
    assert_frame_equal(np.round(expected_solpos, 2),
                       np.round(actual, 2)[expected_solpos.columns])
def test_ephemeris_physical_dst(expected_solpos):
    # Built-in ephemeris algorithm in golden's DST timezone.
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
                          periods=1, freq='D', tz=golden.tz)
    actual = solarposition.ephemeris(
        times, golden.latitude, golden.longitude,
        pressure=82000, temperature=11)
    expected_solpos.index = times
    # Compare at 2-decimal precision.
    assert_frame_equal(np.round(expected_solpos, 2),
                       np.round(actual, 2)[expected_solpos.columns])
def test_get_solarposition_error():
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
                          periods=1, freq='D', tz=golden.tz)
    # An unrecognised method name must raise ValueError.
    with pytest.raises(ValueError):
        solarposition.get_solarposition(times, golden.latitude,
                                        golden.longitude,
                                        pressure=82000,
                                        temperature=11,
                                        method='error this')
@pytest.mark.parametrize(
    "pressure, expected", [
      (82000, expected_solpos()),
      (90000, pd.DataFrame(
          np.array([[ 39.88997, 50.11003, 194.34024, 39.87205, 14.64151,
                      50.12795]]),
          columns=['apparent_elevation', 'apparent_zenith', 'azimuth', 'elevation',
                   'equation_of_time', 'zenith'],
          index=expected_solpos().index))
      ])
def test_get_solarposition_pressure(pressure, expected):
    # Changing pressure should shift the refraction-corrected values.
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
                          periods=1, freq='D', tz=golden.tz)
    actual = solarposition.get_solarposition(times, golden.latitude,
                                             golden.longitude,
                                             pressure=pressure,
                                             temperature=11)
    # Re-index a copy of the expected frame onto the computed times.
    want = expected.copy()
    want.index = times
    assert_frame_equal(np.round(want, 5), np.round(actual, 5)[want.columns])
@pytest.mark.parametrize(
    "altitude, expected", [
      (golden.altitude, expected_solpos()),
      (2000, pd.DataFrame(
          np.array([[ 39.88788, 50.11212, 194.34024, 39.87205, 14.64151,
                      50.12795]]),
          columns=['apparent_elevation', 'apparent_zenith', 'azimuth', 'elevation',
                   'equation_of_time', 'zenith'],
          index=expected_solpos().index))
      ])
def test_get_solarposition_altitude(altitude, expected):
    # Changing altitude should shift the refraction-corrected values.
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
                          periods=1, freq='D', tz=golden.tz)
    actual = solarposition.get_solarposition(times, golden.latitude,
                                             golden.longitude,
                                             altitude=altitude,
                                             temperature=11)
    # Re-index a copy of the expected frame onto the computed times.
    want = expected.copy()
    want.index = times
    assert_frame_equal(np.round(want, 5), np.round(actual, 5)[want.columns])
@pytest.mark.parametrize(
    "delta_t, method, expected", [
      (None, 'nrel_numpy', expected_solpos_multi()),
      (67.0, 'nrel_numpy', expected_solpos_multi()),
      # BUG FIX: calling pytest.mark.xfail(...) directly on a bare parameter
      # tuple was deprecated and removed in pytest 4; pytest.param with
      # marks= is the supported spelling.
      pytest.param(
          None, 'nrel_numba', expected_solpos_multi(),
          marks=pytest.mark.xfail(
              raises=ValueError,
              reason='spa.calculate_deltat not implemented for numba yet')),
      (67.0, 'nrel_numba', expected_solpos_multi())
      ])
def test_get_solarposition_deltat(delta_t, method, expected):
    """get_solarposition should accept delta_t as None (computed) or a float."""
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
                          periods=2, freq='D', tz=golden.tz)
    ephem_data = solarposition.get_solarposition(times, golden.latitude,
                                                 golden.longitude,
                                                 pressure=82000,
                                                 delta_t=delta_t,
                                                 temperature=11,
                                                 method=method)
    this_expected = expected.copy()
    this_expected.index = times
    # Compare at 5-decimal precision.
    this_expected = np.round(this_expected, 5)
    ephem_data = np.round(ephem_data, 5)
    assert_frame_equal(this_expected, ephem_data[this_expected.columns])
def test_get_solarposition_no_kwargs(expected_solpos):
    # Defaults for pressure/temperature/altitude should reproduce the
    # golden values.
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
                          periods=1, freq='D', tz=golden.tz)
    actual = solarposition.get_solarposition(times, golden.latitude,
                                             golden.longitude)
    expected_solpos.index = times
    assert_frame_equal(np.round(expected_solpos, 2),
                       np.round(actual, 2)[expected_solpos.columns])
@requires_ephem
def test_get_solarposition_method_pyephem(expected_solpos):
    # Dispatch through get_solarposition's method='pyephem' path.
    times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30),
                          periods=1, freq='D', tz=golden.tz)
    actual = solarposition.get_solarposition(times, golden.latitude,
                                             golden.longitude,
                                             method='pyephem')
    expected_solpos.index = times
    assert_frame_equal(np.round(expected_solpos, 2),
                       np.round(actual, 2)[expected_solpos.columns])
def test_nrel_earthsun_distance():
    # Vector input: two dates, perihelion-ish and aphelion-ish.
    times = pd.DatetimeIndex([datetime.datetime(2015, 1, 2),
                              datetime.datetime(2015, 8, 2)]).tz_localize('MST')
    result = solarposition.nrel_earthsun_distance(times, delta_t=64.0)
    expected = pd.Series(np.array([0.983289204601, 1.01486146446]),
                         index=times)
    assert_series_equal(expected, result)
    # Scalar input: a bare datetime should be wrapped into an index.
    when = datetime.datetime(2015, 1, 2)
    result = solarposition.nrel_earthsun_distance(when, delta_t=64.0)
    expected = pd.Series(np.array([0.983289204601]),
                         index=pd.DatetimeIndex([when, ]))
    assert_series_equal(expected, result)
def test_equation_of_time():
    """Sanity-check the analytic equation-of-time approximations against the
    SPA-derived equation of time over a full year."""
    # BUG FIX: pd.DatetimeIndex(start=..., end=..., freq=...) was deprecated
    # and removed in modern pandas; pd.date_range is the supported equivalent.
    times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
                          freq="H")
    output = solarposition.spa_python(times, 37.8, -122.25, 100)
    eot = output['equation_of_time']
    eot_rng = eot.max() - eot.min()  # range of values, around 30 minutes
    eot_1 = solarposition.equation_of_time_spencer71(times.dayofyear)
    eot_2 = solarposition.equation_of_time_pvcdrom(times.dayofyear)
    # Normalize by the range so the tolerances are relative.
    assert np.allclose(eot_1 / eot_rng, eot / eot_rng, atol=0.3)  # spencer
    assert np.allclose(eot_2 / eot_rng, eot / eot_rng, atol=0.4)  # pvcdrom
def test_declination():
    """Sanity-check the analytic declination approximations against the
    SPA-derived declination over a full year."""
    # BUG FIX: pd.DatetimeIndex(start=..., end=..., freq=...) was deprecated
    # and removed in modern pandas; pd.date_range is the supported equivalent.
    times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
                          freq="H")
    atmos_refract = 0.5667
    delta_t = spa.calculate_deltat(times.year, times.month)
    unixtime = np.array([calendar.timegm(t.timetuple()) for t in times])
    # sst=True returns (apparent sidereal time, eq. of time, declination).
    _, _, declination = spa.solar_position(unixtime, 37.8, -122.25, 100,
                                           1013.25, 25, delta_t, atmos_refract,
                                           sst=True)
    declination = np.deg2rad(declination)
    declination_rng = declination.max() - declination.min()
    declination_1 = solarposition.declination_cooper69(times.dayofyear)
    declination_2 = solarposition.declination_spencer71(times.dayofyear)
    # Normalize by the range so the tolerances are relative.
    a, b = declination_1 / declination_rng, declination / declination_rng
    assert np.allclose(a, b, atol=0.03)  # cooper
    a, b = declination_2 / declination_rng, declination / declination_rng
    assert np.allclose(a, b, atol=0.02)  # spencer
def test_analytical_zenith():
    """Analytic zenith (from eq. of time + declination) should track SPA."""
    # BUG FIX: pd.DatetimeIndex(start=..., end=..., freq=...) was deprecated
    # and removed in modern pandas; pd.date_range is the supported equivalent.
    times = pd.date_range(start="1/1/2015 0:00", end="12/31/2015 23:00",
                          freq="H").tz_localize('Etc/GMT+8')
    lat, lon = 37.8, -122.25
    lat_rad = np.deg2rad(lat)
    output = solarposition.spa_python(times, lat, lon, 100)
    solar_zenith = np.deg2rad(output['zenith'])  # spa
    # spencer
    eot = solarposition.equation_of_time_spencer71(times.dayofyear)
    hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
    decl = solarposition.declination_spencer71(times.dayofyear)
    zenith_1 = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
    # pvcdrom and cooper
    eot = solarposition.equation_of_time_pvcdrom(times.dayofyear)
    hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
    decl = solarposition.declination_cooper69(times.dayofyear)
    zenith_2 = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
    assert np.allclose(zenith_1, solar_zenith, atol=0.015)
    assert np.allclose(zenith_2, solar_zenith, atol=0.025)
def test_analytical_azimuth():
times = pd.DatetimeIndex(start="1/1/2015 0:00", end="12/31/2015 23:00",
freq="H").tz_localize('Etc/GMT+8')
lat, lon = 37.8, -122.25
lat_rad = np.deg2rad(lat)
output = solarposition.spa_python(times, lat, lon, 100)
solar_azimuth = np.deg2rad(output['azimuth']) # spa
solar_zenith = np.deg2rad(output['zenith'])
# spencer
eot = solarposition.equation_of_time_spencer71(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_spencer71(times.dayofyear)
zenith = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
azimuth_1 = solarposition.solar_azimuth_analytical(lat_rad, hour_angle,
decl, zenith)
# pvcdrom and cooper
eot = solarposition.equation_of_time_pvcdrom(times.dayofyear)
hour_angle = np.deg2rad(solarposition.hour_angle(times, lon, eot))
decl = solarposition.declination_cooper69(times.dayofyear)
zenith = solarposition.solar_zenith_analytical(lat_rad, hour_angle, decl)
azimuth_2 = solarposition.solar_azimuth_analytical(lat_rad, hour_angle,
decl, zenith)
idx = np.where(solar_zenith < np.pi/2)
assert np.allclose(azimuth_1[idx], solar_azimuth.as_matrix()[idx],
atol=0.01)
assert np.allclose(azimuth_2[idx], solar_azimuth.as_matrix()[idx],
atol=0.017)
# test for NaN values at boundary conditions (PR #431)
test_angles = np.radians(np.array(
[[ 0., -180., -20.],
[ 0., 0., -5.],
[ 0., 0., 0.],
[ 0., 0., 15.],
[ 0., 180., 20.],
[ 30., 0., -20.],
[ 30., 0., -5.],
[ 30., 0., 0.],
[ 30., 180., 5.],
[ 30., | |
# Bottom left diagonal
diagBRRange = [-7, -14, -21, -28, -35, -42, -49] # Bottom right diagonal
diagTLRange = [7, 14, 21, 28, 35, 42, 49] # Top left diagonal
diagTRRange = [9, 18, 27, 36, 45, 54, 63] # Top right diagonal
# Conditions:
# If I'm at or one space from an edge, consider me safe unless I spot an enemy.
# If I spot an enemy first, consider me NOT safe.
# If I spot an ally first, consider me safe.
myProtection = 0
theirProtection = 0
for king in myKings:
for dif in colNegRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64:
if king + dif in allTheirPieces:
break
elif king + dif in allMyPieces:
myProtection = myProtection + 1
break
elif colNegRange.index(dif) < 1:
myProtection = myProtection + 1
break
else:
break
for dif in colPosRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64:
if king + dif in allTheirPieces:
break
elif king + dif in allMyPieces:
myProtection = myProtection + 1
break
elif colPosRange.index(dif) < 1:
myProtection = myProtection + 1
break
else:
break
for dif in rowNegRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and math.floor(king + dif / 8) == math.floor(king / 8):
if king + dif in allTheirPieces:
break
elif king + dif in allMyPieces:
myProtection = myProtection + 1
break
elif rowNegRange.index(dif) < 1:
myProtection = myProtection + 1
break
else:
break
for dif in rowPosRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and math.floor(king + dif / 8) == math.floor(king / 8):
if king + dif in allTheirPieces:
break
elif king + dif in allMyPieces:
myProtection = myProtection + 1
break
elif rowPosRange.index(dif) < 1:
myProtection = myProtection + 1
break
else:
break
for dif in diagTLRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and abs(math.floor(king + dif / 8) - math.floor(king / 8)) == diagTLRange.index(dif) + 1:
if king + dif in allTheirPieces:
break
elif king + dif in allMyPieces:
myProtection = myProtection + 1
break
elif diagTLRange.index(dif) < 1:
myProtection = myProtection + 1
break
else:
break
for dif in diagTRRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and abs(math.floor(king + dif / 8) - math.floor(king / 8)) == diagTRRange.index(dif) + 1:
if king + dif in allTheirPieces:
break
elif king + dif in allMyPieces:
myProtection = myProtection + 1
break
elif diagTRRange.index(dif) < 1:
myProtection = myProtection + 1
break
else:
break
for dif in diagBLRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and abs(math.floor(king + dif / 8) - math.floor(king / 8)) == diagBLRange.index(dif) + 1:
if king + dif in allTheirPieces:
break
elif king + dif in allMyPieces:
myProtection = myProtection + 1
break
elif diagBLRange.index(dif) < 1:
myProtection = myProtection + 1
break
else:
break
for dif in diagBRRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and abs(math.floor(king + dif / 8) - math.floor(king / 8)) == diagBRRange.index(dif) + 1:
if king + dif in allTheirPieces:
break
elif king + dif in allMyPieces:
myProtection = myProtection + 1
break
elif diagBRRange.index(dif) < 1:
myProtection = myProtection + 1
break
else:
break
for king in theirKings:
for dif in colNegRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64:
if king + dif in allMyPieces:
break
elif king + dif in allTheirPieces:
theirProtection = theirProtection + 1
break
elif colNegRange.index(dif) < 1:
theirProtection = theirProtection + 1
break
else:
break
for dif in colPosRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64:
if king + dif in allMyPieces:
break
elif king + dif in allTheirPieces:
theirProtection = theirProtection + 1
break
elif colPosRange.index(dif) < 1:
theirProtection = theirProtection + 1
break
else:
break
for dif in rowNegRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and math.floor(king + dif / 8) == math.floor(king / 8):
if king + dif in allMyPieces:
break
elif king + dif in allTheirPieces:
theirProtection = theirProtection + 1
break
elif rowNegRange.index(dif) < 1:
theirProtection = theirProtection + 1
break
else:
break
for dif in rowPosRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and math.floor(king + dif / 8) == math.floor(king / 8):
if king + dif in allMyPieces:
break
elif king + dif in allTheirPieces:
theirProtection = theirProtection + 1
break
elif rowPosRange.index(dif) < 1:
theirProtection = theirProtection + 1
break
else:
break
for dif in diagTLRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and abs(math.floor(king + dif / 8) - math.floor(king / 8)) == diagTLRange.index(dif) + 1:
if king + dif in allMyPieces:
break
elif king + dif in allTheirPieces:
theirProtection = theirProtection + 1
break
elif diagTLRange.index(dif) < 1:
theirProtection = theirProtection + 1
break
else:
break
for dif in diagTRRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and abs(math.floor(king + dif / 8) - math.floor(king / 8)) == diagTRRange.index(dif) + 1:
if king + dif in allMyPieces:
break
elif king + dif in allTheirPieces:
theirProtection = theirProtection + 1
break
elif diagTRRange.index(dif) < 1:
theirProtection = theirProtection + 1
break
else:
break
for dif in diagBLRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and abs(math.floor(king + dif / 8) - math.floor(king / 8)) == diagBLRange.index(dif) + 1:
if king + dif in allMyPieces:
break
elif king + dif in allTheirPieces:
theirProtection = theirProtection + 1
break
elif diagBLRange.index(dif) < 1:
theirProtection = theirProtection + 1
break
else:
break
for dif in diagBRRange:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and abs(math.floor(king + dif / 8) - math.floor(king / 8)) == diagBRRange.index(dif) + 1:
if king + dif in allMyPieces:
break
elif king + dif in allTheirPieces:
theirProtection = theirProtection + 1
break
elif diagBRRange.index(dif) < 1:
theirProtection = theirProtection + 1
break
else:
break
protectionValue = myProtection - theirProtection
kingSafetyVal = protectionValue + attackerValue + defenderVal + pawnShieldVal + escapeVal
# Finalize weights, some weights have caps.
# Activity caps at +/- 1.5
if activityVal > 1.5:
activityVal = 1.5
elif activityVal < -1.5:
activityVal = -1.5
# King safety caps at +/- 1 or 2
if round(kingSafetyVal) > 2:
kingSafetyVal = 2
elif round(kingSafetyVal) < -2:
kingSafetyVal = -1
elif round(kingSafetyVal) == 0:
if kingSafetyVal > 0:
kingSafetyVal = 1
else:
kingSafetyVal = -1
else:
kingSafetyVal = round(kingSafetyVal)
totalScore | |
#!/usr/bin/env python3
import board
import copy
DEBUG = False
class Empty:
    """Placeholder occupying an empty board square; renders as '#'."""

    def __str__(self):
        return "#"
class Player:
    """A participant in the game: a display name plus a contact email."""

    def __init__(self, name, email):
        # Reject empty names up front; email is stored as-is.
        assert (len(str(name)) >= 1), "name must be one or more character"
        self.name = name
        self.email = email
self.email = email
# Unit
# name: One or more character
# symbol: One single character
# speed: speed 10 is to move once per clock tick and 1 is to move once every 10th tick
# attack: damage per attack
# health: total amount of health
class UnitType:
    """A unit archetype; Board.add copies it to create live units.

    name: one or more characters (the archetype name; instances are renamed)
    symbol: a single display character
    attack: damage dealt per attack, 1-10
    health: hit points, 1-10; the unit is destroyed at 0 or below
    energy: action budget, 1-100; moving and attacking spend energy
    """

    # Movement directions.
    NONE = 0
    NORTH = 1
    EAST = 2
    SOUTH = 3
    WEST = 4
    # Unit lifecycle states: INITIAL before first commit, MOVING while a
    # move is pending, NOP when idle.
    INITIAL = 0
    MOVING = 1
    NOP = 2

    def __init__(self, name, symbol, attack, health, energy):
        self.name = name
        self.type_name = name
        # XXX this is a rather not so nice way of preserving the original
        # type name when this object is copied and turned into a unit
        assert (len(str(name)) >= 1), "name must be one or more character"
        self.symbol = symbol
        assert (len(str(symbol)) == 1), "symbol must be only one character"
        self.attack = attack
        assert isinstance(attack, int), "attack must be an integer value"
        assert ((attack >= 1) and (attack <= 10)), "attack must be a value from 1 to 10"
        self.health = health
        assert isinstance(health, int), "health must be an integer value"
        assert ((health >= 1) and (health <= 10)), "health must be a value from 1 to 10"
        self.energy = energy
        # BUG FIX: this message previously said "health" instead of "energy".
        assert isinstance(energy, int), "energy must be an integer value"
        assert ((energy >= 1) and (energy <= 100)), "energy must be a value from 1 to 100"
        self.state = UnitType.INITIAL
        self.direction = UnitType.NONE
        self.destroyed = False
        self.on_board = False
        self.seen_by = []   # units encountered in combat
        self.player = None  # owning Player, set via setPlayer()

    def move(self, direction):
        """Queue a move in the given direction for the next preCommit."""
        self.state = UnitType.MOVING
        self.direction = direction

    def setName(self, name):
        self.name = name

    def setBoard(self, board, board_max_x, board_max_y):
        """Attach this unit to a board and record the board dimensions."""
        self.board = board
        self.board_max_x = board_max_x
        self.board_max_y = board_max_y
        self.on_board = True

    def setCoords(self, x, y):
        self.x = x
        self.y = y

    def setHealth(self, health):
        self.health = health

    def setEnergy(self, energy):
        self.energy = energy

    def setDestroyed(self, destroyed):
        self.destroyed = destroyed

    def setOnBoard(self, on_board):
        self.on_board = on_board

    def setPlayer(self, player):
        self.player = player
        assert (type(player) is Player), "player object must be provided"

    def incomingAttack(self, attack):
        """Apply incoming damage; mark the unit destroyed at <= 0 health."""
        if DEBUG:
            print(f"incoingAttack: {self.name} being attacked")
        self.health = self.health - attack
        if self.health <= 0:
            self.destroyed = True

    # calculates attacks and marks units as DESTROYED, creates arrays of units in squares where multiple units are
    # trying to move simultaneously into the same square
    def preCommit(self):
        if self.state == UnitType.INITIAL:
            # make sure that location on the board is empty
            assert type(self.board[self.x, self.y]) is Empty, f"can't add {self.name} to board at ({self.x},{self.y})"
        elif self.state == UnitType.MOVING:
            dest_x = self.x
            dest_y = self.y
            # Work out the destination square; moves off the edge are
            # clamped and become no-ops.
            if self.direction == UnitType.NORTH:
                dest_y = self.y - 1
                self.direction = UnitType.NONE
                if dest_y < 0:
                    self.y = 0
                    self.state = UnitType.NOP
                    return
            elif self.direction == UnitType.EAST:
                dest_x = self.x + 1
                self.direction = UnitType.NONE
                if dest_x > self.board_max_x - 1:
                    self.x = self.board_max_x - 1
                    self.state = UnitType.NOP
                    return
            elif self.direction == UnitType.SOUTH:
                dest_y = self.y + 1
                self.direction = UnitType.NONE
                if dest_y > self.board_max_y - 1:
                    self.y = self.board_max_y - 1
                    self.state = UnitType.NOP
                    return
            elif self.direction == UnitType.WEST:
                dest_x = self.x - 1
                self.direction = UnitType.NONE
                if dest_x < 0:
                    self.x = 0
                    self.state = UnitType.NOP
                    return
            else:
                self.state = UnitType.NOP
                return
            if type(self.board[dest_x, dest_y]) is Empty:
                # Moving into an empty square costs a little energy.
                energy = self.energy - (self.energy // 100 + 1)
                # only act if the unit has enough energy
                if energy >= 0:
                    self.energy = energy
                    self.board[self.x, self.y] = Empty()
                    self.setCoords(dest_x, dest_y)
                    self.board[self.x, self.y] = [ self ]
                    if DEBUG:
                        print(f"preCommit: {self.name} move to [{self.x},{self.y}]")
            elif type(self.board[dest_x, dest_y]) is list:
                # Another unit is also moving here this tick; join the list
                # so commit() can resolve the contested square.
                energy = self.energy - (self.energy // 100 + 1)
                # only act if the unit has enough energy
                if energy >= 0:
                    self.energy = energy
                    self.board[self.x, self.y] = Empty()
                    self.setCoords(dest_x, dest_y)
                    self.board[dest_x, dest_y].append(self)
                    if DEBUG:
                        print(f"preCommit: {self.name} added to list in [{self.x},{self.y}]")
            elif type(self.board[dest_x, dest_y]) is UnitType:
                # Destination is occupied by a settled unit: attack it
                # instead of moving. Attacking costs `attack` energy.
                energy = self.energy - self.attack
                # only act if the unit has enough energy
                if energy >= 0:
                    self.energy = energy
                    target = self.board[dest_x, dest_y]
                    target.incomingAttack(self.attack)
                    # populate seen_by
                    self.seen_by.append(target)
                    target.seen_by.append(self)
                    if DEBUG:
                        print(f"preCommit: {self.name} attack {target.name}")
                self.state = UnitType.NOP
                return
        else:
            pass

    # processes all arrays created in the precommit phase, by calculating attacks and marking units DESTROYED
    # removes all DESTROYED units from the board
    def commit(self):
        if self.state == UnitType.INITIAL:
            # make sure that location on the board is empty
            # BUG FIX: the message used bare `name`/`x`/`y` (undefined in
            # this scope), which raised NameError instead of showing the
            # intended assertion message.
            assert type(self.board[self.x, self.y]) is Empty, f"can't add {self.name} to board at ({self.x},{self.y})"
            # add the unit to the board
            self.board[self.x, self.y] = self
            self.state = UnitType.NOP
        elif self.state == UnitType.MOVING:
            assert not(self.state == UnitType.MOVING), "During commit, no unit should be in the MOVING state"
        else:
            if type(self.board[self.x, self.y]) is list:
                # Contested square: units fight until at most one survives.
                unit_count = len(self.board[self.x, self.y])
                if DEBUG:
                    print(f"{self.name} commit process list in [{self.x},{self.y}]: {self.board[self.x, self.y]}")
                while unit_count > 1:
                    if DEBUG:
                        print(f"{self.name} commit process {unit_count} units in square [{self.x},{self.y}]")
                    for unit in self.board[self.x, self.y]:
                        for target in self.board[self.x, self.y]:
                            if DEBUG:
                                print(f"{self.name} commit processing {unit.name} -> {target.name}")
                            if not(unit is target):
                                energy = unit.energy - unit.attack
                                if energy >= 0:
                                    unit.energy = energy
                                    if DEBUG:
                                        print(f"commit: {target.name} attack {unit.name}")
                                    target.incomingAttack(unit.attack)
                                    # populate seen_by
                                    unit.seen_by.append(target)
                                    target.seen_by.append(unit)
                    # Recount the survivors after this round of attacks.
                    for unit in self.board[self.x, self.y]:
                        if unit.destroyed:
                            unit_count = unit_count - 1
                # Settle the square: the survivor (if any) occupies it.
                for unit in self.board[self.x, self.y]:
                    if unit.destroyed == False:
                        self.board[self.x, self.y] = unit
                        if DEBUG:
                            print(f"{self.name} commit add unit to square [{self.x},{self.y}]")
                    else:
                        unit.on_board = False
                if unit_count == 0:
                    self.board[self.x, self.y] = Empty()
            else:
                # A settled unit that was destroyed this tick leaves the board.
                if self.destroyed:
                    self.board[self.x, self.y] = Empty()
                    self.on_board = False
                    if DEBUG:
                        print(f"{self.name} commit removing unit from square [{self.x},{self.y}]")

    def dump(self):
        """Return (and optionally print) a one-line debug summary."""
        result = f"player: \"{self.player.name}\", type: \"{self.type_name}\", name: \"{self.name}\", symbol: \"{self.symbol}\", attack: \"{self.attack}\", health: \"{self.health}\", energy: \"{self.energy}\", x: {self.x}, y: {self.y}, state: {self.state}, direction: {self.direction}, destroyed: {self.destroyed}, on_board: {self.on_board}"
        if DEBUG:
            print(result)
        return(result)

    def __str__(self):
        return(self.symbol)
# Board
# size_x: board size x
# size_y: board size y
class Board:
def __init__(self, size_x, size_y):
self.size_x = size_x
assert isinstance(size_x, int), "size_x must be an integer value"
assert ((size_x >= 2) and (size_x <= 10)), "size_x must be a value from 2 to 10"
self.size_y = size_y
assert isinstance(size_y, int), "size_x must be an integer value"
assert ((size_y >= 2) and (size_y <= 10)), "size_y must be a value from 2 to 10"
self.board = board.Board((size_x, size_y))
for x in range(0, size_x):
for y in range(0, size_y):
self.board[x, y] = Empty()
self.units = []
self.unit_dict = {}
self.types = {}
def add(self, player, x, y, name, unit_type, health = None, energy = None, destroyed = False, on_board = True):
if DEBUG:
print(type(unit_type))
print(type(player))
assert x >= 0 and x < self.size_x and y >= 0 and y < self.size_y, f"cordinates ({x}, {y}) are out of bounds for this board"
# add the unit to a dictionary of types organised by player
if not(player.name in self.types.keys()):
self.types[player.name] = {}
self.types[player.name][unit_type.name] = unit_type
# make a shallow copy of the unit type to create a new unit instance
unit = copy.copy(unit_type)
# reset the unit name
unit.setName(name)
# set the player
unit.setPlayer(player)
# add a ref to the board into the unit + the size
unit.setBoard(self.board, self.size_x, self.size_y)
# keep a copy of the unit coords in the unit
unit.setCoords(x, y)
# if the health value has been supplied, set it
if health != None:
unit.setHealth(health)
# if the energy value has been supplied, set it
if energy != None:
unit.setEnergy(energy)
# mark the unit destroyed if required (needed when loading ongoing games)
unit.setDestroyed(destroyed)
# mark the unit on the board (needed when loading ongoing games)
unit.setOnBoard(on_board)
# set the coordinates
unit.setCoords(x,y)
# add it to the unit list
self.units.append(unit)
# add it to the unit dict
if name in self.unit_dict:
for instance in self.unit_dict[name]:
assert instance.player != player, f"unit {name} already exists for {player.name}"
self.unit_dict[name].append(unit)
else:
self.unit_dict[name] = [unit]
# | |
# <gh_stars>10-100
import functools
import json
import sys
import uuid
from dataclasses import dataclass
from datetime import date, datetime
from pathlib import Path
from typing import Dict, Generator, Iterable, List, Optional
import datacube.drivers.postgres._api as postgres_api
import fiona
import shapely.ops
import structlog
from datacube import Datacube
from datacube.drivers.postgres._fields import PgDocField, RangeDocField
from datacube.index import Index
from datacube.model import Dataset, DatasetType, Field, MetadataType, Range
from geoalchemy2 import Geometry, WKBElement
from geoalchemy2.shape import from_shape, to_shape
from psycopg2._range import Range as PgRange
from shapely.geometry import shape
from sqlalchemy import (
BigInteger,
Integer,
SmallInteger,
String,
and_,
bindparam,
case,
column,
func,
literal,
null,
select,
)
from sqlalchemy.dialects import postgresql as postgres
from sqlalchemy.engine import Engine
from sqlalchemy.sql.elements import ClauseElement, Label
from cubedash._utils import ODC_DATASET as DATASET, alchemy_engine, infer_crs
from cubedash.summary._schema import DATASET_SPATIAL, SPATIAL_REF_SYS
# Module-level structured logger.
_LOG = structlog.get_logger()
# Landsat WRS2 path/row boundary shapefiles bundled with the package
# (descending and ascending orbit passes).
# NOTE(review): "WRS2_acsending.shp" looks misspelled, but it may match the
# actual filename shipped on disk -- confirm before renaming.
_WRS_PATH_ROW = [
    Path(__file__).parent.parent / "data" / "WRS2_descending" / "WRS2_descending.shp",
    Path(__file__).parent.parent / "data" / "WRS2_ascending" / "WRS2_acsending.shp",
]
class UnsupportedWKTProductCRS(NotImplementedError):
    """We can't, within Postgis, support arbitrary WKT CRSes at the moment."""

    def __init__(self, reason: str) -> None:
        # BUG FIX: pass the reason to the base class so str(exc) and
        # tracebacks carry the message (previously it was empty).
        super().__init__(reason)
        self.reason = reason
def get_dataset_extent_alchemy_expression(md: MetadataType, default_crs: str = None):
    """
    Build an SQLAlchemy expression to get the extent for a dataset.
    It's returned as a postgis geometry.
    The logic here mirrors the extent() function of datacube.model.Dataset.

    :param md: metadata type describing where spatial fields live in the
        dataset's jsonb document.
    :param default_crs: CRS string (e.g. "EPSG:4326") to assume when a
        dataset document doesn't declare one.
    :return: a Geometry-typed SQLAlchemy expression, or None if the
        product has no spatial fields at all.
    """
    doc = _jsonb_doc_expression(md)
    if "grid_spatial" not in md.definition["dataset"]:
        # Non-spatial product
        return None
    projection_offset = _projection_doc_offset(md)
    if expects_eo3_metadata_type(md):
        # EO3 layout: prefer the dataset's own 'geometry' GeoJSON.
        return func.ST_SetSRID(
            case(
                [
                    # If we have geometry, use it as the polygon.
                    (
                        doc[["geometry"]] != None,
                        func.ST_GeomFromGeoJSON(doc[["geometry"]], type_=Geometry),
                    )
                ],
                # Otherwise construct a polygon from the computed bounds that ODC added on index.
                else_=_bounds_polygon(doc, projection_offset),
            ),
            get_dataset_srid_alchemy_expression(md, default_crs),
        )
    else:
        # EO1 layout: prefer the explicit 'valid_data' polygon.
        valid_data_offset = projection_offset + ["valid_data"]
        return func.ST_SetSRID(
            case(
                [
                    # If we have valid_data offset, use it as the polygon.
                    (
                        doc[valid_data_offset] != None,
                        func.ST_GeomFromGeoJSON(doc[valid_data_offset], type_=Geometry),
                    )
                ],
                # Otherwise construct a polygon from the four corner points.
                else_=_bounds_polygon(doc, projection_offset),
            ),
            get_dataset_srid_alchemy_expression(md, default_crs),
            type_=Geometry,
        )
def expects_eo3_metadata_type(md: MetadataType) -> bool:
    """
    Does the given metadata type expect EO3 datasets?
    """
    # There's no explicit EO3 flag, so infer it from the document layout:
    # EO3 keeps measurements at ['measurements'], while EO1 nests them
    # under ['image', 'bands'].
    return md.definition["dataset"]["measurements"] == ["measurements"]
def _projection_doc_offset(md):
    """Path of the grid_spatial section within this type's dataset document."""
    return md.definition["dataset"]["grid_spatial"]
def _jsonb_doc_expression(md):
    """SQLAlchemy expression for a dataset's full jsonb metadata document."""
    return md.dataset_fields["metadata_doc"].alchemy_expression
def _bounds_polygon(doc, projection_offset):
    """Build a postgis polygon from the document's four geo_ref corner points.

    The ring runs ll -> ul -> ur -> lr and closes back at ll.
    """
    geo_ref_points_offset = projection_offset + ["geo_ref_points"]
    corner_points = tuple(
        _gis_point(doc, geo_ref_points_offset + [corner])
        for corner in ("ll", "ul", "ur", "lr", "ll")
    )
    return func.ST_MakePolygon(
        func.ST_MakeLine(postgres.array(corner_points)),
        type_=Geometry,
    )
def _size_bytes_field(dt: DatasetType):
    """Expression for a dataset's size in bytes.

    Uses the indexed 'size_bytes' field when the metadata type defines one;
    otherwise falls back to reading it out of the jsonb document.
    """
    md_fields = dt.metadata_type.dataset_fields
    if "size_bytes" not in md_fields:
        return _jsonb_doc_expression(dt.metadata_type)["size_bytes"].astext.cast(BigInteger)
    return md_fields["size_bytes"].alchemy_expression
def get_dataset_srid_alchemy_expression(md: MetadataType, default_crs: str = None):
    """
    Build an SQLAlchemy expression resolving a dataset's CRS to a postgis
    srid (a row of spatial_ref_sys).

    Tries, in order: a shorthand "auth:code" crs string, an AUTHORITY[...]
    suffix in raw WKT, legacy datum/zone fields, then the given default.

    :param md: metadata type describing the dataset document layout.
    :param default_crs: CRS (e.g. "EPSG:4326") to assume when the dataset
        document doesn't declare one.
    """
    doc = md.dataset_fields["metadata_doc"].alchemy_expression
    if "grid_spatial" not in md.definition["dataset"]:
        # Non-spatial product
        return None
    projection_offset = md.definition["dataset"]["grid_spatial"]
    # Where the CRS string lives depends on the metadata flavour.
    if expects_eo3_metadata_type(md):
        spatial_ref = doc[["crs"]].astext
    else:
        # Most have a spatial_reference field we can use directly.
        spatial_ref = doc[projection_offset + ["spatial_reference"]].astext
    # When datasets have no CRS, optionally use this as default.
    default_crs_expression = None
    if default_crs:
        if not default_crs.lower().startswith(
            "epsg:"
        ) and not default_crs.lower().startswith("esri:"):
            # HACK: Change default CRS with inference
            inferred_crs = infer_crs(default_crs)
            if inferred_crs is None:
                raise UnsupportedWKTProductCRS(
                    f"WKT Product CRSes are not currently well supported, and "
                    f"we can't infer this product's one. "
                    f"(Ideally use an auth-name format for CRS, such as 'EPSG:1234') "
                    f"Got: {default_crs!r}"
                )
            default_crs = inferred_crs
        # Look up the srid for the "auth:code" default in spatial_ref_sys.
        auth_name, auth_srid = default_crs.split(":")
        default_crs_expression = (
            select([SPATIAL_REF_SYS.c.srid])
            .where(func.lower(SPATIAL_REF_SYS.c.auth_name) == auth_name.lower())
            .where(SPATIAL_REF_SYS.c.auth_srid == int(auth_srid))
            .as_scalar()
        )
    # coalesce() picks the first strategy that yields a non-null srid.
    expression = func.coalesce(
        case(
            [
                (
                    # If matches shorthand code: eg. "epsg:1234"
                    spatial_ref.op("~")(r"^[A-Za-z0-9]+:[0-9]+$"),
                    select([SPATIAL_REF_SYS.c.srid])
                    .where(
                        func.lower(SPATIAL_REF_SYS.c.auth_name)
                        == func.lower(func.split_part(spatial_ref, ":", 1))
                    )
                    .where(
                        SPATIAL_REF_SYS.c.auth_srid
                        == func.split_part(spatial_ref, ":", 2).cast(Integer)
                    )
                    .as_scalar(),
                )
            ],
            else_=None,
        ),
        case(
            [
                (
                    # Plain WKT that ends in an authority code.
                    # Extract the authority name and code using regexp. Yuck!
                    # Eg: ".... AUTHORITY["EPSG","32756"]]"
                    spatial_ref.op("~")(r'AUTHORITY\["[a-zA-Z0-9]+", *"[0-9]+"\]\]$'),
                    select([SPATIAL_REF_SYS.c.srid])
                    .where(
                        func.lower(SPATIAL_REF_SYS.c.auth_name)
                        == func.lower(
                            func.substring(
                                spatial_ref,
                                r'AUTHORITY\["([a-zA-Z0-9]+)", *"[0-9]+"\]\]$',
                            )
                        )
                    )
                    .where(
                        SPATIAL_REF_SYS.c.auth_srid
                        == func.substring(
                            spatial_ref, r'AUTHORITY\["[a-zA-Z0-9]+", *"([0-9]+)"\]\]$'
                        ).cast(Integer)
                    )
                    .as_scalar(),
                )
            ],
            else_=None,
        ),
        # Some older datasets have datum/zone fields instead.
        # The only remaining ones in DEA are 'GDA94'.
        case(
            [
                (
                    doc[(projection_offset + ["datum"])].astext == "GDA94",
                    # GDA94 MGA zones map to EPSG 283xx, where xx is the zone.
                    select([SPATIAL_REF_SYS.c.srid])
                    .where(func.lower(SPATIAL_REF_SYS.c.auth_name) == "epsg")
                    .where(
                        SPATIAL_REF_SYS.c.auth_srid
                        == (
                            "283"
                            + func.abs(
                                doc[(projection_offset + ["zone"])].astext.cast(Integer)
                            )
                        ).cast(Integer)
                    )
                    .as_scalar(),
                )
            ],
            else_=None,
        ),
        default_crs_expression,
        # TODO: Handle arbitrary WKT strings (?)
        # 'GEOGCS[\\"GEOCENTRIC DATUM of AUSTRALIA\\",DATUM[\\"GDA94\\",SPHEROID[
        # \\"GRS80\\",6378137,298.257222101]],PRIMEM[\\"Greenwich\\",0],UNIT[\\
        # "degree\\",0.0174532925199433]]'
    )
    # print(as_sql(expression))
    return expression
def _gis_point(doc, doc_offset):
    """Build a PostGIS point from the ``x``/``y`` members found at
    *doc_offset* within the JSONB *doc* expression."""
    x_coord = doc[doc_offset + ["x"]].astext.cast(postgres.DOUBLE_PRECISION)
    y_coord = doc[doc_offset + ["y"]].astext.cast(postgres.DOUBLE_PRECISION)
    return func.ST_MakePoint(x_coord, y_coord)
def refresh_spatial_extents(
    index: Index,
    product: DatasetType,
    clean_up_deleted=False,
    assume_after_date: datetime = None,
):
    """
    Update the spatial extents to match any changes upstream in ODC.

    Runs delete (archived), update (changed), then insert (new) passes against
    DATASET_SPATIAL, and returns the total number of rows affected.

    :param assume_after_date: Only scan datasets that have changed after the given (db server) time.
        If None, all datasets will be regenerated.
    :param clean_up_deleted: Scan for any manually deleted rows too. Slow.
    """
    engine: Engine = alchemy_engine(index)
    log = _LOG.bind(product_name=product.name, after_date=assume_after_date)
    # First, remove any archived datasets from our spatial table.
    datasets_to_delete = (
        select([DATASET.c.id])
        .where(DATASET.c.archived.isnot(None))
        .where(DATASET.c.dataset_type_ref == product.id)
    )
    if assume_after_date is not None:
        # Note that we use "dataset_changed_expression" to scan the datasets,
        # rather than "where archived > date", because the latter has no index!
        # (.... and we're using dataset_changed_expression's index everywhere else,
        # so it's probably still in memory and super fast!)
        datasets_to_delete = datasets_to_delete.where(
            dataset_changed_expression() > assume_after_date
        )
    log.info(
        "spatial_archival",
    )
    changed = engine.execute(
        DATASET_SPATIAL.delete().where(DATASET_SPATIAL.c.id.in_(datasets_to_delete))
    ).rowcount
    log.info(
        "spatial_archival.end",
        change_count=changed,
    )
    # Forcing? Check every other dataset for removal, so we catch manually-deleted rows from the table.
    if clean_up_deleted:
        log.warning(
            "spatial_deletion_full_scan",
        )
        changed += engine.execute(
            DATASET_SPATIAL.delete().where(
                DATASET_SPATIAL.c.dataset_type_ref == product.id,
            )
            # Where it doesn't exist in the ODC dataset table.
            .where(
                ~DATASET_SPATIAL.c.id.in_(
                    select([DATASET.c.id]).where(
                        DATASET.c.dataset_type_ref == product.id,
                    )
                )
            )
        ).rowcount
        log.info(
            "spatial_deletion_scan.end",
            change_count=changed,
        )
    # We'll update first, then insert new records.
    # -> We do it in this order so that inserted records aren't immediately updated.
    # (Note: why don't we do this in one upsert? Because we get our sqlalchemy expressions
    # through ODC's APIs and can't choose alternative table aliases to make sub-queries.
    # Maybe you can figure out a workaround, though?)
    column_values = {c.name: c for c in _select_dataset_extent_columns(product)}
    only_where = [
        DATASET.c.dataset_type_ref
        == bindparam("product_ref", product.id, type_=SmallInteger),
        DATASET.c.archived.is_(None),
    ]
    if assume_after_date is not None:
        only_where.append(dataset_changed_expression() > assume_after_date)
    else:
        log.warning("spatial_update.recreating_everything")
    # Update any changed datasets
    log.info(
        "spatial_update",
        product_name=product.name,
        after_date=assume_after_date,
    )
    # Correlated update: the "id" column expression is DATASET.c.id, which
    # joins the two tables.
    changed += engine.execute(
        DATASET_SPATIAL.update()
        .values(**column_values)
        .where(DATASET_SPATIAL.c.id == column_values["id"])
        .where(and_(*only_where))
    ).rowcount
    log.info("spatial_update.end", product_name=product.name, change_count=changed)
    # ... and insert new ones.
    log.info(
        "spatial_insert",
        product_name=product.name,
        after_date=assume_after_date,
    )
    # NOTE(review): select() is given dict_values here while the rest of the
    # file uses the list form — presumably fine on this SQLAlchemy version;
    # confirm if upgrading.
    changed += engine.execute(
        postgres.insert(DATASET_SPATIAL)
        .from_select(
            column_values.keys(),
            select(column_values.values())
            .where(and_(*only_where))
            .order_by(column_values["center_time"]),
        )
        .on_conflict_do_nothing(index_elements=["id"])
    ).rowcount
    log.info("spatial_insert.end", product_name=product.name, change_count=changed)
    # If we changed data...
    if changed:
        # And it's a non-spatial product...
        if get_dataset_extent_alchemy_expression(product.metadata_type) is None:
            # And it has WRS path/rows...
            if "sat_path" in product.metadata_type.dataset_fields:
                # We can synthesize the polygons!
                log.info(
                    "spatial_synthesizing",
                )
                shapes = _get_path_row_shapes()
                rows = [
                    row
                    for row in index.datasets.search_returning(
                        ("id", "sat_path", "sat_row"), product=product.name
                    )
                    if row.sat_path.lower is not None
                ]
                if rows:
                    # executemany: one parameter dict per dataset, unioning the
                    # WRS path/row polygons across the dataset's row range.
                    engine.execute(
                        DATASET_SPATIAL.update()
                        .where(DATASET_SPATIAL.c.id == bindparam("dataset_id"))
                        .values(footprint=bindparam("footprint")),
                        [
                            dict(
                                dataset_id=id_,
                                footprint=from_shape(
                                    shapely.ops.unary_union(
                                        [
                                            shapes[(int(sat_path.lower), row)]
                                            for row in range(
                                                int(sat_row.lower),
                                                int(sat_row.upper) + 1,
                                            )
                                        ]
                                    ),
                                    srid=4326,
                                    extended=True,
                                ),
                            )
                            for id_, sat_path, sat_row in rows
                        ],
                    )
                log.info(
                    "spatial_synthesizing.end",
                )
    return changed
def _select_dataset_extent_columns(dt: DatasetType) -> List[Label]:
    """
    Get columns for all fields which go into the spatial table
    for this DatasetType.
    """
    md_type = dt.metadata_type

    # Spatial bounds, when the product's metadata type has lat/lon fields.
    footprint = get_dataset_extent_alchemy_expression(
        md_type, default_crs=_default_crs(dt)
    )

    # Some time-series-derived products have seemingly-rectangular but *huge*
    # footprints (they union many almost-indistinguishable footprints).
    # If a resolution is known, simplify the geometry based on it.
    grid = dt.grid_spec
    if footprint is not None and grid and grid.resolution:
        finest = min(abs(r) for r in grid.resolution)
        footprint = func.ST_SimplifyPreserveTopology(footprint, finest / 4)

    footprint_column = null() if footprint is None else footprint
    return [
        DATASET.c.id,
        DATASET.c.dataset_type_ref,
        center_time_expression(md_type),
        footprint_column.label("footprint"),
        _region_code_field(dt).label("region_code"),
        _size_bytes_field(dt).label("size_bytes"),
        _dataset_creation_expression(md_type).label("creation_time"),
    ]
def center_time_expression(md_type: MetadataType):
    """
    SQL expression for the center of a dataset's time range, labelled
    ``center_time`` (matches the logic in ODC's ``Dataset.center_time``).
    """
    time_range = md_type.dataset_fields["time"].alchemy_expression
    start = func.lower(time_range)
    end = func.upper(time_range)
    return (start + (end - start) / 2).label("center_time")
def dataset_changed_expression(dataset=DATASET):
"""Expression for the latest time a dataset was | |
= []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api2/v1/termBases', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TermBaseDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_term_by_job(self, job_uid, project_uid, **kwargs):  # noqa: E501
    """Create term in job's term bases  # noqa: E501

    Create new term in the write term base assigned to the job.
    Synchronous by default; pass async_req=True for an async request:

    >>> thread = api.create_term_by_job(job_uid, project_uid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str job_uid: (required)
    :param str project_uid: (required)
    :param CreateTermsDto body:
    :return: TermPairDto
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Identical call either way: yields the request thread when async_req
    # is set, otherwise the already-unwrapped response data.
    return self.create_term_by_job_with_http_info(job_uid, project_uid, **kwargs)  # noqa: E501
def create_term_by_job_with_http_info(self, job_uid, project_uid, **kwargs):  # noqa: E501
    """Create term in job's term bases  # noqa: E501

    Create new term in the write term base assigned to the job.
    Synchronous by default; pass async_req=True for an async request:

    >>> thread = api.create_term_by_job_with_http_info(job_uid, project_uid, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str job_uid: (required)
    :param str project_uid: (required)
    :param CreateTermsDto body:
    :return: TermPairDto
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject any keyword argument this endpoint does not understand.
    accepted = ('job_uid', 'project_uid', 'body', 'async_req',
                '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_term_by_job" % key
            )
    # Both path parameters are mandatory and must be non-None.
    if job_uid is None:
        raise ValueError("Missing the required parameter `job_uid` when calling `create_term_by_job`")  # noqa: E501
    if project_uid is None:
        raise ValueError("Missing the required parameter `project_uid` when calling `create_term_by_job`")  # noqa: E501

    path_params = {'jobUid': job_uid, 'projectUid': project_uid}
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/api2/v1/projects/{projectUid}/jobs/{jobUid}/termBases/createByJob', 'POST',
        path_params,
        [],  # query params
        header_params,
        body=kwargs.get('body'),
        post_params=[],
        files={},
        response_type='TermPairDto',  # noqa: E501
        auth_settings=[],  # no authentication setting for this endpoint
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_concept(self, term_base_id, concept_id, **kwargs):  # noqa: E501
    """Delete concept  # noqa: E501

    Synchronous by default; pass async_req=True for an async request:

    >>> thread = api.delete_concept(term_base_id, concept_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int term_base_id: (required)
    :param str concept_id: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Identical call either way: yields the request thread when async_req
    # is set, otherwise the already-unwrapped response data.
    return self.delete_concept_with_http_info(term_base_id, concept_id, **kwargs)  # noqa: E501
def delete_concept_with_http_info(self, term_base_id, concept_id, **kwargs):  # noqa: E501
    """Delete concept  # noqa: E501

    Synchronous by default; pass async_req=True for an async request:

    >>> thread = api.delete_concept_with_http_info(term_base_id, concept_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int term_base_id: (required)
    :param str concept_id: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject any keyword argument this endpoint does not understand.
    accepted = ('term_base_id', 'concept_id', 'async_req',
                '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_concept" % key
            )
    # Both path parameters are mandatory and must be non-None.
    if term_base_id is None:
        raise ValueError("Missing the required parameter `term_base_id` when calling `delete_concept`")  # noqa: E501
    if concept_id is None:
        raise ValueError("Missing the required parameter `concept_id` when calling `delete_concept`")  # noqa: E501

    path_params = {'termBaseId': term_base_id, 'conceptId': concept_id}
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/api2/v1/termBases/{termBaseId}/concepts/{conceptId}', 'DELETE',
        path_params,
        [],  # query params
        header_params,
        body=None,  # DELETE carries no request body here
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=[],  # no authentication setting for this endpoint
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_concepts(self, term_base_id, **kwargs):  # noqa: E501
    """Delete concepts  # noqa: E501

    Synchronous by default; pass async_req=True for an async request:

    >>> thread = api.delete_concepts(term_base_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int term_base_id: (required)
    :param ConceptListReference body:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Identical call either way: yields the request thread when async_req
    # is set, otherwise the already-unwrapped response data.
    return self.delete_concepts_with_http_info(term_base_id, **kwargs)  # noqa: E501
def delete_concepts_with_http_info(self, term_base_id, **kwargs):  # noqa: E501
    """Delete concepts  # noqa: E501

    Synchronous by default; pass async_req=True for an async request:

    >>> thread = api.delete_concepts_with_http_info(term_base_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int term_base_id: (required)
    :param ConceptListReference body:
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Reject any keyword argument this endpoint does not understand.
    accepted = ('term_base_id', 'body', 'async_req',
                '_return_http_data_only', '_preload_content', '_request_timeout')
    for key in kwargs:
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_concepts" % key
            )
    # The path parameter is mandatory and must be non-None.
    if term_base_id is None:
        raise ValueError("Missing the required parameter `term_base_id` when calling `delete_concepts`")  # noqa: E501

    path_params = {'termBaseId': term_base_id}
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    return self.api_client.call_api(
        '/api2/v1/termBases/{termBaseId}/concepts', 'DELETE',
        path_params,
        [],  # query params
        header_params,
        body=kwargs.get('body'),
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=[],  # no authentication setting for this endpoint
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def delete_term(self, term_base_id, term_id, **kwargs):  # noqa: E501
    """Delete term  # noqa: E501

    Synchronous by default; pass async_req=True for an async request:

    >>> thread = api.delete_term(term_base_id, term_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int term_base_id: (required)
    :param str term_id: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Identical call either way: yields the request thread when async_req
    # is set, otherwise the already-unwrapped response data.
    return self.delete_term_with_http_info(term_base_id, term_id, **kwargs)  # noqa: E501
def delete_term_with_http_info(self, term_base_id, term_id, **kwargs): # noqa: E501
"""Delete term # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_term_with_http_info(term_base_id, term_id, async_req=True)
>>> result | |
aVal, newVal, newAVal, True)
return False
def doNop(cpu, instruction):
    """NOP: no operation. Ignores both arguments; never alters the PC."""
    return False
def doOra(cpu, instruction):
    """ORA: bitwise OR of the accumulator with the operand."""
    operand = GetValue(cpu, instruction)
    acc = cpu.get_register('A')
    result = acc | operand
    cpu.set_register('A', result)
    cpu.ctrl_update_flags(instruction.flags, acc, operand, result, False)
    return False
def doPha(cpu, instruction):
    """PHA: push the accumulator onto the stack."""
    cpu.push_byte(cpu.get_register('A'))
    return False
def doPhp(cpu, instruction):
    """PHP: push the status register with the B (break, 0x10) bit forced on."""
    status = cpu.get_register('P')
    cpu.push_byte(status | 0x10)
    return False
def doPla(cpu, instruction):
    """PLA: pull a byte from the stack into the accumulator; updates N/Z."""
    pulled = cpu.pop_byte()
    cpu.set_register('A', pulled)
    cpu.ctrl_update_flags(instruction.flags, pulled, pulled, pulled, False)
    return False
def doPlp(cpu, instruction):
    """PLP: pull status from the stack; clear B (0x10), force unused bit (0x20)."""
    restored = cpu.pop_byte() & 0xEF | 0x20
    cpu.set_register('P', restored)
    return False
def doRolAcc(cpu, instruction):
    """ROL A: rotate the accumulator left through the carry flag."""
    old = cpu.get_register('A')
    carry_in = cpu.get_flag('C')
    carry_out = int((old & 0x80) != 0)  # bit 7 rotates out into C
    rotated = (old << 1) + carry_in
    cpu.set_register('A', rotated)
    cpu.set_flag('C', carry_out)
    cpu.ctrl_update_flags(instruction.flags, old, old, rotated, False)
    return False
def doRol(cpu, instruction):
    """ROL: rotate a memory byte left through the carry flag."""
    addr = GetAddress(cpu, instruction)
    old = cpu.read_memory(addr)
    carry_in = cpu.get_flag('C')
    carry_out = int((old & 0x80) != 0)  # bit 7 rotates out into C
    rotated = (old << 1) + carry_in
    cpu.set_memory(addr, rotated)
    cpu.set_flag('C', carry_out)
    cpu.ctrl_update_flags(instruction.flags, old, old, rotated, False)
    return False
def doRorAcc(cpu, instruction):
    """ROR A: rotate the accumulator right through the carry flag."""
    old = cpu.get_register('A')
    carry_in = cpu.get_flag('C')
    carry_out = old & 0x01  # bit 0 rotates out into C
    rotated = (old >> 1) + (carry_in * 0x80)
    cpu.set_register('A', rotated)
    cpu.set_flag('C', carry_out)
    cpu.ctrl_update_flags(instruction.flags, old, old, rotated, False)
    return False
def doRor(cpu, instruction):
    """ROR: rotate a memory byte right through the carry flag."""
    addr = GetAddress(cpu, instruction)
    old = cpu.read_memory(addr)
    carry_in = cpu.get_flag('C')
    carry_out = old & 0x01  # bit 0 rotates out into C
    rotated = (old >> 1) + (carry_in * 0x80)
    cpu.set_memory(addr, rotated)
    cpu.set_flag('C', carry_out)
    cpu.ctrl_update_flags(instruction.flags, old, old, rotated, False)
    return False
def doRra(cpu, instruction):
    """RRA (undocumented): ROR the memory operand, then add it to A.

    The bit rotated out is used as the carry-in for the addition; the final
    C/V flags are left to ctrl_update_flags (the C flag itself is never set
    directly here — presumably derived from the add; confirm against the
    flags table for 'RRA').
    """
    addr = GetAddress(cpu, instruction)
    acc = cpu.get_register('A')
    old = cpu.read_memory(addr)
    carry_in = cpu.get_flag('C')
    rotated_out = old & 0x01
    rotated = (old >> 1) + (carry_in * 0x80)
    total = acc + rotated + rotated_out
    cpu.set_register('A', total)
    cpu.set_memory(addr, rotated)
    cpu.ctrl_update_flags(instruction.flags, acc, rotated, total, False)
    return False
def doRti(cpu, instruction):
    """RTI: restore the status register, then the PC, from the stack.

    Returns True because the PC has been set explicitly.
    """
    status = cpu.pop_byte()
    cpu.set_register('P', status)
    return_addr = cpu.pop_word()
    cpu.set_pc(return_addr)
    return True
def doRts(cpu, instruction):
    """RTS: pop the return address and resume at address + 1.

    Returns True because the PC has been set explicitly.
    """
    target = cpu.pop_word() + 1
    cpu.set_pc(target)
    return True
def doSbc(cpu, instruction):
    """SBC: subtract operand and borrow (inverted carry) from the accumulator."""
    acc = cpu.get_register('A')
    operand = GetValue(cpu, instruction)
    borrow = 1 - cpu.get_flag('C')
    result = acc - operand - borrow
    cpu.set_register('A', result)
    cpu.ctrl_update_flags(instruction.flags, acc, operand, result, True)
    return False
def doIns(cpu, instruction):
    """INS/ISC (undocumented): increment the memory operand, then SBC it from A."""
    acc = cpu.get_register('A')
    bumped = GetValue(cpu, instruction) + 1
    addr = GetAddress(cpu, instruction)
    borrow = 1 - cpu.get_flag('C')
    result = acc - bumped - borrow
    cpu.set_memory(addr, bumped)
    cpu.set_register('A', result)
    cpu.ctrl_update_flags(instruction.flags, acc, bumped, result, True)
    return False
def doSec(cpu, instruction):
    """SEC: set the carry flag."""
    cpu.set_flag('C', 1)
    return False
def doSed(cpu, instruction):
    """SED: set the decimal-mode flag."""
    cpu.set_flag('D', 1)
    return False
def doSei(cpu, instruction):
    """SEI: set the interrupt-disable flag."""
    cpu.set_flag('I', 1)
    return False
def doSta(cpu, instruction):
    """STA: store the accumulator to memory (no flags affected)."""
    acc = cpu.get_register('A')
    target = GetAddress(cpu, instruction)
    cpu.set_memory(target, acc)
    return False
def doStx(cpu, instruction):
    """STX: store the X register to memory (no flags affected)."""
    x = cpu.get_register('X')
    target = GetAddress(cpu, instruction)
    cpu.set_memory(target, x)
    return False
def doSty(cpu, instruction):
    """STY: store the Y register to memory (no flags affected)."""
    y = cpu.get_register('Y')
    target = GetAddress(cpu, instruction)
    cpu.set_memory(target, y)
    return False
def doTax(cpu, instruction):
    """TAX: copy A into X; updates N/Z from the value."""
    acc = cpu.get_register('A')
    cpu.set_register('X', acc)
    cpu.ctrl_update_flags(instruction.flags, acc, acc, acc, False)
    return False
def doTay(cpu, instruction):
    """TAY: copy A into Y; updates N/Z from the value."""
    acc = cpu.get_register('A')
    cpu.set_register('Y', acc)
    cpu.ctrl_update_flags(instruction.flags, acc, acc, acc, False)
    return False
def doTsx(cpu, instruction):
    """TSX: copy the stack pointer into X; updates N/Z from the value."""
    sp = cpu.get_register('S')
    cpu.set_register('X', sp)
    cpu.ctrl_update_flags(instruction.flags, sp, sp, sp, False)
    return False
def doTxa(cpu, instruction):
    """TXA: copy X into A; updates N/Z from the value."""
    x = cpu.get_register('X')
    cpu.set_register('A', x)
    cpu.ctrl_update_flags(instruction.flags, x, x, x, False)
    return False
def doTxs(cpu, instruction):
    """TXS: copy X into the stack pointer.

    ctrl_update_flags is still called, but the flags table lists no flags for
    TXS, so this is presumably a no-op (real TXS affects no flags — confirm).
    """
    x = cpu.get_register('X')
    cpu.set_register('S', x)
    cpu.ctrl_update_flags(instruction.flags, x, x, x, False)
    return False
def doTya(cpu, instruction):
    """TYA: copy Y into A; updates N/Z from the value."""
    y = cpu.get_register('Y')
    cpu.set_register('A', y)
    cpu.ctrl_update_flags(instruction.flags, y, y, y, False)
    return False
def doDcm(cpu, instruction):
    """DCM/DCP (undocumented): decrement the memory operand, then compare with A.

    C is set directly from the unsigned comparison; N/Z come from
    ctrl_update_flags. NOTE(review): the decrement is not masked to 8 bits
    here — presumably set_memory handles wrap-around; confirm.
    """
    addr = GetAddress(cpu, instruction)
    decremented = GetValue(cpu, instruction) - 1
    acc = cpu.get_register('A')
    diff = acc - decremented
    cpu.set_flag('C', 1 if acc >= decremented else 0)
    cpu.set_memory(addr, decremented)
    cpu.ctrl_update_flags(instruction.flags, acc, decremented, diff, True)
    return False
# http://www.e-tradition.net/bytes/6502/6502_instruction_set.html - better clock info
# http://www.6502.org/tutorials/6502opcodes.html - better descriptions
# http://6502.org/tutorials/65c02opcodes.html#3 - additional instructions

# Map of mnemonic -> the status flags that ctrl_update_flags derives
# automatically after the handler runs.  Flags noted "set C manually" (or
# "manual C") are computed inside the handler itself instead.
# NOTE(review): entries like 'AOS', 'AXS', 'LSE', 'DCM', 'INS', 'RRA', 'LAX',
# 'SAX', 'RLA' appear to be undocumented-opcode mnemonics — confirm against
# the corresponding handlers.
flags = { 'ADC': ['N', 'Z', 'C', 'V'],
          'AND': ['N', 'Z'],
          'ASL': ['N', 'Z'],  # set C manually
          'AOS': ['N', 'Z'],
          'AXS': [],
          'BCC': [],
          'BCS': [],
          'BEQ': [],
          'BIT': [],
          'BMI': [],
          'BNE': [],
          'BPL': [],
          'BRK': [],
          'BVC': [],
          'BVS': [],
          'CLC': [],
          'CLD': [],
          'CLI': [],
          'CLV': [],
          'CMP': ['N', 'Z'],
          'CPX': ['N', 'Z'],
          'CPY': ['N', 'Z'],
          'DCM': ['N', 'Z'],
          'DEC': ['N', 'Z'],
          'DEX': ['N', 'Z'],
          'DEY': ['N', 'Z'],
          'EOR': ['N', 'Z'],
          'INC': ['N', 'Z'],
          'INS': ['N', 'Z', 'V'],
          'INX': ['N', 'Z'],
          'INY': ['N', 'Z'],
          'JMP': [],
          'JSR': [],
          'LDA': ['N', 'Z'],
          'LDX': ['N', 'Z'],
          'LDY': ['N', 'Z'],
          'LAX': ['N', 'Z'],
          'LSR': ['N', 'Z'],  # set C manually
          'LSE': ['N', 'Z'],  # set C manually
          'NOP': [],
          'ORA': ['N', 'Z'],
          'PHA': [],
          'PHP': [],
          'PLA': ['N', 'Z'],
          'PLP': [],
          'RLA': ['N', 'Z'],  # manual C
          'ROL': ['N', 'Z'],  # manual C
          'ROR': ['N', 'Z'],  # manual C
          'RRA': ['N', 'Z', 'C', 'V'],  # manual C
          'RTI': [],
          'RTS': [],
          'SAX': ['N', 'C', 'Z'],
          'SBC': ['N', 'C', 'Z', 'V'],
          'SEC': [],
          'SED': [],
          'SEI': [],
          'STA': [],
          'STX': [],
          'STY': [],
          'TAX': ['N', 'Z'],
          'TAY': ['N', 'Z'],
          'TSX': ['N', 'Z'],
          'TXA': ['N', 'Z'],
          'TXS': [],
          'TYA': ['N', 'Z'],
          }
# opcode : Instruction(mnem, function, operType, size, cycles)
instructions = {0x69: Instruction('ADC', doAdc, 'IMM', 2, 2),
0x65: Instruction('ADC', doAdc, 'ZERO', 2, 3),
0x75: Instruction('ADC', doAdc, 'ZEROX', 2, 4),
0x6D: Instruction('ADC', doAdc, 'ABS', 3, 4),
0x7D: Instruction('ADC', doAdc, 'ABSX', 3, 4),
0x79: Instruction('ADC', doAdc, 'ABSY', 3, 4),
0x61: Instruction('ADC', doAdc, 'INDX', 2, 6),
0x71: Instruction('ADC', doAdc, 'INDY', 2, 5),
0x29: Instruction('AND', doAnd, 'IMM', 2, 2),
0x25: Instruction('AND', doAnd, 'ZERO', 2, 3),
0x35: Instruction('AND', doAnd, 'ZEROX', 2, 4),
0x2d: Instruction('AND', doAnd, 'ABS', 3, 4),
0x3d: Instruction('AND', doAnd, 'ABSX', 3, 4),
0x39: Instruction('AND', doAnd, 'ABSY', 3, 4),
0x21: Instruction('AND', doAnd, 'INDX', 2, 6),
0x31: Instruction('AND', doAnd, 'INDY', 2, 5),
0x0A: Instruction('ASL', doAslAccu, '', 1, 2),
0x06: Instruction('ASL', doAsl, 'ZERO', 2, 5),
0x16: Instruction('ASL', doAsl, 'ZEROX', 2, 6),
0x0E: Instruction('ASL', doAsl, 'ABS', 3, 6),
0x1E: Instruction('ASL', doAsl, 'ABSX', 3, 7),
0x90: Instruction('BCC', doBcc, 'PCREL', 2, 2),
0xB0: Instruction('BCS', doBcs, 'PCREL', 2, 2),
0xF0: Instruction('BEQ', doBeq, 'PCREL', 2, 2),
0x24: Instruction('BIT', doBit, 'ZERO', 2, 3),
0x2C: Instruction('BIT', doBit, 'ABS', 3, 4),
0x30: Instruction('BMI', doBmi, 'PCREL', 2, 2),
0xD0: Instruction('BNE', doBne, 'PCREL', 2, 2),
0x10: Instruction('BPL', doBpl, 'PCREL', 2, 2),
0x00: Instruction('BRK', doBrk, '', 1, 7),
0x50: Instruction('BVC', doBvc, 'PCREL', 2, 2),
0x70: Instruction('BVS', doBvs, 'PCREL', 2, 2),
0x18: Instruction('CLC', doClc, '', 1, 2),
0xD8: Instruction('CLD', doCld, '', 1, 2),
0x58: Instruction('CLI', doCli, '', 1, 2),
0xB8: Instruction('CLV', doClv, '', 1, 2),
0xC9: Instruction('CMP', doCmp, 'IMM', 2, 2),
0xC5: Instruction('CMP', doCmp, 'ZERO', 2, 3),
0xD5: Instruction('CMP', doCmp, 'ZEROX', 2, 4),
0xCD: Instruction('CMP', doCmp, 'ABS', 3, 4),
0xDD: Instruction('CMP', doCmp, 'ABSX', 3, 4),
0xD9: Instruction('CMP', doCmp, 'ABSY', 3, 4),
0xC1: Instruction('CMP', doCmp, 'INDX', 2, 6),
0xD1: Instruction('CMP', doCmp, 'INDY', 2, 5),
0xE0: Instruction('CPX', doCpx, 'IMM', 2, 2),
0xE4: Instruction('CPX', doCpx, 'ZERO', 2, 3),
0xEC: Instruction('CPX', doCpx, 'ABS', 3, 4),
0xC0: Instruction('CPY', doCpy, 'IMM', 2, 2),
0xC4: Instruction('CPY', doCpy, 'ZERO', 2, 3),
0xCC: Instruction('CPY', doCpy, 'ABS', 3, 4),
0xC6: Instruction('DEC', doDec, 'ZERO', 2, 5),
0xD6: Instruction('DEC', doDec, 'ZEROX', 2, 6),
0xCE: Instruction('DEC', doDec, 'ABS', 3, 3),
0xDE: Instruction('DEC', doDec, 'ABSX', 3, 7),
0xCA: Instruction('DEX', doDex, '', 1, 2),
0x88: Instruction('DEY', doDey, '', 1, 2),
0x49: Instruction('EOR', doEor, 'IMM', 2, 2),
0x45: Instruction('EOR', doEor, 'ZERO', 2, 3),
0x55: Instruction('EOR', doEor, 'ZEROX', 2, 4),
0x4D: Instruction('EOR', doEor, 'ABS', 3, 4),
0x5D: Instruction('EOR', doEor, 'ABSX', 3, 4),
0x59: Instruction('EOR', doEor, 'ABSY', 3, 4),
0x41: Instruction('EOR', doEor, 'INDX', 2, 6),
0x51: Instruction('EOR', doEor, 'INDY', 2, 5),
0xE6: Instruction('INC', doInc, 'ZERO', 2, 5),
0xF6: Instruction('INC', doInc, 'ZEROX', 2, 6),
0xEE: Instruction('INC', doInc, 'ABS', 3, 6),
0xFE: Instruction('INC', doInc, 'ABSX', 3, 7),
0xE8: Instruction('INX', doInx, '', 1, 2),
0xC8: Instruction('INY', doIny, '', 1, 2),
0x4C: Instruction('JMP', doJmp, 'ABS', 3, 3),
0x6C: Instruction('JMP', doJmp, 'IND', 3, 5),
0x20: Instruction('JSR', doJsr, 'ABS', 3, 6),
0xA9: Instruction('LDA', doLda, 'IMM', 2, 2),
0xA5: Instruction('LDA', doLda, 'ZERO', 2, 3),
0xB5: Instruction('LDA', doLda, 'ZEROX', 2, 4),
0xAD: Instruction('LDA', doLda, 'ABS', 3, 4),
0xBD: Instruction('LDA', doLda, 'ABSX', 3, 4),
0xB9: Instruction('LDA', doLda, 'ABSY', 3, 4),
0xA1: Instruction('LDA', doLda, 'INDX', 2, 6),
0xB1: Instruction('LDA', doLda, 'INDY', 2, 5),
0xA2: Instruction('LDX', doLdx, 'IMM', 2, 2),
0xA6: Instruction('LDX', doLdx, 'ZERO', 2, 3),
0xB6: Instruction('LDX', doLdx, 'ZEROY', 2, 4),
0xAE: Instruction('LDX', doLdx, 'ABS', 3, 4),
0xBE: Instruction('LDX', doLdx, 'ABSY', 3, 4),
0xA0: Instruction('LDY', doLdy, 'IMM', 2, 2),
| |
Address = 0x5680, Length = 128
0x5680,0x80,
0x40,0x5,0xbe,0x6b,0xd5,0x62,0x21,0x4d,0x6a,0x4,0x2d,0x13,0x59,0xf3,0x32,0x6f,
0x2d,0x6a,0xc7,0x56,0xed,0x99,0x64,0xdb,0x4a,0xdb,0x64,0x21,0x2f,0x9b,0x30,0xd0,
0x5b,0xb8,0x2,0x5,0x61,0xb0,0x1e,0x1f,0xe0,0x68,0x3d,0xca,0xa4,0x2e,0x81,0x78,
0x67,0x23,0x51,0xbc,0x3c,0x88,0x51,0xd2,0xa0,0xa3,0x8e,0xfd,0x2c,0x94,0x24,0xad,
0x53,0xb4,0x62,0x44,0x46,0x90,0x61,0xf2,0xaf,0xa1,0x45,0xc9,0x21,0x3b,0xf2,0xab,
0x75,0xe4,0x64,0xb8,0x92,0xa2,0x97,0xdd,0x63,0x93,0x15,0x51,0x0,0xd0,0xf,0x33,
0x4c,0xd8,0x28,0x97,0x3b,0x6a,0x7f,0xa0,0x6f,0x56,0xea,0x8b,0x5f,0xe9,0x1,0x9c,
0x36,0x13,0xc4,0x50,0xbc,0xd6,0x41,0xd2,0x96,0x97,0xe0,0x3c,0x56,0xe2,0x94,0x5e],[
# Record 142: Address = 0x5700, Length = 128
0x5700,0x80,
0xee,0xd,0xb7,0xd5,0xc1,0x2,0x46,0x95,0x6b,0xe6,0xd1,0x8e,0x43,0xb3,0x5a,0xb,
0x33,0x28,0x12,0x81,0x63,0x5a,0x97,0x5d,0x85,0x47,0x76,0x79,0x8d,0x2e,0xd7,0xde,
0xa,0x83,0xd0,0xfd,0x5c,0x1f,0xb4,0x89,0xbe,0xa3,0x5d,0xc3,0xac,0xa7,0xc2,0x66,
0x53,0xae,0x15,0x83,0x5b,0xd4,0xe3,0xa5,0xa8,0x37,0xd5,0x38,0xad,0x49,0x24,0x7c,
0x4a,0x6c,0x28,0xdf,0x42,0xe0,0x99,0x7c,0x9a,0x98,0x66,0x60,0xf4,0x34,0xc4,0x91,
0xfa,0x84,0x89,0xbc,0xd,0x9,0xae,0x54,0x7d,0xa9,0x86,0xb4,0xae,0x1b,0xcc,0x62,
0xac,0xdd,0xe9,0x4d,0x42,0x3e,0x55,0x25,0x23,0x5f,0xf0,0x21,0xa5,0xdd,0x80,0xb6,
0x62,0x47,0x66,0xae,0x82,0x84,0x60,0x42,0x43,0x8c,0x80,0xb,0xe4,0xc,0xac,0xb0],[
# Record 143: Address = 0x5780, Length = 128
0x5780,0x80,
0xb5,0x82,0x84,0xf7,0x5c,0xbb,0xbe,0xeb,0x7,0xd1,0x8f,0xb1,0x7b,0x6,0x5a,0xa1,
0x62,0x4c,0x40,0x94,0xfe,0xec,0xe0,0xee,0x5c,0xfe,0xcb,0x46,0xab,0x28,0xf6,0x96,
0xc,0x8a,0x1a,0x84,0x31,0x56,0xa1,0x28,0x64,0xa1,0x82,0xe2,0xd8,0x73,0xe2,0xc0,
0x7e,0x27,0x67,0x56,0xc1,0xa9,0x7c,0x2,0x48,0xd3,0xb7,0x5c,0xcc,0x6a,0x78,0x9d,
0xa9,0xd8,0xb6,0x8e,0xe1,0x9a,0xf6,0xde,0xf5,0x3e,0xad,0x29,0x8a,0x34,0x8,0xbf,
0x82,0x8,0x8e,0xfb,0x5c,0x4b,0x55,0x12,0x8f,0xbc,0xd5,0x8d,0x21,0xf1,0x9d,0x22,
0x2c,0x95,0xec,0x98,0x43,0x5,0x67,0xbf,0x68,0xc2,0x51,0xb7,0x74,0xed,0xff,0xa8,
0xf4,0xfa,0x87,0x1a,0x66,0x11,0xd4,0x5c,0xd9,0x63,0x7e,0xa5,0x99,0x13,0xcd,0x29],[
# Record 144: Address = 0x5800, Length = 128
0x5800,0x80,
0xad,0xdb,0x70,0x38,0xeb,0x86,0x3f,0x8f,0xd,0x6a,0xdc,0x59,0x31,0x99,0xec,0x46,
0x39,0xe5,0x89,0xf0,0x41,0x72,0xa0,0x88,0xd,0x3a,0xe1,0x3e,0xc7,0x33,0x8d,0x6f,
0x8d,0x32,0xa8,0x94,0x4,0x75,0x37,0x3c,0xc4,0xf7,0x89,0x9b,0xdb,0xb4,0x88,0xd8,
0xbf,0x40,0x23,0xcd,0xbe,0x4c,0x73,0x23,0xdf,0xff,0xd9,0x52,0xa3,0x9a,0x56,0x6e,
0x85,0xd7,0x80,0x53,0x41,0x9f,0x5e,0xc9,0x71,0xac,0xc9,0x7e,0x7e,0xb2,0x93,0x64,
0xfb,0xff,0x49,0x9f,0x4c,0x97,0xad,0x63,0x6c,0xc8,0x5,0x71,0x82,0x19,0xbf,0xd9,
0x61,0x4c,0x94,0x2c,0x1a,0xd6,0x89,0x5c,0x18,0x75,0x6d,0xe3,0x93,0xc6,0x4f,0xe1,
0x8e,0xc,0xbf,0xe4,0xb1,0x5c,0x25,0xfe,0x70,0xea,0x51,0xef,0xdf,0x9a,0x4a,0x3c],[
# Record 145: Address = 0x5880, Length = 128
0x5880,0x80,
0x23,0xed,0xc9,0xa6,0xc8,0x1d,0x18,0xa7,0xd5,0xcc,0xc9,0x75,0x81,0xb,0x82,0x5e,
0xee,0x23,0x1b,0x1e,0x4b,0xcf,0xf8,0xc9,0xf8,0xb9,0x54,0x38,0x66,0x30,0xa,0x76,
0x33,0x75,0xc0,0x48,0x45,0x18,0x54,0x13,0x0,0x41,0xaa,0xc7,0xb,0x2d,0x6b,0x8e,
0xd9,0xf1,0xca,0x73,0xb1,0x6d,0x95,0x5c,0x65,0x42,0x9d,0x42,0xb1,0x9b,0xa0,0x19,
0x5f,0xb9,0x31,0x6,0xf7,0x44,0x4b,0x88,0x53,0x7f,0xaf,0xd8,0xff,0xa5,0x8d,0x7c,
0x28,0x28,0x79,0xdd,0x80,0xd,0xc0,0x11,0xd1,0x82,0xc0,0xf8,0x3a,0x9c,0xd9,0xd0,
0x6c,0x39,0x78,0xbe,0x6a,0xca,0x1d,0xaf,0xd0,0xe4,0x97,0xb,0x2e,0xc2,0x1a,0xe7,
0x9f,0xe1,0xc2,0x89,0x38,0x40,0x4c,0xdd,0xe,0xfb,0x43,0x3,0x28,0xc0,0x45,0xb1],[
# Record 146: Address = 0x5900, Length = 128
0x5900,0x80,
0xa5,0xe,0xfe,0x7b,0xbf,0xba,0x6f,0x32,0xa5,0xec,0xc7,0xcd,0x47,0xe5,0xf8,0xb0,
0xd8,0xc9,0xe,0xd2,0xdc,0x57,0xa,0x48,0xec,0xe5,0x2a,0x7,0x2,0x38,0x7c,0x40,
0x3d,0xd1,0xc7,0x9a,0xe1,0x87,0x8f,0xa,0xba,0xe3,0x36,0x8,0x43,0x80,0x37,0x75,
0xae,0xb4,0x87,0x5,0xe0,0x95,0x5c,0x27,0x8,0x5,0x54,0xfd,0x11,0x2,0xed,0x6e,
0x3d,0xb3,0x3b,0x58,0x6c,0xd2,0xfb,0xf0,0x8,0xca,0x37,0x5b,0xaa,0x71,0xa2,0x5c,
0x4b,0x13,0x68,0x81,0x8f,0x41,0xd6,0x36,0xba,0x72,0x29,0xa5,0xf6,0xab,0xc3,0x6c,
0x19,0x43,0xd7,0x58,0x9a,0x75,0x8,0x5b,0xb8,0xab,0x3c,0xa5,0xfa,0x43,0xef,0xd,
0xbe,0x17,0xc8,0xb4,0x84,0x2a,0xc5,0x80,0x70,0x40,0x95,0x25,0xb5,0x9b,0xf2,0xdd],[
# Record 147: Address = 0x5980, Length = 128
0x5980,0x80,
0xad,0xf9,0xa,0x8a,0xf,0xb8,0xd7,0x4e,0xa7,0x8,0xe1,0xd9,0x56,0x92,0x81,0xc3,
0xcd,0x8d,0x5c,0x72,0x48,0x32,0xb9,0x57,0xcc,0x24,0x38,0xc0,0xce,0x6c,0x2f,0xd9,
0xe1,0x57,0xdb,0x89,0xba,0xb8,0x10,0x50,0x86,0xdf,0xe1,0x1e,0x34,0xa2,0x9d,0xd9,
0x21,0xb,0x42,0x2d,0x29,0x76,0xce,0xe9,0x7e,0x9a,0x35,0xed,0x10,0x97,0xca,0x64,
0x39,0x70,0xb,0x5,0x4b,0x89,0x46,0x19,0xca,0xd5,0x81,0xed,0x8b,0x19,0xd0,0x42,
0xab,0x23,0x4e,0x32,0xa7,0xc2,0xf8,0xf0,0x89,0x17,0x1e,0xf5,0xe8,0xbd,0x20,0xce,
0xc9,0x4b,0xca,0x52,0x8e,0xc0,0x7e,0x20,0xf8,0x91,0xc7,0xe5,0xd0,0x70,0x7c,0x5c,
0xed,0x1f,0xb,0xfa,0x2a,0x58,0xd8,0x80,0x33,0xda,0xe7,0x11,0xe7,0x37,0xd1,0xc],[
# Record 148: Address = 0x5a00, Length = 128
0x5a00,0x80,
0xe5,0x7f,0xbd,0x1f,0xac,0x25,0xc3,0x2c,0x52,0xc5,0xf5,0x5e,0xa2,0xcd,0xc9,0xa4,
0x84,0x7a,0x19,0x33,0xf0,0x34,0xfb,0x1a,0x92,0x18,0x28,0x5e,0x26,0x56,0x5a,0xee,
0x19,0x1b,0x2f,0x72,0xd,0x12,0x6e,0xce,0x1,0x73,0x2,0xda,0xc4,0x49,0x84,0x25,
0xdb,0x3,0x78,0x4b,0x26,0x5d,0x92,0xa4,0x8f,0x86,0xc4,0x13,0xdc,0xf0,0x5f,0xd4,
0xfa,0xb3,0x27,0x1a,0xd6,0xde,0x2a,0x6a,0xec,0x1a,0xfe,0x1b,0x84,0xfc,0x81,0x6d,
0x97,0x73,0x3c,0x72,0x34,0x6b,0x88,0x2f,0x5,0x9c,0x71,0xa4,0x1e,0xf7,0xb6,0xca,
0xd8,0x2b,0xbd,0x58,0x9a,0xcb,0xb3,0x1c,0x90,0xd0,0xf2,0xde,0x97,0x82,0x5b,0xe1,
0x1d,0x4b,0x28,0x8b,0x8b,0x33,0xc7,0xeb,0x66,0x6a,0xa3,0x43,0xa4,0x4f,0xc6,0xc8],[
# Record 149: Address = 0x5a80, Length = 128
0x5a80,0x80,
0xe9,0x60,0x6f,0x36,0xb3,0xdc,0xeb,0x47,0x2a,0x9a,0xa7,0xfa,0x2,0xb,0x1a,0x79,
0xab,0x7,0x18,0x82,0xde,0xe6,0xf0,0xc2,0x26,0x70,0x1a,0x93,0x51,0xf0,0xc7,0xd4,
0x9e,0xd5,0xac,0x8b,0x30,0xde,0xbe,0xb9,0xfb,0xdd,0x92,0x4d,0x8f,0x40,0x4a,0xf1,
0x9c,0x5e,0x27,0xc5,0x98,0xfd,0xe8,0x1e,0x8e,0x17,0x67,0xe7,0x4b,0xfc,0x14,0x54,
0xd1,0xf9,0xf9,0xaa,0xfe,0xb1,0xc,0xe7,0x56,0x8e,0xb2,0x8c,0x16,0x2,0xcf,0xb2,
0xbf,0x8c,0xe2,0x97,0x2a,0xdd,0x22,0xe3,0x1,0x8e,0x5b,0x2,0x79,0xf8,0x35,0xec,
0x63,0x18,0xce,0x68,0x9a,0xb,0x3a,0xc6,0xbc,0x53,0x6c,0xfe,0x5,0x1d,0x81,0x2d,
0x80,0xf6,0x44,0x21,0x0,0x6,0x56,0x3a,0x3e,0xae,0x2e,0x94,0xc7,0x53,0xf1,0xab],[
# Record 150: Address = 0x5b00, Length = 128
0x5b00,0x80,
0xd3,0x11,0xab,0x3e,0x9d,0x70,0xf1,0x2c,0x76,0xb6,0x3c,0x60,0x8b,0xbc,0xfa,0x9c,
0xe9,0x8b,0x26,0xf3,0xfc,0x51,0x78,0xc3,0x4,0x84,0x35,0x20,0xb9,0x6d,0x58,0xe2,
0x6a,0xd5,0x0,0xe7,0x1f,0x1c,0xd0,0x28,0x49,0x68,0x52,0x46,0xcc,0xe3,0x17,0xe,
0x3f,0xef,0x72,0xc4,0x70,0x54,0x69,0xdb,0xfe,0xfb,0xf4,0x54,0xf,0xa9,0xcc,0xf2,
0xb5,0x6e,0x36,0xb,0xe2,0x62,0x4d,0x5f,0x52,0xb2,0x36,0x41,0x37,0x99,0x63,0xbd,
0xcb,0x1d,0x41,0xe3,0xc8,0x8b,0x1a,0xf5,0x72,0x57,0x3b,0xca,0xb6,0xab,0xfc,0xee,
0x6d,0x55,0xd1,0x57,0xb6,0xbe,0xf8,0xb7,0xf8,0xe1,0x27,0x90,0x37,0x57,0x20,0x45,
0x69,0x82,0xe8,0x33,0xe6,0xea,0xd9,0x7b,0x20,0x17,0xd3,0x7c,0x8e,0xe5,0x19,0x15],[
# Record 151: Address = 0x5b80, Length = 128
0x5b80,0x80,
0x90,0x61,0x79,0xee,0x23,0x14,0x67,0x53,0x8,0x4b,0x5f,0x10,0x20,0xf7,0xb8,0xee,
0x41,0xe5,0x20,0x7e,0xaa,0xc6,0xed,0xfe,0xfe,0xba,0xf2,0x6c,0x23,0xeb,0x25,0x22,
0xab,0xad,0x79,0x9e,0x20,0xe0,0x36,0xfa,0xcb,0xa,0x15,0xd6,0x19,0x8a,0xc7,0x5a,
0x94,0x3d,0xbd,0x18,0x71,0xf7,0x68,0xb0,0x8f,0x4a,0xfa,0x40,0xdc,0x8f,0x55,0x69,
0xc8,0xc4,0x9f,0xba,0x24,0xc2,0xaf,0xcb,0x3,0x32,0x4d,0xb3,0x66,0x21,0xb2,0x54,
0x3,0x4b,0x17,0x18,0x61,0x88,0x5b,0x8f,0x8,0x2d,0x52,0xd7,0x20,0x60,0xa6,0x2b,
0xe0,0x80,0x18,0x64,0x82,0x99,0xef,0x18,0xf2,0xb0,0x37,0xd7,0x41,0xad,0x88,0x0,
0x43,0x87,0xac,0x3f,0xab,0x75,0x91,0x66,0xd9,0x1,0xf4,0x68,0xe5,0x1,0x62,0xf8],[
# Record 152: Address = 0x5c00, Length = 128
0x5c00,0x80,
0xe6,0xfe,0x23,0x48,0xde,0x14,0x31,0x85,0xf1,0x17,0x5a,0x96,0x9f,0x21,0x5e,0x44,
0x57,0xd1,0xc8,0x47,0x49,0x8d,0x18,0x69,0x79,0x2b,0xb7,0x79,0xeb,0x8,0x9f,0xe7,
0x79,0x5b,0x11,0x36,0x61,0xda,0x1,0xc4,0x20,0x20,0xed,0x96,0x6d,0xd6,0x41,0xb9,
0x27,0x5a,0x6b,0xd6,0x98,0x2c,0xea,0xd6,0xbe,0x47,0x1d,0x53,0x4e,0x2,0xfd,0xd5,
0x5c,0x7b,0xb2,0xe2,0x5b,0x66,0xad,0xbb,0x25,0xdf,0x5,0x5e,0x9e,0x49,0xc4,0x85,
0x83,0x9c,0xd6,0x75,0xc3,0x1c,0x4d,0x78,0x59,0x5d,0x1c,0xbc,0xed,0xc7,0xc7,0xb8,
0x6b,0xf5,0xff,0x29,0xe3,0x37,0x5e,0xd2,0x8a,0xdd,0xc9,0x77,0xc,0x4b,0xe3,0xc8,
0xbd,0x1b,0xfa,0xe,0xd8,0xb2,0xa9,0x57,0xa3,0x38,0x98,0xed,0x6f,0xab,0xcf,0x4b],[
# Record 153: Address = 0x5c80, Length = 128
0x5c80,0x80,
0x78,0x34,0x32,0xcd,0xd3,0x76,0xd6,0xe8,0x38,0x5,0xa6,0x4c,0x2,0x79,0x83,0x12,
0x8a,0x9a,0xc1,0x39,0xe0,0xf9,0xe6,0x12,0xe0,0xae,0x15,0xe8,0x28,0x8b,0x9b,0x8a,
0x3f,0x5,0x5,0xd9,0xb1,0xa9,0xa,0x91,0x5d,0xec,0xc2,0x3d,0x4a,0x50,0xc4,0x8e,
0xd1,0xe9,0xef,0xb4,0xb4,0x82,0x4a,0x72,0xa5,0x42,0xd,0x88,0x92,0xa2,0x0,0xe,
0x6d,0x73,0xeb,0x6d,0x6,0x66,0x79,0xd0,0x76,0xb8,0xff,0x2f,0xfe,0xb3,0x6c,0xe9,
0x4a,0xd6,0xe3,0x13,0x23,0x5a,0x41,0x1e,0x91,0x25,0xde,0xeb,0x3,0x43,0xba,0x8,
0x9d,0x9,0xb0,0xd7,0xef,0x25,0xd7,0xc6,0x4d,0x1b,0x95,0x63,0xb2,0x47,0x5a,0x29,
0x80,0x4a,0xa8,0xe5,0xe8,0x75,0xd0,0x81,0x47,0x8,0x5b,0xbd,0x30,0xb7,0xd6,0x87],[
# Record 154: Address = 0x5d00, Length = 128
0x5d00,0x80,
0x63,0x12,0x7b,0xfe,0xd3,0x9c,0x58,0xc7,0x76,0xa9,0x41,0x5f,0xc8,0x2a,0x15,0xe3,
0xea,0xab,0x35,0x4,0x3a,0xf8,0xce,0xcb,0x64,0xd3,0x56,0x5a,0x8b,0x52,0x45,0xab,
0x49,0x50,0x79,0x57,0x30,0x56,0x28,0xdf,0x3c,0xdd,0x6f,0x59,0x2d,0x92,0x96,0x28,
0xa4,0x2f,0x74,0x9c,0xaa,0xf3,0xfd,0xdc,0x66,0xd7,0x2e,0x13,0x86,0x6e,0xdf,0xd9,
0xa5,0x7c,0x7c,0x8,0x57,0x13,0xad,0x57,0xd8,0x87,0xd,0x23,0x9d,0x53,0x1a,0x31,
0x9f,0x91,0x6d,0xd6,0x1d,0x89,0x2a,0x9a,0xd8,0x8e,0x77,0xa7,0x20,0x39,0x29,0xe6,
0x40,0xd9,0x31,0xce,0x23,0x28,0xd3,0xd8,0x9d,0x8,0x44,0x1a,0x5f,0xc6,0xf,0x4,
0x94,0xad,0x90,0x6d,0x46,0x1a,0x9b,0x47,0x6b,0x4f,0x21,0x79,0x27,0x16,0xb3,0x5f],[
# Record 155: Address = 0x5d80, Length = 128
0x5d80,0x80,
0x7c,0x5d,0xe8,0x9e,0xa3,0x90,0xf7,0x54,0x55,0xdb,0x6,0x86,0x73,0x22,0x71,0x69,
0x85,0x88,0x2a,0xa8,0x53,0x5a,0xf8,0x43,0x88,0xda,0xbc,0xdd,0x12,0x57,0xb,0xf8,
0x75,0x43,0x95,0x2c,0xcb,0x6a,0x55,0xf6,0xb4,0x92,0x7e,0x49,0xd0,0x6a,0xb2,0x3c,
0x21,0x94,0xf0,0xd,0x96,0x8f,0x20,0x89,0x9,0xe6,0xb0,0x1c,0x6b,0x9b,0xe9,0xe8,
0xf0,0x7b,0x81,0x39,0xe8,0xa4,0xce,0xd5,0x2d,0x49,0x68,0x12,0x53,0x39,0xc9,0x76,
0x19,0xeb,0x72,0x11,0x5d,0x9a,0x76,0xd,0xc4,0x9e,0x80,0x2d,0x39,0xc2,0x3a,0x52,
0xed,0x3c,0x4f,0xe0,0xc5,0x33,0x1b,0xcb,0xe1,0x76,0x22,0xf9,0x44,0x50,0xf2,0xaa,
0xc7,0xdd,0x8d,0xba,0x3c,0x8,0xe2,0xe4,0xa6,0x6d,0x2e,0xac,0x81,0xd6,0x85,0xc8],[
# Record 156: Address = 0x5e00, Length = 128
0x5e00,0x80,
0x62,0x2e,0x49,0xc,0x69,0xd,0x55,0x6e,0xab,0xb0,0x7e,0xe5,0x7a,0x40,0x21,0x18,
0x4,0x3c,0xf4,0x97,0x97,0x65,0xee,0x21,0x1e,0x40,0xaa,0x16,0xd,0x85,0x84,0xa7,
0x7f,0x50,0x36,0xe4,0xc7,0x3c,0xde,0x88,0xac,0xdd,0x83,0x50,0x8c,0xbd,0x1e,0x64,
0x9b,0x19,0xa,0x1d,0x98,0xe6,0x9a,0xfc,0x1b,0x30,0x74,0xbd,0x7b,0x6d,0xf1,0x11,
0x87,0xed,0x5a,0xab,0x9e,0x4e,0x82,0x75,0xc,0x4c,0xee,0xc9,0x8,0xdf,0xdd,0x7f,
0x4,0xc7,0xe3,0xed,0x2c,0x3,0x6a,0xb4,0xd6,0xe4,0x18,0x8f,0x79,0x40,0xd7,0x8a,
0x8c,0x3,0xda,0x2d,0x9,0x5d,0xe0,0xfb,0xe2,0x71,0xb0,0x20,0x1c,0x2d,0x6c,0xdc,
0xe4,0x9,0xc1,0xee,0xb5,0x53,0x7f,0xeb,0x2d,0x81,0x1,0xa1,0x5d,0x79,0xa2,0x66],[
# Record 157: Address = 0x5e80, Length = 128
0x5e80,0x80,
0x52,0xf5,0x59,0x41,0x84,0x50,0x18,0x22,0x29,0x7c,0x5e,0xf6,0x68,0x84,0xfb,0x20,
0x2c,0x96,0x9,0x9e,0xdf,0x5c,0xee,0xd4,0x7c,0x2e,0xa1,0xf3,0xb4,0x56,0x4a,0xd9,
0x7b,0x60,0xd3,0x8c,0x9b,0xeb,0x68,0x9d,0x54,0xe4,0x82,0xcf,0xba,0x96,0xaf,0x92,
0xf2,0x3e,0x32,0xd5,0xc7,0x69,0x1,0xf4,0x80,0x69,0x31,0xd3,0xed,0xfc,0x5e,0x3d,
0xe,0xed,0x43,0xf6,0xa,0xb2,0xd2,0x2b,0xc7,0x36,0xc,0xca,0x2a,0xec,0x6,0x0,
0x0,0x4,0xd8,0x97,0x1f,0x28,0xf9,0xdd,0x37,0x42,0xdb,0xb4,0x61,0x13,0xa2,0xb4,
0xa0,0xdd,0xc8,0xe2,0x6b,0xab,0x11,0x4d,0x5e,0xd9,0xb6,0x1a,0x1a,0x23,0x71,0x86,
0x2f,0x4e,0x23,0x7a,0x5a,0x46,0x8c,0x9d,0xe1,0x8f,0x22,0x91,0x5,0xea,0x84,0x8d],[
# Record 158: Address = 0x5f00, Length = 128
0x5f00,0x80,
0x6f,0xe5,0x1f,0x9c,0x48,0x6a,0x9f,0x4f,0x7,0x92,0xdd,0x3,0x1,0xc8,0x3,0x3b,
0x39,0x96,0x56,0x96,0xe,0x86,0xa,0xd3,0xd2,0x7c,0x50,0x41,0xd5,0x6f,0xff,0x80,
0xd5,0xe8,0xbb,0x67,0x84,0xf4,0xb5,0x5d,0xe3,0xf0,0xec,0xef,0x65,0xf2,0xdd,0x58,
0x1d,0x14,0x74,0x38,0xcf,0x64,0x82,0x21,0x4,0x8a,0xd1,0x21,0x4d,0xde,0xb0,0x88,
0x82,0xf,0x10,0x36,0x22,0x4e,0xcf,0xef,0xdd,0x8d,0x74,0xc0,0x20,0x10,0x54,0x8f,
0x92,0xf9,0xcb,0xcc,0xb6,0x2a,0x85,0x13,0xb6,0x3c,0xc2,0x72,0xda,0x4c,0xfd,0xc3,
0xe6,0x12,0x21,0x92,0x91,0xdf,0x3,0xf7,0xef,0x88,0x5f,0xb5,0x1b,0xbf,0xbe,0x28,
0xef,0x11,0xa1,0xb6,0xef,0x9b,0xb9,0x1b,0x18,0x87,0x4a,0xb4,0x64,0x90,0x9d,0xdd],[
# Record 159: Address = 0x5f80, Length = 128
0x5f80,0x80,
0x8a,0xe9,0x36,0x6e,0x7c,0x48,0x27,0x4d,0xda,0xbd,0x5f,0xd,0x35,0xfb,0x6,0x23,
0x3e,0x7c,0x2b,0x35,0x2,0xca,0x6e,0x46,0xad,0x5b,0x8,0xb6,0x1f,0x7,0x50,0x99,
0x50,0xbc,0xbb,0x58,0xb9,0x90,0x1e,0x14,0xb8,0xb8,0x75,0x36,0x4e,0xd1,0x5,0x1f,
0x0,0x7,0xdd,0x8d,0x8b,0xff,0x81,0xc7,0x74,0x76,0x5,0x8e,0xf5,0xd8,0xf0,0x73,
0x21,0xdb,0xec,0x92,0x74,0xb3,0x4a,0xe7,0x3d,0xc8,0xbd,0x1,0x54,0x7c,0xe6,0xc9,
0xcc,0x76,0x5,0xa4,0xea,0xf5,0x9e,0x13,0x47,0x33,0x32,0xe7,0x66,0x3b,0x1a,0xe1,
0xe0,0x82,0x6f,0x1e,0xf4,0x1e,0xfc,0x30,0x8,0xc8,0x4a,0x61,0x69,0xa,0xb6,0x9b,
0x59,0x87,0xf,0xbf,0x9d,0xc,0x15,0x4f,0x58,0xb4,0x17,0x51,0xdb,0x4,0x53,0xb9],[
# Record 160: Address = 0x6000, Length = 128
0x6000,0x80,
0xeb,0xd5,0x3b,0x8a,0x64,0xc9,0xf6,0x90,0x66,0x18,0x66,0x36,0xd3,0x21,0x9,0xdc,
0x30,0x4d,0x8b,0x3,0x28,0xf5,0x99,0x2b,0xcf,0x5a,0xfd,0x50,0x1a,0xee,0x36,0xab,
0xf6,0xa2,0x28,0x72,0x88,0xa9,0xb5,0xb4,0xf6,0xbc,0xeb,0x61,0x4a,0xef,0x4b,0x29,
0x7f,0x7c,0x1b,0x5e,0xc7,0x76,0xf1,0xc7,0x27,0x2a,0x3a,0x38,0x15,0x74,0xa4,0x8a,
0x95,0x67,0x5,0xb2,0x9f,0x5d,0x69,0xe1,0x0,0xc7,0x50,0x4b,0x5d,0x1f,0x66,0x3e,
0xc1,0x38,0x8f,0x4c,0x99,0x39,0x83,0x57,0x67,0xfd,0xd0,0xc3,0x67,0x41,0x44,0xba,
0x19,0xb0,0xa5,0x79,0x7e,0x96,0xa7,0xc7,0xeb,0x41,0x4d,0x98,0xee,0x8,0xe8,0x48,
0x97,0xf1,0xf9,0x66,0xe5,0xbc,0xa1,0xe8,0x1b,0x1a,0x0,0x54,0xa5,0xd7,0x6,0x23],[
# Record 161: Address = 0x6080, Length = 128
0x6080,0x80,
0x48,0x29,0x74,0x9e,0xf2,0x0,0x78,0x35,0xc5,0xa,0xf0,0x1a,0xe9,0xbb,0x96,0xf,
0x68,0x88,0x4d,0x82,0x9b,0xe8,0xf6,0xcf,0x79,0x75,0x5,0xd,0x32,0x93,0xf8,0xf5,
0xae,0x19,0xc0,0xdf,0xca,0xeb,0xb5,0x1,0x24,0x67,0x66,0x8b,0x3a,0x2,0x60,0xce,
0x7d,0x1a,0xb8,0xe4,0x3a,0x2b,0x1a,0x93,0xbf,0xb2,0x70,0xa8,0x7c,0xb6,0x49,0xa9,
0x24,0x79,0x29,0x44,0x5a,0x72,0x4f,0xd8,0xea,0xc2,0x6c,0x6d,0xdc,0x97,0x44,0x2c,
0x74,0x37,0xb,0x27,0x69,0x41,0x3d,0xea,0x88,0xa0,0xa3,0x43,0x4c,0x1c,0x35,0xcb,
0xc3,0x9d,0x4c,0x7d,0xa1,0xf4,0x65,0x3d,0x3a,0x55,0xd2,0x66,0xe3,0x19,0x68,0x0,
0x40,0xa4,0xb4,0xb4,0x2,0xc5,0x7e,0x8d,0x78,0x4f,0x90,0x7e,0xad,0x20,0xfa,0x6d],[
# Record 162: Address = 0x6100, Length = 128
0x6100,0x80,
0xc8,0x14,0x94,0xeb,0xc1,0xb4,0x2f,0x71,0x6f,0x2,0x25,0x68,0x1e,0x59,0xef,0x54,
0x8e,0xbe,0xbd,0x17,0x17,0x3c,0x7b,0xd5,0x94,0x33,0x83,0x97,0xbe,0xfa,0x5,0x1,
0xa1,0xfe,0xe2,0x95,0x87,0x3,0x86,0xfd,0xaf,0x71,0x40,0x25,0x19,0xd9,0x7f,0xaf,
0x43,0xbd,0x59,0xd7,0xc,0x2e,0xfc,0x7f,0x93,0xad,0x9c,0x95,0x73,0x17,0x65,0x17,
0xa3,0x90,0xd,0x96,0xf5,0x5c,0x73,0x61,0x2d,0x76,0xc4,0x88,0x58,0x8e,0x41,0xf5,
0x98,0x42,0xa1,0x96,0xa6,0x46,0xc4,0xe5,0xd,0x42,0x57,0x85,0xce,0x6,0xac,0x28,
0xd5,0xb4,0x24,0x7a,0x36,0x21,0x87,0xfb,0x72,0x9b,0xfa,0xe7,0xc8,0x9d,0xe9,0x84,
0x20,0x47,0x3b,0x39,0x7c,0xe1,0xf4,0x9c,0x25,0x4f,0xa0,0xda,0xbf,0xd0,0x5b,0x16],[
# Record 163: Address = 0x6180, Length = 128
0x6180,0x80,
0x3f,0x97,0xce,0xad,0xbe,0x2a,0x14,0xd5,0x87,0x97,0x92,0xb0,0x9f,0x61,0x8,0x6f,
0x81,0x5b,0x8f,0x28,0xda,0x50,0x3d,0x85,0xd3,0x2,0x77,0x82,0x23,0x51,0x7c,0xc1,
0x27,0xc1,0xde,0x62,0xcd,0x21,0x40,0xe8,0xe7,0x8e,0x13,0x22,0xb1,0x93,0xef,0xe,
0xad,0xb1,0x90,0xf7,0x78,0xbe,0xf7,0x7f,0x42,0x66,0x88,0xa1,0x36,0xe,0x47,0x6c,
0xbc,0xd4,0x99,0x21,0x92,0x24,0x51,0xba,0xca,0x3f,0x7b,0x86,0x5b,0xc8,0xa4,0x33,
0x5e,0x15,0xed,0xce,0xed,0x20,0xb2,0xed,0xc4,0x46,0xb,0xa2,0x6b,0xaa,0x87,0x61,
0x3f,0x5b,0xa8,0xad,0xc,0xcb,0x60,0x36,0xfc,0x48,0x8d,0xd,0x5d,0x4d,0x71,0xb4,
0x24,0xc5,0x77,0xe,0xa9,0x33,0xc8,0x2d,0x64,0xbb,0x47,0x27,0xd7,0x2e,0x98,0x6d],[
# Record 164: Address = 0x6200, Length = 128
0x6200,0x80,
0xb5,0x84,0xe6,0x2b,0x7e,0x93,0x9a,0x4c,0x62,0x17,0xe8,0x40,0x25,0x25,0x87,0x44,
0xfd,0xf6,0x5f,0x2c,0x74,0x3c,0xde,0xa8,0xd4,0xd8,0xd7,0x43,0xe4,0xc0,0xb2,0xe,
0xc8,0x8e,0xb1,0x52,0xa,0x4e,0x4e,0x27,0xde,0x40,0xb7,0xe0,0xe0,0xc4,0xfc,0xc6,
0x41,0x84,0x48,0x37,0x3e,0x96,0x9,0x2d,0x1f,0xdc,0xa9,0x5a,0x65,0xa0,0xa5,0x3b,
0x40,0x88,0x89,0x22,0x3d,0x13,0x13,0x6,0x6d,0x1f,0x1d,0x20,0xf5,0xd2,0x38,0x6b,
0xe0,0xcf,0x73,0x5e,0xb0,0xaf,0x29,0xfe,0xe4,0x8a,0xbd,0x95,0xc6,0xa0,0xe4,0x32,
0x9a,0x45,0xd7,0xb3,0x83,0x54,0x56,0x52,0x67,0x2b,0x6f,0x58,0xd6,0x4e,0x9f,0x40,
0xa,0x6,0xba,0xaa,0xdb,0xd2,0xba,0x2c,0xe,0xd4,0xf4,0x11,0xef,0x40,0xd3,0x6c],[
# Record 165: Address = 0x6280, Length = 128
0x6280,0x80,
0x83,0xfb,0x50,0xe7,0x9,0xf,0x89,0x4b,0xab,0xb1,0x72,0x61,0x76,0xb7,0x11,0x9f,
0xee,0x7a,0x58,0x55,0xde,0x35,0x80,0x5e,0x9c,0xe,0x2b,0xd7,0x95,0x93,0x82,0x8a,
0x5b,0x37,0xe7,0x6e,0x73,0x69,0xf5,0xb6,0x23,0xab,0xae,0x75,0xbc,0xd9,0x96,0x1a,
0x4d,0xe,0x16,0x94,0x5d,0x76,0x5b,0x8f,0xc6,0x58,0xd3,0xfa,0x74,0x53,0x60,0x72,
0xb2,0x78,0xd0,0x3d,0xd9,0x10,0xa9,0x4,0xd0,0xe2,0xf5,0xf9,0xa7,0x42,0x80,0xf5,
0x52,0xc1,0xe6,0x34,0x24,0xb1,0xc5,0xc5,0x5d,0xa8,0x1c,0xbf,0x8c,0x84,0x91,0xf2,
0xa5,0x6b,0x7e,0x45,0x2a,0x36,0xc0,0x94,0x2d,0xfc,0xec,0xc2,0x21,0x96,0x22,0x27,
0x79,0xa6,0x10,0xf7,0x8c,0xe2,0x66,0xa1,0xbe,0x3f,0xcd,0xc0,0x3d,0x67,0xd9,0x3f],[
# Record 166: Address = 0x6300, Length = 128
0x6300,0x80,
0xa7,0x1,0xfd,0xee,0x9d,0xe2,0x69,0xc2,0xf8,0xf9,0xb,0x82,0x2d,0x32,0xda,0x4a,
0x1c,0xa5,0x6b,0xc3,0xf3,0x5d,0xee,0x24,0xe3,0x8e,0x8e,0x29,0x62,0xb1,0x2f,0xd4,
0x5c,0x89,0x5d,0xf4,0x7d,0xd9,0x84,0x55,0x4e,0x8d,0x6b,0x5e,0x1f,0xf5,0x39,0x5d,
0x96,0x14,0x3c,0x29,0xc3,0x2,0xa4,0xa8,0x6b,0xe8,0x96,0x68,0x8e,0x76,0x35,0x2e,
0x19,0x1b,0xfc,0x77,0xbf,0xe8,0xe1,0x7e,0xbd,0x51,0x7f,0x3f,0x5e,0x45,0x1e,0x66,
0xa8,0x95,0x2e,0xb6,0x36,0x9c,0x3a,0x7,0x5d,0x2b,0xa6,0xa6,0xfa,0xf3,0xdb,0x40,
0x5b,0x7b,0xfa,0xa9,0xc0,0x74,0x30,0x26,0x26,0x2c,0x96,0x6f,0x18,0x77,0x8c,0x92,
0xa4,0x17,0xf5,0x70,0xd4,0x59,0xbd,0xff,0x4d,0x18,0x19,0xc2,0xfa,0x5a,0x61,0x57],[
# Record 167: Address = 0x6380, Length = 128
0x6380,0x80,
0xb,0xe4,0x8d,0x45,0xee,0x1f,0xd8,0xc3,0xd6,0xe8,0x48,0x9,0x5,0x17,0xcf,0x1e,
0xdb,0x50,0xd8,0x83,0x6,0x11,0x6e,0x18,0xee,0xf8,0xd4,0xa7,0x9b,0xfd,0x52,0xc5,
0x4f,0xd3,0x35,0xbc,0x1a,0xcb,0xf2,0xeb,0xc7,0x2f,0xf2,0x4a,0x61,0xe9,0xb6,0xa,
0xb6,0x62,0x2b,0x9e,0xbc,0xca,0x91,0x8b,0x8a,0x46,0x6d,0xf7,0xdb,0x5d,0x39,0xc1,
0x2b,0x27,0x3c,0x56,0x2b,0xbf,0x2,0x98,0x26,0x81,0xd6,0x64,0x71,0x3f,0x7f,0xcf,
0x1,0xfd,0x88,0x6f,0xbd,0x62,0x75,0xf9,0x64,0xa1,0x7f,0x89,0xc2,0xd,0xf8,0x5,
0xb0,0xb6,0x8a,0x2a,0x92,0x33,0xe0,0x18,0xf1,0xee,0x61,0x38,0x5d,0x6b,0xe0,0xbc,
0x86,0x5c,0xd2,0xd9,0x13,0x36,0xf,0x50,0xef,0x46,0x9,0xa4,0x86,0xdd,0xda,0x15],[
# Record 168: Address = 0x6400, Length = 128
0x6400,0x80,
0x55,0xa7,0x77,0x9,0xfc,0x33,0xe,0x4f,0xd,0x3e,0xf5,0x3a,0x28,0x68,0x9a,0xc6,
0x6c,0x9,0xd3,0xb7,0x21,0x10,0xf6,0xe,0xa0,0x59,0x94,0xbb,0xaa,0x63,0x9c,0xba,
0x3e,0x78,0x13,0xdf,0xda,0x59,0x14,0x86,0x5b,0xb3,0x8c,0xf7,0x82,0x9f,0x48,0x22,
0xd2,0xbb,0xf1,0x11,0xf3,0x69,0x65,0x96,0xd5,0x53,0x8b,0x84,0xe4,0x85,0x18,0x72,
0xc5,0x17,0xdf,0xca,0x87,0xed,0xdb,0x9f,0x17,0x9,0xeb,0xc9,0x64,0x2a,0x8f,0xe1,
0xcf,0x97,0x16,0x39,0x1,0x7f,0xf6,0xb6,0xff,0xbd,0xaf,0x57,0xee,0xad,0xd0,0x8,
0x7a,0x55,0x9c,0xd0,0xa6,0x99,0x61,0x28,0x19,0x1e,0xc1,0x48,0xce,0xf,0x42,0xd,
0x73,0x17,0x6b,0xef,0x5d,0xd7,0x9c,0x1,0x7b,0x8b,0x88,0xde,0xc4,0x6,0x2b,0xc2],[
# Record 169: Address = 0x6480, Length = 128
0x6480,0x80,
0x1,0x21,0x2f,0x48,0xe4,0x56,0x8a,0xe8,0x89,0x97,0x7e,0x6,0xa4,0x4c,0xe2,0x8c,
0xdc,0xde,0x57,0x55,0xd9,0x3c,0x3c,0x37,0xa4,0xd4,0x66,0x64,0x1,0xbd,0x6f,0x50,
0x7,0x29,0x5f,0x90,0xa2,0xaa,0xd3,0xd,0xc5,0x9,0x6d,0xb3,0x77,0x7,0xe9,0x41,
0xa6,0x44,0x5f,0x51,0x9c,0x89,0xfc,0x90,0xfd,0xf,0x48,0x16,0x6a,0xcb,0x69,0xf7,
0xbf,0x6,0xb0,0xed,0xa0,0xb6,0x38,0x4f,0x7d,0x7c,0x1a,0xcd,0x94,0x6d,0x60,0xe9,
0xb2,0xbe,0xd3,0xc8,0x9a,0x2a,0xcf,0xf4,0x83,0x9f,0xb2,0xf4,0xca,0x44,0x7c,0xf5,
0x3b,0xd5,0x98,0x9f,0xd6,0x28,0x9e,0x7d,0xae,0xf2,0x6e,0x92,0x84,0x77,0xea,0xbf,
0xfb,0xc,0x7a,0x83,0x93,0x6a,0xe0,0xe9,0xc0,0x9d,0xc0,0x93,0xdb,0x6a,0x6,0x47],[
# Record 170: Address = 0x6500, Length = 128
0x6500,0x80,
0x9a,0x38,0x2,0x3e,0xe2,0xf3,0x29,0xc1,0x3f,0x68,0x1a,0xd,0x43,0xd9,0x21,0xcf,
0xa1,0xc1,0x1e,0x6,0x22,0x89,0xd8,0xc1,0x3f,0xc4,0x3e,0x5c,0x87,0x2d,0x6e,0xb0,
0x36,0x9e,0x31,0xd2,0x1,0x7c,0x1e,0x29,0x3f,0x98,0x95,0xcf,0xea,0x1e,0xd8,0x70,
0xd5,0x32,0x9f,0x88,0xff,0xc8,0xb9,0x27,0x62,0x0,0x11,0x42,0x1b,0xfb,0xc0,0xa2,
0xf0,0x97,0xd0,0x87,0x27,0xf4,0x88,0x27,0x22,0xc7,0xc0,0x12,0x1e,0xb0,0x91,0x36,
0x77,0xef,0xb3,0xd0,0x2e,0xbb,0x92,0x43,0x67,0x4,0xca,0xd9,0xe,0xcf,0x20,0xb2,
0x5,0x5a,0x2e,0xa5,0x8e,0x5f,0xfd,0x92,0x9e,0x6b,0xc6,0x2e,0xd8,0xba,0x10,0x8a,
0x93,0xa6,0xc4,0x6,0x35,0xbf,0xfb,0xe3,0x14,0xb5,0xc3,0xc8,0xfc,0xaf,0x17,0xaf],[
# Record 171: Address = 0x6580, Length = 128
0x6580,0x80,
0x8d,0xf5,0x5d,0xe4,0xdf,0xf6,0x2f,0x93,0x63,0x67,0x99,0xc2,0x5e,0x4c,0x3,0x8d,
0x65,0xbf,0x5,0x54,0xed,0x44,0x47,0x12,0x11,0x47,0x33,0xe2,0x83,0xd,0x8e,0xd9,
0x76,0x9c,0xdf,0x9,0xc6,0xd7,0xbb,0x1c,0x99,0x5,0x14,0x80,0x1d,0x8b,0x9f,0x3a,
0xef,0x20,0xfc,0x9a,0xb,0x70,0x61,0x52,0x27,0xa2,0xbe,0xbe,0x5e,0x8f,0x80,0x3,
0xa1,0x1f,0xca,0x2b,0x97,0x97,0x31,0x37,0x52,0xab,0xf5,0x2,0xc8,0x47,0xaa,0xab,
0x5a,0x73,0x47,0x4,0x4c,0x7c,0xe6,0xf1,0xfb,0x15,0xdf,0xef,0x99,0x89,0xa7,0x3b,
0x62,0x5b,0x86,0xd8,0x2e,0xe6,0xb6,0xd6,0x14,0xef,0xf0,0x30,0xaa,0xd7,0xf6,0x1f,
0x54,0xa8,0xda,0xd1,0xc6,0x3e,0x2e,0xad,0x2f,0x27,0x97,0xe4,0x58,0x7c,0xd1,0x9e],[
# Record 172: Address = 0x6600, Length = 128
0x6600,0x80,
0x73,0x35,0x96,0x84,0x37,0xa1,0x17,0x67,0x78,0xe7,0x5,0xcc,0x6b,0x61,0x19,0xe5,
0xdf,0xf9,0x35,0xfe,0x2f,0x2f,0xf0,0x9c,0x9b,0xac,0x1a,0x41,0xa5,0x2b,0x4b,0x47,
0x36,0xc8,0x7b,0xc3,0x42,0x3c,0xa5,0x1,0xe9,0x82,0xc6,0xe9,0x7f,0xc7,0x96,0x83,
0x4,0xb7,0x62,0x1,0xa3,0x83,0x1f,0xbe,0xe3,0x48,0x5b,0x42,0x2c,0xef,0xaa,0x34,
0x4a,0x59,0x8c,0xb,0xe2,0xaa,0xd6,0xea,0x3d,0x9d,0xf2,0x50,0xe0,0x46,0xc1,0xe3,
0x48,0xd,0x53,0x96,0xc8,0x2,0x93,0xb0,0xcf,0xde,0xa1,0xa3,0xc5,0xfe,0x2,0x75,
0x5e,0xf1,0xbc,0xb7,0xe9,0x64,0xa8,0xd8,0xfd,0x98,0xed,0xea,0x0,0x3a,0x37,0x65,
0xf7,0xf2,0xb3,0x92,0xa2,0xf,0xb0,0xa7,0x5f,0x8b,0x61,0x6e,0xf0,0x23,0xa2,0xea],[
# Record 173: Address = 0x6680, Length = 128
0x6680,0x80,
0x69,0x79,0x39,0x5c,0xbe,0xba,0xe7,0xba,0xbd,0xfc,0x4,0xf1,0xef,0x5f,0x59,0x75,
0xb0,0x6c,0x94,0x3f,0x6d,0xe1,0x18,0xd0,0x37,0xff,0x4e,0x76,0x8a,0x5e,0xe2,0x1b,
0x42,0xb8,0x98,0x6a,0xd7,0x52,0x9b,0x8c,0x30,0x83,0x85,0x47,0x33,0x6e,0x44,0xe4,
0xdb,0x44,0x97,0x87,0x7c,0xc9,0x4,0x9d,0xdf,0xa9,0x82,0x88,0x3c,0xf0,0xfe,0x67,
0xac,0x63,0xaa,0xf9,0x6d,0x53,0x5,0xca,0x66,0x87,0x69,0x7c,0xfc,0x59,0x2b,0xf3,
0x59,0xc4,0x51,0x86,0x94,0x88,0x50,0x85,0x41,0xe0,0xc3,0xea,0x4c,0xd,0xa5,0x8a,
0x9c,0x8b,0xc7,0x5c,0x17,0x3c,0xe0,0xb9,0xd8,0x69,0x46,0x29,0x2a,0xb0,0x91,0xc6,
0xfd,0x8f,0xde,0x46,0x16,0x5f,0x85,0x12,0x66,0x3e,0x24,0x66,0x1e,0x27,0xa8,0xfb],[
# Record 174: Address = 0x6700, Length = 128
0x6700,0x80,
0x7,0x77,0x83,0x4,0x90,0xaf,0x39,0x52,0x5a,0xac,0xe4,0x19,0xe8,0x9b,0x9a,0x93,
0x35,0xeb,0x2,0x1c,0xe3,0xd8,0x8a,0x1c,0xa2,0x95,0x49,0x4e,0x2b,0xca,0xfc,0x76,
0xaf,0xd8,0x10,0x8e,0xce,0x7c,0x3b,0xad,0xd0,0x54,0x42,0xed,0x50,0x6d,0xd5,0xf9,
0xf5,0xc8,0xf5,0x66,0xbb,0x2b,0x3f,0xc6,0xfd,0xc,0x47,0x20,0xfd,0xab,0xd8,0xd0,
0x6c,0xc5,0xfa,0x22,0x60,0x99,0x6e,0xa6,0xdd,0xd7,0x38,0xaf,0x40,0xcf,0xb5,0x35,
0x98,0xf9,0xab,0xf8,0x9b,0xb0,0xa2,0x39,0x5e,0xc2,0xd6,0x8,0xd4,0x72,0x15,0xce,
0x96,0x4e,0x65,0x61,0xed,0x6f,0x89,0xae,0xec,0x56,0x5,0xb6,0x93,0x44,0x8d,0x64,
0x9c,0x73,0xd2,0xe0,0x74,0x40,0x68,0xd3,0x17,0x5e,0x79,0x5c,0xb9,0xc3,0xc8,0xd4],[
# Record 175: Address = 0x6780, Length = 128
0x6780,0x80,
0x37,0x6c,0x30,0x1e,0xb,0x3e,0xcd,0x24,0xff,0x4f,0xa3,0x82,0x48,0xc5,0x8c,0xbe,
0x8b,0xae,0xb4,0x3,0x17,0x3e,0x20,0xff,0xca,0x2b,0xfe,0x79,0xcf,0xac,0xd9,0x96,
0xa8,0xba,0x10,0xc0,0x9c,0xeb,0xb8,0x1,0x35,0x29,0x54,0xc9,0xa6,0x28,0xe6,0x59,
0x2b,0xf3,0x30,0x7a,0xd4,0x8f,0xe7,0xe8,0x10,0xf7,0x86,0x76,0x39,0x3b,0x20,0x85,
0x97,0xc,0x4,0xd4,0x34,0xe1,0x94,0x42,0x33,0xfa,0x64,0xa3,0x7f,0x25,0x8,0x42,
0x43,0x73,0x26,0xf4,0xf9,0x2c,0xd8,0x26,0xbd,0x7e,0x28,0x57,0xb5,0x8d,0x20,0x3d,
0x34,0x8e,0xa1,0x8a,0x70,0xe8,0xe9,0x49,0x7a,0xd5,0x5d,0x56,0x3c,0xe9,0x81,0x8,
0x73,0x7e,0xf8,0x38,0xc6,0xd5,0x2a,0xa2,0x4,0xe,0xd7,0xf1,0x47,0x60,0xdb,0xfb],[
# Record 176: Address = 0x6800, Length = 128
0x6800,0x80,
0x78,0x22,0xee,0xd5,0xba,0x91,0x1c,0xeb,0x5c,0x26,0x7e,0x3b,0x32,0x68,0x1a,0x5e,
0x6,0x38,0xbf,0x3c,0x72,0x48,0x40,0x64,0xe8,0xef,0x2f,0x48,0x2b,0x1c,0x3f,0x5c,
0xcb,0x26,0x9d,0xf5,0xe1,0x16,0x3f,0xaa,0x54,0x52,0xb8,0xe3,0xe0,0x11,0xcd,0x5e,
0xde,0x84,0xaa,0x81,0xdc,0x40,0xe,0xb2,0x69,0xc5,0xea,0x8e,0x8d,0x29,0xcb,0xdf,
0x42,0x24,0x12,0xa,0x92,0x91,0x3e,0x98,0xd0,0x73,0xa,0xd6,0x84,0x30,0x3d,0x1f,
0x23,0xbc,0xd4,0x5f,0xad,0x84,0xd3,0xe6,0xca,0xb3,0xf8,0x29,0x46,0x17,0xef,0x8a,
0x53,0xc6,0x96,0x50,0xd4,0x3a,0xdd,0x18,0xec,0x5b,0xbe,0x53,0x43,0xf6,0x3b,0x2f,
0xd5,0x2d,0x82,0x6e,0x97,0xd7,0xce,0xc1,0x9d,0x53,0xdd,0x9b,0x42,0x6,0xb5,0x3f],[
# Record 177: Address = 0x6880, Length = 128
0x6880,0x80,
0x19,0xf2,0x2d,0xeb,0x71,0x4a,0xdf,0x39,0xce,0x1,0x9c,0xf2,0x7d,0xc4,0x83,0xf9,
0xe5,0xee,0x65,0x75,0xa6,0xff,0x53,0x4d,0x78,0x64,0x61,0x96,0xfb,0x7c,0xa9,0xcc,
0x44,0x4b,0xba,0xcc,0x67,0x16,0x94,0xf4,0x62,0xde,0xc1,0x22,0x70,0x43,0x99,0x6f,
0x33,0x4a,0xc5,0x9b,0x25,0x1c,0xc8,0x37,0x5e,0x61,0x32,0x88,0x5f,0xa1,0x71,0x6,
0x62,0xce,0x2b,0x23,0x6,0xcf,0xfd,0xe6,0xc7,0x86,0x41,0x7b,0x24,0x19,0x45,0x28,
0x59,0x78,0xf3,0x88,0xc1,0xe,0xd,0x10,0xbc,0xa3,0x9c,0xc,0x64,0x9e,0xfc,0xd0,
0xd9,0xb4,0xe2,0xa3,0x5a,0xde,0x5,0x8e,0x59,0x1c,0xf7,0x28,0xd0,0x4b,0xe,0xdd,
0x3f,0x63,0xfa,0x49,0xc2,0x8d,0xbe,0x3a,0x4c,0x33,0x21,0xcd,0x92,0xfc,0x25,0x7b],[
# Record 178: Address = 0x6900, Length = 128
0x6900,0x80,
0x64,0x2c,0xf7,0xe1,0xa3,0x71,0xc5,0x63,0x7a,0x73,0x4d,0x41,0xa7,0xab,0x36,0x18,
0x26,0x38,0xb3,0x35,0xe2,0xd7,0x3e,0x6d,0xb4,0xb6,0xd8,0x94,0x23,0x6e,0x3c,0x9f,
0x3f,0x13,0xc,0x51,0xb0,0x34,0x6a,0xdc,0x2a,0xd6,0x9d,0x6d,0x3b,0xd4,0xec,0xdb,
0x8e,0xf7,0x11,0xd7,0x16,0xec,0xdb,0x7f,0x1d,0xb3,0xb2,0x39,0x30,0xce,0xba,0x27,
0x4,0xb4,0xd2,0xab,0x2e,0x5b,0x1c,0x8d,0xbf,0x54,0xf4,0xec,0x6d,0x76,0xdb,0xbf,
0x26,0x15,0x30,0x21,0x62,0xcd,0x10,0xf5,0xd9,0x81,0x24,0x29,0x45,0x49,0xcf,0x83,
0x4d,0x45,0x13,0x22,0x89,0x7d,0xdb,0x1f,0x54,0xc7,0xe0,0x79,0xce,0x37,0x2b,0x4d,
0x9e,0x2,0x94,0x47,0x97,0xb3,0xe4,0x26,0x6b,0x8e,0x3a,0xce,0x1a,0xcd,0xe3,0x2],[
# Record 179: Address = 0x6980, Length = 128
0x6980,0x80,
0x6,0x8e,0x22,0x64,0xe6,0xba,0xf6,0x64,0xdb,0xdd,0xb3,0xfa,0x3,0x35,0x42,0xb8,
0x8,0x19,0x27,0x78,0xe,0x4d,0x8b,0xc,0x71,0x13,0x81,0xc2,0x75,0x5f,0xe2,0x7a,
0x26,0xae,0xe3,0x80,0x1c,0x1c,0x68,0x10,0xff,0x81,0xb2,0xa9,0x7c,0x89,0x55,0x74,
0xf2,0xd0,0x2a,0x2c,0x84,0x9b,0x16,0xe2,0xb5,0xf,0x68,0xdc,0xe8,0x2d,0x62,0x5f,
0x62,0x66,0x98,0xff,0x48,0x5b,0x65,0x6,0x9d,0xd5,0xd1,0x30,0xaa,0xad,0x81,0xa9,
0xd,0x4a,0x75,0xc3,0x6d,0xf5,0x8f,0x2b,0xe5,0xec,0xb0,0x65,0xdb,0xc8,0xeb,0x4f,
0x64,0x9c,0xcd,0xb2,0x9e,0x6e,0x25,0x8f,0x16,0x8f,0x43,0x59,0x83,0xd0,0x9d,0x31,
0xc,0x3b,0x75,0x28,0x82,0xf9,0xe0,0x79,0x13,0x20,0xeb,0x5c,0xac,0xbc,0xf6,0x82],[
# Record 180: Address = 0x6a00, Length = 128
0x6a00,0x80,
0x7b,0xd6,0xc4,0xcc,0xd9,0x82,0x30,0x4c,0xbe,0x88,0xde,0x52,0x47,0xe6,0x6d,0xe3,
0x9b,0x93,0x7d,0x74,0xe2,0xf7,0x6b,0x82,0x24,0xb8,0x19,0xe6,0x3f,0xac,0xa7,0x5d,
0x90,0x70,0x0,0x15,0x39,0x41,0x14,0x8d,0xf,0xff,0x99,0x52,0x11,0xcf,0xde,0xa6,
0xce,0x0,0x0,0xe3,0xd3,0xbf,0xea,0x99,0x17,0xbc,0x96,0x83,0x70,0x15,0x2a,0xea,
0x9c,0xea,0xa,0x20,0xc0,0x61,0xf2,0x6d,0x2c,0x2f,0xcb,0x75,0x87,0xc4,0xbb,0x4b,
0xb4,0xb,0xf1,0x6f,0xbe,0x7b,0x74,0x4b,0xb7,0x39,0x49,0xc6,0x58,0x2b,0xf5,0x3f,
0x8a,0xf,0xa1,0x7d,0xb9,0x4a,0x45,0x78,0xc5,0xf1,0xf9,0xba,0xc5,0xd5,0x4c,0x2a,
0x8,0xcc,0xab,0xfd,0x17,0x38,0x98,0xcc,0xca,0xde,0x5a,0x13,0xb7,0xf7,0x85,0xe0],[
# Record 181: Address = 0x6a80, Length = 128
0x6a80,0x80,
0x8e,0x9a,0xbf,0x92,0x74,0xc7,0x11,0xf3,0x73,0xed,0x14,0x82,0x14,0x11,0x68,0x24,
0x97,0xc0,0x32,0xb3,0x2f,0x9c,0xb9,0xe0,0x8f,0x38,0x5c,0x2d,0xb9,0x97,0x70,0x21,
0xae,0x3a,0xdd,0xd4,0x89,0xd3,0x3,0xea,0xe2,0x6a,0xec,0xcc,0xc1,0xb9,0xa4,0xbb,
0x8b,0xe3,0xd0,0x82,0x7a,0x46,0xf3,0xe3,0x47,0xe8,0x1,0x66,0x51,0xdf,0x9e,0xbb,
0x82,0x76,0x82,0x23,0x95,0x8c,0x3b,0xb,0xd1,0xe1,0x26,0x7b,0x69,0x46,0xba,0x42,
0xcd,0x1a,0x58,0xf2,0x49,0xc9,0x34,0x2,0x7b,0xe2,0xf8,0xfb,0xb9,0x76,0x86,0x22,
0xdf,0x61,0x92,0xc,0x49,0xb1,0x6,0x15,0xb6,0x43,0x92,0x13,0x8c,0xd9,0x26,0x64,
0x3e,0x21,0xbf,0x87,0xc7,0x1d,0xf3,0x38,0xe0,0xcc,0x5b,0x9b,0x20,0x2c,0x5,0x1],[
# Record 182: Address = 0x6b00, Length = 128
0x6b00,0x80,
0x7a,0xb3,0x7a,0x32,0x1e,0x5c,0xe8,0xf3,0xf0,0x24,0xb2,0xb1,0x30,0xc9,0x97,0x9b,
0xb9,0x94,0x7e,0x98,0xd6,0x6e,0xb9,0xf0,0x2e,0xa7,0xe0,0x23,0xfc,0xd5,0x15,0xbf,
0xbc,0xf7,0x3c,0x8f,0xd0,0x4,0x4c,0xe7,0xe8,0xc7,0xf6,0xe7,0xce,0x46,0x6c,0xf8,
0x38,0x9a,0x90,0x4c,0x81,0x8c,0x79,0x25,0xf,0xba,0x55,0x5e,0x5e,0x29,0x75,0xea,
0x5f,0xf0,0xc4,0x22,0x83,0x9b,0x3e,0x79,0xa3,0x36,0x7,0xb0,0xa4,0xe,0xb7,0x21,
0x3a,0x24,0xb7,0x56,0xc1,0x5c,0x17,0xaf,0x6,0xb,0x5,0x3d,0x74,0x23,0x8d,0xc2,
0x9b,0xa8,0xcb,0x5f,0xde,0x4e,0xbd,0x16,0xa4,0x41,0x4a,0xfa,0x13,0x10,0xba,0xec,
0xa6,0x37,0xac,0x7e,0x74,0x6a,0x52,0x14,0x18,0x5d,0xe8,0x7d,0x63,0x1f,0xea,0x88],[
# Record 183: Address = 0x6b80, Length = 128
0x6b80,0x80,
0x5c,0xa0,0x1f,0xa5,0xe2,0xa7,0x7d,0xdf,0xca,0xe6,0xa5,0x9d,0xae,0xbf,0xf5,0xd0,
0x39,0xa0,0xc5,0xbf,0xb3,0x41,0x6e,0xdd,0x98,0xdb,0x4b,0x86,0x5b,0xb0,0x27,0xc9,
0x91,0x10,0xd,0x3f,0xaa,0xaa,0x75,0x66,0x9c,0xd0,0xa4,0x7d,0xa6,0xdf,0x33,0x3,
0xa7,0xd3,0x88,0xd8,0x24,0x4f,0x72,0xf7,0x75,0x2c,0xd,0x37,0x63,0xd8,0x14,0xb2,
0xf3,0xa0,0xc1,0x20,0xfe,0x12,0x95,0x77,0x4d,0x6c,0xb1,0x99,0x32,0xcd,0x29,0x52,
0x2f,0x77,0x62,0x7d,0x94,0xa1,0x5f,0x43,0x5e,0x7f,0xe7,0x2c,0x1e,0x9d,0x2e,0xdd,
0xa5,0xfd,0x58,0x66,0x85,0xaf,0xf1,0x6d,0x3f,0xef,0xd,0xf0,0xef,0x6a,0x5c,0x9d,
0x1d,0x6a,0x49,0x43,0x39,0x15,0xef,0xbc,0xb1,0xbf,0x9c,0x64,0x44,0x6f,0x79,0xfd],[
# Record 184: Address = 0x6c00, Length = 128
0x6c00,0x80,
0xe5,0x3b,0xa5,0xa8,0xd7,0x83,0x42,0xbd,0xee,0xe0,0x31,0xe7,0x2,0x45,0xa8,0xff,
0x43,0x37,0xc7,0xc8,0x1d,0x76,0xd7,0x1b,0x73,0x36,0xbc,0x5,0x19,0x7d,0x2e,0xb4,
0xde,0xaf,0x81,0x76,0x26,0xcc,0x9b,0x4b,0xda,0xbf,0x3c,0xe9,0x4d,0x7b,0x18,0xb0,
0x98,0xbe,0xce,0xe3,0x32,0x2a,0x1d,0x9c,0x22,0x1,0xdd,0xe9,0x57,0x7e,0x4d,0x36,
0x68,0xc,0x7e,0x65,0x85,0x27,0x95,0xa9,0xa0,0x4d,0xfe,0xda,0x24,0x7b,0xb,0x1a,
0xc8,0x35,0x50,0x68,0x37,0xf8,0x8b,0x8e,0x4,0x19,0x82,0xb5,0xdb,0xf4,0x70,0x2c,
0xa4,0xfb,0x1b,0xd8,0x84,0x45,0x82,0xc9,0x4e,0xb0,0xe,0x4d,0xb0,0x75,0x8c,0xae,
0x35,0x5d,0xdb,0x40,0x3,0x81,0x85,0x4d,0xd7,0x2d,0x48,0x51,0x72,0x5b,0x41,0x2b],[
# Record 185: Address = 0x6c80, Length = 128
0x6c80,0x80,
0xc2,0x2b,0x31,0xd0,0x19,0xc5,0x9b,0xff,0xc7,0xf6,0x94,0x68,0xff,0x3e,0x36,0x7b,
0x71,0x45,0xe5,0xbc,0x71,0x92,0xa,0x8b,0xa4,0x73,0xff,0x2,0x7a,0x2d,0xe1,0x57,
0xd3,0x99,0xb6,0x19,0xac,0x62,0xbd,0xbe,0x35,0xfb,0xf9,0xc0,0x0,0xd5,0xbc,0x23,
0xde,0x37,0x77,0xaf,0xb4,0x90,0x1c,0xa5,0x90,0xe6,0x25,0x9a,0x49,0x74,0x2c,0x8c,
0x46,0x6d,0xc5,0x9f,0xee,0x98,0xf,0x42,0xae,0xea,0x8b,0x28,0xd2,0xb,0x95,0x7d,
0x43,0x53,0xb3,0xe6,0x32,0x5e,0x11,0x95,0xaa,0xbd,0xd5,0x1d,0x59,0xe7,0x4d,0x54,
0x91,0x28,0xd5,0xcf,0x90,0x7e,0x34,0xac,0xd4,0x79,0x76,0x9d,0x5b,0x31,0x8,0x7,
0x7f,0xc5,0xdf,0x7b,0x4d,0xe1,0x48,0x37,0x21,0xe7,0x36,0xf9,0xed,0xb0,0x6e,0xd7],[
# Record 186: Address = 0x6d00, Length = 128
0x6d00,0x80,
0xbc,0xa1,0x3c,0x6d,0xf6,0xd6,0x8d,0x88,0x7a,0x6b,0xea,0x85,0x5c,0x4a,0x80,0x86,
0x98,0xaa,0x2c,0x8e,0x42,0x94,0x73,0xe6,0xbc,0xf0,0x5d,0x69,0x86,0xae,0x11,0xda,
0x69,0x4f,0x6f,0x60,0x8d,0xf7,0xd8,0x58,0x34,0x6b,0x52,0xc1,0x98,0x1d,0x1a,0x17,
0x57,0x51,0xbb,0x63,0xa8,0xf4,0xb,0x58,0x81,0xb1,0x2a,0xbb,0x84,0x6c,0x4e,0x43,
0x39,0xd0,0xcc,0xea,0x4c,0x1e,0x36,0x9,0xc7,0xe1,0xd9,0xab,0xd4,0x17,0x2e,0x9e,
0x5,0x25,0xe,0xf2,0xe7,0x68,0x6b,0x2d,0xe4,0x5,0xb1,0x27,0xd2,0x24,0xca,0xdd,
0x15,0xaa,0x45,0x76,0x4b,0x71,0x9b,0x58,0x6f,0xfe,0x91,0xd3,0x5a,0x6d,0x70,0x15,
0x43,0x43,0x1b,0xb3,0x50,0x84,0xda,0x8a,0xbe,0x2c,0xb9,0xe,0x50,0x2f,0x53,0x41],[
# Record 187: Address = 0x6d80, Length = 128
0x6d80,0x80,
0x3e,0xf9,0x88,0x84,0x29,0xb0,0x38,0x63,0x91,0x43,0x2c,0x63,0x10,0xf3,0x52,0xce,
0xc5,0x4d,0x9f,0x8b,0x31,0xfb,0x5b,0x8c,0x8e,0x72,0xb9,0x8c,0xc,0x3a,0x6e,0xc4,
0xfe,0xcd,0xa5,0x63,0x40,0x78,0x32,0x4b,0x77,0x5,0xe8,0x5,0xcd,0x79,0xe0,0x3b,
0xe5,0x75,0x36,0x97,0x18,0x21,0xb8,0x66,0x19,0xee,0xc6,0x26,0x78,0x80,0xf7,0xb5,
0xa,0xb7,0xb9,0xf6,0x4e,0x16,0xc9,0x89,0xd5,0x7,0xf7,0xd7,0x14,0x34,0x9b,0x54,
0x77,0xe3,0x42,0x8f,0x64,0xf9,0xdb,0x5d,0xf3,0x36,0xa9,0x31,0xc9,0xf9,0x98,0x59,
0x87,0x4d,0x63,0x24,0x68,0x94,0x8e,0x94,0x2d,0xe3,0x70,0xaa,0x10,0x57,0x14,0x9d,
0xe0,0x82,0x3a,0x43,0x3,0x44,0x75,0x9,0x75,0xbb,0x25,0xc2,0x1a,0x98,0xa2,0xe3],[
# Record 188: Address = 0x6e00, Length = 128
0x6e00,0x80,
0x25,0x97,0x59,0xdd,0x7d,0xdf,0xff,0x2c,0xa,0xd7,0xea,0x6,0x5a,0xd3,0xb2,0x0,
0x2,0xe5,0xab,0x77,0x39,0x58,0x3,0x49,0x4d,0x6b,0xd9,0x9b,0xcd,0xa2,0x56,0x25,
0x7a,0x99,0x46,0x36,0x6,0xd7,0x50,0x9b,0xb6,0xa8,0x3e,0xfa,0xc8,0x30,0xd4,0x30,
0x2,0x85,0x53,0xf8,0x65,0x7c,0x45,0xe4,0xc,0xd6,0xc5,0x0,0x34,0x7b,0xf4,0xb9,
0xd5,0x26,0x62,0x3c,0x2b,0xae,0x64,0x30,0x4f,0x27,0x6d,0xec,0x77,0x43,0x8b,0x35,
0x5,0x5d,0x92,0xba,0x7,0xde,0xd6,0x42,0xc9,0xe3,0xf0,0xfe,0xec,0xd0,0x61,0x13,
0x8e,0xc0,0x42,0xc4,0xb3,0xfa,0x59,0x7c,0xf3,0x18,0x22,0x4f,0x5,0xe5,0xe8,0x98,
0xc1,0x63,0x27,0x46,0x34,0xa6,0x30,0x42,0x20,0xf4,0xf1,0xae,0x68,0x64,0x28,0x50],[
# Record 189: Address = 0x6e80, Length = 128
0x6e80,0x80,
0x5d,0xde,0x1,0xb2,0x27,0x12,0x22,0xfa,0xbb,0x4c,0x32,0x58,0x8,0x73,0x4,0xb8,
0x35,0x4c,0xeb,0x41,0x14,0x84,0x77,0x7b,0x7b,0xd7,0x9a,0x1b,0xd5,0x2f,0x4b,0x46,
0x1c,0xf5,0xf0,0x26,0x2d,0x28,0xb,0x80,0x80,0x38,0x1b,0xc1,0x80,0x76,0x66,0xa0,
0x4f,0xf7,0xc6,0xdf,0xe9,0x20,0x13,0x93,0xe,0xa,0x78,0xd1,0xf0,0xb2,0xdf,0xa4,
0x2,0x8f,0xa3,0x35,0xdb,0x8c,0x4f,0xc9,0x83,0x42,0x2c,0x7,0x43,0x8f,0x20,0xea,
0xf9,0x1f,0x77,0xac,0xc1,0xef,0xc4,0x62,0x9c,0x90,0x67,0xc4,0xe9,0xf,0x21,0xe8,
0x2c,0xcd,0x12,0x92,0x6e,0xf0,0x9c,0x11,0x22,0x6b,0x48,0x46,0xce,0xd4,0x31,0x9f,
0x21,0xb0,0xf4,0xca,0x32,0x7f,0xa4,0xe7,0xb6,0x13,0xb,0xd0,0x98,0x66,0xd2,0xb9],[
# Record 190: Address = 0x6f00, Length = 128
0x6f00,0x80,
0xf8,0xe3,0xb8,0x13,0x56,0xc3,0x1e,0x69,0xbd,0xf8,0xdd,0x80,0x11,0x96,0x5c,0x2c,
0xad,0x61,0x35,0x26,0x87,0x5b,0x9b,0x19,0xf,0x0,0xb8,0x33,0x40,0x79,0x38,0xed,
0x9f,0x3a,0xe8,0x64,0xe4,0x8f,0xca,0x92,0xca,0x33,0x4e,0x9f,0x1a,0xf9,0xa9,0x3e,
0xe,0xcb,0x36,0xb5,0x89,0xba,0x18,0xfe,0xa6,0x12,0x4d,0x19,0x35,0x1,0x6d,0x7a,
0x78,0xf8,0xf0,0x62,0x34,0x6a,0x8e,0xf4,0x24,0x73,0x88,0x4e,0x2e,0x64,0x7,0x97,
0x5b,0x86,0xc1,0x52,0x27,0x8f,0x30,0x7,0xed,0xb7,0x1d,0x6a,0x6d,0xf1,0x3f,0x6e,
0xd8,0xe4,0x72,0x1b,0xb4,0x8,0xc8,0xc3,0xaa,0xe5,0x7e,0xf0,0x95,0xdb,0x6e,0xa8,
0xdb,0x6d,0x35,0xcc,0x56,0xbe,0x4,0x47,0x2a,0xa,0x7,0xc3,0xfb,0x1a,0xe8,0x94],[
# Record 191: Address = 0x6f80, Length = 128
0x6f80,0x80,
0x9f,0x2b,0x73,0x41,0xbf,0x2e,0xb2,0xc8,0x32,0xfd,0x92,0x1c,0xf2,0x7c,0x1a,0x42,
0x30,0x89,0xee,0x8,0xc,0x54,0xcf,0x9f,0xeb,0x2d,0xb7,0xa8,0x54,0xc5,0x76,0xb8,
0x64,0xb4,0xb3,0xf5,0x26,0xd3,0x72,0xe5,0x1a,0x7a,0x2,0x3d,0x79,0xc9,0x37,0x44,
0x9a,0x40,0x12,0x73,0xdf,0xe2,0x77,0x1e,0x29,0xd1,0xc9,0x9b,0x80,0xfd,0x43,0xb3,
0x95,0x7d,0x6e,0x38,0xf0,0x1d,0x7a,0x8c,0x89,0x69,0xf4,0x19,0x28,0x6d,0xe5,0x79,
0xbf,0x11,0x12,0x7,0x7c,0xfe,0x86,0x33,0xd2,0x69,0x2c,0xec,0xb9,0xd1,0xf5,0x14,
0x1b,0x7d,0x5a,0x75,0x50,0x14,0x37,0x9c,0x56,0x2,0x67,0x6a,0x20,0xdf,0xe9,0xe,
0x2e,0xd7,0x39,0x69,0xc1,0xa4,0xf0,0xe1,0x29,0x26,0xd2,0xe6,0xb,0x85,0x32,0x47],[
# Record 192: Address = 0x7000, Length = 128
0x7000,0x80,
0x1f,0xda,0x40,0xe2,0x5,0xde,0x48,0x43,0x9e,0xbe,0xe5,0xd3,0x8,0x96,0x20,0x58,
0xc5,0x63,0x7c,0x78,0x34,0x61,0xe7,0xde,0x8a,0x36,0xb,0x81,0xb8,0x5a,0x3f,0x22,
0xc6,0xa8,0x67,0x2e,0x8f,0x4e,0xb9,0x46,0x88,0xee,0xe6,0x14,0xe0,0x16,0xe1,0xad,
0xdc,0x9b,0xae,0x50,0x75,0xbf,0xcc,0xa6,0xbe,0x82,0x85,0x86,0x28,0x87,0x75,0xc6,
0xfa,0xf4,0xa5,0x23,0x7d,0xff,0x97,0x97,0xb6,0xe,0xff,0x5d,0x26,0xce,0x34,0x6e,
0x8b,0x8f,0x13,0x77,0xbe,0xcd,0xd5,0x53,0x1b,0x23,0x3f,0xe2,0x37,0x2a,0x26,0xef,
0x33,0xcf,0xc2,0x50,0xb3,0x76,0x89,0x45,0x3f,0xec,0x93,0x4c,0x2f,0x2c,0x41,0xcb,
0x8f,0x27,0xcf,0x10,0x16,0xfc,0xb0,0x48,0x32,0x7f,0x9d,0x35,0xc2,0xd9,0x0,0x9d],[
# Record 193: Address = 0x7080, Length = 128
0x7080,0x80,
0x5b,0x8f,0xf5,0x3e,0x77,0x21,0x47,0x4b,0x90,0x9d,0x82,0xcc,0xcf,0x8a,0x78,0xf8,
0x4e,0x5d,0x4d,0x9d,0x2c,0xc8,0x61,0x17,0x3c,0x8b,0xce,0x96,0x1c,0xe4,0x77,0x68,
0xc2,0x7c,0x34,0x13,0x92,0x65,0xab,0xfc,0xd,0x8b,0xd,0xeb,0x2d,0x5f,0xa,0xaf,
0x79,0x7f,0x72,0x9d,0x53,0xf7,0xda,0xa0,0x47,0x29,0x8c,0xb3,0xf9,0x8a,0xf2,0xbe,
0x7b,0xb9,0xf3,0x13,0x63,0x87,0xb4,0xdf,0x7a,0xf7,0x4d,0xe8,0x81,0xb1,0x86,0x9e,
0xe5,0xf1,0xad,0x6b,0xad,0x7f,0x8c,0x8,0xa6,0xce,0xe1,0xd9,0xd1,0xc,0x52,0x47,
0xaa,0x8c,0x84,0x13,0xf4,0xaa,0xd4,0x8,0x42,0xd9,0x90,0x38,0xdd,0xf2,0x50,0x60,
0x72,0xfa,0xcb,0x6c,0x16,0x4f,0xa0,0xb2,0x5e,0x64,0x9b,0xdd,0xe2,0xe8,0x66,0xec],[
# Record 194: Address = 0x7100, Length = 128
0x7100,0x80,
0x6b,0xbf,0x79,0x48,0x24,0xee,0xdf,0x40,0x4c,0xcd,0xda,0x25,0x4b,0xab,0xaf,0xf2,
0x22,0x8b,0x8b,0x43,0xdc,0xe4,0x91,0x47,0xc9,0xbd,0x22,0x2f,0x19,0xb5,0x7b,0x0,
0xc2,0xc4,0x4b,0x10,0xac,0xe3,0xb9,0x31,0x83,0x2a,0x7c,0x28,0xba,0x6d,0xc4,0x9b,
0xf0,0x2d,0x94,0xd8,0xcc,0x40,0x37,0xd2,0xa,0xda,0x99,0x3a,0x8,0xa5,0x59,0x9,
0x29,0xc1,0xfb,0x52,0x87,0xc1,0x2f,0x35,0x5f,0x6a,0x44,0x88,0xdf,0xf4,0xa9,0x9a,
0x56,0xaf,0x64,0x35,0x7f,0x65,0x70,0x98,0xe9,0x11,0xc7,0x1e,0x4a,0x40,0x43,0xb0,
0x13,0x19,0x8b,0x1e,0x24,0xb5,0x6c,0x87,0xf6,0x3,0xe3,0x21,0x33,0xa3,0xa8,0x34,
0x20,0x87,0x57,0xbf,0xbd,0x6d,0xcf,0xbd,0x7f,0x60,0x73,0x1f,0x2c,0x98,0x78,0x4],[
# Record 195: Address = 0x7180, Length = 128
0x7180,0x80,
0xc5,0x5d,0x8b,0xce,0x88,0x1e,0x13,0x28,0x35,0xfa,0x51,0x7d,0x3f,0x8e,0x5e,0x20,
0xa4,0xc0,0x16,0x36,0x83,0xc7,0x6d,0xea,0xc8,0x33,0xb,0x4b,0x91,0xb5,0x89,0x55,
0x6b,0xc,0x3c,0x6d,0x8,0xea,0x70,0x39,0xad,0xcf,0xd,0x8,0xc,0xf8,0x33,0xcd,
0xe9,0x5d,0x2f,0xac,0x4e,0x42,0x4d,0xa6,0xcc,0x39,0xd2,0x5c,0x3c,0x74,0xdc,0xe3,
0x1a,0xd6,0xad,0xf1,0xe9,0x10,0x6,0x91,0x8e,0xd0,0xb4,0xe2,0x68,0x61,0x9e,0x5e,
0x35,0xb6,0xe1,0x34,0x27,0x4,0x6f,0x49,0xab,0x90,0xc7,0x6f,0x27,0xea,0x73,0x63,
0x6a,0x5b,0x47,0xfd,0xdc,0xd1,0x3f,0x6d,0xa1,0xb2,0xfa,0xf2,0x72,0x6a,0x9,0x27,
0xe2,0x1a,0x4,0x28,0x36,0x56,0xc1,0x97,0x5,0xf4,0xb9,0xcb,0x7b,0xa7,0xe3,0x8c],[
# Record 196: Address = 0x7200, Length = 128
0x7200,0x80,
0x71,0xc1,0x99,0xb4,0x4e,0xf0,0x39,0x67,0xab,0x61,0x76,0xaa,0xe,0xba,0xd7,0xe7,
0x80,0x9f,0x15,0xcd,0x58,0xc3,0x87,0x34,0x51,0x89,0x8d,0x59,0x21,0x80,0xf3,0x42,
0xa3,0x4b,0x83,0x7d,0xf8,0x8,0xb5,0x4f,0xbd,0x7d,0x56,0x7f,0xc5,0x7a,0xb7,0x4c,
0x29,0x5e,0x73,0x48,0x4d,0xa1,0xb,0xb,0xae,0x8b,0x52,0x88,0x77,0x45,0x88,0x8a,
0x50,0x1d,0x75,0xd7,0xf5,0xa5,0xc4,0x4f,0x81,0xc8,0x4,0x74,0xf5,0xd8,0xed,0x4e,
0x45,0x44,0x47,0xfa,0x4a,0xc8,0x11,0x9c,0x4a,0x5,0xf1,0x20,0xd2,0x83,0x13,0xe1,
0xea,0xab,0x69,0x82,0xb0,0xd7,0xcf,0xd8,0xc8,0x65,0x7c,0x75,0xa6,0x6,0x8f,0x61,
0xe9,0xdf,0x73,0xd1,0x51,0xd2,0x52,0x47,0xed,0xcc,0x6a,0xed,0x13,0x71,0xd7,0xc],[
# Record 197: Address = 0x7280, Length = 128
0x7280,0x80,
0x5c,0x15,0xe,0x6b,0xdb,0x4b,0x7a,0x79,0x24,0xda,0x3,0x16,0xc0,0xc0,0xba,0xf9,
0x87,0x58,0xe7,0x4,0x1d,0xed,0x35,0x5e,0x70,0x51,0x1d,0x6a,0x2b,0x8d,0xed,0xf4,
0x92,0xde,0x76,0x1f,0x36,0x55,0xa,0xcc,0xe,0x74,0xc9,0x5a,0x4d,0xad,0x6c,0xb3,
0x8d,0x4f,0xc4,0xd8,0xe9,0x15,0xb1,0x1d,0xf0,0xb1,0x94,0x1d,0x49,0x8e,0xa3,0xe8,
0x6a,0xa5,0x8f,0xd,0x71,0xac,0xb4,0xad,0xd0,0x98,0x2f,0x60,0x2b,0x20,0x98,0xa7,
0x6b,0x95,0xce,0x27,0x6a,0xdd,0x1a,0x9c,0xca,0x1c,0xd6,0xc6,0xf8,0x6c,0x9e,0x8e,
0xb3,0x7e,0xdf,0x18,0x22,0x7,0xb3,0xef,0x82,0x2d,0x13,0xf3,0x37,0x43,0xce,0x16,
0x5c,0xee,0x9e,0x9e,0x4a,0x52,0x5d,0x2,0x0,0x7,0x46,0xb3,0x8,0xc8,0xa7,0xf8],[
# Record 198: Address = 0x7300, Length = 128
0x7300,0x80,
0xc6,0x72,0x1e,0x1,0x4a,0xc8,0x7,0xf5,0x70,0x11,0x71,0xfa,0x3d,0xc8,0x3f,0xf1,
0xf7,0x83,0xbe,0x39,0x3a,0x91,0x8e,0xf2,0x8c,0xdf,0x63,0x5b,0xd5,0x81,0xd,0x67,
0xe4,0x66,0xd5,0xdf,0xc3,0xbb,0x76,0xbf,0x6c,0xa5,0x90,0x35,0x7c,0x5a,0xe2,0x53,
0x33,0xdd,0x4,0x30,0x41,0x6f,0xf1,0x3b,0x63,0x7a,0xfb,0xc5,0x5e,0x19,0x2f,0x3e,
0x59,0x30,0x87,0x6d,0x10,0xda,0x49,0xfb,0x3,0x6a,0x3c,0x27,0x9b,0x4d,0x24,0xd7,
0xc1,0x7c,0x8a,0x9a,0xc3,0x8c,0xad,0x4e,0xfb,0x7f,0x6e,0x71,0xa5,0x47,0xb2,0x67,
0xe3,0x7,0x10,0xe0,0x31,0x5e,0x6e,0x26,0x1,0xec,0x80,0x58,0x7b,0x57,0x3a,0xd7,
0x8a,0xd7,0x43,0x92,0xa7,0x31,0x8d,0xb6,0x66,0xc7,0xa6,0xa,0x62,0x6,0x66,0xe3],[
# Record 199: Address = 0x7380, Length = 128
0x7380,0x80,
0x46,0x12,0x29,0xa8,0xd4,0xfe,0x59,0x99,0x1e,0xad,0x10,0x8c,0x4,0xa1,0xe0,0xe,
0x4f,0xf3,0x43,0x32,0x16,0x70,0xad,0x6b,0x91,0xc3,0xce,0xdf,0x92,0x42,0x5b,0xee,
0x93,0x16,0xa3,0x85,0xdc,0x55,0x0,0xf1,0x3b,0x38,0x9e,0xb1,0x23,0x44,0x0,0x8f,
0x22,0x50,0x92,0xdd,0x51,0x1f,0xda,0x7,0x81,0xed,0xf7,0x95,0xa8,0x48,0x25,0xe1,
0xb1,0x1e,0x17,0xc3,0x21,0x8a,0x55,0x2a,0x6a,0x45,0xd6,0x47,0x2,0x57,0x41,0xf8,
0xe5,0x7f,0x86,0x2f,0x10,0x34,0x52,0xfd,0xe6,0x7a,0x33,0x91,0x9,0x97,0xde,0xba,
0xe3,0xa8,0x88,0x55,0x16,0x26,0xf6,0xdc,0xf,0x98,0xfb,0xff,0xe9,0x5c,0xf9,0x31,
0xfa,0x7,0xb4,0x19,0x2c,0x10,0x8c,0xc4,0x30,0x38,0x63,0x9a,0x5b,0x6a,0xf6,0x70],[
# Record 200: Address = 0x7400, Length = 128
0x7400,0x80,
0xd4,0x67,0x7c,0x91,0x64,0xeb,0x3d,0xd4,0x35,0x73,0x91,0x9d,0xc1,0xe0,0x9e,0x1f,
0xba,0xb5,0xf5,0x60,0x4,0x3e,0xe3,0x4d,0xdf,0xe1,0x31,0xb7,0x0,0xad,0x5b,0x71,
0xbe,0xbb,0x18,0xc0,0xc6,0xae,0x20,0x11,0x19,0xcb,0xa7,0xcc,0xc8,0xdc,0x7e,0x82,
0xd7,0x5e,0x1,0x78,0x9a,0xa1,0x2d,0xa7,0x24,0x74,0x8b,0x25,0xd,0x91,0xa6,0x2b,
0x91,0x28,0x5f,0xd,0xe9,0x30,0xc3,0x53,0x7d,0xb4,0xe6,0x9a,0x30,0x76,0x83,0x3b,
0x62,0xe9,0x68,0xbc,0x76,0x6b,0x4c,0x21,0xf2,0xb4,0x1,0xca,0x55,0x1,0xe6,0xf,
0xaf,0x2a,0x2a,0xa,0x64,0x7b,0xb1,0x50,0x1b,0x3b,0x1e,0x36,0x37,0x29,0xac,0xc,
0xe,0x78,0x3e,0x37,0x34,0x18,0xbd,0xe,0x88,0xe6,0x20,0x0,0x9b,0xe,0xdb,0x32],[
# Record 201: Address = 0x7480, Length = 24
0x7480,0x18,
0xdf,0x6c,0x30,0x64,0xfc,0xf0,0x79,0xd2,0xe8,0xd1,0x16,0xb2,0x11,0x79,0xf6,0x9,
0x60,0xef,0xf2,0x51,0xab,0xe9,0xfc,0x64,0x12,0xee,0x9a,0x9f,0x7a,0x5e,0xcf,0x6d,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[
# Record 202: Address = 0x7500, Length = 128
0x7500,0x80,
0x56,0x34,0x11,0x99,0xff,0xff,0xff,0xff,0x0,0x1,0x2,0x3,0x4,0x0,0x0,0x0,
0x5,0x0,0x0,0x0,0x0,0x0,0x80,0x40,0x6,0x0,0x0,0x0,0x15,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0xc8,0x42,0x0,0x80,0x9,0x45,0x0,0x0,0x2f,0x45,
0x0,0x80,0x22,0x45,0x0,0x80,0x22,0x45,0x0,0x80,0xd4,0x44,0x0,0x0,0x40,0x3f,
0xa,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xac,0xf3,0x86,0x42,0xac,0xf3,0x86,0x42,
0xac,0xf3,0x86,0x42,0xac,0xf3,0x86,0x42,0xac,0xf3,0x86,0x42,0x0,0x0,0x80,0x41,
0x0,0x0,0x80,0x41,0x0,0x0,0x80,0x41,0x0,0x0,0x80,0x41,0x0,0x0,0x80,0x41,
0x0,0x0,0x40,0x40,0x0,0x0,0x40,0x40,0x0,0x0,0x40,0x40,0x0,0x0,0x40,0x40],[
# Record 203: Address = 0x7580, Length = 128
0x7580,0x80,
0x0,0x0,0x40,0x40,0x19,0x8,0x0,0x0,0x97,0x20,0x23,0x42,0x7b,0x22,0x4a,0x42,
0x6b,0x79,0x9b,0x42,0xea,0xd6,0x5c,0x42,0x0,0x0,0x70,0xc1,0x0,0x0,0x70,0xc1,
0x0,0x0,0x20,0xc2,0x0,0x0,0x70,0xc1,0x91,0x83,0xc0,0xc0,0xdf,0x8f,0x35,0xc1,
0xb3,0x45,0x5f,0xc1,0xce,0x4d,0x2d,0xc1,0x0,0x0,0x20,0x3f,0x0,0x0,0x70,0x3f,
0x0,0x0,0x20,0x3f,0x0,0x0,0x80,0x3f,0x0,0x0,0x0,0x0,0xa0,0xc1,0x36,0x43,
0x0,0x0,0x82,0xc2,0x0,0x0,0x80,0x42,0xf4,0x1c,0xaf,0xc1,0x0,0x0,0x0,0xbf,
0x0,0x0,0x50,0x42,0x0,0x0,0x2c,0x42,0x48,0xe1,0x32,0xc1,0x71,0x3d,0x9e,0xc1,
0x0,0x0,0x0,0x0,0x5c,0x8f,0xaa,0x41,0x9a,0x99,0x9,0x41,0x0,0x0,0xa0,0x42],[
# Record 204: Address = 0x7600, Length = 128
0x7600,0x80,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x8c,0x42,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xcd,0xcc,0x4c,0x3e,0xcd,0xcc,0x4c,0x3e,
0x0,0x0,0x0,0x0,0x0,0x0,0x40,0x40,0x0,0x0,0x40,0x40,0x0,0x0,0x80,0x3f,
0x0,0x0,0x70,0x42,0x0,0x0,0x70,0x42,0x0,0x0,0x70,0x42,0x0,0x0,0x70,0x42,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x80,0x3f,0x0,0x0,0xc8,0x41,0xe8,0x3,0x0,0x0,0xa,0x0,0x0,0x0,
0x1e,0x0,0x0,0x0,0x78,0x0,0x0,0x0,0x32,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x80,0x3f,0x0,0x0,0x80,0x3f,0x0,0x0,0x80,0x3f,0x0,0x0,0x80,0x3f],[
# Record 205: Address = 0x7680, Length = 128
0x7680,0x80,
0x33,0xb3,0x33,0xb3,0x82,0x0,0xff,0x0,0x83,0x0,0xff,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0xa,0x0,0x32,0x0,0x64,0x0,0x0,0x0,0x32,0x0,0x32,0x0,
0x64,0x0,0x0,0x0,0x64,0x0,0x64,0x0,0x64,0x0,0x0,0x0,0x96,0x0,0x64,0x0,
0x64,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xd0,0x7,
0x0,0x0,0xff,0xff,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x80,0x2,0x0,0x3c,0x0,
0x0,0x0,0xff,0xff,0x92,0xcc,0x5c,0xbf,0x51,0x3,0x0,0xc0,0x2,0x0,0x3c,0x0,
0x0,0x0,0xff,0xff,0x92,0xcc,0x5c,0xbf,0xd1,0x1,0x0,0xc0,0x2,0x0,0x3c,0x0,
0x0,0x0,0xff,0xff,0x92,0xcc,0x5c,0xbf,0x11,0x2,0x0,0xc0,0x2,0x0,0x3c,0x0],[
# Record 206: Address = 0x7700, Length = 128
0x7700,0x80,
0x0,0x0,0xff,0xff,0x92,0xcc,0x5c,0xbf,0x91,0x0,0x0,0xc0,0x19,0x0,0x90,0x1,
0x0,0x0,0xff,0xff,0xda,0x73,0x1f,0xc1,0x32,0x0,0x0,0xe2,0x19,0x0,0x90,0x1,
0x0,0x0,0xff,0xff,0xda,0x73,0x1f,0xc1,0x32,0x0,0x0,0xe1,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[
# Record 207: Address = 0x7780, Length = 128
0x7780,0x80,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0xa0,0x40,
0x0,0x0,0x80,0x40,0x0,0x0,0x66,0x3f,0x0,0x0,0xc8,0x41,0x9f,0xd7,0x9b,0x9f,
0x8a,0x3,0x4,0x0,0x2,0x1,0xff,0x5f,0x7,0x1f,0x0,0x0,0x7f,0x0,0xff,0xff,
0x1e,0x0,0x1f,0x0,0x3f,0x78,0x1,0x7,0x8,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0],[
# Record 208: Address = 0x7800, Length = 128
0x7800,0x80,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x5,0x5,0x5,0x5,
0x5,0x5,0x5,0x5,0x5,0x5,0x5,0x5,0x4,0x0,0x0,0x0,0xa,0x32,0x0,0x64,
0x50,0x1,0x1,0x43,0x0,0x0,0x0,0x0,0x56,0x34,0x11,0x99,0x7,0x0,0x1,0x0,
0x2,0x10,0x3,0x10,0x4,0x10,0x5,0x10,0x6,0x20,0x7,0x20,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x2,
0x5,0x8,0xb,0xe,0x12,0x16,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x4,0x0,0xa,0x0,0x10,0x0,0x16,0x0,0x1c,0x0,0x24,0x0,0x2c,0x0],[
# Record 209: Address = 0x7880, Length = 128
0x7880,0x80,
0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,
0x0,0x0,0x0,0x0,0x61,0x78,0x74,0x3f,0xea,0x5f,0x7a,0x3d,0xf3,0x79,0x38,0x3d,
0x1,0x5a,0x70,0x3f,0xb0,0x70,0x4d,0x3f,0xaf,0xc5,0x99,0x3e,0x0,0x0,0x80,0x3f,
0x3e,0x3d,0x4a,0x3e,0x28,0x1d,0x33,0x3f,0x0,0x0,0x0,0x0,0x37,0xde,0x4d,0x3f,
0x43,0x75,0x7e,0x3f,0x0,0x0,0x80,0x3f,0x25,0x87,0x48,0x3e,0x4f,0x5e,0xc5,0x3b,
0x0,0x0,0x0,0x0,0x49,0xb1,0x3b,0x3f,0x72,0x49,0x15,0x3f,0x0,0x0,0x80,0x3f,
0x6f,0x9d,0x88,0x3e,0x1c,0x6d,0xd5,0x3e,0x0,0x0,0x0,0x0,0xa9,0x2b,0x18,0x3f,
0xc5,0xf6,0xf,0x3f,0x0,0x0,0x80,0x3f,0xaf,0xa8,0xcf,0x3e,0x75,0x12,0xe0,0x3e],[
# Record 210: Address = 0x7900, Length = 128
0x7900,0x80,
0x0,0x0,0x0,0x0,0xcb,0xda,0x71,0x3f,0x98,0x93,0x76,0x3f,0xf9,0x91,0x72,0x3f,
0x9f,0x1f,0x70,0x3f,0x4d,0x53,0x62,0x3d,0x7b,0xc6,0x16,0x3d,0x6a,0xe0,0x56,0x3d,
0x11,0x6,0x7e,0x3d,0x2d,0xd6,0x72,0x3f,0x1f,0xdf,0x6e,0x3f,0x69,0x94,0x5e,0x3f,
0x24,0x75,0x6c,0x3f,0x32,0x9d,0x52,0x3d,0x7,0x7,0x89,0x3d,0x5b,0xae,0x5,0x3e,
0xdc,0x56,0x9c,0x3d,0xb2,0x9b,0x8b,0x3a,0x3c,0x8f,0xf8,0x3e,0xe,0x39,0x74,0x3d,
0x1a,0x9c,0xa1,0x38,0x4b,0x5e,0xaf,0x3d,0xb0,0xb7,0xed,0x3b,0xe4,0xc9,0xc8,0x3d,
0x1d,0x1f,0xa0,0x32,0xd2,0x4b,0x98,0x3a,0x25,0x4c,0xdc,0x3d,0x66,0x50,0x7,0x3d,
0xe6,0xe6,0x80,0x32,0x26,0xd,0xa8,0x3d,0xac,0xb8,0x37,0x32,0x96,0x6a,0xac,0x3d],[
# Record 211: Address = 0x7980, Length = 128
0x7980,0x80,
0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,0x0,0x9e,0x3c,0x3e,0x0,0xc9,0x7f,0x3b,
0x2c,0xba,0x9d,0x3d,0x54,0xfa,0xed,0x39,0x30,0x4e,0xfa,0x3c,0x8d,0xac,0x8b,0x3d,
0x7a,0xad,0x71,0x36,0x78,0x45,0x18,0x32,0x10,0x96,0xdc,0x3d,0xc0,0xa3,0x6f,0x3d,
0xd5,0xa5,0x22,0x39,0x29,0x9b,0xc6,0x3d,0xc8,0x9c,0x54,0x33,0x79,0x81,0xa7,0x3e,
0x6c,0x96,0xe8,0x30,0xdc,0x24,0x34,0x28,0xe1,0xeb,0xfd,0x28,0x65,0x6c,0x8e,0x3d,
0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,
0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,
0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28,0xdc,0x24,0x34,0x28],[
# Record 212: Address = | |
% (len(set(compliment_cute)), max(compliment_cute)))
print ("DEBUG: compliment_funny Cnt: %d and max value %f" % (len(set(compliment_funny)), max(compliment_funny)))
print ("DEBUG: compliment_hot Cnt: %d and max value %f" % (len(set(compliment_hot)), max(compliment_hot)))
print ("DEBUG: compliment_list Cnt: %d and max value %f" % (len(set(compliment_list)), max(compliment_list)))
print ("DEBUG: compliment_more Cnt: %d and max value %f" % (len(set(compliment_more)), max(compliment_more)))
print ("DEBUG: compliment_note Cnt: %d and max value %f" % (len(set(compliment_note)), max(compliment_note)))
print ("DEBUG: compliment_photos Cnt: %d and max value %f" % (len(set(compliment_photos)), max(compliment_photos)))
print ("DEBUG: compliment_plain Cnt: %d and max value %f" % (len(set(compliment_plain)), max(compliment_plain)))
print ("DEBUG: compliment_profile Cnt: %d and max value %f" % (len(set(compliment_profile)), max(compliment_profile)))
print ("DEBUG: compliment_writer Cnt: %d and max value %f" % (len(set(compliment_writer)), max(compliment_writer)))
print ("DEBUG: cool Cnt: %d and max value %f" % (len(set(cool)), max(cool)))
print ("DEBUG: fans Cnt: %d and max value %f" % (len(set(fans)), max(fans)))
print ("DEBUG: funny Cnt: %d and max value %f" % (len(set(funny)), max(funny)))
print ("DEBUG: review_count Cnt: %d and max value %f" % (len(set(review_count)), max(review_count)))
print ("DEBUG: useful Cnt: %d and max value %f" % (len(set(useful)), max(useful)))
# output
save_file(user_id, "../data/yelp/yelp-dataset/user_id.txt")
def parse_review_feature(review_obj):
    """Extract the relevant fields of a review JSON object.

    The original version read every field into a local variable and then
    discarded them all (implicitly returning None). Return them instead so
    the function is actually usable; callers that ignored the previous
    None return are unaffected.

    Args:
        review_obj: dict parsed from one line of yelp_academic_dataset_review.json.

    Returns:
        dict mapping each extracted field name to its value.
    """
    return {
        'review_id': review_obj['review_id'],
        'user_id': review_obj['user_id'],
        'business_id': review_obj['business_id'],
        'stars': review_obj['stars'],
        'useful': review_obj['useful'],
        'cool': review_obj['cool'],
        'funny': review_obj['funny'],
        'date': review_obj['date'],
        'text': review_obj['text'],
    }
def filter_valid_review_obj(review_obj_list, min_star = 4):
    """ Keep only reviews with at least `min_star` stars.

    (The previous docstring said "Filter out reviews with no less than 4
    starts", which inverted the actual behavior: reviews meeting the
    threshold are KEPT.)

    Args:
        review_obj_list: iterable of review JSON objects with a 'stars' field.
        min_star: minimum star rating (inclusive) for a review to be kept.

    Returns:
        list of the review objects whose 'stars' >= min_star.
    """
    review_candidates = [obj for obj in review_obj_list if obj['stars'] >= min_star]
    ## DEBUG
    # print ("DEBUG: review_candidates after filter size: %s" % len(review_candidates))
    return review_candidates
def grouper(iterable, n, fillvalue=None):
    """Collect *iterable* into fixed-length chunks of size *n*.

    The final chunk is padded with *fillvalue* if the iterable's length is
    not a multiple of n. Returns an iterator of n-tuples.
    """
    shared = iter(iterable)
    # Passing the SAME iterator n times to zip_longest makes it pull n
    # consecutive items per output tuple.
    return zip_longest(*(shared for _ in range(n)), fillvalue=fillvalue)
def iter_file(filename, encoding = "utf-8", N = 1000):
    """Yield the lines of *filename* in groups of N.

    Each yielded value is a tuple of N lines; the last group is padded with
    empty strings by grouper(), so the length invariant always holds.
    """
    with codecs.open(filename, encoding = encoding) as handle:
        for batch in grouper(handle, N, ''):
            assert len(batch) == N
            yield batch
def prepare_dataset(review_obj_list, user_min_review_cnt = 20, negative_sample_ratio = 50, ratio = 0.8):
    """ Prepare the business review dataset.

    Args:
        review_obj_list: list of review JSON objects (each with 'user_id'
            and 'business_id').
        user_min_review_cnt: keep only users with at least this many reviews.
        negative_sample_ratio: negative samples drawn per positive sample.
        ratio: train/test split ratio (0.8 -> 80% train / 20% test).

    Returns:
        (dataset, train_dataset, test_dataset), where dataset maps
        user_id -> {'positive': [business_id, ...], 'negative': None}
        (negatives are only sampled inside split_dataset()).
    """
    # Count reviews per user, then keep only sufficiently active users.
    user_id_list_raw = [obj['user_id'] for obj in review_obj_list]
    user_business_cnt_dict = dict(Counter(user_id_list_raw))
    user_id_filter_list = [k for k, v in user_business_cnt_dict.items()
                           if v >= user_min_review_cnt]
    print ("DEBUG: user_id_filter_list after filter size is: %d" % len(user_id_filter_list))
    user_id_filter_set = set(user_id_filter_list)
    user_business_tuple = []
    cnt = 0
    for obj in review_obj_list:
        cnt += 1
        if cnt % 100000 == 0:
            print ("DEBUG: Processing Review Obj Number: %d" % cnt)
        user_id = obj['user_id']
        business_id = obj['business_id']
        if user_id in user_id_filter_set:
            user_business_tuple.append((user_id, business_id))
    print ("DEBUG: User-Business Interaction num is %d" % len(user_business_tuple))
    # Sort by user id so each user's interactions are contiguous.
    user_business_tuple_sorted = sorted(user_business_tuple, key = lambda x: x[0])
    business_id_list = list(set([i for u, i in user_business_tuple_sorted]))
    # Group contiguous (user, business) pairs into user -> [business, ...].
    user_business_interaction_dict = {}
    if user_business_tuple_sorted:  # guard: empty input used to crash on [0][0]
        cur_user_id = user_business_tuple_sorted[0][0]
        cur_business_id = []
        cnt = 0
        for user_id, business_id in user_business_tuple_sorted:
            cnt += 1
            if cnt % 100000 == 0:
                print ("DEBUG: Processing line %d" % cnt)
            if cur_user_id != user_id:
                user_business_interaction_dict[cur_user_id] = cur_business_id
                cur_business_id = []
                cur_user_id = user_id
            cur_business_id.append(business_id)
        # BUG FIX: flush the final user's accumulated businesses. The
        # original loop only stored a user's list when the NEXT user was
        # seen, so the last user was silently dropped from the dataset.
        user_business_interaction_dict[cur_user_id] = cur_business_id
    ## Generate positive (and placeholder negative) examples per user.
    dataset = {}
    cnt = 0
    for user_id in user_business_interaction_dict.keys():
        cnt += 1
        if cnt % 100000 == 0:
            print ("DEBUG: Generating Dataset line %d" % cnt)
        dataset[user_id] = {}
        dataset[user_id]['positive'] = user_business_interaction_dict[user_id]
        dataset[user_id]['negative'] = None  # filled in by split_dataset()
    # sample negative samples
    train_dataset, test_dataset = split_dataset(dataset, business_id_list, negative_sample_ratio, ratio)
    return dataset, train_dataset, test_dataset
def calculate_dataset_statistics(dataset):
    """Print user / business / interaction counts for *dataset*.

    dataset: dict user_id -> {'positive': [business_id, ...], ...}.
    Purely diagnostic; returns None.
    """
    all_business_ids = []
    for entry in dataset.values():
        all_business_ids.extend(entry['positive'])
    print ("DEBUG: User Instance Cnt %d" % len(dataset))
    print ("DEBUG: Business Instance Cnt %d" % len(set(all_business_ids)))
    print ("DEBUG: Interaction Cnt %d" % len(all_business_ids))
def sample_negative(negative_sample, number):
    """Randomly draw *number* items from *negative_sample* without replacement.

    Callers (split_dataset) pass a set, but random.sample() on a set was
    deprecated in Python 3.9 and raises TypeError on 3.11+. Materialize a
    sorted sequence first; sorting also makes the draw reproducible under a
    fixed random seed regardless of set iteration order.
    """
    return random.sample(sorted(negative_sample), number)
def split_dataset(dataset, business_id_list, negative_sample_ratio = 5, ratio = 0.8):
    """Split each user's positive interactions into train/test and sample negatives.

    Args:
        dataset: dict user_id -> {'positive': [business_id, ...], ...}.
        business_id_list: all candidate business ids (negatives are drawn
            from those the user did NOT interact with).
        negative_sample_ratio: negatives drawn per positive sample.
        ratio: fraction of positives assigned to the training split.

    Returns:
        (train_dataset, test_dataset), each dict user_id ->
        {'positive': [...], 'negative': [...]}.
    """
    train_dataset, test_dataset = {}, {}
    index = 0
    business_id_set = set(business_id_list)
    for user_id in dataset.keys():
        index += 1
        if (index % 1000 == 0):
            print ("DEBUG: Split Dataset Lines %d" % index)
        # BUG FIX: copy before shuffling — the original shuffled the
        # caller's list in place, silently reordering dataset[user_id]['positive'].
        positive_sample = list(dataset[user_id]['positive'])
        negative_sample = (business_id_set - set(positive_sample))
        np.random.shuffle(positive_sample)
        split_size = int(ratio * len(positive_sample))
        n_train_pos = split_size
        n_test_pos = len(positive_sample) - split_size
        # Cap the request so random.sample never asks for more items than
        # the negative pool contains (would raise ValueError).
        n_train_neg = min(n_train_pos * negative_sample_ratio, len(negative_sample))
        n_test_neg = min(n_test_pos * negative_sample_ratio, len(negative_sample))
        train_dataset[user_id] = {}
        train_dataset[user_id]['positive'] = positive_sample[0:split_size]
        train_dataset[user_id]['negative'] = sample_negative(negative_sample, n_train_neg)
        test_dataset[user_id] = {}
        test_dataset[user_id]['positive'] = positive_sample[split_size:len(positive_sample)]
        test_dataset[user_id]['negative'] = sample_negative(negative_sample, n_test_neg)
    return train_dataset, test_dataset
def split_array(feature_str, sep = ","):
    """Parse a separator-delimited numeric string into a float ndarray.

    Whitespace around each token is ignored, e.g. "1, 2 ,3" -> array([1., 2., 3.]).
    """
    return np.array([float(token.strip()) for token in feature_str.split(sep)])
def get_user_obj_dict(datafolder):
    """ Read file yelp_academic_dataset_user.json and index users by user_id.

    Returns a dict user_id -> user JSON object.
    """
    user_json_path = os.path.join(datafolder, "yelp_academic_dataset_user.json")
    print ("DEBUG: Start Reading User Data from file %s" % user_json_path)
    user_obj_list = read_json_line(user_json_path)
    indexed_users = {obj['user_id']: obj for obj in user_obj_list}
    print ("DEBUG: Finish Reading User Data lines %d" % len(user_obj_list))
    return indexed_users
def get_business_obj_dict(business_obj_list):
    """ Index the business objects by their 'business_id' field.

    Returns a dict business_id -> business JSON object.
    """
    return {obj['business_id']: obj for obj in business_obj_list}
def get_business_obj_list(datafolder):
    """ Read file yelp_academic_dataset_business.json business json object as list
    """
    path = os.path.join(datafolder, "yelp_academic_dataset_business.json")
    print ("DEBUG: Start Reading Business Data from file %s" % path)
    businesses = read_json_line(path)
    print ("DEBUG: Finish Reading Business Data lines %d" % len(businesses))
    return businesses
def get_user_obj_list(datafolder):
    """ Read file yelp_academic_dataset_user.json user json object as list
    """
    path = os.path.join(datafolder, "yelp_academic_dataset_user.json")
    print ("DEBUG: Start Reading User Data from file %s" % path)
    users = read_json_line(path)
    print ("DEBUG: Finish Reading User Data lines %d" % len(users))
    return users
def get_user_item_interaction(dataset):
    """ Build the forward (user -> items) and inverted (item -> users) indices.

    Args:
        dataset: dict user_id -> {'positive': [business_id, ...], ...}.

    Returns:
        (user_item_id_dict, item_user_id_dict): the forward index maps each
        user to their positive item ids; the inverted index maps each item
        to the users who interacted with it.
    """
    user_item_id_dict = {}
    item_user_id_dict = {}
    cnt = 0
    for uid in dataset.keys():
        cnt += 1
        if (cnt % 1000 == 0):
            print ("DEBUG: Output Cnt %d" % cnt)
        item_ids = dataset[uid]['positive']
        # uid-item_id
        user_item_id_dict[uid] = item_ids
        for item_id in item_ids:
            # setdefault replaces the original "in dict.keys()" membership
            # test, avoiding a double lookup per item.
            item_user_id_dict.setdefault(item_id, []).append(uid)
    print ("DEBUG: Generating User Item Id Dict Size %d" % len(user_item_id_dict))
    print ("DEBUG: Generating Item User Id Dict Size %d" % len(item_user_id_dict))
    return user_item_id_dict, item_user_id_dict
def generate_pretrain_examples(datafolder, train_dataset, dataset_name, user_obj_dict, business_obj_dict,
                               sparse_id_dict, user_item_id_dict, item_user_id_dict):
    """ Generate (user, positive business) pre-training examples and pickle them.

    Args:
        datafolder: output folder; the pickle is written to datafolder/dataset_name.
        train_dataset: dict user_id -> {'positive': [business_id, ...], ...}.
        dataset_name: output file name.
        user_obj_dict: user_id -> user JSON object.
        business_obj_dict: business_id -> business JSON object.
        sparse_id_dict: id string -> sparse integer index (DEFAULT_INDEX when missing).
        user_item_id_dict: user_id -> list of interacted business ids.
        item_user_id_dict: business_id -> list of interacting user ids.

    Returns:
        list of tuples (user_index, u_dense_feature, business_index,
        b_sparse_feature, b_dense_features, u_i_ids_sparse, i_u_ids_sparse).
    """
    pretrain_dataset = []
    row = 0
    for uid in train_dataset.keys():
        row += 1
        if (row % 100 == 0):
            print ("DEBUG: Processing row %d" % row)
        positive_sample = train_dataset[uid]['positive']
        user_obj = user_obj_dict[uid]
        user_index, u_dense_feature = parse_user_feature(user_obj, sparse_id_dict)
        # .get(key, []) replaces the original "x.get(k) if k in x else []"
        # double lookup.
        u_i_ids = user_item_id_dict.get(uid, [])
        u_i_ids_sparse = [sparse_id_dict.get(bid, DEFAULT_INDEX) for bid in u_i_ids]
        for business_id in positive_sample:
            business_obj = business_obj_dict[business_id]
            business_index, b_sparse_feature, b_dense_features = parse_business_feature(business_obj, sparse_id_dict)
            i_u_ids = item_user_id_dict.get(business_id, [])
            # Renamed the loop variable (was `uid`, shadowing the outer user id).
            i_u_ids_sparse = [sparse_id_dict.get(u, DEFAULT_INDEX) for u in i_u_ids]
            pretrain_dataset.append((user_index, u_dense_feature, business_index, b_sparse_feature, b_dense_features,
                                     u_i_ids_sparse, i_u_ids_sparse))
    example_path = os.path.join(datafolder, dataset_name)
    print ("DEBUG: Exporting model to below path %s" % example_path)
    with io.open(example_path, 'wb') as output_file:
        pickle.dump(pretrain_dataset, output_file)
    return pretrain_dataset
def generate_examples_batch(datafolder, dataset, dataset_name,
user_obj_dict, business_obj_dict,
sparse_id_dict,
user_item_id_dict,
item_user_id_dict,
NS = 50, save_every_num = 10000):
""" generate training examples features and testing examples features
Return: list of tuples
(label, user_index, u_dense_feature, business_index, b_sparse_feature, b_dense_features)
sparse_id_dict: user_id, shop_id, category_id dict
"""
# training dataset
examples = []
row = 0
part_id = 0
example_cnt = 0
for uid in dataset.keys():
row += 1
if (row % 100 == 0):
print ("DEBUG: Processing %s Data Row %d" % (dataset_name, row))
positive_sample = dataset[uid]['positive']
negative_sample = dataset[uid]['negative']
user_obj = user_obj_dict[uid]
user_index, u_dense_feature = parse_user_feature(user_obj, sparse_id_dict)
u_i_ids = user_item_id_dict.get(uid) if uid in user_item_id_dict else []
u_i_ids_sparse = [sparse_id_dict.get(bid, DEFAULT_INDEX) for bid in u_i_ids]
# Positive Samples
for i, pos_bid in enumerate(positive_sample):
example_cnt += 1
pos_b_obj = business_obj_dict[pos_bid]
pos_business_index, pos_b_sparse_feature, pos_b_dense_features = | |
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
from typing import Iterable, List, Mapping, Optional, Tuple, Union
import popart._internal.ir as _ir
from popart.ir.context import get_current_context, op_debug_context
from popart.ir.graph import Graph
from popart.ir.tensor import Tensor
from .utils import check_in_graph
__all__ = ['repeat']
# TODO: T49287 add a repeat_with_info
@op_debug_context
def repeat(repeat_subgraph: Graph,
           repeat_trip_count: int,
           *subgraph_fn_param_inputs: Tensor,
           subgraph_in_to_parent_in: Optional[Mapping[Tensor, Tensor]] = None
           ) -> Union[None, Tensor, Tuple[Tensor, ...]]:
    """
    Repeat Op: An op that repeats a subgraph with the provided input tensors
    `repeat_trip_count` number of times.
    Implementation detail: In order to maintain the input / output indices of the subgraph, we must
    call the user provided subgraph, and create a "middle" subgraph to repeat the user provided
    subgraph inside:
    # yapf: disable
             LoopOp         Keep
               │            Going     User
               │  Iterator    │      Inputs
               │     │        │      │  │  │
               ▼     ▼        ▼      ▼  ▼  ▼
           ┌─Wrapper_subgraph──┬─┬─┬──────────┐
     Parent graph  │           │ │ │          │
                   │           │ │ │          │
                   │           │ │ │          │
                   │           ▼ ▼ ▼          │
                   │  CallOp ┌─Loop_subgraph───┐ │
                   │    │    │ (user provided) │ │
                   │    └──►│                 │ │
                   │         │  (Ops)          │ │
                   │         │                 │ │
                   │         │                 │ │
                   │         └──────────┬─┬─┬──┘ │
                   │                    │ │ │    │
                   │                    ▼ ▼ ▼    │
                   └───┬───────────────┬─┬─┬──────────┘
                       │               │ │ │
                       │               │ │ │
                       ▼               ▼ ▼ ▼
                      Keep           User outputs
                      Going
    # yapf: enable
    Args:
        repeat_subgraph (Graph): User defined graph to repeat `repeat_trip_count` times.
        repeat_trip_count (int): Number of times to repeat the subgraph.
        subgraph_fn_param_inputs (Tensor):
            Parent-graph tensors passed positionally; they must be given in
            the same order as the corresponding subgraph inputs were created
            (see _setup_inputs).
        subgraph_in_to_parent_in (Optional[Mapping[Tensor, Tensor]]):
            Mapping of `subgraph tensor -> parent tensor` that corresponds to
            the inputs that the callable defined internally, e.g. by using
            popart.ir.subgraph_input. Defaults to an empty dictionary.
            Works effectively the same as the call op's `subgraph_in_to_parent_in` argument.
    Throws:
        ValueError: If repeat_trip_count <= 1.
        ValueError: If the number of explicitly passed inputs + the number of loop created inputs
            != the number of outputs.
    Returns:
        None: If `subgraph` has no output tensors.
        Tensor:
            The output tensor of the call in the parent graph, if `subgraph` has
            exactly 1 output.
        Tuple[Tensor, ...]:
            Tuple of the output tensors of the call in the parent graph, if
            #subgraph has >1 outputs. The tensors will be in ascending order of
            the graph output index of the corresponding subgraph tensor.
    Example:
        ```
        # popart.ir.Module to repeat
        class AddWeight(pir.Module):
            def __init__(self):
                self.w: pir.Tensor = None
            def build(self, x):
                self.w = pir.subgraph_input(x.shape, x.dtype, "w")
                return self.w + x, self.w
        with g: # a graph
            add_weight0 = AddWeight()
            add_weight_graph0 = ir.create_graph(add_weight0, x0)
            # repeat 8 times
            y0, w0 = ops.repeat(add_weight_graph0,
                                8,
                                x0,
                                subgraph_in_to_parent_in={add_weight0.w: w0})
        ```
    """
    # A loop of 0 or 1 iterations is not a repeat; reject it up front.
    if repeat_trip_count <= 1:
        raise ValueError(
            f"Repeat trip count for repeat of {repeat_subgraph.name} "
            f"of {repeat_trip_count} must be > 1.")
    subgraph_in_to_parent_in = subgraph_in_to_parent_in if (
        subgraph_in_to_parent_in is not None) else {}
    # For clarity, we rename our graphs:
    # - Bottom: The user provided bottom level graph. We call this with a call op. This has gone
    #   through the create_graph procedure, so we do not need to add subgraph ins/outs.
    # - Middle: The graph we create to wrap the bottom graph. We repeat this. This has not gone
    #   through the create_graph procedure, so we can add subgraph inputs (including the repeat
    #   iterator and condition) as needed.
    # - Top: The graph we add the repeat to, and the current graph in the context. Potentially
    #   can be the main graph.
    ctx = get_current_context()
    top_graph = ctx.graph
    ir = top_graph.ir()
    pb_ir = ir._pb_ir
    pb_top_graph = top_graph._pb_graph
    # This is the graph we will call.
    bottom_graph = repeat_subgraph
    pb_bottom_graph = bottom_graph._pb_graph
    # The loop op requires the same number of inputs as outputs.
    if len(subgraph_fn_param_inputs) + len(subgraph_in_to_parent_in) != len(
            pb_bottom_graph.getOutputIds()):
        raise ValueError(
            f"The number of explicitly passed inputs ({len(subgraph_fn_param_inputs)}):"
            f" {[t.id for t in subgraph_fn_param_inputs]}\n"
            f" + the number of loop created inputs ({len(subgraph_in_to_parent_in)}):"
            f" {[t.id for t in subgraph_in_to_parent_in.values()]}\n"
            f" must equal the number of outputs ({len(pb_bottom_graph.getOutputIds())}):"
            f" {pb_bottom_graph.getOutputIds()}")
    # Create the middle graph, call and loop ops.
    pb_middle_graph, pb_callop, pb_loop_op = _setup_call_and_repeat(
        pb_ir, pb_top_graph, pb_bottom_graph)
    # Set the number of times to loop.
    pb_loop_op.setTripCountValue(repeat_trip_count)
    # Check all the parent tensors are in the right graph.
    for _, parent_tensor in subgraph_in_to_parent_in.items():
        check_in_graph(top_graph, parent_tensor)
    # 1, 2. Connect inputs (explicit positionals first, then internally
    # created ones — see _setup_inputs for the index bookkeeping).
    _setup_inputs(subgraph_fn_param_inputs, subgraph_in_to_parent_in,
                  pb_top_graph, pb_bottom_graph, pb_middle_graph, pb_callop,
                  pb_loop_op)
    # 3. Connect outputs.
    outnames = _setup_outputs(pb_top_graph, pb_bottom_graph, pb_middle_graph,
                              pb_callop, pb_loop_op)
    # setup() finalizes the ops once all inputs/outputs are connected.
    pb_callop.setup()
    pb_loop_op.setup()
    out_tensors = [
        Tensor._from_pb_tensor(pb_top_graph.getTensor(out)) for out in outnames
    ]
    # Return nothing if no outputs.
    if len(out_tensors) == 0:
        return None
    # Return single tensor if only one output.
    if len(out_tensors) == 1:
        return out_tensors[0]
    # Return tuple of output tensors if multiple outputs.
    else:
        return tuple(out_tensors)
# Design point: For simplicity all of the below functions only take _ir level objects as arguments.
def _setup_call_and_repeat(pb_ir: _ir.Ir, pb_top_graph: _ir.Graph,
                           pb_bottom_graph: _ir.Graph
                           ) -> Tuple[_ir.Graph, _ir.op.CallOp, _ir.op.LoopOp]:
    """Setup the call and repeat ops, as well as the middle graph that the loop op will loop.

    Args:
        pb_ir (_ir.Ir): The _ir level Ir
        pb_top_graph (_ir.Graph): The _ir top level graph that will contain the loop op.
        pb_bottom_graph (_ir.Graph): The _ir user defined subgraph that will be called.
    Returns:
        Tuple[_ir.Graph, _ir.op.CallOp, _ir.op.LoopOp]: The created _ir-level middle graph, call op
        and loop op.
    """
    # This is the graph we will repeat (wraps the user's bottom graph).
    pb_middle_graph = pb_ir.createGraph(
        _ir.GraphId(
            pb_ir.createUniqueSubgraphId(
                f"{pb_bottom_graph.id.str()}__loop_wrapper")))
    opid = _ir.OperatorIdentifier("ai.graphcore", "Call", 1, _ir.NumInputs(),
                                  0)
    op_name = pb_middle_graph.id.str() + '__call__' + pb_bottom_graph.id.str()
    ctx = get_current_context()
    # Call the bottom_graph from inside the middle graph.
    pb_callop = pb_middle_graph.createOp_CallOp(opid, pb_bottom_graph,
                                                ctx._get_op_settings(op_name))
    opid = _ir.OperatorIdentifier("ai.onnx", "Loop", 11, _ir.NumInputs(), 0)
    op_name = pb_top_graph.id.str() + '__loop__' + pb_middle_graph.id.str()
    # Loop the middle_graph from the top graph.
    pb_loop_op = pb_top_graph.createOp_LoopOp(opid,
                                              ctx._get_op_settings(op_name),
                                              pb_middle_graph)
    # Add mandatory loop iterator tensor to subgraph (is not an output).
    # INT32 scalar, per the ONNX Loop op's iteration-count input.
    repeatIterId = _ir.addScope(pb_middle_graph, "Iterator___")
    pb_middle_graph.addInput(repeatIterId,
                             _ir.TensorInfo(_ir.DataType.INT32, ()))
    # Add mandatory loop condition tensor to subgraph (is also an output) —
    # the loop body must re-emit the BOOL keep-going condition each iteration.
    repeatCondId = _ir.addScope(pb_middle_graph, "LoopCond___")
    pb_middle_graph.addInput(repeatCondId, _ir.TensorInfo(
        _ir.DataType.BOOL, ()))
    pb_middle_graph.markAsOutput(repeatCondId)
    return pb_middle_graph, pb_callop, pb_loop_op
def _setup_inputs(subgraph_fn_param_inputs: Iterable[Tensor],
                  subgraph_in_to_parent_in: Mapping[Tensor, Tensor],
                  pb_top_graph: _ir.Graph, pb_bottom_graph: _ir.Graph,
                  pb_middle_graph: _ir.Graph, pb_callop: _ir.op.CallOp,
                  pb_loop_op: _ir.op.LoopOp) -> None:
    """Do the following:

    1. Connect explicitly passed inputs. These would have been created first
       by ir.get_graph, so we do them first. ir.get_graph will have created
       the input tensors t_0,...,t_N at input indices 0,..,N, respectively. We
       require that the user has passed the parent tensors that correspond to
       these inputs in the exact same order, so we can trivially reconstruct
       the input indices here.
    2. Connect internally created inputs.

    Args:
        subgraph_fn_param_inputs (Iterable[Tensor]): User defined explicit inputs.
        subgraph_in_to_parent_in (Mapping[Tensor, Tensor]):
            Mapping of `subgraph tensor -> parent tensor` that corresponds to
            the inputs that the callable defined internally, e.g. by using
            popart.ir.subgraph_input. Defaults to an empty dictionary.
            Works effectively the same as the call op's `subgraph_in_to_parent_in` argument.
        pb_top_graph (_ir.Graph): Top _ir graph
        pb_bottom_graph (_ir.Graph): Bottom _ir Graph
        pb_middle_graph (_ir.Graph): Middle _ir Graph
        pb_callop (_ir.op.CallOp): Previously created Call op
        pb_loop_op (_ir.op.LoopOp): Previously created Loop op
    """
    # Note: Only bottom_graph (which is called) has gone through the ir.get_graph process.
    # middle_graph (intentionally) has not, so we need to add loop inputs/outputs.

    # 1. Connect explicitly passed inputs, in user-supplied order.
    # User defined indices start at 2 for loop ops.
    # (The first two loop inputs are reserved — _setup_call_and_repeat added
    # the mandatory iterator and loop-condition tensors to the subgraph.)
    sgInIdx = 0
    for t in subgraph_fn_param_inputs:
        callInIdx = pb_callop.subgraphInToOpInIndex(sgInIdx)
        # Note the + 2 here
        pb_loop_op.addLoopInput(sgInIdx + 2,
                                _ir.addScope(pb_top_graph, t.name),
                                _ir.addScope(pb_middle_graph, t.name), False)
        pb_callop.connectInTensor(callInIdx,
                                  _ir.addScope(pb_middle_graph, t.name))
        sgInIdx += 1

    # 2. Connect internally created inputs.
    for sg_tensor, parent_tensor in subgraph_in_to_parent_in.items():
        # Index of this tensor among the bottom (called) graph's inputs.
        sgInIdx = pb_bottom_graph.getInputIndex(sg_tensor.id)
        callInIdx = pb_callop.subgraphInToOpInIndex(sgInIdx)
        top_tensor_id = _ir.addScope(pb_top_graph, parent_tensor.id)
        # The loop input in the top graph feeds the tensor re-scoped from the
        # bottom graph into the middle graph.
        pb_loop_op.addLoopInput(
            sgInIdx + 2, top_tensor_id,
            _ir.addScope(pb_middle_graph,
                         _ir.removeScope(pb_bottom_graph, sg_tensor.id)),
            False)
        set_input_modified(pb_loop_op, pb_loop_op.inTensor(sgInIdx + 2))
        pb_callop.connectInTensor(
            callInIdx,
            _ir.addScope(pb_middle_graph,
                         _ir.removeScope(pb_bottom_graph, sg_tensor.id)))
        set_input_modified(pb_callop, pb_callop.inTensor(callInIdx))
def _setup_outputs(pb_top_graph: _ir.Graph, pb_bottom_graph: _ir.Graph,
pb_middle_graph: _ir.Graph, pb_callop: _ir.op.CallOp,
pb_loop_op: _ir.op.LoopOp) -> List[str]:
"""3. Connect outputs. We introspect the subgraph to get its outputs then,
for each one, create an output tensor of the call op in the middle
graph.
Args:
pb_top_graph (_ir.Graph): Top _ir graph
pb_bottom_graph (_ir.Graph): Bottom _ir Graph
pb_middle_graph (_ir.Graph): Middle _ir Graph
pb_callop (_ir.op.CallOp): Previously created Call op
pb_loop_op (_ir.op.LoopOp): Previously created Loop | |
0xab, 0x94, 0x53,
0xb1, 0x4a, 0x79, 0x24, 0xf8, 0x4a, 0xb1, 0x43,
0xf3, 0x42, 0xb1, 0x47, 0x83, 0x2d, 0x91, 0x4f,
0x82, 0xc4, 0x91, 0x4f, 0xa2, 0xc4, 0x81, 0x4f,
0xa2, 0x64, 0x15, 0x4f, 0xa2, 0x64, 0x19, 0x2e,
0x4a, 0x9e, 0x25, 0x2e, 0x5a, 0x9e, 0x24, 0x2e,
0x4a, 0x1f, 0x49, 0xe2, 0x22, 0xf5, 0x91, 0x24,
0x87, 0x22, 0x1f, 0x49, 0x32, 0x48, 0x1f, 0x49,
0x36, 0x6a, 0x1f, 0x48, 0xb2, 0x6a, 0xf1, 0x81,
0x24, 0xab, 0x96, 0x1b, 0x28, 0xab, 0x94, 0x51,
0xab, 0x94, 0x43, 0xba, 0x4a, 0x39, 0x24, 0x2b,
0x94, 0x47, 0x8a, 0x2b, 0xb4, 0x4f, 0x82, 0xa4,
0xb4, 0x4f, 0xa2, 0xf4, 0xbc, 0x63, 0xe0, 0x22,
0xd4, 0xc1, 0x64, 0x22, 0x1f, 0x41, 0x34, 0x68,
0x15, 0x34, 0x6a, 0x15, 0xb4, 0x62, 0x51, 0x41,
0xaf, 0x46, 0xd9, 0x41, 0xb2, 0x6a, 0x99, 0x25,
0x2b, 0x84, 0x41, 0x2f, 0x14, 0x1b, 0xf4, 0x42,
0xa1, 0x47, 0x82, 0x2d, 0xa1, 0x67, 0x82, 0x2d,
0xb1, 0x4f, 0xa2, 0xc1, 0xa1, 0x4f, 0xa2, 0xed,
0x11, 0xfa, 0x24, 0x4a, 0x9e, 0x21, 0xa4, 0x8e,
0x25, 0xbc, 0x64, 0x59, 0xa4, 0x1f, 0xc8, 0x42,
0xfa, 0x91, 0x44, 0x85, 0xf2, 0x91, 0x64, 0x81,
0x17, 0x49, 0xa1, 0x1f, 0x48, 0x12, 0x5a, 0x41,
0xaf, 0x44, 0xd1, 0x41, 0xb2, 0x4a, 0x1b, 0x95,
0xaa, 0x43, 0x92, 0x8a, 0x41, 0x2b, 0x84, 0x47,
0x82, 0x2b, 0x84, 0x65, 0xb8, 0x42, 0xf8, 0x24,
0x1a, 0x9f, 0xa5, 0x0c, 0x20, 0x08, 0x00, 0x00,
0x00, 0x00, 0x44, 0x80, 0x02, 0x14, 0x00, 0x00,
0x00, 0x21, 0x00, 0x80, 0x08, 0x00, 0x00, 0x14,
0x00, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44,
0x00, 0x00, 0x00, 0x80, 0x02, 0x50, 0x92, 0xf0,
0x4c, 0x5c, 0x20, 0x81, 0x01, 0x12, 0x18, 0x81,
0x20, 0x83, 0x21, 0x22, 0x01, 0x20, 0x41, 0x08,
0x20, 0x04, 0x1a, 0x82, 0x21, 0x02, 0x6a, 0x08,
0x80, 0x86, 0x02, 0xa2, 0x18, 0x12, 0x00, 0x1a,
0x04, 0x80, 0xa8, 0x21, 0x00, 0x00, 0x68, 0x80,
0xa2, 0x81, 0x00, 0x42, 0x32, 0x48, 0x48, 0x22,
0x28, 0x22, 0x20, 0x04, 0xcf, 0x73, 0x07, 0x20,
0x0a, 0x00, 0x32, 0x2a, 0x01, 0x84, 0x82, 0x20,
0x42, 0xb8, 0x48, 0x08, 0x82, 0x12, 0x10, 0xa8,
0x84, 0x48, 0x00, 0x00, 0x28, 0x80, 0xa1, 0x43,
0x00, 0x28, 0x2a, 0x21, 0x22, 0x22, 0x01, 0x22,
0x20, 0x2a, 0x03, 0x28, 0x20, 0x22, 0x21, 0x02,
0x42, 0x80, 0x4c, 0x01, 0xc8, 0x42, 0x48, 0x6a,
0x48, 0x81, 0xe8, 0x52, 0x01, 0x12, 0xd2, 0x20,
0x21, 0x61, 0x82, 0x1a, 0x82, 0x81, 0xa1, 0x28,
0x41, 0x22, 0x84, 0x46, 0x98, 0x28, 0x82, 0x18,
0x48, 0x84, 0xc8, 0x46, 0x02, 0xa0, 0xa1, 0xa0,
0x28, 0x48, 0x22, 0x12, 0x82, 0x18, 0x19, 0xa1,
0x12, 0x28, 0x8b, 0x16, 0x18, 0x42, 0x2a, 0xa8,
0x12, 0x8a, 0xa2, 0x12, 0x00, 0x22, 0x30, 0x34,
0x2a, 0x28, 0x82, 0x84, 0xa4, 0xa4, 0x48, 0x42,
0xa8, 0x42, 0x4e, 0x22, 0x4a, 0x68, 0x14, 0x1a,
0xf8, 0x47, 0xe4, 0x00, 0x28, 0x00, 0x20, 0x01,
0x28, 0x81, 0x80, 0x01, 0x20, 0x09, 0x00, 0x00,
0x32, 0x18, 0x22, 0x80, 0x04, 0x80, 0x82, 0xa1,
0x24, 0x22, 0x20, 0x02, 0x00, 0x00, 0x00, 0x00,
0x80, 0x22, 0x84, 0x02, 0x12, 0x00, 0x20, 0x11,
0x01, 0x42, 0x22, 0x20, 0x02, 0xf0, 0xdc, 0x9c,
0x00, 0x28, 0x00, 0x80, 0x82, 0x21, 0x01, 0x80,
0x08, 0x18, 0x00, 0x84, 0x00, 0x00, 0x42, 0x00,
0x43, 0x02, 0x00, 0x00, 0x00, 0x18, 0x42, 0x00,
0x22, 0x80, 0x41, 0x04, 0x00, 0x20, 0x22, 0x21,
0x04, 0x00, 0x80, 0x04, 0x12, 0x20, 0x02, 0x48,
0x42, 0x22, 0x2f, 0x32, 0x0a, 0x22, 0x00, 0x20,
0x01, 0x28, 0x80, 0xa2, 0x12, 0x20, 0x03, 0x18,
0x20, 0x81, 0xa8, 0x14, 0x80, 0x04, 0x52, 0x00,
0x14, 0x28, 0x22, 0x58, 0xc2, 0x18, 0x4a, 0x22,
0x02, 0x8c, 0x02, 0x00, 0x00, 0x8b, 0x12, 0x22,
0x12, 0x00, 0x1b, 0x48, 0x22, 0x00, 0x11, 0x00,
0x60, 0x12, 0x22, 0x20, 0x42, 0x01, 0x42, 0x4f,
0xd8, 0x09, 0x28, 0x28, 0x00, 0x80, 0x22, 0x01,
0x28, 0x18, 0x88, 0x28, 0x20, 0x01, 0x20, 0x88,
0x04, 0x80, 0x84, 0x04, 0x10, 0x81, 0x02, 0x4a,
0x21, 0x08, 0x22, 0x28, 0x10, 0x08, 0x28, 0x00,
0x41, 0x8c, 0x81, 0x02, 0x80, 0xb2, 0x41, 0xa9,
0x24, 0x00, 0x11, 0x20, 0x04, 0x2e, 0x11, 0x28,
0x22, 0x22, 0x14, 0x80, 0xb6, 0xc1, 0x02, 0x00,
0x00, 0x18, 0x20, 0x22, 0x01, 0x28, 0x80, 0x21,
0x01, 0x60, 0x81, 0x80, 0x01, 0x48, 0x20, 0x01,
0x26, 0x04, 0x20, 0x02, 0x42, 0x12, 0x48, 0x18,
0x42, 0x28, 0x00, 0x20, 0x01, 0x22, 0x80, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x24,
0x04, 0x97, 0xcb, 0x00, 0x20, 0x82, 0x21, 0x01,
0x80, 0x01, 0x42, 0x40, 0x08, 0x30, 0x14, 0x00,
0x00, 0x20, 0x04, 0x80, 0x44, 0x88, 0x02, 0x00,
0x43, 0x04, 0x00, 0x20, 0x02, 0x80, 0x02, 0x00,
0x80, 0x01, 0x42, 0x48, 0x80, 0x12, 0x08, 0x10,
0x28, 0x0c, 0x62, 0x22, 0x42, 0x22, 0x4a, 0xf1,
0x7a, 0xf9, 0x20, 0x22, 0x04, 0x00, 0x22, 0x30,
0x88, 0x28, 0x28, 0x88, 0x80, 0x23, 0x08, 0x48,
0x84, 0xa0, 0x81, 0x48, 0x00, 0x12, 0x18, 0x28,
0x00, 0x22, 0x22, 0x00, 0x48, 0x28, 0x1a, 0x04,
0x20, 0x06, 0x1a, 0x08, 0x18, 0x00, 0x20, 0x22,
0x08, 0xa8, 0x88, 0xa0, 0xc2, 0x18, 0x20, 0x0c,
0x00, 0x48, 0x38, 0x8f, 0x31, 0x03, 0x12, 0x18,
0x00, 0x00, 0x20, 0x08, 0x11, 0x52, 0x00, 0xa4,
0x20, 0x08, 0x00, 0x18, 0x22, 0x88, 0x48, 0x00,
0xa0, 0x41, 0x00, 0x20, 0xa2, 0x21, 0x80, 0x88,
0x91, 0x92, 0x00, 0x80, 0x06, 0x28, 0x28, 0x82,
0x00, 0x10, 0x08, 0x00, 0x00, 0x18, 0x00, 0x8a,
0x02, 0xf0, 0xc7, 0x6e, 0x80, 0x81, 0x01, 0x00,
0x28, 0x80, 0x01, 0x1b, 0x24, 0x42, 0x84, 0x12,
0x89, 0x28, 0x81, 0x18, 0x08, 0x92, 0x48, 0x2a,
0x84, 0x88, 0x25, 0x24, 0x02, 0x42, 0x28, 0x60,
0x44, 0x20, 0x84, 0xa2, 0x48, 0x2c, 0x81, 0x01,
0x28, 0x98, 0x48, 0x18, 0x20, 0x81, 0x28, 0xa2,
0x48, 0x40, 0x48, 0x08, 0x2a, 0x6c, 0x81, 0x4a,
0x28, 0x29, 0x22, 0xa2, 0x28, 0x62, 0x4a, 0xf2,
0x1f, 0xf5, 0x20, 0x02, 0x22, 0x12, 0x18, 0x00,
0x84, 0x22, 0x80, 0x09, 0x00, 0x25, 0x84, 0x04,
0x00, 0x12, 0x00, 0x00, 0x81, 0x22, 0x12, 0x80,
0x02, 0x28, 0x2a, 0x01, 0x18, 0x28, 0x88, 0x28,
0x00, 0x22, 0x20, 0x82, 0x02, 0x48, 0x00, 0x88,
0x82, 0x00, 0x00, 0x42, 0x48, 0x80, 0x04, 0xf0,
0x37, 0x37, 0x00, 0x48, 0x28, 0x62, 0x80, 0x02,
0x00, 0x38, 0x80, 0x82, 0x02, 0x00, 0x20, 0x23,
0x23, 0x02, 0x20, 0x22, 0x22, 0x24, 0x02, 0x22,
0x00, 0x42, 0x48, 0x4a, 0xa1, 0x21, 0x42, 0x68,
0x20, 0x02, 0x20, 0x24, 0x04, 0x20, 0x04, 0x20,
0x84, 0x24, 0x82, 0x02, 0x20, 0x02, 0x28, 0x48,
0x80, 0xf4, 0xeb, 0x99, 0x00, 0x00, 0x00, 0xc0,
0x18, 0x40, 0x08, 0x18, 0x18, 0x10, 0x08, 0x20,
0x01, 0x00, 0x00, 0x00, 0x18, 0x48, 0x00, 0x00,
0x18, 0x12, 0x20, 0x04, 0x00, 0x00, 0x42, 0x00,
0x28, 0x42, 0xa0, 0x41, 0x00, 0x00, 0x80, 0x04,
0x14, 0x40, 0x01, 0x1a, 0x02, 0xaf, 0x35, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a,
0x02, 0x84, 0x00, 0x00, 0x00, 0x12, 0x00, 0x12,
0x80, 0x01, 0x00, 0x22, 0x00, 0x80, 0x01, 0x48,
0x00, 0x00, 0x20, 0x02, 0x20, 0x02, 0x00, 0x20,
0x02, 0x00, 0x00, 0x12, 0x8f, 0x1f, 0x07, 0x20,
0x01, 0x12, 0x20, 0x01, 0x20, 0x01, 0x00, 0x18,
0x00, 0x18, 0x00, 0x00, 0x22, 0x22, 0x00, 0x42,
0x00, 0x00, 0x42, 0x80, 0x02, 0x80, 0x02, 0x28,
0x28, 0x22, 0x28, 0x28, 0x00, 0x28, 0x00, 0x80,
0x81, 0x02, 0x30, 0x21, 0x20, 0x22, 0x02, 0x22,
0x20, 0xf2, 0x1e, 0xb2, 0x20, 0x01, 0x12, 0x20,
0x21, 0x11, 0x08, 0x20, 0x92, 0x48, 0x80, 0x01,
0x00, 0x10, 0x28, 0x01, 0x00, 0x20, 0x02, 0x18,
0x20, 0x22, 0x01, 0x20, 0x04, 0x00, 0x28, 0x00,
0x28, 0x00, 0x00, 0x20, 0x04, 0x22, 0x58, 0x20,
0x22, 0x02, 0x00, 0x20, 0x22, 0x02, 0x12, 0x6f,
0x25, 0x03, 0x10, 0x08, 0x00, 0x81, 0x12, 0x12,
0x18, 0x00, 0x1a, 0x02, 0x12, 0x89, 0x01, 0x20,
0x01, 0x00, 0x00, 0x00, 0x00, 0x18, 0x42, 0x80,
0x84, 0xa1, 0x21, 0x18, 0x42, 0x48, 0x80, 0x05,
0x80, 0xa2, 0x42, 0x00, 0x00, 0x42, 0x80, 0x05,
0x18, 0x00, 0x20, 0x04, 0x42, 0x22, 0x48, 0xbf,
0x85, 0x08, 0x80, 0x01, 0x18, 0x81, 0x00, 0x40,
0x48, 0x08, 0x10, 0xc8, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x80, 0x04, 0x00, 0x00,
0x00, 0x48, 0x00, 0x48, 0x20, 0x21, 0x81, 0x81,
0x05, 0x80, 0x22, 0x01, 0x18, 0x12, 0x48, 0x22,
0x00, 0x18, 0x28, 0xef, 0xc8, 0x05, 0x12, 0x83,
0x08, 0x80, 0x81, 0x02, 0x00, 0x00, 0x48, 0x20,
0x01, 0x40, 0x08, 0x00, 0x00, 0x00, 0x20, 0x83,
0x21, 0x21, 0x04, 0x20, 0x02, 0x48, 0x00, 0x12,
0x00, 0x62, 0x20, 0x02, 0x62, 0x00, 0x4a, 0x01,
0x00, 0x80, 0x82, 0x02, 0x00, 0x00, 0xe0, 0xfa,
0x02, 0x82, 0xa0, 0x41, 0x00, | |
<filename>feature_encoders/utils.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) Hebes Intelligence Private Company
# This source code is licensed under the Apache License, Version 2.0 found in the
# LICENSE file in the root directory of this source tree.
import glob
from typing import Any, Union
import numpy as np
import pandas as pd
import scipy
from omegaconf import OmegaConf
from pandas.api.types import is_bool_dtype as is_bool
from pandas.api.types import is_categorical_dtype as is_category
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import is_integer_dtype as is_integer
from pandas.api.types import is_object_dtype as is_object
from sklearn.utils import check_array
from sklearn.utils.validation import column_or_1d
from feature_encoders.settings import CONF_PATH
def maybe_reshape_2d(arr: np.ndarray):
    """Reshape an array (if needed) so that it is always 2-d and long.

    Args:
        arr (numpy.ndarray): The input array.

    Returns:
        numpy.ndarray: The input, reshaped to a single column if it was 1-d.
    """
    if arr.ndim >= 2:
        return arr
    return arr.reshape(-1, 1)
def as_list(val: Any):
    """Cast input as list.

    Helper function, always returns a list of the input value.
    Strings are treated as scalars (wrapped, not iterated) and None
    maps to the empty list.
    """
    if isinstance(val, str):
        return [val]
    if hasattr(val, "__iter__"):
        return list(val)
    return [] if val is None else [val]
def as_series(x: Union[np.ndarray, pd.Series, pd.DataFrame]):
    """Cast an iterable to a Pandas Series object."""
    if isinstance(x, pd.DataFrame):
        # Keep only the first column of a dataframe input.
        return x.iloc[:, 0]
    if isinstance(x, pd.Series):
        return x
    return pd.Series(column_or_1d(x))
def get_categorical_cols(X: pd.DataFrame, int_is_categorical=True):
    """Return the names of the categorical columns in the input DataFrame.

    Args:
        X (pandas.DataFrame): Input dataframe.
        int_is_categorical (bool, optional): If True, integer types are
            considered categorical. Defaults to True.

    Returns:
        list: The names of categorical columns, in column order.
    """

    def _is_categorical(col):
        values = X[col]
        # Datetime columns are never categorical.
        if is_datetime(values):
            return False
        if is_bool(values) or is_object(values) or is_category(values):
            return True
        return bool(int_is_categorical and is_integer(values))

    return [col for col in X.columns if _is_categorical(col)]
def get_datetime_data(X: pd.DataFrame, col_name=None):
    """Get datetime information from the input dataframe.

    Args:
        X (pandas.DataFrame): The input dataframe.
        col_name (str, optional): The name of the column that contains
            datetime information. If None, it is assumed that the datetime
            information is provided by the input dataframe's index.
            Defaults to None.

    Returns:
        pandas.Series: The datetime information.
    """
    dt_column = X.index.to_series() if col_name is None else X[col_name]

    col_dtype = dt_column.dtype
    if isinstance(col_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        # Timezone-aware data already carries datetime values.
        col_dtype = np.datetime64
    if not np.issubdtype(col_dtype, np.datetime64):
        # Anything non-datetime gets parsed.
        dt_column = pd.to_datetime(dt_column, infer_datetime_format=True)
    return dt_column
def check_X(
    X: pd.DataFrame, exists=None, int_is_categorical=True, return_col_info=False
):
    """Perform a series of checks on the input dataframe.

    Args:
        X (pandas.DataFrame): The input dataframe.
        exists (str or list of str, optional): Names of columns that must be present
            in the input dataframe. Defaults to None.
        int_is_categorical (bool, optional): If True, integer types are considered
            categorical. Defaults to True.
        return_col_info (bool, optional): If True, the function will return the names
            of the categorical and the names of the numerical columns, in addition to
            the provided dataframe. Defaults to False.

    Raises:
        ValueError: If the input is not a pandas DataFrame.
        ValueError: If any of the column names in `exists` are not found in the input.
        ValueError: If NaN or inf values are found in the provided input data.

    Returns:
        pandas.DataFrame if `return_col_info` is False else (pandas.DataFrame, list, list)
    """
    if not isinstance(X, pd.DataFrame):
        raise ValueError("Input values are expected as pandas DataFrames.")

    # Required columns must all be present.
    for name in as_list(exists):
        if name not in X:
            raise ValueError(f"Regressor {name} missing from dataframe")

    categorical_cols = get_categorical_cols(X, int_is_categorical=int_is_categorical)
    numeric_cols = X.columns.difference(categorical_cols)

    # Reject missing/non-finite values in either column group.
    if categorical_cols and X[categorical_cols].isnull().values.any():
        raise ValueError("Found NaN values in input's categorical data")
    if (len(numeric_cols) > 0) and np.any(~np.isfinite(X[numeric_cols])):
        raise ValueError("Found NaN or Inf values in input's numerical data")

    if not return_col_info:
        return X
    return X, categorical_cols, numeric_cols
def check_y(y: Union[pd.Series, pd.DataFrame], index=None):
    """Perform a series of checks on the input dataframe.

    The checks are carried out by `sklearn.utils.check_array`.

    Args:
        y (Union[pandas.Series, pandas.DataFrame]): The input dataframe.
        index (Union[pandas.Index, pandas.DatetimeIndex], optional): An index to compare
            with the input dataframe's index. Defaults to None.

    Raises:
        ValueError: If the input is neither a pandas Series nor a pandas DataFrame with
            only a single column.
        ValueError: If the input data has different index than the one that was provided
            for comparison (if `index` is not None).

    Returns:
        pandas.DataFrame: The validated input data.
    """
    if isinstance(y, pd.Series):
        # Unnamed series get a placeholder column name.
        target_name = y.name or "_target_values_"
    elif isinstance(y, pd.DataFrame) and (y.shape[1] == 1):
        target_name = y.columns[0]
    else:
        raise ValueError(
            "This estimator accepts target inputs as "
            "`pd.Series` or `pd.DataFrame` with only a single column."
        )

    if (index is not None) and not y.index.equals(index):
        raise ValueError(
            "Input data has different index than the one "
            "that was provided for comparison"
        )

    validated = check_array(y, ensure_2d=False)
    return pd.DataFrame(data=validated, index=y.index, columns=[target_name])
def tensor_product(a: np.ndarray, b: np.ndarray, reshape=True):
    """Compute the tensor product of two matrices.

    Args:
        a (numpy array of shape (n, m_a)): The first matrix.
        b (numpy array of shape (n, m_b)): The second matrix.
        reshape (bool, optional): Whether to reshape the result to be 2D (n, m_a * m_b)
            or return a 3D tensor (n, m_a, m_b). Defaults to True.

    Raises:
        ValueError: If input arrays are not 2-dimensional.
        ValueError: If both input arrays do not have the same number of samples.

    Returns:
        numpy.ndarray of shape (n, m_a * m_b) if `reshape = True` else of shape (n, m_a, m_b).
    """
    if (a.ndim != 2) or (b.ndim != 2):
        raise ValueError("Inputs must be 2-dimensional")

    n_a, m_a = a.shape
    n_b, m_b = b.shape
    if n_a != n_b:
        raise ValueError("Both arguments must have the same number of samples")

    # Densify sparse inputs before broadcasting.
    if scipy.sparse.issparse(a):
        a = a.A
    if scipy.sparse.issparse(b):
        b = b.A

    # Row-wise outer product via broadcasting: (n, m_a, 1) * (n, 1, m_b).
    outer = a[:, :, None] * b[:, None, :]
    return outer.reshape(n_a, m_a * m_b) if reshape else outer
def add_constant(
    data: Union[np.ndarray, pd.Series, pd.DataFrame], prepend=True, has_constant="skip"
):
    """Add a column of ones to an array.

    Args:
        data (array-like): A column-ordered design matrix.
        prepend (bool, optional): If true, the constant is in the first column.
            Else the constant is appended (last column). Defaults to True.
        has_constant ({'raise', 'add', 'skip'}, optional): Behavior if ``data``
            already has a constant. The default will return data without adding
            another constant. If 'raise', will raise an error if any column has a
            constant value. Using 'add' will add a column of 1s if a constant column
            is present. Defaults to "skip".

    Returns:
        numpy.ndarray: The original values with a constant (column of ones).
    """
    x = np.asanyarray(data)
    ndim = x.ndim
    if ndim == 1:
        x = x[:, None]
    elif x.ndim > 2:
        raise ValueError("Only implemented for 2-dimensional arrays")

    # A column is an existing "constant" if it has zero spread and is nonzero.
    is_nonzero_const = (np.ptp(x, axis=0) == 0) & np.all(x != 0.0, axis=0)
    if is_nonzero_const.any():
        if has_constant == "skip":
            return x
        if has_constant == "raise":
            if ndim == 1:
                raise ValueError("data is constant.")
            columns = np.arange(x.shape[1])
            cols = ",".join([str(c) for c in columns[is_nonzero_const]])
            raise ValueError(f"Column(s) {cols} are constant.")
        # has_constant == "add": fall through and append the ones column anyway.

    pieces = [np.ones(x.shape[0]), x]
    if not prepend:
        pieces.reverse()
    return np.column_stack(pieces)
def load_config(model="towt", features="default", merge_multiple=False):
"""Load model configuration and feature generator mapping.
Given `model` and `features`, the function searches for files in:
::
conf_path = str(CONF_PATH)
model_files = glob.glob(f"{conf_path}/models/{model}.*")
feature_files = glob.glob(f"{conf_path}/features/{features}.*")
Args:
model (str, optional): The name of the model configuration to load.
Defaults to "towt".
features (str, optional): The name of the feature generator mapping to
load. Defaults to "default".
merge_multiple (bool, optional): If True and more than one files are found when
searching for either models or features, the contents of the files will ne merged.
Otherwise, an exception will be raised. Defaults to False.
Returns:
(dict, dict): The model configuration and feature mapping as dictionaries.
"""
conf_path = str(CONF_PATH)
model_conf = None
model_files = glob.glob(f"{conf_path}/models/{model}.*")
if len(model_files) == 0:
raise ValueError("No model configuration files found")
elif (len(model_files) > 1) and (not merge_multiple):
raise ValueError("More than one model configuration files found")
elif len(model_files) > 1:
model_conf = OmegaConf.merge(
*[OmegaConf.load(model_file) for model_file in model_files]
)
else:
model_conf = OmegaConf.load(model_files[0])
feature_conf = None
feature_files = glob.glob(f"{conf_path}/features/{features}.*")
if len(feature_files) == 0:
raise ValueError("No feature generator mapping files found")
elif (len(feature_files) > 1) and (not merge_multiple):
raise ValueError("More than one feature generator mapping files found")
elif len(feature_files) > 1:
feature_conf = OmegaConf.merge(
*[OmegaConf.load(feature_file) | |
has the ability to swing around an axis arranged perpendicularly relative
to the direction of the deflection of the actuating element ( 1 ). '''
expected = ['valve', 'deflectable', 'actuating element', 'sealing', 'contour', 'elastic', 'deflection', 'force',
'piezoelectric', 'transducer', 'perpendicularly']
idx = self.find_idx(text)
actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
self.assertGreaterOrEqualDiceScore(expected, actual)
    def test_patent_US09289910_20160322(self):
        # Gas-cluster-ion-beam cutting-edge patent abstract: the TF-IDF
        # n-grams detected for this one document should cover the expected
        # terms (compared via a Dice-score threshold).
        text = '''Two surfaces forming a cutting edge and a ridge of a cutting edge existing along the boundary
        between the two surfaces intersecting with each other are irradiated with a gas cluster ion beam at the same
        time, the maximum height of the profile of the two surfaces being equal to or smaller than 1 μm. A facet is
        newly formed on the ridge of the cutting edge by performing the irradiation with the gas cluster ion beam in
        such a manner that the two surfaces are not perpendicularly but obliquely irradiated with the gas cluster ion
        beam, and at least a part of the ridge of the cutting edge is perpendicularly irradiated with the gas cluster
        ion beam. '''
        expected = ['ridge', 'cutting edge', 'intersecting', 'gas', 'gas cluster', 'ion', 'ion beam', 'irradiation',
                    'perpendicularly']
        idx = self.find_idx(text)
        actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
        self.assertGreaterOrEqualDiceScore(expected, actual)
    def test_patent_US07682616_20100323(self):
        # Phytoceutical-composition patent abstract.
        # NOTE(review): 'forumulations' in the expected list looks like a typo
        # for 'formulations'; kept as-is since it is fixture data — confirm
        # whether the misspelling is intentional.
        text = '''Phytoceutical compositions for the prevention and treatment of circulatory disorders, feminine
        endocrine disorders, and dermal disorders. A specific combination of extracts of plants is taught,
        as well as principles for varying the formulations based on categorizing plants into one of three groups,
        Energy, Bio-Intelligence, and Organization and selecting several plants from each group. Such combinations
        have synergistic effects, with minimal side effects. '''
        expected = ['Phytoceutical', 'treatment', 'circulatory disorders', 'feminine', 'endocrine', 'disorders',
                    'dermal disorders', 'extracts', 'plants', 'forumulations', 'categorizing', 'Energy',
                    'Bio-Intelligence', 'synergistic', 'side effects']
        idx = self.find_idx(text)
        actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
        self.assertGreaterOrEqualDiceScore(expected, actual)
    def test_patent_US07872489_20110118(self):
        # Semiconductor defect-location patent abstract; detected n-grams
        # should cover the expected terms (Dice-score comparison).
        text = '''A method of locating a defect of a failed semiconductor device which includes applying a test
        pattern to the failed semiconductor device and providing failed semiconductor device test responses as a pass
        signature, applying radiation to each of multiple locations of circuitry of a correlation semiconductor
        device with sufficient energy to induce a fault in the circuitry, applying the test pattern to the
        correlation semiconductor device while the radiation is applied to the location and comparing correlation
        semiconductor device test responses with the pass signature for each location, and determining a defect
        location of the failed semiconductor device in which correlation semiconductor device test responses at least
        nearly match the pass signature. The radiation may be a laser beam. The method may include determining an
        exact match or a near match based on a high correlation result. Asynchronous scanning may be used to provide
        timing information. '''
        expected = ['defect', 'semiconductor', 'semiconductor device', 'signature', 'radiation', 'circuitry',
                    'correlation', 'laser beam', 'laser', 'Asynchronous']
        idx = self.find_idx(text)
        actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
        self.assertGreaterOrEqualDiceScore(expected, actual)
    def test_patent_US06932708_20050823(self):
        # Communication game-system patent abstract; detected n-grams should
        # cover the expected client/server/battle terms (Dice-score comparison).
        text = '''This communication game system comprises a client system 1 and a game server system 2
        for communicating with the client system 1 . The game server system 2 comprises a database 21 for storing group
        information which relates a plurality of client systems to each other as a battle group. The game server system 2
        is structured to decide a battle combination among the client systems 1 belonging to the same battle group, to perform a
        battle by managing the sending and receiving of data between the client systems determined by the above-mentioned combination,
        and to decide the next combination in accordance with the results of the battle. Each client system 1 has its own character select function and chat function when watching games.'''
        expected = ['client', 'system', 'server', 'database', 'battle', 'battle group', 'chat', 'game', 'function',
                    'client system', 'server system', 'battle combination', 'chat function']
        idx = self.find_idx(text)
        actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
        self.assertGreaterOrEqualDiceScore(expected, actual)
# FN database
    def test_patent_US08306135_20121106(self):
        # OFDM communication-system patent abstract; the expected list mixes
        # full terms and stems (e.g. 'mapp', 'interleav') to match the
        # tokenizer's output (Dice-score comparison).
        text = '''A communication system using an OFDM includes a data creation section for coding data to be transmitted and mapping
        the data, a null symbol insertion section for filling a null symbol into a no-data subchannel if the number of subchannels containing
        the mapped data is small for the band assignment, and a symbol interleave section for performing symbol interleave in the whole user assignment band and
        inserting a known training symbol and pilot symbol into the determined symbol position of the user assignment band are included and symbols are placed such that
        signal phase change is continuous in the same subcarrier between symbols and carrier sense is executed at the positions of the symbols where the signal phase change is continuous.'''
        expected = ['communication', 'OFDM', 'data', 'coding', 'transmit', 'mapp', 'no data', 'subchannel', 'band',
                    'interleav', 'assign', 'insert', 'signal', 'phase', 'subcarrier', 'carrier', 'continuous',
                    'communication system', 'null symbol', 'band assign', 'phase change', 'training symbol',
                    'pilot symbol', 'map', 'map data']
        idx = self.find_idx(text)
        actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
        self.assertGreaterOrEqualDiceScore(expected, actual)
    def test_patent_US09395838_20160719(self):
        # Touch-panel input-device patent abstract (Dice-score comparison).
        text = '''An input device is equipped with a touch panel for detecting an input, a coordinates acquiring unit for detecting input coordinates which are coordinates
        of the input detected by the touch panel, and a pull manipulation judging unit which, when an input to an input detection surface which is a surface on which the touch panel
        is placed, makes effective the Z coordinate in the direction perpendicular to the input detection surface among the input coordinates detected by the coordinates acquiring unit.'''
        expected = ['input', 'touch', 'panel', 'touch panel', 'coordinate', 'Z']
        idx = self.find_idx(text)
        actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
        self.assertGreaterOrEqualDiceScore(expected, actual)
# FN: Z
    def test_patent_US09714145_20170725(self):
        # Container-stacking logistics patent abstract; expected terms are
        # mostly stems ('stack', 'configur', 'sequenc') — Dice-score comparison.
        text = '''The disclosure describes, in part, a system and method for improving the stacking of containers on or in a transportation unit.
        In some implementations, a stacking configuration may be planned that identifies containers and a position for those containers in the stacking configuration.
        The stacking configuration may be planned based on dimension values of the containers such that when stacked the stacking configuration remains stable.
        In addition, to improve the efficiency at which containers may be stacked, the disclosure describes that containers and/or the picking of items for those containers may be
        sequenced so that the containers, when packed and routed, arrive in a manner that allows efficient stacking.'''
        expected = ['stack', 'container', 'configur', 'sequenc', 'transportation unit', 'transportation']
        idx = self.find_idx(text)
        actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
        self.assertGreaterOrEqualDiceScore(expected, actual)
# FP: {'configur', 'disclosur', 'describ', 'improv', 'effici'} (too general)
# TN: plan
# Thoughts: need to consider synonyms
    def test_patent_US09910243_20180306(self):
        # Interchangeable-lens camera-system patent abstract (Dice-score
        # comparison of detected n-grams against expected terms).
        text = '''A lens interchangeable type camera system, comprising an interchangeable lens and a camera body, comprising a first control section that carries out manual focus
        control by detecting rotation direction and rotation amount of an operation member, in accordance with a manual focus mode command from a mode setting at a time when the
        operation member is at the first position, and a second control section that, when the operation member is at a second position, irrespective of a command from a mode setting section,
        notifies a detection result of a first detection section to the camera body, detects rotational position of the operation member using a third detection section, and forcibly carries
        out manual focus control based on a rotation position, wherein the lens interchangeable type camera system further comprises a function restriction section that sets operation of the
        second control section to valid or invalid.'''
        expected = ['lens', 'camera', 'interchangeable', 'focus', 'rotation', 'manual focus']
        idx = self.find_idx(text)
        actual = self.tfidf.detect_popular_ngrams_in_docs_set(docs_set=[idx])
        self.assertGreaterOrEqualDiceScore(expected, actual)
def test_patent_US07198133_20070403(self):
text = '''A one-piece, transparent flexible ear coupler for use with hearing evaluation is disclosed. It includes an annular side wall | |
<reponame>peterwilliams97/spaCy_practice<filename>vanishing_grad_example.py
"""
coding: utf-8
Vanishing Gradients
We will demonstrate the difference between using sigmoid and ReLU nonlinearities in a simple
neural network with two hidden layers. This notebook is built off of a minimal net demo done
by <NAME> for CS 231n, which you can check out here:
http://cs231n.github.io/neural-networks-case-study/
"""
# Setup
import os
import numpy as np
import matplotlib.pyplot as plt

plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# Output directory for the generated figures; plot_n is a global counter used
# by save_plot() to give saved files an ordered numeric prefix.
plot_dir = 'plots'
plot_n = 0
try:
    os.makedirs(plot_dir)
except FileExistsError:
    # Directory already exists from a previous run — fine.
    pass
def save_plot(name):
    """Save the current matplotlib figure under an auto-numbered file name.

    The file goes into the module-level ``plot_dir`` and is prefixed with the
    global counter ``plot_n`` (then incremented) plus the run ``rubric`` tag.
    """
    global plot_n
    filename = '%03d-%s-%s.png' % (plot_n, rubric, name)
    path = os.path.join(plot_dir, filename)
    print('Saving "%s"' % path)
    plt.savefig(path)
    plot_n += 1
# generate random data -- not linearly separable
np.random.seed(1)  # fixed seed for reproducible data and plots
N = 80  # number of points per class
D = 2  # dimensionality
K = 4  # number of classes
N_EPOCHS = 50000
d_theta = 2 * np.pi / K  # angular offset between consecutive class arms
S = 1.5  # arm sweep: each spiral covers S * pi radians
R = 0.6  # noise level; delta below is the per-point angular jitter scale
delta = R / K
print('N=%d D=%d K=%d N_EPOCHS=%d' % (N, D, K, N_EPOCHS))
rubric = 'N=%d-K=%d-S=%.1f-R=%.1f' % (N, K, S, R)  # tag embedded in saved plot names
X = np.zeros((N * K, D))  # data matrix: each row is a single 2-d example
num_train_examples = X.shape[0]
y = np.zeros(N * K, dtype='uint8')  # class labels, one per row of X
# Build K noisy spiral arms, one per class.
for j in range(K):
    ix = range(N * j, N * (j + 1))  # slice of rows belonging to class j
    r = np.sqrt(np.linspace(0.0, 1, N))  # radius
    t = np.linspace(j * d_theta, j * d_theta + S * np.pi, N) + np.random.randn(N) * delta  # theta
    X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
    y[ix] = j
# Visualize the generated dataset, colored by class.
fig = plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.xlim([-1, 1])
plt.ylim([-1, 1])
plt.legend()
save_plot('input')
# The sigmoid function "squashes" inputs to lie between 0 and 1. Unfortunately, this means that for
# inputs with sigmoid output close to 0 or 1, the gradient with respect to those inputs are close to
# zero. This leads to the phenomenon of vanishing gradients, where gradients drop close to zero, and
# the net does not learn well.
#
# On the other hand, the relu function (max(0, x)) does not saturate with input size. Plot these
# functions to gain intuition.
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x), mapping inputs into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
def sigmoid_grad(x):
    """Derivative of the sigmoid, expressed in terms of its output x = sigmoid(z)."""
    return x * (1 - x)
def relu(x):
    """Rectified linear unit: elementwise max(0, x)."""
    return np.maximum(x, 0)
# Let's try and see now how the two kinds of nonlinearities change deep neural net training in
# practice. Below, we build a very simple neural net with three layers (two hidden layers), for
# which you can swap out ReLU/ sigmoid nonlinearities.
def three_layer_net(NONLINEARITY, X, y, model, step_size, reg):
    """Train a three-layer (two hidden layer) softmax classifier with
    vanilla gradient descent, using ReLU or sigmoid hidden activations.

    Parameters
    ----------
    NONLINEARITY : 'RELU' or 'SIGM' -- selects the hidden activation.
    X : array of shape (num_examples, D), training inputs.
    y : array of shape (num_examples,), integer class labels in [0, K).
    model : dict with sizes 'h', 'h2' and parameters 'W1'..'W3', 'b1'..'b3'.
        Parameter arrays are updated IN PLACE by the += updates below.
    step_size : learning rate.
    reg : L2 regularization strength.

    Returns
    -------
    (plot_array_1, plot_array_2, W1, W2, W3, b1, b2, b3) where the two
    lists hold a per-epoch gradient-magnitude heuristic for layers 1 and 2.
    """
    # parameter initialization
    h = model['h']
    h2 = model['h2']
    W1 = model['W1']
    W2 = model['W2']
    W3 = model['W3']
    b1 = model['b1']
    b2 = model['b2']
    b3 = model['b3']
    # some hyper-parameters
    # gradient descent loop
    num_examples = X.shape[0]
    plot_array_1 = []  # per-epoch |dW1| heuristic
    plot_array_2 = []  # per-epoch |dW2| heuristic
    for i in range(N_EPOCHS):
        # FORWARD PROP
        # NOTE(review): any NONLINEARITY other than 'RELU'/'SIGM' leaves
        # hidden_layer unbound and raises NameError below.
        if NONLINEARITY == 'RELU':
            hidden_layer = relu(np.dot(X, W1) + b1)
            hidden_layer2 = relu(np.dot(hidden_layer, W2) + b2)
            scores = np.dot(hidden_layer2, W3) + b3
        elif NONLINEARITY == 'SIGM':
            hidden_layer = sigmoid(np.dot(X, W1) + b1)
            hidden_layer2 = sigmoid(np.dot(hidden_layer, W2) + b2)
            scores = np.dot(hidden_layer2, W3) + b3
        # softmax: exponentiate and normalize each row to probabilities
        exp_scores = np.exp(scores)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True) # [N x K]
        #print(X.shape)
        #print(scores.shape)
        #print(np.sum(exp_scores, axis=1, keepdims=True).shape)
        #print(probs.shape)
        #assert False
        # compute the loss: average cross-entropy loss and regularization
        # v = probs[range(num_examples), y] -> 1d vector v[i] = probs[i, y[i]]]
        corect_logprobs = -np.log(probs[range(num_examples), y])
        data_loss = np.sum(corect_logprobs) / num_examples
        reg_loss = 0.5*reg*np.sum(W1*W1) + 0.5*reg*np.sum(W2*W2) + 0.5*reg*np.sum(W3*W3)
        loss = data_loss + reg_loss
        if i % 1000 == 0:
            print("iteration %d: loss %f" % (i, loss))
        # compute the gradient on scores
        dscores = probs
        dscores[range(num_examples), y] -= 1
        dscores /= num_examples
        # BACKPROP HERE
        dW3 = (hidden_layer2.T).dot(dscores)
        db3 = np.sum(dscores, axis=0, keepdims=True)
        if NONLINEARITY == 'RELU':
            # backprop ReLU nonlinearity here
            dhidden2 = np.dot(dscores, W3.T)
            dhidden2[hidden_layer2 <= 0] = 0
            dW2 = np.dot( hidden_layer.T, dhidden2)
            # heuristic: total |grad| normalized by the sum of the matrix dims
            plot_array_2.append(np.sum(np.abs(dW2)) / np.sum(np.abs(dW2.shape)))
            db2 = np.sum(dhidden2, axis=0)
            dhidden = np.dot(dhidden2, W2.T)
            dhidden[hidden_layer <= 0] = 0
        elif NONLINEARITY == 'SIGM':
            # backprop sigmoid nonlinearity here
            dhidden2 = dscores.dot(W3.T)*sigmoid_grad(hidden_layer2)
            dW2 = (hidden_layer.T).dot(dhidden2)
            plot_array_2.append(np.sum(np.abs(dW2))/np.sum(np.abs(dW2.shape)))
            db2 = np.sum(dhidden2, axis=0)
            dhidden = dhidden2.dot(W2.T)*sigmoid_grad(hidden_layer)
        dW1 = np.dot(X.T, dhidden)
        plot_array_1.append(np.sum(np.abs(dW1))/np.sum(np.abs(dW1.shape)))
        db1 = np.sum(dhidden, axis=0)
        # add regularization
        dW3 += reg * W3
        dW2 += reg * W2
        dW1 += reg * W1
        #option to return loss, grads -- uncomment next comment
        grads={}
        grads['W1']=dW1
        grads['W2']=dW2
        grads['W3']=dW3
        grads['b1']=db1
        grads['b2']=db2
        grads['b3']=db3
        #return loss, grads
        # update (in place, so the caller's model dict sees the new values)
        W1 += -step_size * dW1
        b1 += -step_size * db1
        W2 += -step_size * dW2
        b2 += -step_size * db2
        W3 += -step_size * dW3
        b3 += -step_size * db3
    # evaluate training set accuracy
    if NONLINEARITY == 'RELU':
        hidden_layer = relu(np.dot(X, W1) + b1)
        hidden_layer2 = relu(np.dot(hidden_layer, W2) + b2)
    elif NONLINEARITY == 'SIGM':
        hidden_layer = sigmoid(np.dot(X, W1) + b1)
        hidden_layer2 = sigmoid(np.dot(hidden_layer, W2) + b2)
    scores = np.dot(hidden_layer2, W3) + b3
    predicted_class = np.argmax(scores, axis=1)
    print('training accuracy: %.2f' % (np.mean(predicted_class == y)))
    # return cost, grads
    return plot_array_1, plot_array_2, W1, W2, W3, b1, b2, b3
# #### Train net with sigmoid nonlinearity first
# Initialize toy model, train sigmoid net
# N = 100 # number of points per class
# D = 2 # dimensionality
# K = 3 # number of classes
# Hidden-layer sizes shared by both experiments.
h = 50
h2 = 50
num_train_examples = X.shape[0]
model = {}
model['h'] = h # size of hidden layer 1
model['h2'] = h2 # size of hidden layer 2
# Small random weights, zero biases.
model['W1'] = 0.1 * np.random.randn(D, h)
model['b1'] = np.zeros((1, h))
model['W2'] = 0.1 * np.random.randn(h, h2)
model['b2'] = np.zeros((1, h2))
model['W3'] = 0.1 * np.random.randn(h2, K)
model['b3'] = np.zeros((1, K))
# Train the sigmoid net; keep its per-layer gradient traces and weights.
(sigm_array_1, sigm_array_2, s_W1, s_W2, s_W3, s_b1, s_b2, s_b3
 ) = three_layer_net('SIGM', X, y, model, step_size=1e-1, reg=1e-3)
# #### Now train net with ReLU nonlinearity
# In[33]:
#Re-initialize model, train relu net
model={}
model['h'] = h # size of hidden layer 1
model['h2'] = h2# size of hidden layer 2
model['W1'] = 0.1 * np.random.randn(D,h)
model['b1'] = np.zeros((1,h))
model['W2'] = 0.1 * np.random.randn(h,h2)
model['b2'] = np.zeros((1,h2))
model['W3'] = 0.1 * np.random.randn(h2,K)
model['b3'] = np.zeros((1,K))
# Same training run as above, but with ReLU activations.
(relu_array_1, relu_array_2, r_W1, r_W2,r_W3, r_b1, r_b2,r_b3
 ) = three_layer_net('RELU', X, y, model, step_size=1e-1, reg=1e-3)
# # The Vanishing Gradient Issue
# We can use the sum of the magnitude of gradients for the weights between hidden layers as a cheap
# heuristic to measure speed of learning (you can also use the magnitude of gradients for each
# neuron in the hidden layer here). Intuitively, when the magnitude of the gradients of the weight
# vectors or of each neuron are large, the net is learning faster. (NOTE: For our net, each hidden
# layer has the same number of neurons. If you want to play around with this, make sure to adjust
# the heuristic to account for the number of neurons in the layer).
# In[34]:
# Gradient-magnitude traces for the sigmoid net: layer 1 vs layer 2.
fig = plt.figure()
plt.plot(np.array(sigm_array_1))
plt.plot(np.array(sigm_array_2))
plt.title('Sum of magnitudes of gradients -- SIGM weights')
plt.legend(("sigm first layer", "sigm second layer"))
save_plot('gradients.SIGM.weights')
# In[35]:
# Same traces for the ReLU net.
fig = plt.figure()
plt.plot(np.array(relu_array_1))
plt.plot(np.array(relu_array_2))
plt.title('Sum of magnitudes of gradients -- ReLU weights')
plt.legend(("relu first layer", "relu second layer"))
save_plot('gradients.ReLU.weights')
# In[36]:
# Overlaying the two plots to compare
fig = plt.figure()
plt.plot(np.array(relu_array_1), ls=':')
plt.plot(np.array(relu_array_2), ls=':')
plt.plot(np.array(sigm_array_1))
plt.plot(np.array(sigm_array_2))
plt.title('Sum of magnitudes of gradients -- hidden layer neurons')
plt.legend(("relu first layer", "relu second layer", "sigm first layer", "sigm second layer"))
save_plot('gradients.hidden.layer')
# #### Feel free to play around with this notebook to gain intuition. Things you might want to try:
#
# - Adding additional layers to the nets and seeing how early layers continue to train slowly for the sigmoid net
# - Experiment with hyper-parameter tuning for the nets -- changing regularization and gradient descent step size
# - Experiment with different nonlinearities -- Leaky ReLU, Maxout. How quickly do different layers learn now?
#
#
# We can see how well each classifier does in terms of distinguishing the toy data classes. As
# expected, since the ReLU net trains faster, for a set number of epochs it performs better compared
# to the sigmoid net.
# In[40]:
# plot the classifiers- SIGMOID
# NOTE: h is re-bound here, from hidden-layer size (50) to mesh step size.
h = 0.02
margin = 0.2
x_min, x_max = X[:, 0].min() - margin, X[:, 0].max() + margin
y_min, y_max = X[:, 1].min() - margin, X[:, 1].max() + margin
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# Forward every mesh point through the trained sigmoid net, take argmax class.
Z = np.dot(sigmoid(np.dot(sigmoid(np.dot(np.c_[xx.ravel(), yy.ravel()], s_W1)
    + s_b1), s_W2) + s_b2), s_W3) + s_b3
Z = np.argmax(Z, axis=1)
Z = Z.reshape(xx.shape)
fig = plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral, | |
import inspect
import os
import sys
from gettext import gettext as _
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
import click
import click.core
import click.formatting
import click.parser
import click.types
from .utils import _get_click_major
if TYPE_CHECKING: # pragma: no cover
import click.shell_completion
# TODO: when deprecating Click 7, remove this
def _typer_param_shell_complete(
    self: click.core.Parameter, ctx: click.Context, incomplete: str
) -> List["click.shell_completion.CompletionItem"]:
    """Run the parameter's custom completer if set, else defer to its type."""
    custom = self._custom_shell_complete
    if custom is None:
        return self.type.shell_complete(ctx, self, incomplete)
    results = custom(ctx, self, incomplete)
    # Plain strings are promoted to CompletionItem objects for Click 8.
    if results and isinstance(results[0], str):
        from click.shell_completion import CompletionItem

        results = [CompletionItem(value) for value in results]
    return cast(List["click.shell_completion.CompletionItem"], results)
def _typer_param_setup_autocompletion_compat(
    self: click.Parameter,
    *,
    autocompletion: Optional[
        Callable[[click.Context, List[str], str], List[Union[Tuple[str, str], str]]]
    ] = None,
) -> None:
    """Bridge Click 7's deprecated ``autocompletion`` onto Click 8's
    ``shell_complete``.

    If a legacy callback is supplied and no custom shell_complete is set,
    emit a DeprecationWarning and install an adapter that converts the old
    return values (strings or (value, help) tuples) into CompletionItem
    objects filtered by the current prefix.
    """
    if autocompletion is not None and self._custom_shell_complete is None:
        import warnings

        warnings.warn(
            "'autocompletion' is renamed to 'shell_complete'. The old name is"
            " deprecated and will be removed in Click 8.1. See the docs about"
            " 'Parameter' for information about new behavior.",
            DeprecationWarning,
            stacklevel=2,
        )

        def compat_autocompletion(
            ctx: click.Context, param: click.core.Parameter, incomplete: str
        ) -> List["click.shell_completion.CompletionItem"]:
            # Adapter matching Click 8's shell_complete signature.
            from click.shell_completion import CompletionItem

            out = []
            for c in autocompletion(ctx, [], incomplete):  # type: ignore
                if isinstance(c, tuple):
                    c = CompletionItem(c[0], help=c[1])
                elif isinstance(c, str):
                    c = CompletionItem(c)
                if c.value.startswith(incomplete):
                    out.append(c)
            return out

        self._custom_shell_complete = compat_autocompletion
class TyperArgument(click.core.Argument):
    """click.Argument extended with help text, hidden state, and env var /
    default display -- features click only provides for Options."""

    def __init__(
        self,
        *,
        # Parameter
        param_decls: List[str],
        type: Optional[Any] = None,
        required: Optional[bool] = None,
        default: Optional[Any] = None,
        callback: Optional[Callable[..., Any]] = None,
        nargs: Optional[int] = None,
        metavar: Optional[str] = None,
        expose_value: bool = True,
        is_eager: bool = False,
        envvar: Optional[Union[str, List[str]]] = None,
        shell_complete: Optional[
            Callable[
                [click.Context, click.Parameter, str],
                Union[List["click.shell_completion.CompletionItem"], List[str]],
            ]
        ] = None,
        autocompletion: Optional[Callable[..., Any]] = None,
        # TyperArgument
        show_default: Union[bool, str] = True,
        show_choices: bool = True,
        show_envvar: bool = True,
        help: Optional[str] = None,
        hidden: bool = False,
    ):
        # Presentation attributes that click.core.Argument does not have.
        self.help = help
        self.show_default = show_default
        self.show_choices = show_choices
        self.show_envvar = show_envvar
        self.hidden = hidden
        kwargs: Dict[str, Any] = {
            "param_decls": param_decls,
            "type": type,
            "required": required,
            "default": default,
            "callback": callback,
            "nargs": nargs,
            "metavar": metavar,
            "expose_value": expose_value,
            "is_eager": is_eager,
            "envvar": envvar,
        }
        # Click 8 takes shell_complete; Click 7 still expects autocompletion.
        if _get_click_major() > 7:
            kwargs["shell_complete"] = shell_complete
        else:
            kwargs["autocompletion"] = autocompletion
        super().__init__(**kwargs)
        if _get_click_major() > 7:
            # On Click 8, adapt a legacy autocompletion callback if given.
            _typer_param_setup_autocompletion_compat(
                self, autocompletion=autocompletion
            )

    def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]:
        # Modified version of click.core.Option.get_help_record()
        # to support Arguments
        if self.hidden:
            return None
        name = self.make_metavar()
        help = self.help or ""
        extra = []
        if self.show_envvar:
            envvar = self.envvar
            # allow_from_autoenv is currently not supported in Typer for CLI Arguments
            if envvar is not None:
                var_str = (
                    ", ".join(str(d) for d in envvar)
                    if isinstance(envvar, (list, tuple))
                    else envvar
                )
                extra.append(f"env var: {var_str}")
        if self.default is not None and (self.show_default or ctx.show_default):
            if isinstance(self.show_default, str):
                # A string show_default overrides the actual value in help.
                default_string = f"({self.show_default})"
            elif isinstance(self.default, (list, tuple)):
                default_string = ", ".join(str(d) for d in self.default)
            elif inspect.isfunction(self.default):
                default_string = "(dynamic)"
            else:
                default_string = str(self.default)
            extra.append(f"default: {default_string}")
        if self.required:
            extra.append("required")
        if extra:
            extra_str = ";".join(extra)
            help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
        return name, help

    def make_metavar(self) -> str:
        # Modified version of click.core.Argument.make_metavar()
        # to include Argument name
        if self.metavar is not None:
            return self.metavar
        var = (self.name or "").upper()
        if not self.required:
            var = "[{}]".format(var)
        type_var = self.type.get_metavar(self)
        if type_var:
            var += f":{type_var}"
        if self.nargs != 1:
            var += "..."
        return var

    def shell_complete(
        self, ctx: click.Context, incomplete: str
    ) -> List["click.shell_completion.CompletionItem"]:
        """Route completion through the shared Click 7/8 compatibility shim."""
        return _typer_param_shell_complete(self, ctx=ctx, incomplete=incomplete)
class TyperOption(click.core.Option):
    """click.Option subclass bridging Click 7/8 differences (shell completion,
    prompt_required) and customizing how defaults of boolean flags render."""

    def __init__(
        self,
        *,
        # Parameter
        param_decls: List[str],
        type: Optional[Union[click.types.ParamType, Any]] = None,
        required: Optional[bool] = None,
        default: Optional[Any] = None,
        callback: Optional[Callable[..., Any]] = None,
        nargs: Optional[int] = None,
        metavar: Optional[str] = None,
        expose_value: bool = True,
        is_eager: bool = False,
        envvar: Optional[Union[str, List[str]]] = None,
        shell_complete: Optional[
            Callable[
                [click.Context, click.Parameter, str],
                Union[List["click.shell_completion.CompletionItem"], List[str]],
            ]
        ] = None,
        autocompletion: Optional[Callable[..., Any]] = None,
        # Option
        show_default: Union[bool, str] = False,
        prompt: Union[bool, str] = False,
        confirmation_prompt: Union[bool, str] = False,
        prompt_required: bool = True,
        hide_input: bool = False,
        is_flag: Optional[bool] = None,
        flag_value: Optional[Any] = None,
        multiple: bool = False,
        count: bool = False,
        allow_from_autoenv: bool = True,
        help: Optional[str] = None,
        hidden: bool = False,
        show_choices: bool = True,
        show_envvar: bool = False,
    ):
        # TODO: when deprecating Click 7, remove custom kwargs with prompt_required
        # and call super().__init__() directly
        kwargs: Dict[str, Any] = {
            "param_decls": param_decls,
            "type": type,
            "required": required,
            "default": default,
            "callback": callback,
            "nargs": nargs,
            "metavar": metavar,
            "expose_value": expose_value,
            "is_eager": is_eager,
            "envvar": envvar,
            "show_default": show_default,
            "prompt": prompt,
            "confirmation_prompt": confirmation_prompt,
            "hide_input": hide_input,
            "is_flag": is_flag,
            "flag_value": flag_value,
            "multiple": multiple,
            "count": count,
            "allow_from_autoenv": allow_from_autoenv,
            "help": help,
            "hidden": hidden,
            "show_choices": show_choices,
            "show_envvar": show_envvar,
        }
        # prompt_required and shell_complete only exist on Click 8.
        if _get_click_major() > 7:
            kwargs["prompt_required"] = prompt_required
            kwargs["shell_complete"] = shell_complete
        else:
            kwargs["autocompletion"] = autocompletion
        super().__init__(**kwargs)
        if _get_click_major() > 7:
            # On Click 8, adapt a legacy autocompletion callback if given.
            _typer_param_setup_autocompletion_compat(
                self, autocompletion=autocompletion
            )

    def get_help_record(self, ctx: click.Context) -> Optional[Tuple[str, str]]:
        # Click 7.x was not breaking this use case, so in that case, re-use its logic
        if _get_click_major() < 8:
            return super().get_help_record(ctx)
        # Duplicate all of Click's logic only to modify a single line, to allow boolean
        # flags with only names for False values as it's currently supported by Typer
        # Ref: https://typer.tiangolo.com/tutorial/parameter-types/bool/#only-names-for-false
        if self.hidden:
            return None
        any_prefix_is_slash = False

        def _write_opts(opts: Sequence[str]) -> str:
            # Join the option strings, appending the metavar for value options.
            nonlocal any_prefix_is_slash
            rv, any_slashes = click.formatting.join_options(opts)
            if any_slashes:
                any_prefix_is_slash = True
            if not self.is_flag and not self.count:
                rv += f" {self.make_metavar()}"
            return rv

        rv = [_write_opts(self.opts)]
        if self.secondary_opts:
            rv.append(_write_opts(self.secondary_opts))
        help = self.help or ""
        extra = []
        if self.show_envvar:
            envvar = self.envvar
            if envvar is None:
                # Fall back to the auto-generated env var name when enabled.
                if (
                    self.allow_from_autoenv
                    and ctx.auto_envvar_prefix is not None
                    and self.name is not None
                ):
                    envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
            if envvar is not None:
                var_str = (
                    envvar
                    if isinstance(envvar, str)
                    else ", ".join(str(d) for d in envvar)
                )
                extra.append(_("env var: {var}").format(var=var_str))
        # Temporarily enable resilient parsing to avoid type casting
        # failing for the default. Might be possible to extend this to
        # help formatting in general.
        resilient = ctx.resilient_parsing
        ctx.resilient_parsing = True
        try:
            default_value = self.get_default(ctx, call=False)
        finally:
            ctx.resilient_parsing = resilient
        show_default_is_str = isinstance(self.show_default, str)
        if show_default_is_str or (
            default_value is not None and (self.show_default or ctx.show_default)
        ):
            if show_default_is_str:
                default_string = f"({self.show_default})"
            elif isinstance(default_value, (list, tuple)):
                default_string = ", ".join(str(d) for d in default_value)
            elif callable(default_value):
                default_string = _("(dynamic)")
            elif self.is_bool_flag and self.secondary_opts:
                # For boolean flags that have distinct True/False opts,
                # use the opt without prefix instead of the value.
                # Typer override, original commented
                # default_string = click.parser.split_opt(
                #     (self.opts if self.default else self.secondary_opts)[0]
                # )[1]
                if self.default:
                    if self.opts:
                        default_string = click.parser.split_opt(self.opts[0])[1]
                    else:
                        default_string = str(default_value)
                else:
                    default_string = click.parser.split_opt(self.secondary_opts[0])[1]
                # Typer override end
            elif self.is_bool_flag and not self.secondary_opts and not default_value:
                default_string = ""
            else:
                default_string = str(default_value)
            if default_string:
                extra.append(_("default: {default}").format(default=default_string))
        if isinstance(self.type, click.types._NumberRangeBase):
            range_str = self.type._describe_range()
            if range_str:
                extra.append(range_str)
        if self.required:
            extra.append(_("required"))
        if extra:
            extra_str = "; ".join(extra)
            help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
        return ("; " if any_prefix_is_slash else " / ").join(rv), help

    def shell_complete(
        self, ctx: click.Context, incomplete: str
    ) -> List["click.shell_completion.CompletionItem"]:
        """Route completion through the shared Click 7/8 compatibility shim."""
        return _typer_param_shell_complete(self, ctx=ctx, incomplete=incomplete)
def _typer_format_options(
    self: click.core.Command, *, ctx: click.Context, formatter: click.HelpFormatter
) -> None:
    """Write parameter help, with Arguments and Options in separate sections."""
    arg_rows = []
    opt_rows = []
    for param in self.get_params(ctx):
        record = param.get_help_record(ctx)
        if record is None:
            continue
        kind = param.param_type_name
        if kind == "argument":
            arg_rows.append(record)
        elif kind == "option":
            opt_rows.append(record)
    # TODO: explore adding Click's gettext support, e.g.:
    # from gettext import gettext as _
    # with formatter.section(_("Options")):
    #     ...
    for title, rows in (("Arguments", arg_rows), ("Options", opt_rows)):
        if rows:
            with formatter.section(title):
                formatter.write_dl(rows)
def _typer_main_shell_completion(
    self: click.core.Command,
    *,
    ctx_args: Dict[str, Any],
    prog_name: str,
    complete_var: Optional[str] = None,
) -> None:
    """Run Typer's shell-completion handler (and exit) when the completion
    environment variable carries an instruction; otherwise do nothing."""
    if complete_var is None:
        # Derive the variable name from the program name, e.g. _MY_PROG_COMPLETE.
        complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper()
    instruction = os.environ.get(complete_var)
    if instruction:
        from .completion import shell_complete

        status = shell_complete(self, ctx_args, prog_name, complete_var, instruction)
        sys.exit(status)
class TyperCommand(click.core.Command):
    """click.Command that renders Arguments and Options as separate help
    sections and routes shell completion through Typer's handler."""

    def format_options(
        self, ctx: click.Context, formatter: click.HelpFormatter
    ) -> None:
        # Delegate to the module-level helper shared with TyperGroup.
        _typer_format_options(self, ctx=ctx, formatter=formatter)

    def _main_shell_completion(
        self,
        ctx_args: Dict[str, Any],
        prog_name: str,
        complete_var: Optional[str] = None,
    ) -> None:
        # Replace Click's completion entry point with Typer's implementation.
        _typer_main_shell_completion(
            self, ctx_args=ctx_args, prog_name=prog_name, complete_var=complete_var
        )
class TyperGroup(click.core.Group):
def format_options(
self, ctx: click.Context, formatter: click.HelpFormatter
) -> None:
_typer_format_options(self, ctx=ctx, formatter=formatter)
self.format_commands(ctx, | |
= ArgumentDescriptor(name='unicodestring1', n=
TAKEN_FROM_ARGUMENT1, reader=read_unicodestring1, doc=
"""A counted Unicode string.
The first argument is a 1-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
"""
)
def read_unicodestring4(f):
    """Read a UTF-8 string prefixed with an unsigned 4-byte little-endian length.

    The payload is decoded with the 'surrogatepass' handler so lone
    surrogates survive a round trip.  Raises ValueError if the declared
    length exceeds sys.maxsize or the stream runs short.

    >>> import io
    >>> s = 'abcd\\uabcd'
    >>> enc = s.encode('utf-8')
    >>> prefix = bytes([len(enc), 0, 0, 0])  # little-endian 4-byte length
    >>> read_unicodestring4(io.BytesIO(prefix + enc + b'junk')) == s
    True
    """
    count = read_uint4(f)
    assert count >= 0
    if count > sys.maxsize:
        raise ValueError('unicodestring4 byte count > sys.maxsize: %d' % count)
    payload = f.read(count)
    if len(payload) != count:
        raise ValueError(
            'expected %d bytes in a unicodestring4, but only %d remain' % (count,
            len(payload)))
    return str(payload, 'utf-8', 'surrogatepass')
# Opcode-argument descriptor: UTF-8 string with a 4-byte length prefix,
# decoded by read_unicodestring4 above.
unicodestring4 = ArgumentDescriptor(name='unicodestring4', n=
TAKEN_FROM_ARGUMENT4U, reader=read_unicodestring4, doc=
"""A counted Unicode string.
The first argument is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
"""
)
def read_unicodestring8(f):
    """Read a UTF-8 string prefixed with an unsigned 8-byte little-endian length.

    Decoded with the 'surrogatepass' handler; raises ValueError when the
    declared length exceeds sys.maxsize or the stream runs short.

    >>> import io
    >>> s = 'abcd\\uabcd'
    >>> enc = s.encode('utf-8')
    >>> prefix = bytes([len(enc)]) + b'\\0' * 7  # little-endian 8-byte length
    >>> read_unicodestring8(io.BytesIO(prefix + enc + b'junk')) == s
    True
    """
    count = read_uint8(f)
    assert count >= 0
    if count > sys.maxsize:
        raise ValueError('unicodestring8 byte count > sys.maxsize: %d' % count)
    payload = f.read(count)
    if len(payload) != count:
        raise ValueError(
            'expected %d bytes in a unicodestring8, but only %d remain' % (count,
            len(payload)))
    return str(payload, 'utf-8', 'surrogatepass')
# Opcode-argument descriptor: UTF-8 string with an 8-byte length prefix,
# decoded by read_unicodestring8 above.
unicodestring8 = ArgumentDescriptor(name='unicodestring8', n=
TAKEN_FROM_ARGUMENT8U, reader=read_unicodestring8, doc=
"""A counted Unicode string.
The first argument is an 8-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
"""
)
def read_decimalnl_short(f):
    """Read a newline-terminated decimal integer literal.

    The special strings b'00' and b'01' decode to False and True: that is
    how protocol-0 pickles encode booleans via the INT opcode.  Any other
    text is parsed with int(), so a trailing 'L' raises ValueError.
    """
    text = read_stringnl(f, decode=False, stripquotes=False)
    # Protocol-0 boolean encodings take priority over plain integers.
    if text == b'00':
        return False
    if text == b'01':
        return True
    return int(text)
def read_decimalnl_long(f):
    """Read a newline-terminated decimal integer literal, tolerating (and
    discarding) a Python-2-style trailing b'L'."""
    text = read_stringnl(f, decode=False, stripquotes=False)
    if text[-1:] == b'L':
        text = text[:-1]
    return int(text)
# Opcode-argument descriptor: decimal int without trailing 'L'
# (see read_decimalnl_short above for the boolean special cases).
decimalnl_short = ArgumentDescriptor(name='decimalnl_short', n=
UP_TO_NEWLINE, reader=read_decimalnl_short, doc=
"""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
"""
)
# Opcode-argument descriptor: decimal int with trailing 'L'
# (arbitrary precision; see read_decimalnl_long above).
decimalnl_long = ArgumentDescriptor(name='decimalnl_long', n=UP_TO_NEWLINE,
reader=read_decimalnl_long, doc=
"""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
"""
)
def read_floatnl(f):
    """Read a newline-terminated decimal float literal and return it as float."""
    text = read_stringnl(f, decode=False, stripquotes=False)
    return float(text)
# Opcode-argument descriptor: textual float (see read_floatnl above).
floatnl = ArgumentDescriptor(name='floatnl', n=UP_TO_NEWLINE, reader=
read_floatnl, doc=
"""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
"""
)
def read_float8(f):
    """Read a big-endian IEEE-754 double (struct format '>d') from *f*.

    >>> import io, struct
    >>> read_float8(io.BytesIO(struct.pack(">d", -1.25) + b"\\n"))
    -1.25
    """
    raw = f.read(8)
    if len(raw) != 8:
        raise ValueError('not enough data in stream to read float8')
    return _unpack('>d', raw)[0]
# Opcode-argument descriptor: binary 8-byte double (see read_float8 above).
float8 = ArgumentDescriptor(name='float8', n=8, reader=read_float8, doc=
"""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and pickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
"""
)
from pickle import decode_long
def read_long1(f):
    """Read a long with a 1-byte unsigned size prefix: that many bytes of
    little-endian two's-complement follow; size 0 decodes to 0.

    >>> import io
    >>> read_long1(io.BytesIO(b"\\x00"))
    0
    >>> read_long1(io.BytesIO(b"\\x02\\xff\\x7f"))
    32767
    >>> read_long1(io.BytesIO(b"\\x02\\x00\\x80"))
    -32768
    """
    size = read_uint1(f)
    payload = f.read(size)
    if len(payload) != size:
        raise ValueError('not enough data in stream to read long1')
    return decode_long(payload)
# Opcode-argument descriptor: LONG1 payload (see read_long1 above).
long1 = ArgumentDescriptor(name='long1', n=TAKEN_FROM_ARGUMENT1, reader=
read_long1, doc=
"""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
"""
)
def read_long4(f):
    """Read a long with a signed 4-byte little-endian size prefix.

    A negative size is rejected with ValueError; size 0 decodes to 0.

    >>> import io
    >>> read_long4(io.BytesIO(b"\\x00\\x00\\x00\\x00"))
    0
    >>> read_long4(io.BytesIO(b"\\x02\\x00\\x00\\x00\\xff\\x7f"))
    32767
    >>> read_long4(io.BytesIO(b"\\x02\\x00\\x00\\x00\\x00\\x80"))
    -32768
    """
    size = read_int4(f)
    if size < 0:
        raise ValueError('long4 byte count < 0: %d' % size)
    payload = f.read(size)
    if len(payload) != size:
        raise ValueError('not enough data in stream to read long4')
    return decode_long(payload)
# Opcode-argument descriptor: LONG4 payload (see read_long4 above).
long4 = ArgumentDescriptor(name='long4', n=TAKEN_FROM_ARGUMENT4, reader=
read_long4, doc=
"""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the int 0, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
"""
)
class StackObject(object):
    """Descriptor for one kind of object that can appear on the pickle
    machine's stack; repr() is just its short name."""

    __slots__ = 'name', 'obtype', 'doc'

    def __init__(self, name, obtype, doc):
        # name: short identifier; obtype: a type or tuple of types; doc: prose.
        assert isinstance(name, str)
        self.name = name
        if isinstance(obtype, tuple):
            for member in obtype:
                assert isinstance(member, type)
        else:
            assert isinstance(obtype, type)
        self.obtype = obtype
        assert isinstance(doc, str)
        self.doc = doc

    def __repr__(self):
        return self.name
# Stack-kind singletons referenced by opcode stack_before/stack_after specs.
pyint = pylong = StackObject(name='int', obtype=int, doc=
'A Python integer object.')
pyinteger_or_bool = StackObject(name='int_or_bool', obtype=(int, bool), doc
='A Python integer or boolean object.')
pybool = StackObject(name='bool', obtype=bool, doc='A Python boolean object.')
pyfloat = StackObject(name='float', obtype=float, doc='A Python float object.')
pybytes_or_str = pystring = StackObject(name='bytes_or_str', obtype=(bytes,
str), doc='A Python bytes or (Unicode) string object.')
pybytes = StackObject(name='bytes', obtype=bytes, doc='A Python bytes object.')
pyunicode = StackObject(name='str', obtype=str, doc=
'A Python (Unicode) string object.')
pynone = StackObject(name='None', obtype=type(None), doc=
'The Python None object.')
pytuple = StackObject(name='tuple', obtype=tuple, doc='A Python tuple object.')
pylist = StackObject(name='list', obtype=list, doc='A Python list object.')
pydict = StackObject(name='dict', obtype=dict, doc='A Python dict object.')
pyset = StackObject(name='set', obtype=set, doc='A Python set object.')
# NOTE(review): obtype here is set, not frozenset, as written -- confirm
# against upstream pickletools before "fixing".
pyfrozenset = StackObject(name='frozenset', obtype=set, doc=
'A Python frozenset object.')
anyobject = StackObject(name='any', obtype=object, doc=
'Any kind of object whatsoever.')
# markobject and stackslice are pseudo-objects used to describe opcodes
# that consume a variable number of stack items.
markobject = StackObject(name='mark', obtype=StackObject, doc=
"""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
"""
)
stackslice = StackObject(name='stackslice', obtype=StackObject, doc=
"""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many object are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
"""
)
class OpcodeInfo(object):
__slots__ = ('name', 'code', 'arg', 'stack_before', 'stack_after',
'proto', 'doc')
def __init__(self, name, code, arg, stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert | |
t0 = 1.0 - 2.0*H_D
t1 = acos(t0)
t2 = 2.0*(t0)
t3 = (H_D - H_D*H_D)**0.5
t4 = t1 - t2*t3
return (1./pi*t4)**0.5
def C_wedge_meter_Miller(D, H):
    r'''Discharge coefficient of a wedge flow meter from Miller's
    correlations, selected by pipe line size.

    For half-inch lines:

    .. math::
        C = 0.7883 + 0.107(1 - \beta^2)

    For 1 to 1.5 inch lines:

    .. math::
        C = 0.6143 + 0.718(1 - \beta^2)

    For 1.5 to 24 inch lines:

    .. math::
        C = 0.5433 + 0.2453(1 - \beta^2)

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    H : float
        Portion of the diameter of the clear segment of the pipe up to the
        wedge blocking flow; the height of the pipe up to the wedge, [m]

    Returns
    -------
    C : float
        Coefficient of discharge of the wedge flow meter, [-]

    Notes
    -----
    The equivalent diameter ratio beta comes from
    `diameter_ratio_wedge_meter`. Wedge meters vary in wedge angle, tap
    placement, and edge sharpness; [2]_ gives some experimental values.

    Examples
    --------
    >>> C_wedge_meter_Miller(D=0.1524, H=0.3*0.1524)
    0.7267069372687651

    References
    ----------
    .. [1] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    .. [2] "Effect of Wedge Shape and Pressure Tap Locations on the
       Characteristics of a Wedge Flowmeter." IJEMS Vol.01(5), October 1994.
    '''
    beta = diameter_ratio_wedge_meter(D, H)
    open_term = 1.0 - beta*beta
    if D <= 0.7*inch:
        # suggested limit is 0.5 inch for this branch
        return 0.7883 + 0.107*open_term
    if D <= 1.4*inch:
        # suggested limit is 1.5 inch for this branch
        return 0.6143 + 0.718*open_term
    # 1.5 to 24 inch lines
    return 0.5433 + 0.2453*open_term
def C_Reader_Harris_Gallagher_wet_venturi_tube(mg, ml, rhog, rhol, D, Do, H=1):
    r'''Calculates the coefficient of discharge of the wet gas venturi tube
    based on the geometry of the tube, mass flow rates of liquid and vapor
    through the tube, the density of the liquid and gas phases, and an
    adjustable coefficient `H`.

    .. math::
        C = 1 - 0.0463\exp(-0.05Fr_{gas, th}) \cdot \min\left(1,
        \sqrt{\frac{X}{0.016}}\right)

        Fr_{gas, th} = \frac{Fr_{\text{gas, densiometric}}}{\beta^{2.5}}

        X = \left(\frac{m_l}{m_g}\right) \sqrt{\frac{\rho_{1,g}}{\rho_l}}

        {Fr_{\text{gas, densiometric}}} = \frac{v_{gas}}{\sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}
        = \frac{4m_g}{\rho_{1,g} \pi D^2 \sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}

    Parameters
    ----------
    mg : float
        Mass flow rate of gas through the venturi tube, [kg/s]
    ml : float
        Mass flow rate of liquid through the venturi tube, [kg/s]
    rhog : float
        Density of gas at `P1`, [kg/m^3]
    rhol : float
        Density of liquid at `P1`, [kg/m^3]
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    H : float, optional
        A surface-tension effect coefficient used to adjust for different
        fluids, (1 for a hydrocarbon liquid, 1.35 for water, 0.79 for water in
        steam) [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the wet gas venturi tube flow meter
        (includes flow rate of gas ONLY), [-]

    Notes
    -----
    This model has more error than single phase differential pressure meters.
    The model was first published in [1]_, and became ISO 11583 later.
    The limits of this correlation according to [2]_ are as follows:

    .. math::
        0.4 \le \beta \le 0.75

        0 < X \le 0.3

        Fr_{gas, th} > 3

        \frac{\rho_g}{\rho_l} > 0.02

        D \ge 50 \text{ mm}

    The Chisholm exponent :math:`n`, parameter :math:`C_{Ch}`, and
    over-reading factor :math:`\phi` that ISO 11583 defines alongside this
    correlation do not enter the expression for `C`; they belong to the
    over-reading calculation and are intentionally not computed here.

    Examples
    --------
    >>> C_Reader_Harris_Gallagher_wet_venturi_tube(mg=5.31926, ml=5.31926/2,
    ... rhog=50.0, rhol=800., D=.1, Do=.06, H=1)
    0.9754210845876333

    References
    ----------
    .. [1] Reader-Harris, Michael. An Improved Model for Venturi-Tube
       Over-Reading in Wet Gas, 2009.
    .. [2] ISO/TR 11583:2012 Measurement of Wet Gas Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits.
    '''
    # Superficial gas velocity in the upstream pipe
    V = 4*mg/(rhog*pi*D**2)
    # Densimetric gas Froude number based on the pipe diameter
    Frg = Froude_densimetric(V, L=D, rho1=rhol, rho2=rhog, heavy=False)
    beta = Do/D
    # Throat densimetric Froude number
    Fr_gas_th = Frg*beta**-2.5
    # Lockhart-Martinelli parameter
    X = ml/mg*(rhog/rhol)**0.5
    # Dead intermediates (n, C_Ch, phi) from the over-reading model removed;
    # C depends only on Fr_gas_th and X.
    C = 1.0 - 0.0463*exp(-0.05*Fr_gas_th)*min(1.0, (X/0.016)**0.5)
    return C
def dP_Reader_Harris_Gallagher_wet_venturi_tube(D, Do, P1, P2, ml, mg, rhol,
                                                rhog, H=1):
    r'''Compute the non-recoverable (permanent) pressure drop of a wet gas
    venturi nozzle from its geometry, the measured differential pressure, the
    liquid and gas mass flow rates and densities, and the fluid coefficient
    `H`.

    .. math::
        Y = \frac{\Delta \bar \omega}{\Delta P} - 0.0896 - 0.48\beta^9

        Y_{max} = 0.61\exp\left[-11\frac{\rho_{1,g}}{\rho_l}
        - 0.045 \frac{Fr_{gas}}{H}\right]

        \frac{Y}{Y_{max}} = 1 - \exp\left[-35 X^{0.75} \exp
        \left( \frac{-0.28Fr_{gas}}{H}\right)\right]

        X = \left(\frac{m_l}{m_g}\right) \sqrt{\frac{\rho_{1,g}}{\rho_l}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of venturi tube at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of venturi tube at the cross-
        section of the pressure tap, [Pa]
    ml : float
        Mass flow rate of liquid through the venturi tube, [kg/s]
    mg : float
        Mass flow rate of gas through the venturi tube, [kg/s]
    rhol : float
        Density of liquid at `P1`, [kg/m^3]
    rhog : float
        Density of gas at `P1`, [kg/m^3]
    H : float, optional
        A surface-tension effect coefficient used to adjust for different
        fluids, (1 for a hydrocarbon liquid, 1.35 for water, 0.79 for water in
        steam) [-]

    Returns
    -------
    dw : float
        Non-recoverable pressure drop of the wet gas venturi tube, [Pa]

    Notes
    -----
    The model was first published in [1]_, and became ISO 11583 later.

    Examples
    --------
    >>> dP_Reader_Harris_Gallagher_wet_venturi_tube(D=.1, Do=.06, H=1,
    ... P1=6E6, P2=6E6-5E4, ml=5.31926/2, mg=5.31926, rhog=50.0, rhol=800.,)
    16957.43843129572

    References
    ----------
    .. [1] Reader-Harris, Michael. An Improved Model for Venturi-Tube
       Over-Reading in Wet Gas, 2009.
    .. [2] ISO/TR 11583:2012 Measurement of Wet Gas Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits.
    '''
    beta = Do/D
    # Lockhart-Martinelli parameter
    X = ml/mg*(rhog/rhol)**0.5
    # Superficial gas velocity -> densimetric gas Froude number
    Vg = 4*mg/(rhog*pi*D**2)
    Frg = Froude_densimetric(Vg, L=D, rho1=rhol, rho2=rhog, heavy=False)
    Y_over_Ymax = 1.0 - exp(-35.0*X**0.75*exp(-0.28*Frg/H))
    Y_max = 0.61*exp(-11.0*rhog/rhol - 0.045*Frg/H)
    Y = Y_max*Y_over_Ymax
    # Solve the first equation above for the permanent loss dw
    return (P1 - P2)*(Y + 0.0896 + 0.48*beta**9)
# Venturi tube loss coefficients as a function of Re
# Discharge coefficient C vs. Reynolds number interpolation tables for
# venturi tubes of different constructions (ISO 5167-4 style data).
# NOTE: first Re point corrected from 4E5 to 4E4 so the abscissa is
# monotonically increasing (required for interpolation; all other tables
# here ascend, cf. rough_welded_convergent_venturi_Res below).
as_cast_convergent_venturi_Res = [4E4, 6E4, 1E5, 1.5E5]
as_cast_convergent_venturi_Cs = [0.957, 0.966, 0.976, 0.982]
machined_convergent_venturi_Res = [5E4, 1E5, 2E5, 3E5,
                                   7.5E5, # 5E5 to 1E6
                                   1.5E6, # 1E6 to 2E6
                                   5E6] # 2E6 to 1E8
machined_convergent_venturi_Cs = [0.970, 0.977, 0.992, 0.998, 0.995, 1.000, 1.010]
rough_welded_convergent_venturi_Res = [4E4, 6E4, 1E5]
rough_welded_convergent_venturi_Cs = [0.96, 0.97, 0.98]
as_cast_convergent_entrance_machined_venturi_Res = [1E4, 6E4, 1E5, 1.5E5,
                                                    3.5E5, # 2E5 to 5E5
                                                    3.2E6] # 5E5 to 3.2E6
as_cast_convergent_entrance_machined_venturi_Cs = [0.963, 0.978, 0.98, 0.987, 0.992, 0.995]
# Constant discharge coefficients for meter types treated as Re-independent
CONE_METER_C = 0.82
ROUGH_WELDED_CONVERGENT_VENTURI_TUBE_C = 0.985
MACHINED_CONVERGENT_VENTURI_TUBE_C = 0.995
AS_CAST_VENTURI_TUBE_C = 0.984
def _differential_pressure_C_epsilon(D, D2, m, P1, P2, rho, mu, k, meter_type,
taps=None):
'''Helper function only.
'''
if meter_type == ISO_5167_ORIFICE:
C = C_Reader_Harris_Gallagher(D=D, Do=D2, rho=rho, mu=mu, m=m, taps=taps)
epsilon = orifice_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
elif meter_type == LONG_RADIUS_NOZZLE:
epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
C = C_long_radius_nozzle(D=D, Do=D2, rho=rho, mu=mu, m=m)
elif meter_type == ISA_1932_NOZZLE:
epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
C = C_ISA_1932_nozzle(D=D, | |
"""Utility to provide submission and comment statistics in a subreddit."""
from __future__ import print_function
from collections import defaultdict
from datetime import datetime
from tempfile import mkstemp
import codecs
import gc
import logging
import os
import re
import time
from praw import Reddit
from prawcore.exceptions import RequestException
from six import iteritems, text_type as tt
from .helpers import AGENT, arg_parser, check_for_updates
# Length of one day in seconds; used for rate math and date windows.
SECONDS_IN_A_DAY = 60 * 60 * 24
# Collapses any run of whitespace (including newlines) into a single space.
RE_WHITESPACE = re.compile(r"\s+")
# View names that map to reddit's time-filtered "top" listings.
TOP_VALUES = {"all", "day", "month", "week", "year"}
# Package-level logger shared by this module.
logger = logging.getLogger(__package__)
class MiniComment(object):
    """Memory-light, slotted stand-in for a praw Comment."""
    __slots__ = ("author", "created_utc", "id", "score", "submission")
    def __init__(self, comment, submission):
        """Copy the interesting attributes of ``comment``.

        ``author`` and ``submission`` need special handling, so every other
        slot is copied straight off the source object.
        """
        for name in self.__slots__:
            if name not in ("author", "submission"):
                setattr(self, name, getattr(comment, name))
        # Store the author by name (None for deleted accounts) so the heavy
        # praw Redditor object is not kept alive.
        self.author = str(comment.author) if comment.author else None
        self.submission = submission
class MiniSubmission(object):
    """Memory-light, slotted stand-in for a praw Submission."""
    __slots__ = (
        "author",
        "created_utc",
        "distinguished",
        "id",
        "num_comments",
        "permalink",
        "score",
        "title",
        "url",
    )
    def __init__(self, submission):
        """Copy the interesting attributes of ``submission``."""
        # All slots except ``author`` are plain copies of the source object.
        for name in self.__slots__:
            if name != "author":
                setattr(self, name, getattr(submission, name))
        # Store the author by name (None for deleted accounts) so the heavy
        # praw Redditor object is not kept alive.
        self.author = str(submission.author) if submission.author else None
class SubredditStats(object):
    """Contain all the functionality of the subreddit_stats command."""
    # Markdown fragments used when assembling the report submission.
    post_footer = tt(
        ">Generated with [BBoe](/u/bboe)'s [Subreddit Stats]"
        "(https://github.com/praw-dev/prawtools)"
    )
    post_header = tt("---\n###{}\n")
    post_prefix = tt("Subreddit Stats:")
    @staticmethod
    def _permalink(item):
        # Submissions and comments have different reddit permalink shapes.
        if isinstance(item, MiniSubmission):
            return tt("/comments/{}").format(item.id)
        else:
            return tt("/comments/{}//{}?context=1").format(item.submission.id, item.id)
    @staticmethod
    def _points(points):
        # Human-readable score with correct singular/plural form.
        return "1 point" if points == 1 else "{} points".format(points)
    @staticmethod
    def _rate(items, duration):
        # Items per day; falls back to the raw count when duration is zero.
        return 86400.0 * items / duration if duration else items
    @staticmethod
    def _safe_title(submission):
        """Return titles with whitespace replaced by spaces and stripped."""
        return RE_WHITESPACE.sub(" ", submission.title).strip()
    @staticmethod
    def _save_report(title, body):
        # Fallback output path: write the report to a temp .md file in CWD
        # when submitting to reddit fails.
        descriptor, filename = mkstemp(".md", dir=".")
        os.close(descriptor)
        with codecs.open(filename, "w", "utf-8") as fp:
            fp.write("{}\n\n{}".format(title, body))
        logger.info("Report saved to {}".format(filename))
    @staticmethod
    def _user(user):
        # Render a username link, or a placeholder for deleted accounts.
        return "_deleted_" if user is None else tt("/u/{}").format(user)
    def __init__(self, subreddit, site, distinguished, reddit=None):
        """Initialize the SubredditStats instance with config options."""
        self.commenters = defaultdict(list)
        self.comments = []
        self.distinguished = distinguished
        self.min_date = 0
        # Exclude the most recent day; young posts' scores are not settled.
        self.max_date = time.time() - SECONDS_IN_A_DAY
        self.reddit = reddit or Reddit(site, check_for_updates=False, user_agent=AGENT)
        self.submissions = {}
        self.submitters = defaultdict(list)
        self.submit_subreddit = self.reddit.subreddit("subreddit_stats")
        self.subreddit = self.reddit.subreddit(subreddit)
    def basic_stats(self):
        """Return a markdown representation of simple statistics."""
        comment_score = sum(comment.score for comment in self.comments)
        if self.comments:
            # self.comments is sorted by created_utc (see process_commenters),
            # so the first/last elements bound the time span.
            comment_duration = (
                self.comments[-1].created_utc - self.comments[0].created_utc
            )
            comment_rate = self._rate(len(self.comments), comment_duration)
        else:
            comment_rate = 0
        submission_duration = self.max_date - self.min_date
        submission_rate = self._rate(len(self.submissions), submission_duration)
        submission_score = sum(sub.score for sub in self.submissions.values())
        values = [
            ("Total", len(self.submissions), len(self.comments)),
            (
                "Rate (per day)",
                "{:.2f}".format(submission_rate),
                "{:.2f}".format(comment_rate),
            ),
            ("Unique Redditors", len(self.submitters), len(self.commenters)),
            ("Combined Score", submission_score, comment_score),
        ]
        retval = "Period: {:.2f} days\n\n".format(submission_duration / 86400.0)
        retval += "||Submissions|Comments|\n:-:|--:|--:\n"
        for quad in values:
            retval += "__{}__|{}|{}\n".format(*quad)
        return retval + "\n"
    def fetch_recent_submissions(self, max_duration):
        """Fetch recent submissions in subreddit with boundaries.
        Does not include posts within the last day as their scores may not be
        representative.
        :param max_duration: When set, specifies the number of days to include
        """
        if max_duration:
            self.min_date = self.max_date - SECONDS_IN_A_DAY * max_duration
        for submission in self.subreddit.new(limit=None):
            # The "new" listing is newest-first, so stop at the first
            # submission older than the window.
            if submission.created_utc <= self.min_date:
                break
            if submission.created_utc > self.max_date:
                continue
            self.submissions[submission.id] = MiniSubmission(submission)
    def fetch_submissions(self, submissions_callback, *args):
        """Wrap the submissions_callback function."""
        logger.debug("Fetching submissions")
        submissions_callback(*args)
        logger.info("Found {} submissions".format(len(self.submissions)))
        if not self.submissions:
            return
        # Tighten the reporting window to the actual span of fetched posts.
        self.min_date = min(x.created_utc for x in self.submissions.values())
        self.max_date = max(x.created_utc for x in self.submissions.values())
        self.process_submitters()
        self.process_commenters()
    def fetch_top_submissions(self, top):
        """Fetch top submissions by some top value.
        :param top: One of week, month, year, all
        :returns: True if any submissions were found.
        """
        for submission in self.subreddit.top(limit=None, time_filter=top):
            self.submissions[submission.id] = MiniSubmission(submission)
    def process_commenters(self):
        """Group comments by author."""
        for index, submission in enumerate(self.submissions.values()):
            if submission.num_comments == 0:
                continue
            # Re-fetch the full submission; MiniSubmission carries no comments.
            real_submission = self.reddit.submission(id=submission.id)
            real_submission.comment_sort = "top"
            # Retry the comment expansion up to 3 times; re-raise on the last.
            for i in range(3):
                try:
                    real_submission.comments.replace_more(limit=0)
                    break
                except RequestException:
                    if i >= 2:
                        raise
                    logger.debug(
                        "Failed to fetch submission {}, retrying".format(submission.id)
                    )
            self.comments.extend(
                MiniComment(comment, submission)
                for comment in real_submission.comments.list()
                if self.distinguished or comment.distinguished is None
            )
            if index % 50 == 49:
                logger.debug(
                    "Completed: {:4d}/{} submissions".format(
                        index + 1, len(self.submissions)
                    )
                )
            # Clean up to reduce memory usage
            submission = None
            gc.collect()
        self.comments.sort(key=lambda x: x.created_utc)
        for comment in self.comments:
            if comment.author:
                self.commenters[comment.author].append(comment)
    def process_submitters(self):
        """Group submissions by author."""
        for submission in self.submissions.values():
            if submission.author and (
                self.distinguished or submission.distinguished is None
            ):
                self.submitters[submission.author].append(submission)
    def publish_results(self, view, submitters, commenters):
        """Submit the results to the subreddit. Has no return value (None)."""
        def timef(timestamp, date_only=False):
            """Return a suitable string representaation of the timestamp."""
            dtime = datetime.fromtimestamp(timestamp)
            if date_only:
                retval = dtime.strftime("%Y-%m-%d")
            else:
                retval = dtime.strftime("%Y-%m-%d %H:%M PDT")
            return retval
        basic = self.basic_stats()
        top_commenters = self.top_commenters(commenters)
        top_comments = self.top_comments()
        top_submissions = self.top_submissions()
        # Decrease number of top submitters if body is too large.
        # (reddit selftext limit; first pass always runs because body is None)
        body = None
        while body is None or len(body) > 40000 and submitters > 0:
            body = (
                basic
                + self.top_submitters(submitters)
                + top_commenters
                + top_submissions
                + top_comments
                + self.post_footer
            )
            submitters -= 1
        title = "{} {} {}posts from {} to {}".format(
            self.post_prefix,
            str(self.subreddit),
            "top " if view in TOP_VALUES else "",
            timef(self.min_date, True),
            timef(self.max_date),
        )
        try: # Attempt to make the submission
            return self.submit_subreddit.submit(title, selftext=body)
        except Exception:
            # Fall back to a local file so the work is not lost.
            logger.exception("Failed to submit to {}".format(self.submit_subreddit))
            self._save_report(title, body)
    def run(self, view, submitters, commenters):
        """Run stats and return the created Submission."""
        logger.info("Analyzing subreddit: {}".format(self.subreddit))
        if view in TOP_VALUES:
            callback = self.fetch_top_submissions
        else:
            # A numeric view means "the last N days" via the new listing.
            callback = self.fetch_recent_submissions
            view = int(view)
        self.fetch_submissions(callback, view)
        if not self.submissions:
            logger.warning("No submissions were found.")
            return
        return self.publish_results(view, submitters, commenters)
    def top_commenters(self, num):
        """Return a markdown representation of the top commenters."""
        num = min(num, len(self.commenters))
        if num <= 0:
            return ""
        # Sort by total score desc, then comment count desc, then name.
        top_commenters = sorted(
            iteritems(self.commenters),
            key=lambda x: (-sum(y.score for y in x[1]), -len(x[1]), str(x[0])),
        )[:num]
        retval = self.post_header.format("Top Commenters")
        for author, comments in top_commenters:
            retval += "1. {} ({}, {} comment{})\n".format(
                self._user(author),
                self._points(sum(x.score for x in comments)),
                len(comments),
                "s" if len(comments) != 1 else "",
            )
        return "{}\n".format(retval)
    def top_submitters(self, num):
        """Return a markdown representation of the top submitters."""
        num = min(num, len(self.submitters))
        if num <= 0:
            return ""
        # Sort by total score desc, then submission count desc, then name.
        top_submitters = sorted(
            iteritems(self.submitters),
            key=lambda x: (-sum(y.score for y in x[1]), -len(x[1]), str(x[0])),
        )[:num]
        retval = self.post_header.format("Top Submitters' Top Submissions")
        for (author, submissions) in top_submitters:
            retval += "1. {}, {} submission{}: {}\n".format(
                self._points(sum(x.score for x in submissions)),
                len(submissions),
                "s" if len(submissions) != 1 else "",
                self._user(author),
            )
            # List each submitter's ten best submissions beneath their entry.
            for sub in sorted(submissions, key=lambda x: (-x.score, x.title))[:10]:
                title = self._safe_title(sub)
                # Self posts have their permalink embedded in url; link posts
                # get an external markdown link.
                if sub.permalink in sub.url:
                    retval += tt(" 1. {}").format(title)
                else:
                    retval += tt(" 1. [{}]({})").format(title, sub.url)
                retval += " ({}, [{} comment{}]({}))\n".format(
                    self._points(sub.score),
                    sub.num_comments,
                    "s" if sub.num_comments != 1 else "",
                    self._permalink(sub),
                )
            retval += "\n"
        return retval
    def top_submissions(self):
        """Return a markdown representation of the top submissions."""
        num = min(10, len(self.submissions))
        if num <= 0:
            return ""
        top_submissions = sorted(
            [
                x
                for x in self.submissions.values()
                if self.distinguished or x.distinguished is None
            ],
            key=lambda x: (-x.score, -x.num_comments, x.title),
        )[:num]
        if not top_submissions:
            return ""
        retval = self.post_header.format("Top Submissions")
        for sub in top_submissions:
            title = self._safe_title(sub)
            if sub.permalink in sub.url:
                retval += tt("1. {}").format(title)
            else:
                retval += tt("1. [{}]({})").format(title, sub.url)
            retval += " by {} ({}, [{} comment{}]({}))\n".format(
                self._user(sub.author),
                self._points(sub.score),
                sub.num_comments,
                "s" if sub.num_comments != 1 else "",
                self._permalink(sub),
            )
        return tt("{}\n").format(retval)
    def top_comments(self):
        """Return a markdown representation of the top comments."""
        num = min(10, len(self.comments))
        if num <= 0:
            return ""
        top_comments = sorted(self.comments, key=lambda x: (-x.score, str(x.author)))[
            :num
        ]
        retval = self.post_header.format("Top Comments")
        for comment in top_comments:
            title = self._safe_title(comment.submission)
            retval += tt("1. {}: {}'s [comment]({}) in {}\n").format(
                self._points(comment.score),
                self._user(comment.author),
                self._permalink(comment),
                title,
            )
        return tt("{}\n").format(retval)
def main():
"""Provide the entry point to the subreddit_stats command."""
parser = arg_parser(usage="usage: %prog [options] SUBREDDIT VIEW")
parser.add_option(
"-c",
"--commenters",
type="int",
default=10,
help="Number of top commenters to display " "[default %default]",
)
parser.add_option(
"-d",
"--distinguished",
action="store_true",
help=(
"Include distinguished subissions and "
"comments (default: False). Note that regular "
"comments of distinguished submissions will still "
"be included."
),
)
parser.add_option(
"-s",
"--submitters",
type="int",
default=10,
help="Number of top submitters to display " "[default %default]",
)
options, args = parser.parse_args()
if options.verbose == 1:
logger.setLevel(logging.INFO)
elif options.verbose > 1:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.NOTSET)
logger.addHandler(logging.StreamHandler())
if len(args) != 2:
parser.error("SUBREDDIT and VIEW must be | |
def test_current_pers_r1_notr2(self):
c1 = self._fixture(False, True, True, False)
self._assert_is_orphan(c1)
def test_leg_pers_notr1_notr2(self):
c1 = self._fixture(True, True, False, False)
self._assert_is_orphan(c1)
def test_current_pers_notr1_notr2(self):
c1 = self._fixture(False, True, True, False)
self._assert_is_orphan(c1)
def test_leg_transient_r1_r2(self):
c1 = self._fixture(True, False, True, True)
self._assert_not_orphan(c1)
def test_current_transient_r1_r2(self):
c1 = self._fixture(False, False, True, True)
self._assert_not_orphan(c1)
def test_leg_transient_r1_notr2(self):
c1 = self._fixture(True, False, True, False)
self._assert_not_orphan(c1)
def test_current_transient_r1_notr2(self):
c1 = self._fixture(False, False, True, False)
self._assert_is_orphan(c1)
def test_leg_transient_notr1_notr2(self):
c1 = self._fixture(True, False, False, False)
self._assert_is_orphan(c1)
def test_current_transient_notr1_notr2(self):
c1 = self._fixture(False, False, False, False)
self._assert_is_orphan(c1)
def test_leg_transient_notr1_notr2_noevent(self):
c1 = self._fixture(True, False, False, False, False)
self._assert_is_orphan(c1)
def test_current_transient_notr1_notr2_noevent(self):
c1 = self._fixture(False, False, False, False, False)
self._assert_is_orphan(c1)
def test_leg_persistent_notr1_notr2_noevent(self):
c1 = self._fixture(True, True, False, False, False)
self._assert_not_orphan(c1)
def test_current_persistent_notr1_notr2_noevent(self):
c1 = self._fixture(False, True, False, False, False)
self._assert_not_orphan(c1)
class O2MConflictTest(fixtures.MappedTest):
    """test that O2M dependency detects a change in parent, does the
    right thing, and updates the collection/attribute.
    """
    @classmethod
    def define_tables(cls, metadata):
        # child.parent_id is NOT NULL, so a flushed child must always point
        # at some parent row — that is what makes the "move" interesting.
        Table(
            "parent",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
        )
        Table(
            "child",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column(
                "parent_id", Integer, ForeignKey("parent.id"), nullable=False
            ),
        )
    @classmethod
    def setup_classes(cls):
        class Parent(cls.Comparable):
            pass
        class Child(cls.Comparable):
            pass
    def _do_move_test(self, delete_old):
        # Attach c1 to p1, flush, then move c1 to p2 (optionally deleting p1)
        # and verify the row ends up referencing p2.  The fixture works for
        # both scalar (uselist=False) and collection (uselist=True) mappings.
        Parent, Child = self.classes.Parent, self.classes.Child
        with Session(autoflush=False) as sess:
            p1, p2, c1 = Parent(), Parent(), Child()
            if Parent.child.property.uselist:
                p1.child.append(c1)
            else:
                p1.child = c1
            sess.add_all([p1, c1])
            sess.flush()
            if delete_old:
                sess.delete(p1)
            if Parent.child.property.uselist:
                p2.child.append(c1)
            else:
                p2.child = c1
            sess.add(p2)
            sess.flush()
            eq_(sess.query(Child).filter(Child.parent_id == p2.id).all(), [c1])
    def test_o2o_delete_old(self):
        # Plain one-to-one, no cascade options.
        Child, Parent, parent, child = (
            self.classes.Child,
            self.classes.Parent,
            self.tables.parent,
            self.tables.child,
        )
        mapper(
            Parent,
            parent,
            properties={"child": relationship(Child, uselist=False)},
        )
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)
    def test_o2m_delete_old(self):
        # Plain one-to-many collection, no cascade options.
        Child, Parent, parent, child = (
            self.classes.Child,
            self.classes.Parent,
            self.tables.parent,
            self.tables.child,
        )
        mapper(
            Parent,
            parent,
            properties={"child": relationship(Child, uselist=True)},
        )
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)
    def test_o2o_backref_delete_old(self):
        # One-to-one with a backref that does not cascade back.
        Child, Parent, parent, child = (
            self.classes.Child,
            self.classes.Parent,
            self.tables.parent,
            self.tables.child,
        )
        mapper(
            Parent,
            parent,
            properties={
                "child": relationship(
                    Child,
                    uselist=False,
                    backref=backref("parent", cascade_backrefs=False),
                )
            },
        )
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)
    def test_o2o_delcascade_delete_old(self):
        # One-to-one with "all, delete" cascade on the parent side.
        Child, Parent, parent, child = (
            self.classes.Child,
            self.classes.Parent,
            self.tables.parent,
            self.tables.child,
        )
        mapper(
            Parent,
            parent,
            properties={
                "child": relationship(
                    Child, uselist=False, cascade="all, delete"
                )
            },
        )
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)
    def test_o2o_delorphan_delete_old(self):
        # One-to-one with delete-orphan; moving the child to p2 must keep it
        # from being considered an orphan of p1.
        Child, Parent, parent, child = (
            self.classes.Child,
            self.classes.Parent,
            self.tables.parent,
            self.tables.child,
        )
        mapper(
            Parent,
            parent,
            properties={
                "child": relationship(
                    Child, uselist=False, cascade="all, delete, delete-orphan"
                )
            },
        )
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)
    def test_o2o_delorphan_backref_delete_old(self):
        # delete-orphan plus a non-cascading backref.
        Child, Parent, parent, child = (
            self.classes.Child,
            self.classes.Parent,
            self.tables.parent,
            self.tables.child,
        )
        mapper(
            Parent,
            parent,
            properties={
                "child": relationship(
                    Child,
                    uselist=False,
                    cascade="all, delete, delete-orphan",
                    backref=backref("parent", cascade_backrefs=False),
                )
            },
        )
        mapper(Child, child)
        self._do_move_test(True)
        self._do_move_test(False)
    def test_o2o_backref_delorphan_delete_old(self):
        # Relationship configured from the Child side (many-to-one with a
        # scalar backref) carrying the delete-orphan cascade.
        Child, Parent, parent, child = (
            self.classes.Child,
            self.classes.Parent,
            self.tables.parent,
            self.tables.child,
        )
        mapper(Parent, parent)
        mapper(
            Child,
            child,
            properties={
                "parent": relationship(
                    Parent,
                    uselist=False,
                    single_parent=True,
                    backref=backref("child", uselist=False),
                    cascade="all,delete,delete-orphan",
                    cascade_backrefs=False,
                )
            },
        )
        self._do_move_test(True)
        self._do_move_test(False)
    def test_o2m_backref_delorphan_delete_old(self):
        # Same as above but the backref is a collection (one-to-many view).
        Child, Parent, parent, child = (
            self.classes.Child,
            self.classes.Parent,
            self.tables.parent,
            self.tables.child,
        )
        mapper(Parent, parent)
        mapper(
            Child,
            child,
            properties={
                "parent": relationship(
                    Parent,
                    uselist=False,
                    single_parent=True,
                    backref=backref("child", uselist=True),
                    cascade="all,delete,delete-orphan",
                    cascade_backrefs=False,
                )
            },
        )
        self._do_move_test(True)
        self._do_move_test(False)
class PartialFlushTest(fixtures.MappedTest):
    """test cascade behavior as it relates to object lists passed to flush().
    """
    @classmethod
    def define_tables(cls, metadata):
        # "base"/"noninh_child" exercise a plain o2m; "parent"/"inh_child"
        # build a joined-table inheritance hierarchy on top of "base".
        Table(
            "base",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("descr", String(50)),
        )
        Table(
            "noninh_child",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("base_id", Integer, ForeignKey("base.id")),
        )
        Table(
            "parent",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
        )
        Table(
            "inh_child",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column("parent_id", Integer, ForeignKey("parent.id")),
        )
    def test_o2m_m2o(self):
        # Partial flush: flushing just the parent flushes its children (o2m
        # cascades down), but flushing just a child does not flush the parent.
        base, noninh_child = self.tables.base, self.tables.noninh_child
        class Base(fixtures.ComparableEntity):
            pass
        class Child(fixtures.ComparableEntity):
            pass
        mapper(
            Base,
            base,
            properties={"children": relationship(Child, backref="parent")},
        )
        mapper(Child, noninh_child)
        sess = Session()
        c1, c2 = Child(), Child()
        b1 = Base(descr="b1", children=[c1, c2])
        sess.add(b1)
        assert c1 in sess.new
        assert c2 in sess.new
        sess.flush([b1])
        # c1, c2 get cascaded into the session on o2m.
        # not sure if this is how I like this
        # to work but that's how it works for now.
        assert c1 in sess and c1 not in sess.new
        assert c2 in sess and c2 not in sess.new
        assert b1 in sess and b1 not in sess.new
        sess = Session()
        c1, c2 = Child(), Child()
        b1 = Base(descr="b1", children=[c1, c2])
        sess.add(b1)
        sess.flush([c1])
        # m2o, otoh, doesn't cascade up the other way.
        assert c1 in sess and c1 not in sess.new
        assert c2 in sess and c2 in sess.new
        assert b1 in sess and b1 in sess.new
        sess = Session()
        c1, c2 = Child(), Child()
        b1 = Base(descr="b1", children=[c1, c2])
        sess.add(b1)
        sess.flush([c1, c2])
        # m2o, otoh, doesn't cascade up the other way.
        assert c1 in sess and c1 not in sess.new
        assert c2 in sess and c2 not in sess.new
        assert b1 in sess and b1 in sess.new
    def test_circular_sort(self):
        """test ticket 1306"""
        # Joined-inheritance graph where flushing one child must not drag in
        # the parent or the sibling children.
        base, inh_child, parent = (
            self.tables.base,
            self.tables.inh_child,
            self.tables.parent,
        )
        class Base(fixtures.ComparableEntity):
            pass
        class Parent(Base):
            pass
        class Child(Base):
            pass
        mapper(Base, base)
        mapper(
            Child,
            inh_child,
            inherits=Base,
            properties={
                "parent": relationship(
                    Parent,
                    backref="children",
                    primaryjoin=inh_child.c.parent_id == parent.c.id,
                )
            },
        )
        mapper(Parent, parent, inherits=Base)
        sess = Session()
        p1 = Parent()
        c1, c2, c3 = Child(), Child(), Child()
        p1.children = [c1, c2, c3]
        sess.add(p1)
        sess.flush([c1])
        # Only c1 was flushed; p1 and the other children remain pending.
        assert p1 in sess.new
        assert c1 not in sess.new
        assert c2 in sess.new
class SubclassCascadeTest(fixtures.DeclarativeMappedTest):
    # Cascades must traverse relationships declared on subclasses
    # (Engineer.languages, JavaLanguage.maven_builds), not just those on the
    # polymorphic base classes.
    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic
        class Company(Base):
            __tablename__ = "company"
            id = Column(Integer, primary_key=True)
            name = Column(String(50))
            employees = relationship("Employee", cascade="all, delete-orphan")
        class Employee(Base):
            __tablename__ = "employee"
            id = Column(Integer, primary_key=True)
            name = Column(String(50))
            type = Column(String(50))
            company_id = Column(ForeignKey("company.id"))
            __mapper_args__ = {
                "polymorphic_identity": "employee",
                "polymorphic_on": type,
            }
        class Engineer(Employee):
            __tablename__ = "engineer"
            id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
            engineer_name = Column(String(30))
            # Relationship defined on the subclass only.
            languages = relationship("Language", cascade="all, delete-orphan")
            __mapper_args__ = {"polymorphic_identity": "engineer"}
        class MavenBuild(Base):
            __tablename__ = "maven_build"
            id = Column(Integer, primary_key=True)
            java_language_id = Column(
                ForeignKey("java_language.id"), nullable=False
            )
        class Manager(Employee):
            __tablename__ = "manager"
            id = Column(Integer, ForeignKey("employee.id"), primary_key=True)
            manager_name = Column(String(30))
            __mapper_args__ = {"polymorphic_identity": "manager"}
        class Language(Base):
            __tablename__ = "language"
            id = Column(Integer, primary_key=True)
            engineer_id = Column(ForeignKey("engineer.id"), nullable=False)
            name = Column(String(50))
            type = Column(String(50))
            __mapper_args__ = {
                "polymorphic_on": type,
                "polymorphic_identity": "language",
            }
        class JavaLanguage(Language):
            __tablename__ = "java_language"
            id = Column(ForeignKey("language.id"), primary_key=True)
            # Second-level subclass relationship for the cascade to reach.
            maven_builds = relationship(
                "MavenBuild", cascade="all, delete-orphan"
            )
            __mapper_args__ = {"polymorphic_identity": "java_language"}
    def test_cascade_iterator_polymorphic(self):
        # cascade_iterator must yield objects reachable through
        # subclass-declared relationships at every level.
        (
            Company,
            Employee,
            Engineer,
            Language,
            JavaLanguage,
            MavenBuild,
        ) = self.classes(
            "Company",
            "Employee",
            "Engineer",
            "Language",
            "JavaLanguage",
            "MavenBuild",
        )
        obj = Company(
            employees=[
                Engineer(
                    languages=[
                        JavaLanguage(name="java", maven_builds=[MavenBuild()])
                    ]
                )
            ]
        )
        eng = obj.employees[0]
        lang = eng.languages[0]
        maven_build = lang.maven_builds[0]
        from sqlalchemy import inspect
        state = inspect(obj)
        it = inspect(Company).cascade_iterator("save-update", state)
        eq_(set([rec[0] for rec in it]), set([eng, maven_build, lang]))
        state = inspect(eng)
        it = inspect(Employee).cascade_iterator("save-update", state)
        eq_(set([rec[0] for rec in it]), set([maven_build, lang]))
    def test_delete_orphan_round_trip(self):
        # Emptying Company.employees must delete-orphan the whole chain down
        # to Language rows via the subclass relationships.
        (
            Company,
            Employee,
            Engineer,
            Language,
            JavaLanguage,
            MavenBuild,
        ) = self.classes(
            "Company",
            "Employee",
            "Engineer",
            "Language",
            "JavaLanguage",
            "MavenBuild",
        )
        obj = Company(
            employees=[
                Engineer(
                    languages=[
                        JavaLanguage(name="java", maven_builds=[MavenBuild()])
                    ]
                )
            ]
        )
        s = Session()
        s.add(obj)
        s.commit()
        obj.employees = []
        s.commit()
        eq_(s.query(Language).count(), 0)
class ViewonlyFlagWarningTest(fixtures.MappedTest):
"""test for #4993.
In 1.4, this moves to test/orm/test_cascade, deprecation warnings
become errors, will then be for #4994.
"""
    @classmethod
    def define_tables(cls, metadata):
        # Minimal users/orders schema; orders.user_id is deliberately NOT a
        # ForeignKey, so the mappings under test need an explicit
        # primaryjoin with foreign().
        Table(
            "users",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("name", String(30)),
        )
        Table(
            "orders",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("user_id", Integer),
            Column("description", String(30)),
        )
    @classmethod
    def setup_classes(cls):
        # Plain comparable classes; each test maps them itself so it can
        # vary the cascade/viewonly settings under test.
        class User(cls.Comparable):
            pass
        class Order(cls.Comparable):
            pass
    # Each combination is (cascade settings passed in, subset expected to be
    # named in the error message).
    @testing.combinations(
        ({"delete"}, {"delete"}),
        (
            {"all, delete-orphan"},
            {"delete", "delete-orphan", "merge", "save-update"},
        ),
        ({"save-update, expunge"}, {"save-update"}),
    )
    def test_write_cascades(self, setting, settings_that_warn):
        # Persistence-oriented cascades are incompatible with viewonly=True;
        # constructing the relationship must raise ArgumentError naming them.
        Order = self.classes.Order
        assert_raises_message(
            sa_exc.ArgumentError,
            'Cascade settings "%s" apply to persistence '
            "operations" % (", ".join(sorted(settings_that_warn))),
            relationship,
            Order,
            primaryjoin=(
                self.tables.users.c.id == foreign(self.tables.orders.c.user_id)
            ),
            cascade=", ".join(sorted(setting)),
            viewonly=True,
        )
    def test_expunge_cascade(self):
        # "expunge" is a non-persistence cascade, so it is allowed together
        # with viewonly=True and must still cascade expunge to the related
        # objects.
        User, Order, orders, users = (
            self.classes.User,
            self.classes.Order,
            self.tables.orders,
            self.tables.users,
        )
        mapper(Order, orders)
        mapper(
            User,
            users,
            properties={
                "orders": relationship(
                    Order,
                    primaryjoin=(
                        self.tables.users.c.id
                        == foreign(self.tables.orders.c.user_id)
                    ),
                    cascade="expunge",
                    viewonly=True,
                )
            },
        )
        sess = Session()
        u = User(id=1, name="jack")
        sess.add(u)
        sess.add_all(
            [
                Order(id=1, user_id=1, description="someorder"),
                Order(id=2, user_id=1, description="someotherorder"),
            ]
        )
        sess.commit()
        u1 = sess.query(User).first()
        orders = u1.orders
        eq_(len(orders), 2)
        in_(orders[0], sess)
        in_(orders[1], sess)
        # Expunging the user must expunge the viewonly-loaded orders too.
        sess.expunge(u1)
        not_in_(orders[0], sess)
        not_in_(orders[1], sess)
def test_default_none_cascade(self):
User, Order, orders, users = (
self.classes.User,
self.classes.Order,
self.tables.orders,
self.tables.users,
)
mapper(Order, orders)
mapper(
User,
users,
properties={
"orders": relationship(
Order,
primaryjoin=(
self.tables.users.c.id
== foreign(self.tables.orders.c.user_id)
),
viewonly=True,
)
},
)
sess = Session()
u1 = User(id=1, name="jack")
sess.add(u1)
o1, o2 = (
Order(id=1, user_id=1, | |
noqa: E501
collection_formats = {}
path_params = {}
if 'entity_set_name' in local_var_params:
path_params['entitySetName'] = local_var_params['entity_set_name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['http_auth', 'openlattice_auth'] # noqa: E501
return self.api_client.call_api(
'/datastore/entity-sets/ids/{entitySetName}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_entity_set_ids(self, **kwargs): # noqa: E501
"""Get IDs for entity sets given their names. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_set_ids(async_req=True)
>>> result = thread.get()
:param request_body:
:type request_body: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: dict(str, str)
"""
kwargs['_return_http_data_only'] = True
return self.get_entity_set_ids_with_http_info(**kwargs) # noqa: E501
    def get_entity_set_ids_with_http_info(self, **kwargs):  # noqa: E501
        """Get IDs for entity sets given their names.  # noqa: E501

        POSTs the list of entity-set names to
        ``/datastore/entity-sets/ids/`` and returns a name -> id mapping.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_entity_set_ids_with_http_info(async_req=True)
        >>> result = thread.get()

        :param request_body:
        :type request_body: list[str]
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
            and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
            request; this effectively ignores the authentication in the spec
            for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object. If the method is called
            asynchronously, returns the request thread.
        :rtype: tuple(dict(str, str), status_code(int), headers(HTTPHeaderDict))
        """
        # Generated-code idiom: locals() snapshots {'self', 'kwargs'}; each
        # accepted kwarg is then copied in flat so the .get() lookups below
        # can treat named and keyword params uniformly.
        local_var_params = locals()
        all_params = [
            'request_body'
        ]
        # framework-level options accepted by every endpoint method
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # reject unknown keyword arguments early, before any network work
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_entity_set_ids" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        # the request body is the raw list of entity-set names, if given
        body_params = None
        if 'request_body' in local_var_params:
            body_params = local_var_params['request_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['http_auth', 'openlattice_auth']  # noqa: E501
        return self.api_client.call_api(
            '/datastore/entity-sets/ids/', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, str)',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def get_entity_set_property_metadata(self, entity_set_id, property_type_id, **kwargs): # noqa: E501
"""Get specified property type metadata for an entity set. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_entity_set_property_metadata(entity_set_id, property_type_id, async_req=True)
>>> result = thread.get()
:param entity_set_id: (required)
:type entity_set_id: str
:param property_type_id: (required)
:type property_type_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: EntitySetPropertyMetaData
"""
kwargs['_return_http_data_only'] = True
return self.get_entity_set_property_metadata_with_http_info(entity_set_id, property_type_id, **kwargs) # noqa: E501
    def get_entity_set_property_metadata_with_http_info(self, entity_set_id, property_type_id, **kwargs):  # noqa: E501
        """Get specified property type metadata for an entity set.  # noqa: E501

        GETs ``/datastore/entity-sets/all/{entitySetId}/properties/{propertyTypeId}/``
        and deserializes the reply into an ``EntitySetPropertyMetaData``.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_entity_set_property_metadata_with_http_info(entity_set_id, property_type_id, async_req=True)
        >>> result = thread.get()

        :param entity_set_id: (required)
        :type entity_set_id: str
        :param property_type_id: (required)
        :type property_type_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status code
            and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
            request; this effectively ignores the authentication in the spec
            for a single request.
        :type _request_auth: dict, optional
        :return: Returns the result object. If the method is called
            asynchronously, returns the request thread.
        :rtype: tuple(EntitySetPropertyMetaData, status_code(int), headers(HTTPHeaderDict))
        """
        # Generated-code idiom: locals() snapshots the named params plus
        # 'kwargs'; accepted kwargs are then copied in flat so the .get()
        # lookups below can treat all params uniformly.
        local_var_params = locals()
        all_params = [
            'entity_set_id',
            'property_type_id'
        ]
        # framework-level options accepted by every endpoint method
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # reject unknown keyword arguments early, before any network work
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_entity_set_property_metadata" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'entity_set_id' is set
        if self.api_client.client_side_validation and ('entity_set_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['entity_set_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `entity_set_id` when calling `get_entity_set_property_metadata`")  # noqa: E501
        # verify the required parameter 'property_type_id' is set
        if self.api_client.client_side_validation and ('property_type_id' not in local_var_params or  # noqa: E501
                                                        local_var_params['property_type_id'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `property_type_id` when calling `get_entity_set_property_metadata`")  # noqa: E501
        collection_formats = {}
        # both ids are substituted into the URL path template below
        path_params = {}
        if 'entity_set_id' in local_var_params:
            path_params['entitySetId'] = local_var_params['entity_set_id']  # noqa: E501
        if 'property_type_id' in local_var_params:
            path_params['propertyTypeId'] = local_var_params['property_type_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['http_auth', 'openlattice_auth']  # noqa: E501
        return self.api_client.call_api(
            '/datastore/entity-sets/all/{entitySetId}/properties/{propertyTypeId}/', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='EntitySetPropertyMetaData',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
def get_property_metadata_for_entity_sets(self, **kwargs): # noqa: E501
"""Get property metadata for entity sets. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_property_metadata_for_entity_sets(async_req=True)
>>> result = thread.get()
:param request_body:
:type request_body: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: dict(str, dict(str, EntitySetPropertyMetaData))
"""
kwargs['_return_http_data_only'] = True
return self.get_property_metadata_for_entity_sets_with_http_info(**kwargs) # noqa: E501
def get_property_metadata_for_entity_sets_with_http_info(self, **kwargs): # noqa: E501
"""Get property metadata for entity sets. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_property_metadata_for_entity_sets_with_http_info(async_req=True)
>>> result = thread.get()
:param request_body:
:type request_body: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be | |
# coding: utf-8
# ====================================
# import
# ====================================
# import from std
import os
import sys
sys.path.append('./')
import time
import traceback
# import from pypi
import datetime
from plyer import notification
import PyQt5.QtWidgets as qw
import PyQt5.QtGui as qg
import PyQt5.QtCore as qc
# import from selfmade
import src.analyzeScenario as asn
import src.comm2checker
import src.comm2dmm
import src.comm2resistor
import src.generateExcel
import src.ipmessenger as ipm
import src.order2checker as o2c
import src.serBitAssign as ba
# ====================================
# global variables
# ====================================
flgGoing = False  # True while a scenario run is in progress (guards re-entry in funcStart)
# ====================================
# const variables
# ====================================
ACCver = 'b0.1'  # ACC controller version string, logged and written into reports
# Default root folder for file dialogs and the ErrLog.txt fallback.
# NOTE(review): assumes Windows HOMEDRIVE/HOMEPATH env vars exist — confirm.
ROOTPATH = os.getenv('HOMEDRIVE') + os.getenv('HOMEPATH') + '/Desktop'
initialOhmPath = './settings/initial_ohm_setting.ini'  # resistor initial settings (relative to CWD); used outside this chunk
# image assets (paths relative to the working directory)
imgStart = './images/start.png'
imgPause = './images/pause.png'
imgStop = './images/stop.png'
imgInitRes = './images/initial.png'
imgGo = './images/go.png'        # shown by updateStatusIcon('go')
imgReady = './images/ready.png'  # shown by updateStatusIcon('ready')
imgSet = './images/set.png'      # shown by updateStatusIcon('set')
imgIco = './images/car_dekotora.ico'
imgToast = './images/car_dekotora.ico'  # icon for the plyer desktop notification
imgSplash = './images/splash_loading.png'
# ====================================
# Scenario Tab
# ====================================
class ScenarioTab(qw.QWidget):
# *************
# init
# *************
def __init__(self,parent=None):
super(ScenarioTab, self).__init__()
self.initUI()
self.logSavePath = ''
self.scenarioFullPath = ''
self.lstScenarioCover = []
self.lstScenarioItems = []
self.lstScenarioCanInfo = []
self.lstScenarioRyDefInfo = []
self.systemLog = []
self.lstResData = []
self.lstDmmInfo = []
self.lstResInfo = []
self.flgPause = False
self.flgStop = False
# *************
# function
# *************
# --------------
# update system log
# --------------
def updateStatusIcon(self, img):
# update
try:
if img=='go':
self.lblState.setPixmap(qg.QPixmap(imgGo).scaled(48,48))
elif img=='ready':
self.lblState.setPixmap(qg.QPixmap(imgReady).scaled(48,48))
elif img=='set':
self.lblState.setPixmap(qg.QPixmap(imgSet).scaled(48,48))
except:
if traceback.format_exc(2) != 'SystemExit: 0':
with open(ROOTPATH + '/ErrLog.txt', 'a') as file:
file.write(traceback.format_exc())
file.close()
qg.QGuiApplication.processEvents()
# --------------
# update system log
# --------------
def updateSystemLog(self, log):
# append
self.systemLog.append(log)
self.boxLogData.append(log)
qg.QGuiApplication.processEvents()
# --------------
# push save path dir button
# --------------
def funcSelectFolder(self):
# dialog
rootpath = ROOTPATH
path = qw.QFileDialog.getExistingDirectory(
None,
'select log save folder',
rootpath
)
# output
self.boxSavePath.setText(path)
self.logSavePath = path
# --------------
# push scenario file button
# --------------
def funcSelectExcelFile(self):
# dialog
rootpath = ROOTPATH
filename = qw.QFileDialog.getOpenFileName(
self,
'select scenario file',
rootpath,
'Excel Files (*.xls *.xlsx *.xlsm)'
)
# GUI update
try:
if filename[0] != '':
self.scenarioFullPath = filename[0]
# ---init
self.boxScenarioPath.clear()
self.boxScenarioTitle.clear()
self.boxScenarioAuthor.clear()
self.boxScenarioECUtype.clear()
self.boxScenarioECUcode.clear()
self.boxScenarioSumm.clear()
self.boxScenarioItems.clear()
self.boxScenarioJudgement.clear()
# ---cover
self.lstScenarioCover = asn.takeInCover(filename[0])
self.boxScenarioPath.setText(filename[0])
self.boxScenarioTitle.setText(self.lstScenarioCover[0])
self.boxScenarioAuthor.setText(self.lstScenarioCover[1])
self.boxScenarioECUtype.setText(self.lstScenarioCover[2])
self.boxScenarioECUcode.setText(self.lstScenarioCover[3])
self.boxScenarioSumm.append(self.lstScenarioCover[4])
self.boxScenarioSumm.append(self.lstScenarioCover[5])
self.boxScenarioSumm.append(self.lstScenarioCover[6])
self.boxScenarioSumm.append(self.lstScenarioCover[7])
# --- contents
self.lstScenarioItems = asn.takeInScenario(filename[0])
for i in range(len(self.lstScenarioItems)):
no = asn.sn2numZ3(self.lstScenarioItems[i])
odr = asn.sn2order(self.lstScenarioItems[i])
jdg = asn.sn2judge(self.lstScenarioItems[i])
self.boxScenarioItems.append(no+': '+odr)
self.boxScenarioJudgement.append(no+': '+jdg)
# --- can info
self.lstScenarioCanInfo = asn.takeInCanInfo(filename[0])
# --- relay default info
self.lstScenarioRyDefInfo = asn.takeInRyDefInfo(filename[0])
except:
pass
# with open(ROOTPATH + '/ErrLog.txt', 'a') as file:
# file.write(traceback.format_exc())
# file.close()
# --------------
# push res data file button
# --------------
def funcSelectResFile(self):
# dialog
rootpath = ROOTPATH
filename = qw.QFileDialog.getOpenFileName(
self,
'select res file',
rootpath,
'M_Res_in_Checker Files (*.res)'
)
# GUI update
try:
if filename[0] != '':
# --- contents
rawData = []
filedata = open(filename[0], 'r')
rawData = filedata.readlines()
self.lstResData = [float(item.split(' ')[-1].strip()) for item in rawData]
self.boxInitResPath.setText(filename[0])
except:
pass
# with open(ROOTPATH + '/ErrLog.txt', 'a') as file:
# file.write(traceback.format_exc())
# file.close()
    # --------------
    # push start button
    # --------------
    def funcStart(self):
        """Run the loaded scenario end-to-end against the checker hardware.

        Validates the form inputs, connects to the checker MCU, DMM and
        programmable resistor, sends the CAN / relay configuration, executes
        every scenario item (order -> receipt -> judge), then writes the
        .log and .xlsx report and fires a desktop notification.
        Returns 0 on early abort (validation failure or no MCU connection).
        """
        global flgGoing
        # +++++++++++++
        # error check
        # +++++++++++++
        # ---event
        if self.boxSampleEvent.text() == '':
            qw.QMessageBox.warning(self, 'ACC', 'Sample event is blank.')
            return 0
        # ---name
        if self.boxSampleName.text() == '':
            qw.QMessageBox.warning(self, 'ACC', 'Sample name is blank.')
            return 0
        # ---logfolder
        if self.boxSavePath.text() == '':
            qw.QMessageBox.warning(self, 'ACC', 'Log save folder is blank.')
            return 0
        # ---scenario
        if self.boxScenarioPath.text() == '':
            qw.QMessageBox.warning(self, 'ACC', 'Scenario file is not selected.')
            return 0
        # ---progress: module-level flag prevents a second concurrent run
        if flgGoing == True:
            qw.QMessageBox.warning(self, 'ACC', 'Already Started.')
            return 0
        # +++++++++++++
        # process start
        # +++++++++++++
        # ---dialog
        msg = qw.QMessageBox.question(
            self,
            'ACC',
            'Sure?',
            qw.QMessageBox.Yes | qw.QMessageBox.No
        )
        if msg == qw.QMessageBox.No:
            return 0
        # ---init variables
        flgGoing = True
        self.flgPause = False
        self.flgStop = False
        self.systemLog = []
        self.boxLogData.clear()
        self.boxScenarioJudgement.clear()
        # ---display update
        self.updateStatusIcon('go')
        # ---start
        self.updateSystemLog('----------------------------------')
        self.updateSystemLog(' Launch Auto Checker System ')
        self.updateSystemLog('----------------------------------')
        self.updateSystemLog('Initialize Checker.')
        self.updateSystemLog('... Connect Checker to ACC.')
        # ---check serial comm open (and usb cable is set)
        mcu = src.comm2checker.Serial2Mcu()
        # ---close system when serial comm is not available
        if mcu.port is None:
            self.updateSystemLog('>>>>>> Connection is failed.')
            self.updateSystemLog('>>>>>> System exit.')
            self.updateStatusIcon('set')
            flgGoing = False
            return 0
        self.updateSystemLog('>>>>>> Connection is successful.')
        # ---reset mcu, then give it 3 seconds to reboot
        self.updateSystemLog('... Reset Checker\'s MCU.')
        _ = mcu.sendMsg(ba.mcuReset,0.1)
        self.updateSystemLog('>>>>>> Restart after 3 seconds.')
        time.sleep(1)
        self.updateSystemLog('>>>>>> Restart after 2 seconds.')
        time.sleep(1)
        self.updateSystemLog('>>>>>> Restart after 1 seconds.')
        time.sleep(1)
        self.updateSystemLog('>>>>>> Done.')
        # ---get sample info
        lstSampleInfo = [self.boxSampleEvent.text(), self.boxSampleName.text()]
        # ---get checker info (controller ver, MCU firmware ver, scenario title)
        lstCheckerInfo = [
            ACCver,
            mcu.sendMsg(ba.softVersion,0.1),
            self.boxScenarioTitle.text()
        ]
        # ---get scenario info(init)
        lstScenarioInfo = []
        # ---send can info to checker (5 config frames from the workbook)
        self.updateSystemLog('... Send CAN Data to Checker.')
        self.updateSystemLog('>>>>>> Sending Data 1.')
        _ = mcu.sendAddInfo(ba.reqCanSndId, self.lstScenarioCanInfo[0][1], 0.1)
        self.updateSystemLog('>>>>>> Sending Data 2.')
        _ = mcu.sendAddInfo(ba.reqCanResId, self.lstScenarioCanInfo[1][1], 0.1)
        self.updateSystemLog('>>>>>> Sending Data 3.')
        _ = mcu.sendAddInfo(ba.reqCanMsgDtcRead, self.lstScenarioCanInfo[2][1], 0.1)
        self.updateSystemLog('>>>>>> Sending Data 4.')
        _ = mcu.sendAddInfo(ba.reqCanMsgDtcClear, self.lstScenarioCanInfo[3][1], 0.1)
        self.updateSystemLog('>>>>>> Sending Data 5.')
        _ = mcu.sendAddInfo(ba.reqCanMsgAddReq, self.lstScenarioCanInfo[4][1], 0.1)
        self.updateSystemLog('>>>>>> Done.')
        # ---send relay default info to checker:
        #    2-digit zero-padded numbers of all relays marked 'on',
        #    concatenated; 'ff' when none are on
        self.updateSystemLog('... Send Relay Default Status to Checker.')
        lstRyDef = [
            str(self.lstScenarioRyDefInfo[i][0]).zfill(2)
            for i in range(len(self.lstScenarioRyDefInfo))
            if self.lstScenarioRyDefInfo[i][1].lower()=='on'
        ]
        strRyDef = ''.join(lstRyDef) if lstRyDef!=[] else 'ff'
        _ = mcu.sendAddInfo(ba.reqRelayDefault, strRyDef, 0.1)
        _ = mcu.sendMsg(ba.ryDef,0.1)
        self.updateSystemLog('>>>>>> Done.')
        # ---connect to dmm (hDev==1 means the open failed; run continues
        #    with DMM items skipped)
        self.updateSystemLog('Try to connect with DMM.')
        dmm = src.comm2dmm.DmmADCMT(1)
        if dmm.hDev==1:
            self.updateSystemLog('>>>>>> Connection with DMM is failed.')
            self.updateSystemLog('>>>>>> Item about DMM is skipped.')
            self.lstDmmInfo = ['No Connection', 'No Connection', 'No Connection']
        else:
            self.lstDmmInfo = dmm.readId()
            self.updateSystemLog('>>>>>> Done.')
        # ---connect to resistor (same best-effort policy as the DMM)
        self.updateSystemLog('Try to connect with Resistor.')
        res = src.comm2resistor.Resistor('resistorname')
        if res.statusRes==False:
            self.updateSystemLog('>>>>>> Connection with Resistor is failed.')
            self.updateSystemLog('>>>>>> Item about Resistor is skipped.')
            self.lstResInfo = ['No Connection', 'No Connection', 'No Connection']
        else:
            self.lstResInfo = res.readId()
            self.updateSystemLog('>>>>>> Done.')
        # ---system log update: run header with versions, instruments, sample
        self.updateSystemLog('----------------------------------')
        self.updateSystemLog('Start Auto Checker Program')
        self.updateSystemLog('Auto Checker Controller Version : {}'.format(lstCheckerInfo[0]))
        self.updateSystemLog('Auto Checker MCU Software Version: {}'.format(lstCheckerInfo[1]))
        self.updateSystemLog('----------------------------------')
        self.updateSystemLog('DMM Model Name : {}, {}'.format(self.lstDmmInfo[0], self.lstDmmInfo[1]))
        self.updateSystemLog('DMM Serial Number: {}'.format(self.lstDmmInfo[2]))
        self.updateSystemLog('----------------------------------')
        self.updateSystemLog('Resistor Model Name : {}, {}'.format(self.lstResInfo[0], self.lstResInfo[1]))
        self.updateSystemLog('Resistor Serial Number: {}'.format(self.lstResInfo[2]))
        self.updateSystemLog('Resistor Initial File : {}'.format(self.boxScenarioPath.text()))
        self.updateSystemLog('----------------------------------')
        self.updateSystemLog('start time: {}'.format(datetime.datetime.now()))
        self.updateSystemLog('----------------------------------')
        self.updateSystemLog('sample event : {}'.format(lstSampleInfo[0]))
        self.updateSystemLog('sample name : {}'.format(lstSampleInfo[1]))
        self.updateSystemLog('scenario file : {}'.format(self.boxScenarioPath.text()))
        self.updateSystemLog('----------------------------------')
        # ---order and judge about each recipe
        for i, item in enumerate(self.lstScenarioItems):
            # take in scenario data
            no = asn.sn2numZ3(item)
            odr = asn.sn2order(item)
            jdg = asn.sn2judge(item)
            self.updateSystemLog('{}: [order] {}'.format(no,odr))
            # extract recipe
            rcp = asn.sn2recipe(item)
            self.updateSystemLog('{}: [recipe] {}'.format(no,rcp))
            # order recipe and receive receipt from checker
            rct = o2c.order2checker(rcp, mcu, dmm, res, self.lstResData)
            # --- move items box (cursor tracks the item being executed)
            if i==0:
                self.boxScenarioItems.moveCursor(qg.QTextCursor.Start)
            else:
                self.boxScenarioItems.moveCursor(qg.QTextCursor.Down)
            # --- if receipt is penRes (checker 5V is low): abort the run
            if rct == ba.penRes:
                self.updateSystemLog('Checker error!')
                self.updateSystemLog('Checker internal power is something wrong.')
                self.updateSystemLog('System exit.')
                self.updateStatusIcon('set')
                qw.QMessageBox.information(self, 'ACC', 'Terminated.')
                break
            result = o2c.judge2checker(rct, jdg)
            if result == 'PASS':
                # when all receipt is pass, judge text color sets black
                self.boxScenarioJudgement.setTextColor(qg.QColor(0,0,0))
            else:
                # when any receipt is fail, judge text color sets red
                self.boxScenarioJudgement.setTextColor(qg.QColor(255,0,0))
            self.boxScenarioJudgement.append('['+result+']'+no+': '+rct)
            self.updateSystemLog('{}: [receipt] {} ... {} (judge: {})'.format(no,rct,result,jdg))
            # when pause is clicked: block on a modal dialog, then resume
            if self.flgPause:
                self.updateSystemLog('Pause Button is pressed!')
                self.updateStatusIcon('ready')
                qw.QMessageBox.information(self, 'ACC', 'Pause is clicked.\nContinue?')
                self.flgPause = False
                self.updateStatusIcon('go')
                self.updateSystemLog('Restart!')
            # when stop is clicked: confirm, then either resume or abort
            if self.flgStop:
                self.updateSystemLog('Stop Button is pressed!')
                self.updateStatusIcon('ready')
                msg = qw.QMessageBox.question(
                    self,
                    'ACC',
                    'Stop is clicked.\nSure?',
                    qw.QMessageBox.Yes | qw.QMessageBox.No
                )
                if msg ==qw.QMessageBox.No:
                    self.flgStop = False
                    self.updateStatusIcon('go')
                    self.updateSystemLog('Restart!')
                else:
                    self.updateStatusIcon('set')
                    qw.QMessageBox.information(self, 'ACC', 'Terminated.')
                    break
            # display update
            self.updateStatusIcon('go')
            qg.QGuiApplication.processEvents()
            # ---get scenario info(append): row for the report
            lstScenarioInfo.append([no, odr, jdg, rct, result])
        # end process
        # --- set all relay off
        _ = o2c.order2checker(['RYALLOFF',], mcu)
        # --- display update
        self.updateStatusIcon('set')
        flgGoing = False
        self.updateSystemLog('----------------------------------')
        self.updateSystemLog('end time: {}'.format(datetime.datetime.now()))
        self.updateSystemLog('')
        self.updateSystemLog('Finished!')
        # --- toast (desktop notification via plyer)
        notification.notify(
            title='Message From ACC',
            message='Finished All Scenario Orders!',
            app_name='ACC(Python)',
            app_icon=imgToast
        )
        # --- output log file, named <event>_<name>_<timestamp>.log
        timeNow = datetime.datetime.now().isoformat().replace(':','-').rsplit('.')[0]
        fnamelog = (
            self.logSavePath+'/'
            +str(self.boxSampleEvent.text())+'_'
            +str(self.boxSampleName.text())+'_'
            +timeNow
            +'.log'
        )
        with open(fnamelog, 'w') as file:
            file.write('\n'.join(self.systemLog))
            # NOTE(review): close() is redundant inside 'with'
            file.close()
        self.updateSystemLog('System log is output at [{}]'.format(self.logSavePath))
        # --- output report file (same base name, .xlsx)
        fnameExcel = (
            self.logSavePath+'/'
            +str(self.boxSampleEvent.text())+'_'
            +str(self.boxSampleName.text())+'_'
            +timeNow
            +'.xlsx'
        )
        report = src.generateExcel.Report(fnameExcel)
        report.setSampleInfo(lstSampleInfo)
        report.setCheckerInfo(lstCheckerInfo)
        report.setDmmInfo(self.lstDmmInfo)
        report.setResistorInfo(self.lstResInfo)
        report.setScenarioInfo(lstScenarioInfo)
        # NOTE(review): setSampleInfo is called a second time here with the
        # same argument — looks redundant; confirm before removing
        report.setSampleInfo(lstSampleInfo)
        report.outputExcelReport()
        report.outputLevelCsv()
        self.updateSystemLog('Report file is output at [{}]'.format(self.logSavePath))
        # --- ip messenger notification, if enabled in the UI
        if self.boxNotify.isChecked():
            self.callIPMsng('finish')
        # --- delete objects (drops serial/VISA handles held by the drivers)
        del mcu
        del dmm
        del res
# --------------
# push init res button
# --------------
def funcInitRes(self):
global flgGoing
# +++++++++++++
# error check
# +++++++++++++
# ---logfolder
if self.boxSavePath.text() == '':
qw.QMessageBox.warning(self, 'ACC', 'Log | |
import contextlib
import os
import tempfile
import warnings
from enum import Enum
import mip
class IISFinderAlgorithm(Enum):
    """Available strategies for locating an IIS in an infeasible model."""

    DELETION_FILTER = 1      # drop constraints one by one, keep the infeasible core
    ADDITIVE_ALGORITHM = 2   # grow a constraint set until it turns infeasible
class SubRelaxationInfeasible(Exception):
    """Exception type for an infeasible sub-relaxation."""
class NonRelaxableModel(Exception):
    """Exception type for a model that cannot be made feasible by relaxation."""
class ConflictFinder:
"""This class groups some IIS (Irreducible Infeasible Set) search algorithms"""
def __init__(self, model: mip.Model):
if model.status == mip.OptimizationStatus.LOADED:
print("model not runned yet, checking if feasible or not")
model.emphasis = 1 # feasibility
model.preprocess = 1 # -1 automatic, 0 off, 1 on.
model.optimize()
assert (
model.status == mip.OptimizationStatus.INFEASIBLE
), "model is not linear infeasible"
self.model = model
def find_iis(
self, method: IISFinderAlgorithm = IISFinderAlgorithm.DELETION_FILTER,
cbc_verbose: bool = False
) -> mip.ConstrList:
"""main method to find an IIS, this method is just a grouping of the other implementations
Args:
model (mip.Model): Infeasible model where to find the IIS
method (str, optional): name of the method to use ["deletion-filter", "additive_algorithm"]. Defaults to 'deletion-filter".
Returns:
mip.ConstrList: IIS constraint list
"""
# assert ,is not because time limit
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
if method == IISFinderAlgorithm.DELETION_FILTER:
return self.deletion_filter()
if method == IISFinderAlgorithm.ADDITIVE_ALGORITHM:
return self.additive_algorithm()
def deletion_filter(self) -> mip.ConstrList:
"""deletion filter algorithm for search an IIS
Args:
model (mip.Model): Infeasible model
Returns:
mip.ConstrList: IIS
"""
# 1. create a model with all constraints but one
aux_model = self.model.copy()
aux_model.objective = 1
aux_model.emphasis = 1 # feasibility
aux_model.preprocess = 1 # -1 automatic, 0 off, 1 on.
print("starting deletion_filter algorithm")
for inc_crt in self.model.constrs:
aux_model_inc_crt = aux_model.constr_by_name(
inc_crt.name
) # find constraint by name
aux_model.remove(aux_model_inc_crt) # temporally remove inc_crt
aux_model.optimize()
status = aux_model.status
# 2. test feasibility, if feasible, return dropped constraint to the set
# 2.1 else removed it permanently
# print('status {}'.format(status))
if status == mip.OptimizationStatus.INFEASIBLE:
# print("removing permanently {}".format(inc_crt.name))
continue
elif status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
aux_model.add_constr(
inc_crt.expr, name=inc_crt.name, priority=inc_crt.priority
)
iis = aux_model.constrs
return iis
def additive_algorithm(self) -> mip.ConstrList:
"""Additive algorithm to find an IIS
Returns:
mip.ConstrList: IIS
"""
# Create some aux models to test feasibility of the set of constraints
aux_model_testing = mip.Model()
for var in self.model.vars:
aux_model_testing.add_var(
name=var.name,
lb=var.lb,
ub=var.ub,
var_type=var.var_type,
# obj= var.obj,
# column=var.column #!! libc++abi.dylib: terminating with uncaught exception of type CoinError
)
aux_model_testing.objective = 1
aux_model_testing.emphasis = 1 # feasibility
aux_model_testing.preprocess = 1 # -1 automatic, 0 off, 1 on.
aux_model_iis = (
aux_model_testing.copy()
) # a second aux model to test feasibility of the incumbent iis
# algorithm start
all_constraints = self.model.constrs
testing_crt_set = mip.ConstrList(model=aux_model_testing) # T
iis = mip.ConstrList(model=aux_model_iis) # I
while True:
for crt in all_constraints:
testing_crt_set.add(crt.expr, name=crt.name)
aux_model_testing.constrs = testing_crt_set
aux_model_testing.optimize()
if aux_model_testing.status == mip.OptimizationStatus.INFEASIBLE:
iis.add(crt.expr, name=crt.name)
aux_model_iis.constrs = iis
aux_model_iis.optimize()
if aux_model_iis.status == mip.OptimizationStatus.INFEASIBLE:
return iis
elif aux_model_iis.status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
testing_crt_set = mip.ConstrList(model=aux_model_testing)
for (
crt
) in (
iis
): # basically this loop is for set T=I // aux_model_iis = iis.copy()
testing_crt_set.add(crt.expr, name=crt.name)
break
    def deletion_filter_milp_ir_lc_bd(self) -> mip.ConstrList:
        """Integer deletion filter algorithm (milp_ir_lc_bd).

        Intended approach (draft): split the model into linear constraints
        (LC), variable-bound constraints (BD) and integrality restrictions
        (IR), then filter each group for the members causing infeasibility.

        Raises:
            NotImplementedError: always — this algorithm is work in progress;
                everything below the ``raise`` is an unreachable draft kept
                as implementation notes.

        Returns:
            mip.ConstrList: the IIS (once implemented).
        """
        raise NotImplementedError("WIP")
        # ---- unreachable draft below: kept as a sketch of the algorithm ----
        # major constraint sets definition
        t_aux_model = mip.Model(name="t_auxiliary_model")
        iis_aux_model = mip.Model(name="t_auxiliary_model")
        linear_constraints = mip.ConstrList(
            model=t_aux_model
        )  # all the linear model constraints
        variable_bound_constraints = mip.ConstrList(
            model=t_aux_model
        )  # all the linear model constrants related specifically for the variable bounds
        integer_varlist_crt = mip.VarList(
            model=t_aux_model
        )  # the nature vars constraints for vartype in Integer/Binary
        # fill the above sets with the constraints
        for crt in self.model.constrs:
            linear_constraints.add(crt.expr, name=crt.name)
        for var in self.model.vars:
            # variable bounds become explicit >=lb / <=ub constraints
            if var.lb != -mip.INF:
                variable_bound_constraints.add(
                    var >= var.lb, name="{}_lb_crt".format(var.name)
                )
            if var.ub != mip.INF:
                variable_bound_constraints.add(
                    var <= var.ub, name="{}_ub_crt".format(var.name)
                )
        for var in self.model.vars:
            if var.var_type in (mip.INTEGER, mip.BINARY):
                integer_varlist_crt.add(var)
        status = "IIS"
        # add all LC,BD to the incumbent, T= LC + BD
        for var in self.model.vars:
            # add all variables as if they where CONTINUOUS and without bonds
            # (because this will be separated)
            iis_aux_model.add_var(
                name=var.name, lb=-mip.INF, ub=mip.INF, var_type=mip.CONTINUOUS
            )
        for crt in linear_constraints + variable_bound_constraints:
            iis_aux_model.add_constr(crt.expr, name=crt.name, priority=crt.priority)
        iis_aux_model.optimize()
        if iis_aux_model.status == mip.OptimizationStatus.INFEASIBLE:
            # if infeasible means that this is a particular version of an LP
            return self.deletion_filter()  # (STEP 2)
        # add all the integer constraints to the model
        iis_aux_model.vars.remove(
            [var for var in integer_varlist_crt]
        )  # remove all integer variables
        for var in integer_varlist_crt:
            iis_aux_model.add_var(
                name=var.name,
                lb=-mip.INF,
                ub=mip.INF,
                var_type=var.var_type,  # this will add the var with his original type
            )
        # filter IR constraints that create infeasibility (STEP 1)
        for var in integer_varlist_crt:
            iis_aux_model.vars.remove(iis_aux_model.var_by_name(var.name))
            iis_aux_model.add_var(
                name=var.name,
                lb=-mip.INF,
                ub=mip.INF,
                var_type=mip.CONTINUOUS,  # relax the integer constraint over var
            )
            iis_aux_model.optimize()
            # if infeasible then update incumbent T = T-{ir_var_crt}
            # else continue
        # STEP 2 filter lc constraints
        # STEP 3 filter BD constraints
        # return IS o IIS
    def deletion_filter_milp_lc_ir_bd(self) -> mip.ConstrList:
        """Integer deletion filter variant (lc -> ir -> bd ordering). Not implemented."""
        raise NotImplementedError  # TODO
class ConflictRelaxer:
def __init__(self, model: mip.Model):
if model.status == mip.OptimizationStatus.LOADED:
print("model not runned yet, checking if feasible or not")
model.emphasis = 1 # feasibility
model.preprocess = 1 # -1 automatic, 0 off, 1 on.
model.optimize()
assert (
model.status == mip.OptimizationStatus.INFEASIBLE
), "model is not linear infeasible"
self.model = model
self.iis_num_iterations = 0
self.iis_iterations = []
self.relax_slack_iterations = []
@property
def slack_by_crt(self) -> dict:
answ = {}
for slack_dict_iter in self.relax_slack_iterations:
for crt_name in slack_dict_iter.keys():
if crt_name in answ.keys():
answ[crt_name] += slack_dict_iter[crt_name]
else:
answ[crt_name] = slack_dict_iter[crt_name]
return answ
    def hierarchy_relaxer(
        self,
        relaxer_objective: str = "min_abs_slack_val",
        default_priority: mip.constants.ConstraintPriority = mip.constants.ConstraintPriority.MANDATORY,
        cbc_verbose: bool = False
    ) -> mip.Model:
        """hierarchy relaxer algorithm, it's gonna find a IIS and then relax it using the objective function defined (`relaxer_objective`) and then update the model
        with the relaxed constraints. This process runs until there's not more IIS on the model.

        Args:
            relaxer_objective (str, optional): objective function of the relaxer model (IIS relaxer model). Defaults to 'min_abs_slack_val'.
            default_priority (ConstraintPriority, optional): If a constraint does not have a supported substring priority in the name, it will assign a default priority.
                Defaults to ConstraintPriority.MANDATORY.
            cbc_verbose (bool, optional): if False, solver output is suppressed via ignore_output().

        Raises:
            NonRelaxableModel: when an IIS is made up exclusively of MANDATORY constraints.

        Returns:
            mip.Model: relaxed model
        """
        relaxed_model = self.model.copy()
        relaxed_model._status = self.model._status  # TODO solve this in a different way
        # map unmapped constraints to the default priority
        for crt in relaxed_model.constrs:
            if not crt.priority:
                crt.priority = default_priority
        iis_it = 0
        iis_dict = {}       # per-iteration record: IIS members + applied slacks
        taboo_list_iis = [] # IIS (as name lists) already seen, for diagnostics
        cf = ConflictFinder(relaxed_model)
        while True:
            # 1. find iis
            iis = cf.find_iis(IISFinderAlgorithm.DELETION_FILTER)
            self.iis_iterations.append([crt.name for crt in iis])  # track iteration
            self.iis_num_iterations += 1  # track iteration
            iis_priority_set = set([crt.priority for crt in iis])
            # check if "relaxable" model mapping
            if iis_priority_set == set([mip.constants.ConstraintPriority.MANDATORY]):
                raise NonRelaxableModel("Infeasible model, is not possible to relax MANDATORY constraints")
            # 2. relax iis — try priorities from lowest to highest until one level relaxes
            with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
                for level, relaxing_level in enumerate(sorted(iis_priority_set, key=lambda x: x.value)):
                    # highest case (raise exception)
                    if relaxing_level == mip.constants.ConstraintPriority.MANDATORY:
                        raise NonRelaxableModel("Infeasible model, is not possible to relax MANDATORY constraints")
                    try:
                        slack_dict = self.relax_iis(iis, relaxer_objective=relaxer_objective, lowest_priority=relaxing_level)
                    except SubRelaxationInfeasible as e:
                        warnings.warn(f'Warning relaxing more than one level, currently on l{level} : {relaxing_level}')
                        continue
                    else:
                        # relaxable iis, this is will continue with the next iteration then
                        break
            self.relax_slack_iterations.append(slack_dict)
            # 3. add the slack variables to the original problem
            with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
                relaxed_model = self.relax_constraints(relaxed_model, slack_dict)
            # 4. check if feasible
            relaxed_model.emphasis = 1  # feasibility
            with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
                relaxed_model.optimize()
            if relaxed_model.status in [
                mip.OptimizationStatus.FEASIBLE,
                mip.OptimizationStatus.OPTIMAL,
            ]:
                print("finished relaxation process !")
                break
            else:
                print(
                    "relaxed the current IIS, still infeasible, searching for a new IIS to relax"
                )
                print("relaxed constraints {0}".format(list(slack_dict.keys())))
            # record this iteration for diagnostics (only reached when still infeasible)
            iis_it += 1
            # print(f'found iis_{iis_it} = {[crt.name for crt in iis]}')
            iis_dict[iis_it] = {}
            iis_crt = [crt.name for crt in iis]
            iis_dict[iis_it]['iis'] = [{'name': crt.name, 'priority': str(crt.priority).split('.')[1]} for crt in iis]
            print(f'found iis_{iis_it} : len = {len(iis_crt)} in_taboo = {(iis_crt in taboo_list_iis)}')
            taboo_list_iis.append(iis_crt)
            iis_dict[iis_it]['slack'] = slack_dict
        return relaxed_model
@classmethod
def relax_iis(
cls, iis: mip.ConstrList, relaxer_objective: str = "min_abs_slack_val", lowest_priority: 'mip.constants.ConstraintPriority' = None
) -> dict:
"""This function is the sub module that finds the optimum relaxation for an IIS, given a crt priority mapping and a objective function
Args:
iis (mip.ConstrList): | |
self.vm4_macvlan_ip, int(vm1_vrf_id))
output = self.inputs.run_cmd_on_server(
vm1_node_ip, stitched_mac_cmd).split("(")[0]
assert not output, "Stitched mac address is present."
cmd = ['ip link delete macvlan1']
self.vm4_fixture.run_cmd_on_vm(cmd, as_sudo=True)
# end test_health_check_failure
    @preposttest_wrapper
    def test_delete_vlan_intf(self):
        '''
        Description: Verify that routes corresponding to MAC-IP pairs learnt on VM interface goes down when VLAN sub intf is deleted
        Test steps:
            1. Create macvlan intf on vlan intf on vm1 and vm4. Intf subnet is diff.
            2. Create vlan vmi on vm1 and vm4 respectively
            3. Delete vlan intf on vm1 and vm4.
        Pass criteria:
            1. Ping from vm to macvlan intf should not go
            2. MAC route should be deleted in evpn table
            3. Derived bridge route with peer as EVPN is deleted for MAC2
            4. POD IP is deleted from inet table in agent and vrouter
        Maintainer : <EMAIL>
        '''
        # Derive four distinct v6 addresses from the vn2 gateway by bumping the
        # last hextet (+5..+8): vlan and macvlan addresses for vm1 and vm4.
        vn2_gw_ip = self.vn2_fixture.get_subnets()[1]['gateway_ip']
        vm1_vlan_ip = ":".join(vn2_gw_ip.split(
            ':')[:-1]) + ":" + str(int(vn2_gw_ip.split(':')[-1]) + 5) + \
            '/128'
        vm1_vlan_macvlan_ip = ":".join(vn2_gw_ip.split(
            ':')[:-1]) + ":" + str(int(vn2_gw_ip.split(':')[-1]) + 6) + \
            '/128'
        vm4_vlan_ip = ":".join(vn2_gw_ip.split(
            ':')[:-1]) + ":" + str(int(vn2_gw_ip.split(':')[-1]) + 7) + \
            '/128'
        vm4_vlan_macvlan_ip = ":".join(vn2_gw_ip.split(
            ':')[:-1]) + ":" + str(int(vn2_gw_ip.split(':')[-1]) + 8) + \
            '/128'
        # Build eth0.100 VLAN sub-interfaces plus a macvlan on top, on both VMs.
        cmds_vm1 = ['ip link add link eth0 name eth0.100 type vlan id 100',
                    'ip link set dev eth0.100 up',
                    'ip -6 addr add %s dev eth0.100 scope global' % (
                        vm1_vlan_ip.split('/')[0] + "/64"),
                    'ip link add macvlan1 link eth0.100 type macvlan',
                    'ip link set dev macvlan1 up',
                    'ip -6 addr add %s dev macvlan1 scope global' % (
                        vm1_vlan_macvlan_ip.split('/')[0] + "/64"),
                    'ifup --force eth0']
        cmds_vm4 = ['ip link add link eth0 name eth0.100 type vlan id 100',
                    'ip link set dev eth0.100 up',
                    'ip -6 addr add %s dev eth0.100 scope global' % (
                        vm4_vlan_ip.split('/')[0] + "/64"),
                    'ip link add macvlan1 link eth0.100 type macvlan',
                    'ip link set dev macvlan1 up',
                    'ip -6 addr add %s dev macvlan1 scope global' % (
                        vm4_vlan_macvlan_ip.split('/')[0] + "/64"),
                    'ifup --force eth0']
        self.vm1_fixture.run_cmd_on_vm(cmds_vm1, as_sudo=True)
        self.vm4_fixture.run_cmd_on_vm(cmds_vm4, as_sudo=True)
        # Collect the MAC addresses of the macvlan and vlan interfaces.
        mac_cmd = ['ifconfig macvlan1 | grep HWaddr | awk \'{ print $5 }\'']
        vm1_macvlan_mac_addr = list(
            self.vm1_fixture.run_cmd_on_vm(mac_cmd).values())[0]
        vm4_macvlan_mac_addr = list(
            self.vm4_fixture.run_cmd_on_vm(mac_cmd).values())[0]
        mac_cmd = ['ifconfig eth0.100 | grep HWaddr | awk \'{ print $5 }\'']
        vm1_vlan_mac_addr = list(
            self.vm1_fixture.run_cmd_on_vm(mac_cmd).values())[0]
        vm4_vlan_mac_addr = list(
            self.vm4_fixture.run_cmd_on_vm(mac_cmd).values())[0]
        # Create VLAN sub-interface VMIs (vlan 100) parented on each VM's port.
        parent_vmi_vm1 = self.vnc_lib.virtual_machine_interface_read(
            id=self.vm1_fixture.get_vmi_id(self.vn1_fixture.vn_fq_name))
        parent_vmi_vm4 = self.vnc_lib.virtual_machine_interface_read(
            id=self.vm4_fixture.get_vmi_id(self.vn1_fixture.vn_fq_name))
        self.setup_vmi(self.vn2_fixture.uuid,
                       parent_vmi=parent_vmi_vm1,
                       api_type="contrail",
                       project_obj=self.project.project_obj,
                       vlan_id="100",
                       mac_address=vm1_vlan_mac_addr,
                       fixed_ips=[{'subnet_id': self.get_cidr_mask_vmi_id(self.vn2_fixture, ipv6=True)['v6'][2],
                                   'ip_address':vm1_vlan_ip.split('/')[0]}])
        self.setup_vmi(self.vn2_fixture.uuid,
                       parent_vmi=parent_vmi_vm4,
                       api_type="contrail",
                       project_obj=self.project.project_obj,
                       vlan_id="100",
                       mac_address=vm4_vlan_mac_addr,
                       fixed_ips=[{'subnet_id': self.get_cidr_mask_vmi_id(self.vn2_fixture, ipv6=True)['v6'][2],
                                   'ip_address':vm4_vlan_ip.split('/')[0]}])
        # Delete the VLAN interface on both VMs; the learnt routes should age out.
        delete_vlan_cmd = ['ip link delete eth0.100']
        self.vm1_fixture.run_cmd_on_vm(delete_vlan_cmd, as_sudo=True)
        self.vm4_fixture.run_cmd_on_vm(delete_vlan_cmd, as_sudo=True)
        # allow time for route withdrawal to propagate through agent/vrouter
        time.sleep(60)
        assert not self.vm1_fixture.ping_to_ip(vm4_vlan_macvlan_ip.split('/')[0])
        assert not self.vm4_fixture.ping_to_ip(vm1_vlan_macvlan_ip.split('/')[0])
        # from vm1 to mac4 intf
        assert not self.vm1_fixture.ping_to_ip(
            vm4_vlan_macvlan_ip.split('/')[0])
        # checking evpn table: the MAC/IP route must be gone
        vm1_node_ip = self.vm1_fixture.vm_node_ip
        vm1_vrf_id = self.get_vrf_id(self.vn2_fixture, self.vm1_fixture)
        try:
            evpn_route = self.agent_inspect[vm1_node_ip].get_vna_evpn_route(
                vm1_vrf_id,
                vxlanid=self.vn1_vxlan_id,
                mac=vm4_macvlan_mac_addr,
                ip=vm4_vlan_macvlan_ip)['mac']
        except TypeError:
            # inspect API returns None when the route is absent; subscripting raises
            evpn_route = None
        assert not evpn_route, "Mac route for macvlan4 is present in EVPN table. "
        # 0 ip should also be deleted
        try:
            evpn_route = self.agent_inspect[vm1_node_ip].get_vna_evpn_route(
                vm1_vrf_id,
                vxlanid=self.vn1_vxlan_id,
                mac=vm4_macvlan_mac_addr,
                ip="0.0.0.0/32")['mac']
        except TypeError:
            evpn_route = None
        assert not evpn_route, "Mac route for macvlan4 is present in EVPN table. "
        # checking bridge table: derived L2 route must be withdrawn
        try:
            peer = self.agent_inspect[vm1_node_ip].get_vna_layer2_route(
                vm1_vrf_id, mac=vm4_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
        except TypeError:
            peer = None
        assert not peer, "MAC2 bridge route is present"
        # checking inet table for vm1 pod ip
        inspect_h = self.agent_inspect[vm1_node_ip]
        route = inspect_h.get_vna_route(
            vrf_id=vm1_vrf_id,
            ip=vm4_vlan_macvlan_ip.split("/")[0])
        assert not route, ('Route seen in vrouter for %s' %
                           (vm4_vlan_macvlan_ip.split("/")[0]))
        # checking Vrouter inet table in vm1 for vm4_vlan_macvlan_ip
        # checking route in vrouter got deleted
        route_ppl_cmd = 'contrail-tools rt --dump %d --family inet6 | grep %s | awk \'{print $2}\'' % (
            int(vm1_vrf_id), vm4_vlan_macvlan_ip.split('/')[0])
        output = self.inputs.run_cmd_on_server(vm1_node_ip, route_ppl_cmd)
        # a surviving /128 entry would print "128" — its absence means deletion
        assert output != "128", "Route not deleted in vrouter inet table."
        return True
    # end test_delete_vlan_intf
@preposttest_wrapper
def test_move_ip_locally_l2l3(self):
'''
Description: verify that when IP is moved locally, routes get updated correctly. VN forwarding mode is L2/L3.
Test steps:
1. Create vm - vm5 with same vn and compute as vm1 and vm4
2. launch pod1 with MAC1/IP1 in vm1
3. bring down pod1 and launch pod2 with MAC2/IP1 in vm4
Pass criteria:
1. verify routes corresponding to MAC1/IP1 pair when pod1 is launched
2. ping from vm5 to pod1 should go through fine when pod1 is launched
3. After pod1 is deleted: MAC1/IP1 is deleted from vm5 evpn table
Derived bridge route is deleted from vm5
4. After pod2 is launched: MAC2/IP1 is added in vm5 evpn table
Derived bridge route is added in vm5
inet route is updated
stitched mac addr is updated with MAC2
inet route mpls label and nh is changed
5. On vrouter: Ip is updated in vm5 inet table
Maintainer : <EMAIL>
'''
vm5_name = get_random_name('vm5')
vm5_fixture = self.create_vm(
vn_fixture=self.vn1_fixture,
image_name='ubuntu',
vm_name=vm5_name,
node_name=self.node1)
assert vm5_fixture.wait_till_vm_is_up()
cmds_vm1 = ['ip link add macvlan1 link eth0 type macvlan',
'ip link set dev macvlan1 up',
'ip -6 addr add %s dev macvlan1 scope global' % (self.vm1_macvlan_ip.split('/')[0] + "/64"),
'ifup --force eth0']
self.vm1_fixture.run_cmd_on_vm(cmds_vm1, as_sudo=True)
mac_cmd = ['ifconfig macvlan1 | grep HWaddr | awk \'{ print $5 }\'']
vm1_macvlan_mac_addr = list(
self.vm1_fixture.run_cmd_on_vm(mac_cmd).values())[0]
# from vm5 to mac1 intf
assert vm5_fixture.ping_to_ip(self.vm1_macvlan_ip.split('/')[0])
# checking evpn table
vm5_node_ip = vm5_fixture.vm_node_ip
vm5_vrf_id = self.get_vrf_id(self.vn1_fixture, vm5_fixture)
evpn_route = self.agent_inspect[vm5_node_ip].get_vna_evpn_route(
vm5_vrf_id,
vxlanid=self.vn1_vxlan_id,
mac=vm1_macvlan_mac_addr,
ip=self.vm1_macvlan_ip)['mac']
assert evpn_route == str(self.vn1_vxlan_id) + "-" + vm1_macvlan_mac_addr + \
"-" + self.vm1_macvlan_ip, "Mac route for macvlan1 is absent in EVPN table. "
# checking bridge table
peer = self.agent_inspect[vm5_node_ip].get_vna_layer2_route(
vm5_vrf_id, mac=vm1_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
assert peer == "EVPN", "Peer is not EVPN."
# checking if macvlan_ip1 is present in vm5 inet table
inspect_h = self.agent_inspect[vm5_node_ip]
route = inspect_h.get_vna_route(
vrf_id=vm5_vrf_id,
ip=self.vm1_macvlan_ip.split("/")[0])
assert route, ('No route seen in inet table for %s' %
(self.vm1_macvlan_ip.split("/")[0]))
vm5_mpls_label = route['routes'][0]['path_list'][0]['label']
vm5_inet_nh_id = route['routes'][0]['path_list'][0]['nh']['nh_index']
# checking if macvlan_ip1 is present in vm5 vrouter inet table
route = inspect_h.get_vrouter_route_table(vm5_vrf_id,
prefix=self.vm1_macvlan_ip.split('/')[0],
prefix_len='128',
get_nh_details=True,
v6=True)
assert route, ('No route seen in vrouter for %s' %
(self.vm1_macvlan_ip))
# checking stitched MAC addr
stitched_mac_cmd = 'contrail-tools rt --get %s --vrf %d --family inet6 | awk \'{print $6}\'| grep \':\'' % (
self.vm1_macvlan_ip, int(vm5_vrf_id))
output = self.inputs.run_cmd_on_server(
vm5_node_ip, stitched_mac_cmd).split("(")[0]
assert EUI(output, dialect=mac_unix_expanded) == EUI(
vm1_macvlan_mac_addr, dialect=mac_unix_expanded), "Stitched mac address is invalid."
cmds_vm4 = ['ip link add macvlan1 link eth0 type macvlan',
'ip link set dev macvlan1 up',
'ip -6 addr add %s dev macvlan1 scope global' % (self.vm1_macvlan_ip.split('/')[0] + "/64"),
'ifup --force eth0']
self.vm4_fixture.run_cmd_on_vm(cmds_vm4, as_sudo=True)
mac_cmd = ['ifconfig macvlan1 | grep HWaddr | awk \'{ print $5 }\'']
vm4_macvlan_mac_addr = list(
self.vm4_fixture.run_cmd_on_vm(mac_cmd).values())[0]
cmd = ['ip link set dev macvlan1 down']
self.vm1_fixture.run_cmd_on_vm(cmd, as_sudo=True)
assert self.vm4_fixture.ping_to_ip(self.vn1_fixture.get_subnets()[1]['gateway_ip'] , intf="macvlan1")
time.sleep(60)
# from vm5 to mac4 intf
# sometimes there is little loss in packets observed while pinging, retrying to ensure pod is reachable
ping_to_macvlan = False
for i in range(0,4):
if (vm5_fixture.ping_to_ip(self.vm1_macvlan_ip.split('/')[0])):
ping_to_macvlan = True
break
self.logger.warn("Retrying ping")
assert ping_to_macvlan, "Ping to macvlan failed."
# checking evpn table
evpn_route = self.agent_inspect[vm5_node_ip].get_vna_evpn_route(
vm5_vrf_id,
vxlanid=self.vn1_vxlan_id,
mac=vm4_macvlan_mac_addr,
ip=self.vm1_macvlan_ip)['mac']
assert evpn_route == str(self.vn1_vxlan_id) + "-" + vm4_macvlan_mac_addr + \
"-" + self.vm1_macvlan_ip, "Mac route for macvlan1 is absent in EVPN table. "
# checking if MAC got deleted from vm4 evpn table
try:
evpn_route = self.agent_inspect[vm5_node_ip].get_vna_evpn_route(
vm5_vrf_id,
vxlanid=self.vn1_vxlan_id,
mac=vm1_macvlan_mac_addr,
ip=self.vm1_macvlan_ip)['mac']
except TypeError:
evpn_route = None
assert not evpn_route, "Mac route present in EVPN table. "
# checking bridge table
peer = self.agent_inspect[vm5_node_ip].get_vna_layer2_route(
vm5_vrf_id, mac=vm4_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
assert peer == "EVPN", "Peer is not EVPN."
# checking if MAC is deleted in vm4 bridge table
try:
peer = self.agent_inspect[vm5_node_ip].get_vna_layer2_route(
vm5_vrf_id, mac=vm1_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
except TypeError:
peer = None
assert not peer, "MAC1 bridge route is present"
# checking if macvlan4 route is there in inet table for vm1
route = inspect_h.get_vna_route(
vrf_id=vm5_vrf_id,
ip=self.vm1_macvlan_ip.split("/")[0])
assert route, ('No route seen in inet table for %s' %
(self.vm1_macvlan_ip.split("/")[0]))
assert vm5_mpls_label != route['routes'][0]['path_list'][0]['label'], "Mpls label has not changed."
assert vm5_inet_nh_id != route['routes'][0]['path_list'][0]['nh']['nh_index'], "Nh has not changed."
# checking if macvlan4 route is present in vm5 Vrouter inet table
route = inspect_h.get_vrouter_route_table(vm5_vrf_id,
prefix=self.vm1_macvlan_ip.split('/')[0],
prefix_len='128',
get_nh_details=True,
v6=True)
assert route, ('No route seen in vrouter for %s' %
(self.vm1_macvlan_ip))
| |
#publishBirth()
elif metric.name == "output/Device Metric4" or metric.alias == AliasMap.Device_Metric4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue = metric.string_value
print ("CMD message for output/Device Metric4 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric4, MetricDataType.String, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#publishBirth()
elif metric.name == "output/Device Metric3" or metric.alias == AliasMap.Device_Metric3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Boolean because of how we declated it in the DBIRTH
newValue = metric.boolean_value
print ("CMD message for output/Device Metric3 - New Value: %r" % newValue)
# Create the DDATA payload - use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric3, MetricDataType.Boolean, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
else:
print ("Unknown command: " + metric.name)
else:
print ("Unknown command...")
print ("Done publishing")
#####################################################################
######################################################################
######################################################################
# Publish the BIRTH certificates
######################################################################
def publishBirth():
    """Publish both Sparkplug BIRTH certificates: node (NBIRTH) first, then
    device (DBIRTH), as required by the Sparkplug session ordering."""
    publishNodeBirth()
    publishDeviceBirth()
######################################################################
######################################################################
# Publish the NBIRTH certificate
######################################################################
def publishNodeBirth():
    """Assemble the node birth (NBIRTH) payload with the standard Node
    Control metrics and publish it on the node's NBIRTH topic."""
    print("Publishing Node Birth")

    # Create the node birth payload
    nbirth_payload = sparkplug.getNodeBirthPayload()

    # Set up the Node Control metrics (all Boolean, initially False).
    node_controls = (
        ("Node Control/Next Server", AliasMap.Next_Server),
        ("Node Control/Rebirth", AliasMap.Rebirth),
        ("Node Control/Reboot", AliasMap.Reboot),
    )
    for control_name, control_alias in node_controls:
        addMetric(nbirth_payload, control_name, control_alias, MetricDataType.Boolean, False)

    # Serialize and publish the node birth certificate.
    nbirth_topic = "spBv1.0/" + myGroupId + "/NBIRTH/" + myNodeName
    client.publish(nbirth_topic, bytearray(nbirth_payload.SerializeToString()), 0, False)
######################################################################
######################################################################
# Publish the DBIRTH certificate
######################################################################
def publishDeviceBirth():
    """Assemble the device birth (DBIRTH) payload — frame/object counters,
    demo metrics and the ten Input/Output channels — and publish it on the
    device's DBIRTH topic."""
    print("Publishing Device Birth")

    # Get the payload
    dbirth_payload = sparkplug.getDeviceBirthPayload()

    # Frame counter, demo metrics and object count.
    addMetric(dbirth_payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx)
    addMetric(dbirth_payload, "input/Device Metric0", AliasMap.Device_Metric0, MetricDataType.String, "hello device")
    addMetric(dbirth_payload, "input/Device Metric1", AliasMap.Device_Metric1, MetricDataType.Boolean, True)
    addMetric(dbirth_payload, "input/Number of Objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx)
    addMetric(dbirth_payload, "output/Device Metric2", AliasMap.Device_Metric2, MetricDataType.Int16, 0)

    # Ten writable channels (output/Device Input1..10), Int16, zero at birth.
    for channel in range(1, 11):
        addMetric(dbirth_payload,
                  "output/Device Input{}".format(channel),
                  getattr(AliasMap, "Device_Input{}".format(channel)),
                  MetricDataType.Int16, 0)

    # Ten readable channels (input/Device Output1..10), Int16, zero at birth.
    for channel in range(1, 11):
        addMetric(dbirth_payload,
                  "input/Device Output{}".format(channel),
                  getattr(AliasMap, "Device_Output{}".format(channel)),
                  MetricDataType.Int16, 0)

    addMetric(dbirth_payload, "output/Device Metric3", AliasMap.Device_Metric3, MetricDataType.Boolean, True)
    addMetric(dbirth_payload, "output/Device Metric4", AliasMap.Device_Metric4, MetricDataType.String, "start")

    # Publish the initial data with the Device BIRTH certificate.
    dbirth_topic = "spBv1.0/" + myGroupId + "/DBIRTH/" + myNodeName + "/" + myDeviceName
    client.publish(dbirth_topic, bytearray(dbirth_payload.SerializeToString()), 0, False)
######################################################################
######################################################################
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """GStreamer pad probe on the OSD sink pad.

    For every buffered batch it counts detected objects per class, updates the
    module-level globals (frame_numberx, num_rectsx, Object1..Object10) that
    the Sparkplug publisher reads, and sets the on-screen display text.

    Args:
        pad: the Gst.Pad the probe is attached to (unused here).
        info: Gst.PadProbeInfo carrying the Gst.Buffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    global frame_numberx
    global num_rectsx
    global Object1
    global Object2
    global Object3
    global Object4
    global Object5
    global Object6
    global Object7
    global Object8
    global Object9
    global Object10
    # Intiallizing object counter with 0 for every COCO-style class id.
    obj_counter = {
        PGIE_CLASS_ID_TOOTHBRUSH:0,
        PGIE_CLASS_ID_HAIR_DRYER:0,
        PGIE_CLASS_ID_TEDDY_BEAR:0,
        PGIE_CLASS_ID_SCISSORS:0,
        PGIE_CLASS_ID_VASE:0,
        PGIE_CLASS_ID_CLOCK:0,
        PGIE_CLASS_ID_BOOK:0,
        PGIE_CLASS_ID_REFRIGERATOR:0,
        PGIE_CLASS_ID_SINK:0,
        PGIE_CLASS_ID_TOASTER:0,
        PGIE_CLASS_ID_OVEN:0,
        PGIE_CLASS_ID_MICROWAVE:0,
        PGIE_CLASS_ID_CELL_PHONE:0,
        PGIE_CLASS_ID_KEYBOARD:0,
        PGIE_CLASS_ID_REMOTE:0,
        PGIE_CLASS_ID_MOUSE:0,
        PGIE_CLASS_ID_LAPTOP:0,
        PGIE_CLASS_ID_TVMONITOR:0,
        PGIE_CLASS_ID_TOILET:0,
        PGIE_CLASS_ID_DININGTABLE:0,
        PGIE_CLASS_ID_BED:0,
        PGIE_CLASS_ID_POTTEDPLANT:0,
        PGIE_CLASS_ID_SOFA:0,
        PGIE_CLASS_ID_CHAIR:0,
        PGIE_CLASS_ID_CAKE:0,
        PGIE_CLASS_ID_DONUT:0,
        PGIE_CLASS_ID_PIZZA:0,
        PGIE_CLASS_ID_HOT_DOG:0,
        PGIE_CLASS_ID_CARROT:0,
        PGIE_CLASS_ID_BROCCOLI:0,
        PGIE_CLASS_ID_ORANGE:0,
        PGIE_CLASS_ID_SANDWICH:0,
        PGIE_CLASS_ID_APPLE:0,
        PGIE_CLASS_ID_BANANA:0,
        PGIE_CLASS_ID_BOWL:0,
        PGIE_CLASS_ID_SPOON:0,
        PGIE_CLASS_ID_KNIFE:0,
        PGIE_CLASS_ID_FORK:0,
        PGIE_CLASS_ID_CUP:0,
        PGIE_CLASS_ID_WINE_GLASS:0,
        PGIE_CLASS_ID_BOTTLE:0,
        PGIE_CLASS_ID_TENNIS_RACKET:0,
        PGIE_CLASS_ID_SURFBOARD:0,
        PGIE_CLASS_ID_SKATEBOARD:0,
        PGIE_CLASS_ID_BASEBALL_GLOVE:0,
        PGIE_CLASS_ID_BASEBALL_BAT:0,
        PGIE_CLASS_ID_KITE:0,
        PGIE_CLASS_ID_SPORTS_BALL:0,
        PGIE_CLASS_ID_SNOWBOARD:0,
        PGIE_CLASS_ID_SKIS:0,
        PGIE_CLASS_ID_FRISBEE:0,
        PGIE_CLASS_ID_SUITCASE:0,
        PGIE_CLASS_ID_TIE:0,
        PGIE_CLASS_ID_HANDBAG:0,
        PGIE_CLASS_ID_UMBRELLA:0,
        PGIE_CLASS_ID_BACKPACK:0,
        PGIE_CLASS_ID_GIRAFFE:0,
        PGIE_CLASS_ID_ZEBRA:0,
        PGIE_CLASS_ID_BEAR:0,
        PGIE_CLASS_ID_ELEPHANT:0,
        PGIE_CLASS_ID_COW:0,
        PGIE_CLASS_ID_SHEEP:0,
        PGIE_CLASS_ID_HORSE:0,
        PGIE_CLASS_ID_DOG:0,
        PGIE_CLASS_ID_CAT:0,
        PGIE_CLASS_ID_BIRD:0,
        PGIE_CLASS_ID_BENCH:0,
        PGIE_CLASS_ID_PARKING_METER:0,
        PGIE_CLASS_ID_STOP_SIGN:0,
        PGIE_CLASS_ID_FIRE_HYDRANT:0,
        PGIE_CLASS_ID_TRAFFIC_LIGHT:0,
        PGIE_CLASS_ID_BOAT:0,
        PGIE_CLASS_ID_TRUCK:0,
        PGIE_CLASS_ID_TRAIN:0,
        PGIE_CLASS_ID_BUS:0,
        PGIE_CLASS_ID_AEROPLANE:0,
        PGIE_CLASS_ID_MOTORBIKE:0,
        PGIE_CLASS_ID_VEHICLE:0,
        PGIE_CLASS_ID_BICYCLE:0,
        PGIE_CLASS_ID_PERSON:0
    }
    num_rects=0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        # NOTE(review): bare return yields None instead of a Gst.PadProbeReturn
        # value — confirm the bindings treat that as OK.
        return
    # Retrieve batch metadata from the gst_buffer
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
            # The casting is done by pyds.NvDsFrameMeta.cast()
            # The casting also keeps ownership of the underlying memory
            # in the C code, so the Python garbage collector will leave
            # it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number=frame_meta.frame_num
        frame_numberx=frame_meta.frame_num  # exported for the MQTT publisher
        num_rects = frame_meta.num_obj_meta
        num_rectsx = frame_meta.num_obj_meta  # exported for the MQTT publisher
        l_obj=frame_meta.obj_meta_list
        # Walk the per-frame object list and tally detections per class id.
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            try:
                l_obj=l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        # NOTE(review): the label text says Bird/Person but indexes CUP and
        # BOTTLE counters — confirm which pairing is intended.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Bird_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_CUP], obj_counter[PGIE_CLASS_ID_BOTTLE])
        # Export the counts of the ten configured classes; newValue1..10 are
        # module globals presumably set by incoming MQTT commands — set
        # elsewhere in this file, not visible here.
        Object1 = obj_counter[newValue1]
        Object2 = obj_counter[newValue2]
        Object3 = obj_counter[newValue3]
        Object4 = obj_counter[newValue4]
        Object5 = obj_counter[newValue5]
        Object6 = obj_counter[newValue6]
        Object7 = obj_counter[newValue7]
        Object8 = obj_counter[newValue8]
        Object9 = obj_counter[newValue9]
        Object10 = obj_counter[newValue10]
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font , font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        # print(pyds.get_string(py_nvosd_text_params.display_text))
        # NOTE(review): the display meta is never attached to the frame (the
        # nvds_add_display_meta_to_frame call below is commented out), so the
        # text is not actually rendered — confirm if intentional.
        #pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame=l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
######################################################################
def main(args):
# Check input arguments
if len(args) != 2:
sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
sys.exit(1)
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
# Source element for reading from the file
print("Creating Source \n ")
source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
if not source:
sys.stderr.write(" Unable to create Source \n")
caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
if not caps_v4l2src:
sys.stderr.write(" Unable to create v4l2src capsfilter \n")
print("Creating Video Converter \n")
# Adding videoconvert -> nvvideoconvert as not all
# raw formats are supported by nvvideoconvert;
# Say YUYV is unsupported - which is the common
# raw format for many logi usb cams
# In case we have a camera with raw format supported in
# nvvideoconvert, GStreamer plugins' capability negotiation
# shall be intelligent enough to reduce compute by
# videoconvert doing passthrough (TODO we need to confirm this)
# videoconvert to make sure a superset of raw formats are supported
vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
if not vidconvsrc:
sys.stderr.write(" Unable to create videoconvert \n")
# nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
if not nvvidconvsrc:
sys.stderr.write(" Unable to create Nvvideoconvert \n")
caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
if not caps_vidconvsrc:
sys.stderr.write(" Unable to create capsfilter | |
<gh_stars>1-10
#!/usr/bin/env python
import numpy as np
import pandas as pd
import sys, os
import argparse as ap
import glob
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.gridspec as gridspec
import seaborn as sns
from functools import partial
import itertools as it
import subprocess as sb
from matplotlib import rc
from scipy import stats
from mpl_toolkits.axes_grid1 import make_axes_locatable
matplotlib.rcParams["svg.fonttype"] = "none"
matplotlib.rcParams["font.family"] = "Arial"
matplotlib.rcParams["font.size"] = 14
from scipy import stats as sts
from scipy.stats import rankdata
from scipy.spatial import distance
from scipy.cluster import hierarchy
import statsmodels.api as sm
import statsmodels.formula.api as smf
import pingouin as pg
from sklearn.metrics import pairwise_distances
from sklearn.metrics import roc_auc_score
from metareg import RE_meta_binary, RE_meta, singleStudyEffect
## python oral_introgression.py Oral_Richness ## DISEASES
## python oral_introgression.py Oral_Richness -la age -tm REG -dt datas/age_u_species.tsv
## python oral_introgression.py Oral_Richness -la BMI -tm REG -dt datas/bmi_u_species.tsv
## python oral_introgression.py Oral_Richness -la gender:male:female -tm SEX -dt datas/sex_u_species.tsv
class oral_introgression(object):
def readargs(self, args):
    """Define and parse the command-line interface.

    Positional ``Oral_Richness`` names the oral score to analyse; the
    remaining options select the dataset, stratification column, target
    variable and taxon-filtering thresholds.

    NOTE(review): the *args* parameter is currently ignored — argparse
    reads ``sys.argv[1:]`` directly.  Confirm callers rely on that before
    forwarding *args* to ``parse_args``.
    """
    parser = ap.ArgumentParser()
    parser.add_argument("Oral_Richness", type=str)
    parser.add_argument("-dt", "--dataset", type=str, default="nested/the_big_usable.tsv")
    parser.add_argument("-st", "--stratify", type=str, default="dataset_name")
    parser.add_argument("-la", "--lookat", type=str, default="your_problem:positive:negative")
    parser.add_argument("-os", "--oral_species", type=str, default="cMD3_oral_samples.tsv")
    parser.add_argument("-z", "--featid", type=str, default="s__")
    parser.add_argument("-ma", "--min_ab", type=float, default=0.01)
    parser.add_argument("-mp", "--min_prev", type=float, default=5)
    parser.add_argument("-tm", "--type_of_meta", type=str, default="CLS",
                        choices=["CLS", "REG", "SEX"])
    return parser.parse_args()
def __init__(self, args):
    """Parse CLI options, load the abundance table and derive the
    per-sample oral scores.  Heavy file I/O happens here, at
    construction time.
    """
    self.args = self.readargs(args)
    # Substring tag that marks taxonomic feature rows (e.g. "s__").
    self.featid = self.args.featid
    # "CLS"/"SEX" runs expect "condition:positive:negative";
    # "REG" runs take a bare metadata column name.
    if self.args.type_of_meta in ["CLS", "SEX"]:
        self.condition, self.positive, self.negative = tuple(self.args.lookat.split(":"))
    elif self.args.type_of_meta == "REG":
        self.condition = self.args.lookat
    # Main table: rows = metadata fields + taxa, columns = samples.
    self.input = pd.read_csv(self.args.dataset, sep="\t", header=0, index_col=0, low_memory=False, engine="c")
    # Oral-cavity reference samples (built and cached on first use).
    self.oral_cMD = self.define_oral_bases(self.args.oral_species)
    # self.input plus the four derived Oral_* score rows.
    self.oral_estimate = self.quantify_oral_introgression()
    # Cohort-key -> disease-label lookup used by the plotting/meta steps.
    self.mapping_with_diseases = self.map_2_diseases()
def define_oral_bases(self, oral_cmd):
    """Return the table of oral-cavity reference samples.

    If *oral_cmd* already exists on disk it is simply read back
    (missing cells become the string "NA").  Otherwise the table is
    built from ``March_2021_usable.tsv``: keep only baseline visits
    (days_from_first_collection <= 0), only oral-cavity samples, and
    only the first sample per subject; the result is cached to
    *oral_cmd* and returned.

    Fix: the original computed a ``feats`` list (a scan of every
    feature row of the table) and never used it — dead work, removed.

    :param oral_cmd: path of the cached oral-reference TSV.
    :return: pandas DataFrame, rows = metadata/taxa, columns = samples.
    """
    if os.path.isfile(oral_cmd):
        return pd.read_csv(oral_cmd, sep="\t", header=0, index_col=0, low_memory=False, engine="c").fillna("NA")
    OP = pd.read_csv("March_2021_usable.tsv", sep="\t", header=0, index_col=0, low_memory=False, engine="c").fillna("NA")
    # Normalise the collection-day row so it can be compared numerically.
    OP.loc["days_from_first_collection"] = [(0.0 if (str(c) == "NA") else float(c)) for c in OP.loc["days_from_first_collection"]]
    OP = OP.loc[:, ~(OP.loc["days_from_first_collection"] > 0.0)]
    OP = OP.loc[:, OP.loc["body_site"] == "oralcavity"]
    # Samples are columns, so deduplicate subjects on the transpose.
    OP = OP.T.drop_duplicates(subset="subjectID", keep="first").T
    OP.index.name = "sampleID"
    OP.to_csv(oral_cmd, sep="\t", header=True, index=True)
    return OP
def quantify_oral_introgression(self):
    """Append four per-sample oral scores as new rows of the input table.

    "Oral taxa" are the features of the oral reference table whose mean
    relative abundance exceeds --min_ab and whose prevalence exceeds
    --min_prev percent.  Rows added to a copy of self.input:
    Oral_Richness (summed relative abundance of oral taxa),
    Oral_Entropy (Shannon entropy over oral taxa),
    Oral_Gini (Simpson-style sum of squared proportions),
    Oral_Fraction (share of detected species that are oral).
    Side effect: writes the selected taxa to Oral_species_March_21.tsv.
    """
    print("Initializing oral-score estimation...")
    feats = [i for i in self.oral_cMD.index.tolist() if (self.featid in i)]
    # Mean abundance and prevalence (% of reference samples with a
    # non-zero value) of every taxon in the oral reference cohort.
    abuns = dict([(ft, np.mean(self.oral_cMD.loc[ft].values.astype(float))) for ft in feats])
    prevs = dict([(ft, (np.count_nonzero(self.oral_cMD.loc[ft].values.astype(float))/float(self.oral_cMD.shape[1]))*100.) for ft in feats])
    oral_taxa = [ft for ft in feats if ((abuns[ft]>self.args.min_ab) and (prevs[ft]>self.args.min_prev))]
    print("The amount of oral taxa you have is ", len(oral_taxa))
    with open("Oral_species_March_21.tsv", "w") as Os:
        for sp in oral_taxa: Os.write(sp + "\n")
    oral_sample = self.oral_cMD.loc[oral_taxa, :]  # NOTE(review): unused below
    def entropy(proportions_array):
        """Shannon entropy; zero proportions contribute 0 (log skipped)."""
        P = np.array(proportions_array, dtype=np.float64)
        log_of_P = [ (np.log(n) if (float(n)!=0.) else 0.0) for n in proportions_array ]
        return -1.*(np.sum(P * np.array(log_of_P, dtype=np.float64)))
    def ginismp(proportions_array):
        """Simpson-style concentration: sum of squared proportions."""
        return np.sum(proportions_array**2.)
    def oral_fraction(tot_proportions_array, or_proportions_array):
        """Fraction of detected (non-zero) species that are oral species."""
        return np.count_nonzero(or_proportions_array) / float(np.count_nonzero(tot_proportions_array))
    metadata = [i for i in self.input.index.tolist() if (not self.featid in i)]  # NOTE(review): unused below
    # Oral taxa actually present in the study-wide table.
    OSP = [osp for osp in oral_taxa if osp in (self.input.index.tolist())]
    oral_estimate = self.input.copy()
    # Per-sample scores; abundances are percentages, hence the /100.
    oral_richness = dict([(samplename, np.sum(oral_estimate.loc[OSP, samplename].values.astype(float)/100.)) \
        for samplename in oral_estimate.columns.tolist()])
    oral_entropy = dict([(samplename, entropy(oral_estimate.loc[OSP, samplename].values.astype(float)/100.)) \
        for samplename in oral_estimate.columns.tolist()])
    oral_ginisimp = dict([(samplename, ginismp(oral_estimate.loc[OSP, samplename].values.astype(float)/100.)) \
        for samplename in oral_estimate.columns.tolist()])
    oral_fraction_ = dict([(samplename, oral_fraction(\
        oral_estimate.loc[[i for i in oral_estimate.index.tolist() if (self.featid in i)], samplename].values.astype(float)/100., \
        oral_estimate.loc[OSP, samplename].values.astype(float)/100. \
        )) for samplename in oral_estimate.columns.tolist()])
    # NaN scores (e.g. a sample with no detected species) are coerced to 0.
    Get = lambda dic,key : dic[key] if (not np.isnan(dic[key])) else 0.0
    oral_estimate.loc["Oral_Richness"] = [Get(oral_richness, samplename) for samplename in oral_estimate.columns.tolist()]
    oral_estimate.loc["Oral_Entropy"] = [Get(oral_entropy, samplename) for samplename in oral_estimate.columns.tolist()]
    oral_estimate.loc["Oral_Gini"] = [Get(oral_ginisimp, samplename) for samplename in oral_estimate.columns.tolist()]
    oral_estimate.loc["Oral_Fraction"] = [Get(oral_fraction_, samplename) for samplename in oral_estimate.columns.tolist()]
    print("Finished oral-score estimation.")
    return oral_estimate
def segregate_datasets(self):
print("Initiating dataset segregation...")
single_datasets = []
for dataset in self.oral_estimate.loc["dataset_name"].unique():
if (not dataset in ["XieH_2016", "HMP_2019_ibdmdb", "NielsenHB_2014"]):
this_estimate = self.oral_estimate.loc[:, self.oral_estimate.loc["dataset_name"]==dataset]
key = dataset
single_datasets += [(key, this_estimate)]
else:
if dataset in ["HMP_2019_ibdmdb", "NielsenHB_2014"]:
for dis,anti_dis in [("UC", "CD"), ("CD", "UC")]:
this_estimate = self.oral_estimate.loc[:, self.oral_estimate.loc["dataset_name"]==dataset]
this_estimate = this_estimate.loc[:, this_estimate.loc["disease_subtype"]!=anti_dis]
key = dataset + "_" + dis
single_datasets += [(key, this_estimate)]
elif dataset == "XieH_2016":
for dis,anti_dis in [("asthma", "migraine"), ("migraine", "asthma")]:
this_estimate = self.oral_estimate.loc[:, self.oral_estimate.loc["dataset_name"]==dataset]
this_estimate = this_estimate.loc[:, this_estimate.loc["study_condition"]!=anti_dis]
key = dataset + "_" + dis
single_datasets += [(key, this_estimate)]
print("Terminated dataset aggregation")
return dict(single_datasets)
def regression(self, index, data, problem):
    """Effect of metadata variable *problem* on score *index*, via OLS
    adjusted for the remaining covariates (age / gender / BMI).

    Samples with a missing covariate are dropped; the model t statistic
    for *problem* is converted to a correlation-like effect size.

    Fix: ``np.float`` (removed in NumPy >= 1.24) replaced with the
    builtin ``float`` — the original raises AttributeError on modern
    NumPy.  Dead locals (``Lens`` and the unused Fisher-z CI scaffolding
    ``Zr``/``SEr``/``r_lw``/``r_up``) removed.

    :return: (effect, p-value for *problem*, n, 0.0, "N").
    """
    datat = data.T
    datat.fillna("NA", inplace=True)
    # Drop samples with any missing covariate.
    datat = datat.loc[datat["age"] != "NA"]
    datat = datat.loc[datat["gender"] != "NA"]
    datat = datat.loc[datat["BMI"] != "NA"]
    datat[index] = datat[index].values.astype(float)
    # For each possible target, the remaining variables act as covariates.
    covariates = {
        "gender": ["age", "BMI"],
        "age": ["BMI", "gender"],
        "BMI": ["age", "gender"],
        "your_problem": ["BMI", "age", "gender"]}
    datat["age"] = datat["age"].values.astype(float)
    datat["BMI"] = datat["BMI"].values.astype(float)
    datat = datat[[index, problem] + covariates[problem]]
    formula = ("%s ~ 1 + " % index) + " + ".join([problem] + covariates[problem])
    model_fit = smf.ols(formula, data=datat).fit()
    t = model_fit.tvalues.loc[problem]
    n = float(len(datat))
    # t-to-r conversion: r = t / sqrt(t^2 + n - 1).
    r = float(t) / np.sqrt(float((t ** 2.) + (n - 1.)))
    # NOTE(review): tanh is applied to r itself (not to a Fisher z value),
    # which slightly shrinks the effect; preserved as-is — confirm intent.
    return np.tanh(r), model_fit.pvalues.loc[problem], n, 0.0, "N"
def std_mean_diff(self, index, data, problem, positive_class, negative_class):
    """Covariate-adjusted standardized mean difference (Cohen's d) of
    score *index* between *positive_class* and *negative_class* samples
    of metadata column *problem* (OLS adjusted for age/gender/BMI),
    plus an unadjusted Wilcoxon rank-sum p-value.

    Fix: the model-term lookup now uses the *problem*/*positive_class*
    parameters instead of hard-coded ``self.condition``/``self.positive``
    — identical at every visible call site (where problem ==
    self.condition), but consistent with this method's own signature.
    Unused CI scaffolding (``SEd``/``d_lw``/``d_up``) removed.

    :return: (d, Wald p, Wilcoxon p, (n_negative, n_positive), 0.0, "N").
    """
    datat = data.T
    datat.fillna("NA", inplace=True)
    # Drop samples with any missing covariate.
    datat = datat.loc[datat["age"] != "NA"]
    datat = datat.loc[datat["gender"] != "NA"]
    datat = datat.loc[datat["BMI"] != "NA"]
    datat[index] = datat[index].values.astype(float)
    datat["age"] = datat["age"].values.astype(float)
    datat["BMI"] = datat["BMI"].values.astype(float)
    covariates = {
        "gender": ["age", "BMI"],
        "age": ["BMI", "gender"],
        "BMI": ["age", "gender"],
        "your_problem": ["BMI", "age", "gender"]}
    datat = datat[[index, problem] + covariates[problem]]
    formula = ("%s ~ 1 + " % index) + " + ".join([problem] + covariates[problem])
    model_fit = smf.ols(formula, data=datat).fit()
    # Patsy names the treatment-coded categorical term "problem[T.level]".
    term = "%s[T.%s]" % (problem, positive_class)
    # NOTE(review): the sign flip matches the original; presumably it
    # orients d as positive-vs-negative — confirm against downstream use.
    t = model_fit.tvalues.loc[term] * (-1.)
    n1 = float(len(datat.loc[(datat[problem] == negative_class)]))
    n2 = float(len(datat.loc[(datat[problem] == positive_class)]))
    wilco = sts.ranksums(
        datat.loc[datat[problem].isin([positive_class]), index].values.astype(float),
        datat.loc[datat[problem].isin([negative_class]), index].values.astype(float))[1]
    wald = model_fit.pvalues.loc[term]
    # Convert t to Cohen's d for two groups of size n1 / n2.
    d = (t * (n1 + n2)) / float(np.sqrt(n1 * n2) * np.sqrt(n1 + n2 - 2))
    return d, wald, wilco, (n1, n2), 0.0, "N"
def compute_auc(self, cohort_frame, oral_index):
    """AUC of the oral score *oral_index* for separating the positive
    from the negative class of self.condition within one cohort frame.
    """
    print("Starting AUC computation...", end="")
    labels = []
    for label in cohort_frame.loc[self.condition]:
        labels.append(1.0 if label == self.positive else 0.0)
    scores = cohort_frame.loc[oral_index]
    print("Finished.")
    return roc_auc_score(labels, scores)
def wilcoxon(self, cohort_frame, oral_index):
print("Starting Stat Test comput...", end="")
positives = cohort_frame.loc[oral_index, cohort_frame.loc[self.condition]==self.positive]
negatives = cohort_frame.loc[oral_index, cohort_frame.loc[self.condition]==self.negative]
print("Finished.")
return sts.ranksums(positives, negatives)[1]
def score_arrays(self, cohort_frame, oral_index):
positives = cohort_frame.loc[oral_index, cohort_frame.loc[self.condition]==self.positive]
negatives = cohort_frame.loc[oral_index, cohort_frame.loc[self.condition]==self.negative]
return positives,negatives
def map_2_diseases(self):
    """Map each cohort key (dataset name, possibly suffixed with its
    condition) to its disease label.  Written as a literal so each
    pairing is visible at a glance.
    """
    return {
        "XieH_2016_asthma": "asthma",
        "XieH_2016_migraine": "migraine",
        "ZhuF_2020": "schizofrenia",
        "JieZ_2017": "ACVD",
        "QinN_2014": "cirrhosis",
        "RubelMA_2020": "STH",
        "YeZ_2018": "BD",
        "NagySzakalD_2017": "ME/CFS",
        "HMP_2019_ibdmdb_UC": "UC",
        "NielsenHB_2014_UC": "UC",
        "HMP_2019_ibdmdb_CD": "CD",
        "NielsenHB_2014_CD": "CD",
        "QinJ_2012": "T2D",
        "KarlssonFH_2013": "T2D",
        "SankaranarayananK_2015": "T2D",
        "ZellerG_2014": "CRC",
        "YuJ_2015": "CRC",
        "FengQ_2015": "CRC",
        "VogtmannE_2016": "CRC",
        "ThomasAM_2019_a": "CRC",
        "ThomasAM_2019_b": "CRC",
        "WirbelJ_2018": "CRC",
        "GuptaA_2019": "CRC",
        "HanniganGD_2017": "CRC",
        "YachidaS_2019": "CRC",
    }
def regr_validation(self, an_oral_index):
    """Run the regression-type meta-analysis of *an_oral_index* against
    self.condition, stratified by the --stratify column.
    """
    strat = self.args.stratify
    cohorts = self.input.loc[strat].unique().tolist()
    frames = {}
    for cohort in cohorts:
        frames[cohort] = self.oral_estimate.loc[:, self.oral_estimate.loc[strat] == cohort]
    outfile = an_oral_index + "_METAANALYSIS_" + self.condition
    self.meta_analysis(frames, an_oral_index, cohorts, self.condition, None,
                      None, outfile, self.args.type_of_meta)
def sex_validation(self, an_oral_index):
    """Meta-analyse *an_oral_index* between the two classes of
    self.condition (a CLS run), printing the per-cohort AUC first.
    """
    strat = self.args.stratify
    cohorts = self.input.loc[strat].unique().tolist()
    frames = {}
    for cohort in cohorts:
        frames[cohort] = self.oral_estimate.loc[:, self.oral_estimate.loc[strat] == cohort]
    outfile = an_oral_index + "_METAANALYSIS_" + self.condition
    aucs = {}
    for cohort in cohorts:
        aucs[cohort] = self.compute_auc(frames[cohort], an_oral_index)
    for cohort in aucs:
        print(cohort, " ==> ", aucs[cohort])
    self.meta_analysis(frames, an_oral_index, cohorts, self.condition, self.positive,
                      self.negative, outfile, "CLS")
def main_validation(self, an_oral_index):
    """Disease meta-analysis of *an_oral_index* over all cohorts (CRC
    and non-CRC), followed by the composite main figure.
    """
    segregated = self.segregate_datasets()
    non_crc_datasets = [
        "XieH_2016_asthma", "XieH_2016_migraine",
        "ZhuF_2020", "JieZ_2017", "QinN_2014",
        "RubelMA_2020", "YeZ_2018",
        "NagySzakalD_2017", "HMP_2019_ibdmdb_UC",
        "NielsenHB_2014_UC", "HMP_2019_ibdmdb_CD",
        "NielsenHB_2014_CD",
        "QinJ_2012", "KarlssonFH_2013",
        "SankaranarayananK_2015"]
    crc_datasets = [
        "ZellerG_2014", "YuJ_2015", "FengQ_2015",
        "VogtmannE_2016", "ThomasAM_2019_a",
        "ThomasAM_2019_b", "WirbelJ_2018",
        "GuptaA_2019", "HanniganGD_2017",
        "YachidaS_2019"]
    # Per-cohort discrimination (AUC) and unadjusted rank-sum p-values.
    aucs = {}
    for cohort in non_crc_datasets + crc_datasets:
        aucs[cohort] = self.compute_auc(segregated[cohort], an_oral_index)
    pvals = {}
    for cohort in non_crc_datasets + crc_datasets:
        pvals[cohort] = self.wilcoxon(segregated[cohort], an_oral_index)
    outfile = an_oral_index + "_METAANALYSIS_" + self.condition
    self.meta_analysis(segregated, an_oral_index, crc_datasets + non_crc_datasets,
                      self.condition, self.positive, self.negative, outfile,
                      self.args.type_of_meta)
    ordered = crc_datasets + non_crc_datasets
    arrays = {}
    for cohort in ordered:
        arrays[cohort] = self.score_arrays(segregated[cohort], an_oral_index)
    self.build_the_main_figure(
        segregated, ordered, crc_datasets, arrays, an_oral_index,
        aucs, pvals, outfile, self.args.type_of_meta)
def meta_analysis(self, single_datasets, INDEX, datasets, \
problem, positive_class, negative_class, out_metaanalysis_file, TYPE):
study2lens = {}
singleStudiesClass = []
if TYPE=="CLS":
for dat in datasets:
print("Estimating data %s [ %s ]" %(dat, problem), end=" ==> ")
cohenD, p_value_cor, p_value_crude, Lens, fake_p, ci = \
self.std_mean_diff(INDEX, single_datasets[dat], problem, | |
<filename>src/vehicleDynamics_drifter.py<gh_stars>0
import logging
from commonroad.steeringConstraints import steeringConstraints
from commonroad.accelerationConstraints import accelerationConstraints
from commonroad.vehicleDynamics_KS import vehicleDynamics_KS
from commonroad import vehicleParameters
from commonroad import tireModel
import math
from typing import *
# from .vehicleParameters import VehicleParameters, vehicle_params_type
from src.l2race_utils import my_logger
logger = my_logger(__name__)
logger.setLevel(logging.DEBUG)
# Below this body-frame speed (m/s) the dynamic model is replaced by the
# kinematic single-track model: several slip terms divide by speed and
# become numerically unstable near zero (see vehicleDynamics_drifter).
KS_SWITCH_SPEED=2.0

import cython
# Report at import time whether this module runs as compiled Cython or as
# plain interpreted Python (performance differs substantially).
if cython.compiled:
    logger.info("check_cython: {} is compiled Cython.".format(__file__))
else:
    logger.warning("check_cython: {} is still just a slowly interpreted script.".format(__file__))

from numba import jit, float64, deferred_type
import numba as nb
# Numba type of a (non-reflected) list of float64, intended for the jit
# signature commented out below.
fa=nb.types.List(nb.float64, reflected=False) # define numba type of list of float
# @jit(fa(fa, fa, vehicle_params_type))
def vehicleDynamics_drifter(x,uInit,p):
# vehicleDynamics_drifter - drifter model from
# Goh, <NAME>., <NAME>, and <NAME>. 2020. “Toward Automated Vehicle Control Beyond the Stability Limits: Drifting Along a General Path.” Journal of Dynamic Systems, Measurement, and Control 142 (2). https://doi.org/10.1115/1.4045320.
#
# Syntax:
# f = vehicleDynamics_MB(t,x,u,p)
#
# Inputs:
# x - vehicle state vector
# u - vehicle input vector
# p - vehicle parameter vector
#
# Outputs:
# f - right-hand side of differential equations
#
# Example:
#
# See also: ---
# Author: from paper, by <NAME>
# Written: 25 July 2020
#------------- BEGIN CODE --------------
# set gravity constant
g = 9.81 #[m/s^2]
#states
#x1 = x-position in a global coordinate system
#x2 = y-position in a global coordinate system
#x3 = steering angle of front wheels
#x4 = speed along body axis
#x5 = yaw angle
#x6 = yaw rate
#x11 = velocity in y-direction
#x12 = z-position
#x13 = velocity in z-direction
#u1 = steering angle velocity of front wheels
#u2 = acceleration
#consider steering constraints
u = [
steeringConstraints(x[2],uInit[0],p.steering),
accelerationConstraints(x[3],uInit[1],p.longitudinal)
]
#compute slip angle at cg
#switch to kinematic model for small velocities
# switch to kinematic model for small velocities
if x[3] < 0 or abs(x[3]) < KS_SWITCH_SPEED: # tobi added for reverse gear and increased to 2m/s to reduce numerical instability at low speed by /speed - hint from matthias
# wheelbase
lwb = p.a + p.b
# system dynamics
x_ks = [x[0], x[1], x[2], x[3], x[4]]
f_ks = vehicleDynamics_KS(x_ks, u, p)
f = [f_ks[0], f_ks[1], f_ks[2], f_ks[3], f_ks[4],
u[1] / lwb * math.tan(x[2]) + x[3] / (lwb * math.cos(x[2]) ** 2) * u[0],
0]
return f
vel = math.sqrt(x[3]**2 + x[10]**2)
#vertical tire forces
F_z_LF = (x[16] + p.R_w*(math.cos(x[13]) - 1) - 0.5*p.T_f*math.sin(x[13]))*p.K_zt
F_z_RF = (x[16] + p.R_w*(math.cos(x[13]) - 1) + 0.5*p.T_f*math.sin(x[13]))*p.K_zt
F_z_LR = (x[21] + p.R_w*(math.cos(x[18]) - 1) - 0.5*p.T_r*math.sin(x[18]))*p.K_zt
F_z_RR = (x[21] + p.R_w*(math.cos(x[18]) - 1) + 0.5*p.T_r*math.sin(x[18]))*p.K_zt
#obtain individual tire speeds
u_w_lf = (x[3] + 0.5*p.T_f*x[5])*math.cos(x[2]) + (x[10] + p.a*x[5])*math.sin(x[2])
u_w_rf = (x[3] - 0.5*p.T_f*x[5])*math.cos(x[2]) + (x[10] + p.a*x[5])*math.sin(x[2])
u_w_lr = x[3] + 0.5*p.T_r*x[5]
u_w_rr = x[3] - 0.5*p.T_r*x[5]
#compute longitudinal slip
#switch to kinematic model for small velocities
if x[3]<0 or abs(x[3]) < 2.0:
s_lf = 0
s_rf = 0
s_lr = 0
s_rr = 0
else:
s_lf = 1 - p.R_w*x[23]/u_w_lf
s_rf = 1 - p.R_w*x[24]/u_w_rf
s_lr = 1 - p.R_w*x[25]/u_w_lr
s_rr = 1 - p.R_w*x[26]/u_w_rr
#lateral slip angles
#switch to kinematic model for small velocities
if abs(x[3]) < KS_SWITCH_SPEED:
alpha_LF = 0
alpha_RF = 0
alpha_LR = 0
alpha_RR = 0
else:
alpha_LF = math.atan((x[10] + p.a*x[5] - x[14]*(p.R_w - x[16]))/(x[3] + 0.5*p.T_f*x[5])) - x[2]
alpha_RF = math.atan((x[10] + p.a*x[5] - x[14]*(p.R_w - x[16]))/(x[3] - 0.5*p.T_f*x[5])) - x[2]
alpha_LR = math.atan((x[10] - p.b*x[5] - x[19]*(p.R_w - x[21]))/(x[3] + 0.5*p.T_r*x[5]))
alpha_RR = math.atan((x[10] - p.b*x[5] - x[19]*(p.R_w - x[21]))/(x[3] - 0.5*p.T_r*x[5]))
#auxiliary suspension movement
z_SLF = (p.h_s - p.R_w + x[16] - x[11])/math.cos(x[6]) - p.h_s + p.R_w + p.a*x[8] + 0.5*(x[6] - x[13])*p.T_f
z_SRF = (p.h_s - p.R_w + x[16] - x[11])/math.cos(x[6]) - p.h_s + p.R_w + p.a*x[8] - 0.5*(x[6] - x[13])*p.T_f
z_SLR = (p.h_s - p.R_w + x[21] - x[11])/math.cos(x[6]) - p.h_s + p.R_w - p.b*x[8] + 0.5*(x[6] - x[18])*p.T_r
z_SRR = (p.h_s - p.R_w + x[21] - x[11])/math.cos(x[6]) - p.h_s + p.R_w - p.b*x[8] - 0.5*(x[6] - x[18])*p.T_r
dz_SLF = x[17] - x[12] + p.a*x[9] + 0.5*(x[7] - x[14])*p.T_f
dz_SRF = x[17] - x[12] + p.a*x[9] - 0.5*(x[7] - x[14])*p.T_f
dz_SLR = x[22] - x[12] - p.b*x[9] + 0.5*(x[7] - x[19])*p.T_r
dz_SRR = x[22] - x[12] - p.b*x[9] - 0.5*(x[7] - x[19])*p.T_r
#camber angles
gamma_LF = x[6] + p.D_f*z_SLF + p.E_f*(z_SLF)**2
gamma_RF = x[6] - p.D_f*z_SRF - p.E_f*(z_SRF)**2
gamma_LR = x[6] + p.D_r*z_SLR + p.E_r*(z_SLR)**2
gamma_RR = x[6] - p.D_r*z_SRR - p.E_r*(z_SRR)**2
#compute longitudinal tire forces using the magic formula for pure slip
F0_x_LF = tireModel.mFormulaLongitudinal(s_lf, gamma_LF, F_z_LF, p.tire)
F0_x_RF = tireModel.mFormulaLongitudinal(s_rf, gamma_RF, F_z_RF, p.tire)
F0_x_LR = tireModel.mFormulaLongitudinal(s_lr, gamma_LR, F_z_LR, p.tire)
F0_x_RR = tireModel.mFormulaLongitudinal(s_rr, gamma_RR, F_z_RR, p.tire)
#compute lateral tire forces using the magic formula for pure slip
res = tireModel.mFormulaLateral(alpha_LF, gamma_LF, F_z_LF, p.tire)
F0_y_LF = res[0]
mu_y_LF = res[1]
res = tireModel.mFormulaLateral(alpha_RF, gamma_RF, F_z_RF, p.tire)
F0_y_RF = res[0]
mu_y_RF = res[1]
res = tireModel.mFormulaLateral(alpha_LR, gamma_LR, F_z_LR, p.tire)
F0_y_LR = res[0]
mu_y_LR = res[1]
res = tireModel.mFormulaLateral(alpha_RR, gamma_RR, F_z_RR, p.tire)
F0_y_RR = res[0]
mu_y_RR = res[1]
#compute longitudinal tire forces using the magic formula for combined slip
F_x_LF = tireModel.mFormulaLongitudinalComb(s_lf, alpha_LF, F0_x_LF, p.tire)
F_x_RF = tireModel.mFormulaLongitudinalComb(s_rf, alpha_RF, F0_x_RF, p.tire)
F_x_LR = tireModel.mFormulaLongitudinalComb(s_lr, alpha_LR, F0_x_LR, p.tire)
F_x_RR = tireModel.mFormulaLongitudinalComb(s_rr, alpha_RR, F0_x_RR, p.tire)
#compute lateral tire forces using the magic formula for combined slip
F_y_LF = tireModel.mFormulaLateralComb(s_lf, alpha_LF, gamma_LF, mu_y_LF, F_z_LF, F0_y_LF, p.tire)
F_y_RF = tireModel.mFormulaLateralComb(s_rf, alpha_RF, gamma_RF, mu_y_RF, F_z_RF, F0_y_RF, p.tire)
F_y_LR = tireModel.mFormulaLateralComb(s_lr, alpha_LR, gamma_LR, mu_y_LR, F_z_LR, F0_y_LR, p.tire)
F_y_RR = tireModel.mFormulaLateralComb(s_rr, alpha_RR, gamma_RR, mu_y_RR, F_z_RR, F0_y_RR, p.tire)
#auxiliary movements for compliant joint equations
delta_z_f = p.h_s - p.R_w + x[16] - x[11]
delta_z_r = p.h_s - p.R_w + x[21] - x[11]
delta_phi_f = x[6] - x[13]
delta_phi_r = x[6] - x[18]
dot_delta_phi_f = x[7] - x[14]
dot_delta_phi_r = x[7] - x[19]
dot_delta_z_f = x[17] - x[12]
dot_delta_z_r = x[22] - x[12]
dot_delta_y_f = x[10] + p.a*x[5] - x[15]
dot_delta_y_r = x[10] - p.b*x[5] - x[20]
delta_f = delta_z_f*math.sin(x[6]) - x[27]*math.cos(x[6]) - (p.h_raf - p.R_w)*math.sin(delta_phi_f)
delta_r = delta_z_r*math.sin(x[6]) - x[28]*math.cos(x[6]) - (p.h_rar - p.R_w)*math.sin(delta_phi_r)
dot_delta_f = (delta_z_f*math.cos(x[6]) + x[27]*math.sin(x[6]))*x[7] + dot_delta_z_f*math.sin(x[6]) - dot_delta_y_f*math.cos(x[6]) - (p.h_raf - p.R_w)*math.cos(delta_phi_f)*dot_delta_phi_f
dot_delta_r = (delta_z_r*math.cos(x[6]) + x[28]*math.sin(x[6]))*x[7] + dot_delta_z_r*math.sin(x[6]) - dot_delta_y_r*math.cos(x[6]) - (p.h_rar - p.R_w)*math.cos(delta_phi_r)*dot_delta_phi_r
#compliant joint forces
F_RAF = delta_f*p.K_ras + dot_delta_f*p.K_rad
F_RAR = delta_r*p.K_ras + dot_delta_r*p.K_rad
#auxiliary suspension forces (bump stop neglected squat/lift forces neglected)
F_SLF = p.m_s*g*p.b/(2*(p.a+p.b)) - z_SLF*p.K_sf - dz_SLF*p.K_sdf + (x[6] - x[13])*p.K_tsf/p.T_f
F_SRF = p.m_s*g*p.b/(2*(p.a+p.b)) - z_SRF*p.K_sf - dz_SRF*p.K_sdf - (x[6] - x[13])*p.K_tsf/p.T_f
F_SLR = p.m_s*g*p.a/(2*(p.a+p.b)) - z_SLR*p.K_sr - dz_SLR*p.K_sdr + (x[6] - x[18])*p.K_tsr/p.T_r
F_SRR = p.m_s*g*p.a/(2*(p.a+p.b)) - z_SRR*p.K_sr - dz_SRR*p.K_sdr - (x[6] - x[18])*p.K_tsr/p.T_r
#auxiliary variables sprung mass
sumX = F_x_LR + F_x_RR + (F_x_LF + F_x_RF)*math.cos(x[2]) - (F_y_LF + F_y_RF)*math.sin(x[2])
sumN = (F_y_LF + F_y_RF)*p.a*math.cos(x[2]) + (F_x_LF + F_x_RF)*p.a*math.sin(x[2]) \
+ (F_y_RF - F_y_LF)*0.5*p.T_f*math.sin(x[2]) + (F_x_LF - F_x_RF)*0.5*p.T_f*math.cos(x[2]) \
+ (F_x_LR - F_x_RR)*0.5*p.T_r - (F_y_LR + F_y_RR)*p.b
sumY_s = (F_RAF + F_RAR)*math.cos(x[6]) + (F_SLF + F_SLR + F_SRF + F_SRR)*math.sin(x[6])
sumL = 0.5*F_SLF*p.T_f + 0.5*F_SLR*p.T_r - 0.5*F_SRF*p.T_f - 0.5*F_SRR*p.T_r \
- F_RAF/math.cos(x[6])*(p.h_s - x[11] - p.R_w + x[16] - (p.h_raf - p.R_w)*math.cos(x[13])) \
- F_RAR/math.cos(x[6])*(p.h_s - x[11] - p.R_w + x[21] - (p.h_rar - p.R_w)*math.cos(x[18]))
sumZ_s = (F_SLF + F_SLR + F_SRF + F_SRR)*math.cos(x[6]) - (F_RAF + F_RAR)*math.sin(x[6])
sumM_s = p.a*(F_SLF + F_SRF) - p.b*(F_SLR + F_SRR) + ((F_x_LF + F_x_RF)*math.cos(x[2]) \
- (F_y_LF + F_y_RF)*math.sin(x[2]) + F_x_LR + F_x_RR)*(p.h_s - x[11])
#auxiliary variables unsprung mass
sumL_uf = 0.5*F_SRF*p.T_f - 0.5*F_SLF*p.T_f - F_RAF*(p.h_raf - p.R_w) \
+ F_z_LF*(p.R_w*math.sin(x[13]) + 0.5*p.T_f*math.cos(x[13]) - p.K_lt*F_y_LF) \
- F_z_RF*(-p.R_w*math.sin(x[13]) + 0.5*p.T_f*math.cos(x[13]) + | |
2541], [4413, 677], [1966, 3412],
[2108, 796], [1516, 1715], [3760, 2349], [3275, 4385], [2910, 3629], [2176, 2622], [3927, 3298],
[3804, 3377], [3266, 3507], [3812, 1680], [2368, 1154], [3842, 817], [1364, 2415], [1470, 496],
[2620, 587], [3029, 4178], [2791, 4154], [3444, 3490], [4255, 2945], [878, 2019], [4206, 3954],
[2700, 517], [2480, 2032], [3756, 1635], [308, 1017], [3920, 779], [2856, 2570], [1450, 1602],
[4336, 3299], [3654, 738], [150, 2923], [4995, 4621], [642, 3704], [4483, 4976], [1649, 1324],
[1105, 2745], [1288, 572], [4729, 2329], [3493, 1374], [4321, 2418], [4832, 3400], [21, 849], [3509, 3328],
[3975, 4574], [117, 2718], [4199, 2131], [2753, 4630], [1216, 4001], [2230, 1835], [3545, 4241],
[1490, 4210], [3505, 2604], [828, 2343], [1715, 3772], [2835, 1221], [2983, 111], [2209, 2637],
[4647, 3127], [775, 2494], [2439, 3459], [4691, 1640], [3771, 4411], [3659, 1375], [2571, 4186],
[1890, 4680], [92, 4759], [4916, 2427], [4254, 3481], [3177, 2020], [4900, 3150], [2590, 1441],
[2566, 132], [3569, 2465], [2642, 1445], [1314, 999], [2408, 2168], [2586, 4410], [1620, 56], [527, 1610],
[2582, 47], [838, 1887], [988, 2454], [1284, 3347], [663, 1067], [3001, 1365], [3816, 4281], [3723, 494],
[2051, 4633], [4669, 433], [2682, 4881], [485, 3333], [3939, 812], [3098, 768], [3690, 4387], [3078, 4292],
[2538, 45], [1307, 275], [1983, 147], [4551, 4662], [4076, 4929], [1095, 1584], [912, 3754], [3724, 517],
[1067, 4934], [1254, 4014], [1571, 2520], [4623, 3253], [4656, 2715], [3397, 3255], [3865, 8],
[3858, 1616], [2416, 3880], [306, 3725], [2003, 4527], [3205, 2855], [2874, 571], [855, 837], [1230, 4049],
[1521, 1629], [4015, 1527], [2652, 4949], [4287, 2636], [3113, 2920], [1159, 3921], [1350, 2967],
[708, 1998], [2014, 915], [4665, 588], [3216, 1392], [1750, 490], [2446, 3365], [2145, 1681], [3309, 1151],
[18, 338], [1205, 2979], [1137, 1048], [1882, 2288], [4083, 1822], [387, 4725], [1456, 270], [2944, 2228],
[214, 4369], [4106, 4717], [1623, 91], [110, 2644], [3109, 293], [3919, 154], [4435, 3341], [1404, 1779],
[78, 2045], [119, 1519], [2013, 969], [1772, 3524], [3503, 3407], [855, 3346], [1616, 3371], [2031, 3523],
[3748, 2901], [4583, 1162], [3235, 4978], [4202, 1826], [3809, 2030], [1574, 790], [4131, 4835],
[1449, 2409], [4450, 2529], [3240, 141], [647, 4672], [3104, 2372], [2286, 2599], [1506, 89], [3166, 1649],
[2070, 1654], [2499, 3971], [4952, 3096], [3137, 2420], [2103, 1793], [3574, 2], [2268, 1683],
[2902, 1652], [1904, 1910], [3473, 4724], [1881, 1378], [2676, 3632], [2233, 2128], [1902, 937],
[3139, 1014], [4793, 1425], [664, 1789], [2374, 4009], [3825, 834], [971, 2855], [2200, 1982],
[3730, 1140], [48, 1267], [4491, 696], [4011, 1744], [1863, 2263], [2928, 23], [181, 1457], [4500, 3100],
[1273, 2123], [1910, 4284], [288, 3713], [3471, 106], [1821, 989], [3975, 2245], [3667, 4805],
[3100, 3467], [3850, 863], [3021, 4775], [4056, 4652], [1720, 2576], [1400, 4680], [4341, 1277],
[1373, 3777], [3922, 3197], [4844, 916], [2454, 1245], [2048, 3336], [1570, 3445], [2294, 2354],
[1392, 1033], [2630, 1312], [2097, 2996], [3859, 3396], [260, 4548], [4294, 2974], [3174, 1347],
[78, 1787], [3400, 546], [1263, 2284], [3441, 2057], [775, 3174], [1473, 70], [2632, 1386], [2771, 1493],
[1450, 4469], [2048, 1475], [3534, 294], [4133, 3250], [4193, 4877], [1959, 1903], [2179, 446],
[4312, 4003], [4018, 1045], [24, 3830], [1235, 4251], [2378, 3693], [3226, 1407], [341, 1810],
[2045, 1501], [2607, 3791], [2861, 3849], [1143, 4493], [400, 2803], [2225, 1654], [2249, 2650],
[2856, 667], [4313, 2632], [321, 4502], [1, 2327], [4088, 4034], [2443, 4666], [3694, 4877], [3561, 1445],
[925, 2191], [3500, 3912], [1219, 1316], [4720, 2765], [4936, 238], [3801, 480], [2571, 218], [2160, 1974],
[1699, 3179], [862, 1320], [3916, 176], [426, 1434], [429, 4833], [875, 4356], [1540, 3214], [1134, 1114],
[747, 3266], [3602, 3835], [2514, 2017], [2346, 866], [4487, 3038], [4396, 3523], [4006, 2875],
[553, 1917], [4738, 1939], [2433, 297], [1686, 1356], [4638, 1899], [3644, 3503], [3206, 4366],
[3998, 2751], [487, 1511], [1899, 3992], [2085, 2088], [3213, 579], [2765, 4336], [2984, 3454],
[2162, 4985], [1155, 2], [706, 3919], [1420, 1177], [1957, 3051], [1951, 3012], [4562, 622], [2776, 127],
[3035, 3030], [4160, 1199], [2272, 3307], [2814, 3692], [881, 1916], [1653, 3309], [3593, 1154],
[56, 2545], [1636, 1820], [2900, 3517], [3702, 2734], [4452, 1212], [1477, 4815], [3780, 2546],
[2280, 3607], [3140, 1000], [1923, 4098], [1156, 3637], [3802, 1125], [3672, 454], [4616, 3977],
[1053, 4234], [4578, 3587], [3271, 1492], [1576, 3063], [3158, 2541], [1585, 3507], [1571, 3149],
[2109, 4279], [4416, 3872], [2898, 919], [2996, 419], [4896, 2905], [2752, 4922], [3445, 699], [1883, 71],
[4357, 591], [4711, 3178], [416, 3816], [2322, 724], [3119, 4137], [974, 3751], [621, 4989], [1573, 2788],
[2586, 1953], [1538, 2138], [3041, 3294], [4112, 846], [4510, 2242], [3082, 312], [3849, 2154],
[2373, 304], [701, 3895], [1792, 2899], [534, 2720], [3940, 472], [426, 4688], [814, 461], [960, 20],
[1208, 169], [1551, 822], [1500, 2595], [743, 3624], [4522, 329], [3701, 967], [43, 4433], [1680, 3804],
[2666, 3956], [4242, 3045], [4921, 3002], [4971, 1359], [3595, 2474], [3763, 4071], [4258, 1730],
[2912, 24], [2126, 1827], [3952, 3889], [3792, 3690], [2732, 2202], [1017, 972], [2281, 604], [2949, 3437],
[4761, 2656], [888, 3973], [1799, 2642], [2161, 4196], [1506, 1867], [2520, 4827], [2319, 4245],
[2714, 4208], [4169, 2235], [684, 2750], [3052, 724], [3693, 372], [946, 4024], [1252, 1458], [3189, 1103],
[2211, 330], [3563, 4959], [2701, 526], [3710, 1436], [4256, 2982], [2348, 4481], [2942, 3784],
[4207, 4293], [3558, 1641], [3038, 2438], [1812, 4011], [3018, 4539], [2509, 2968], [447, 718],
[1498, 3671], [2205, 3485], [596, 1510], [226, 4885], [515, 3620], [478, 1493], [4368, 1062], [2955, 691],
[71, 1721], [2441, 647], [4753, 4776], [1605, 4683], [3538, 305], [3272, 3718], [294, 2052], [2243, 689],
[1171, 1654], [1323, 2138], [268, 3903], [4930, 3544], [4486, 1196], [4429, 762], [3332, 1513],
[2380, 2642], [4949, 3774], [4443, 2745], [2468, 2793], [1994, 4816], [2012, 3579], [4096, 4377],
[66, 589], [218, 904], [3574, 4230], [2631, 4654]]) # 供应地坐标
coordinates2goods = []  # goods available at each supply point
# Run the optimisation (defined elsewhere in this file); presumably this
# populates allmintime / alllujing used below — TODO confirm.
data()
truck_coordinates = [[4292, 4798, 1]]  # truck start position (x, y, id/count)
'''for p in range(len(truck_coordinates)): #画出最优路径
    drawpicture(p)'''
min_time = min(allmintime)  # shortest total time over all runs
number = np.argmin(allmintime)  # index of the run achieving that time
'''print("\n所有路径需要最短时间为:",min_time)
print("此次趟数是:",number)
print("该次路径:",alllujing[number])'''
# Best route with duplicate nodes removed, first occurrence kept.
# (lst.count makes this O(n^2); acceptable for the route lengths here.)
l1 = alllujing[int(np.argmin(allmintime))]
lst = []
for el in l1:
    if lst.count(el) < 1:
        lst.append(el)
'''print(lst)
print(len(lst))'''
class Truck2:
    """State of a single delivery truck: position history, load and mileage."""

    def __init__(self, x, y, volume):
        """Create a truck at (x, y) with carrying capacity *volume*."""
        self.init = 0              # index of the starting point
        self.drivedistance = 0.0   # total distance driven so far
        self.goods = 0             # total goods loaded so far
        self.lujing = []           # current route (node indices)
        self.last_lujing = []      # previously completed route
        self.drawpath = [[x, y]]   # coordinates visited, for plotting
        self.volume = volume       # maximum capacity
        self.involume = 0          # load currently on board

    def __str__(self):
        return '出发点{} 总共走了{}距离 已经装运{}'.format(
            self.init, self.drivedistance, self.goods)
coordinates1goods = np.array(
[10, 8, 5, 3, 6, 7, 10, 7, 1, 1, 10, 5, 1, 8, 9, 5, 10, 5, 2, 5, 5, 3, 6, 3, 7, 1, 8, 10, 2, 10, 1, 10, 6, 10,
6, 6, 6, 2, 3, 1, 10, 10, 8, 5, 7, 2, 3, 2, 8, 10, 4, 2, 5, 7, 1, 9, 9, 8, 4, 9, 2, 2, 1, 10, 8, 8, 3, 7, 6,
10, 2, 3, 6, 5, 9, 5, 7, 6, 6, 9, 3, 5, 7, 3, 5, 4, 6, 5, 4, 5, 2, 5, 10, 4, 10, 9, 8, 3, 1, 4, 9, 6, 3, 3, 8,
2, 5, 5, 6, 4, 8, 2, 1, 3, 9, 5, 5, 2, 2, 5, 9, 6, 3, 5, 6, 5, 5, 5, 2, 6, 4, 9, 3, 9, 10, 5, 8, 2, 2, 1, 4, 7,
7, 8, 7, 7, 5, 6, 2, 7, 6, 1, 1, 10, 4, 7, 9, 4, 4, 1, 5, 7, 6, 3, 10, 1, 7, 9, 1, 1, 6, 9, 3, 10, 2, 3, 5, 9,
7, 7, 5, 10, 4, 3, 3, 9, 10, 2, 9, 2, 6, 3, 8, 6, 8, 9, 8, 10, 4, 5, 2, 5, 5, 7, 9, 9, 5, 6, 3, 4, 8, 1, 6, 9,
2, 1, 4, 3, 8, 5, 10, 9, 1, 10, 8, 4, 7, 5, 9, 5, 9, 7, 8, 3, 1, 3, 8, 1, 4, 6, 1, 1, 8, 8, 10, 7, 9, 9, 8, 1,
5, 3, 10, 1, 1, 1, 7, 6, 1, 4, 10, 6, 3, | |
def FloatValue(self, bFloat: 'double') -> "double":
    r"""Read this config node's value as a float (SWIG-generated; delegates
    to the native library).  NOTE(review): *bFloat* is presumably the
    fallback/default value — confirm against the native GsConfig API."""
    return _gskernel.GsConfig_FloatValue(self, bFloat)
def Append(self, config: 'GsConfig') -> "void":
    r"""Append another configuration branch to this GsConfig."""
    return _gskernel.GsConfig_Append(self, config)
def Clear(self) -> "void":
    r"""Remove all child configuration nodes."""
    return _gskernel.GsConfig_Clear(self)
def Remove(self, childName: 'char const *') -> "void":
    r"""Remove the child configuration named *childName*."""
    return _gskernel.GsConfig_Remove(self, childName)
# Register GsConfig in _gskernel: installs the Python proxy class on the
# native wrapper so C++ GsConfig objects surface as GsConfig instances.
_gskernel.GsConfig_swigregister(GsConfig)
class GsGlobeConfig(object):
    r"""Global configuration class (SWIG proxy).

    NOTE(review): appears functionally identical to GsGlobalConfig below —
    presumably an older spelling kept for compatibility; confirm in the
    native headers.
    """
    # SWIG boilerplate: ownership flag for the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    @staticmethod
    def Instance() -> "GsConfig &":
        r"""Return the root GsConfig object of the global configuration."""
        return _gskernel.GsGlobeConfig_Instance()

    @staticmethod
    def Save(*args) -> "GsString":
        r"""
        *Overload 1:*
        Save the global configuration to a file.

        |

        *Overload 2:*
        Save the global configuration to a string.
        """
        return _gskernel.GsGlobeConfig_Save(*args)

    @staticmethod
    def Load(strFileOrXML: 'char const *', bFile: 'bool'=True) -> "bool":
        r"""Load configuration from an XML file or an XML string.

        ``bFile`` presumably selects file (True) vs. XML-string input —
        confirm against the native documentation.
        """
        return _gskernel.GsGlobeConfig_Load(strFileOrXML, bFile)

    def __init__(self):
        _gskernel.GsGlobeConfig_swiginit(self, _gskernel.new_GsGlobeConfig())
    __swig_destroy__ = _gskernel.delete_GsGlobeConfig

# Register GsGlobeConfig in _gskernel:
_gskernel.GsGlobeConfig_swigregister(GsGlobeConfig)
def GsGlobeConfig_Instance() -> "GsConfig &":
    r"""Return the root GsConfig object of the global configuration."""
    return _gskernel.GsGlobeConfig_Instance()
def GsGlobeConfig_Save(*args) -> "GsString":
    r"""
    *Overload 1:*
    Save the global configuration to a file.

    |

    *Overload 2:*
    Save the global configuration to a string.
    """
    return _gskernel.GsGlobeConfig_Save(*args)
def GsGlobeConfig_Load(strFileOrXML: 'char const *', bFile: 'bool'=True) -> "bool":
    r"""Load configuration from an XML file or an XML string (see GsGlobeConfig.Load)."""
    return _gskernel.GsGlobeConfig_Load(strFileOrXML, bFile)
class GsGlobalConfig(object):
    r"""Global configuration class (SWIG proxy)."""
    # SWIG boilerplate: ownership flag for the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    @staticmethod
    def Instance() -> "GsConfig &":
        r"""Return the root GsConfig object of the global configuration."""
        return _gskernel.GsGlobalConfig_Instance()

    @staticmethod
    def Save(*args) -> "GsString":
        r"""
        *Overload 1:*
        Save the global configuration to a file.

        |

        *Overload 2:*
        Save the global configuration to a string.
        """
        return _gskernel.GsGlobalConfig_Save(*args)

    @staticmethod
    def Load(strFileOrXML: 'char const *', bFile: 'bool'=True) -> "bool":
        r"""Load configuration from an XML file or an XML string.

        ``bFile`` presumably selects file (True) vs. XML-string input —
        confirm against the native documentation.
        """
        return _gskernel.GsGlobalConfig_Load(strFileOrXML, bFile)

    def __init__(self):
        _gskernel.GsGlobalConfig_swiginit(self, _gskernel.new_GsGlobalConfig())
    __swig_destroy__ = _gskernel.delete_GsGlobalConfig

# Register GsGlobalConfig in _gskernel:
_gskernel.GsGlobalConfig_swigregister(GsGlobalConfig)
def GsGlobalConfig_Instance() -> "GsConfig &":
    r"""Return the root GsConfig object of the global configuration."""
    return _gskernel.GsGlobalConfig_Instance()
def GsGlobalConfig_Save(*args) -> "GsString":
    r"""
    *Overload 1:*
    Save the global configuration to a file.

    |

    *Overload 2:*
    Save the global configuration to a string.
    """
    return _gskernel.GsGlobalConfig_Save(*args)
def GsGlobalConfig_Load(strFileOrXML: 'char const *', bFile: 'bool'=True) -> "bool":
    r"""Load configuration from an XML file or an XML string (see GsGlobalConfig.Load)."""
    return _gskernel.GsGlobalConfig_Load(strFileOrXML, bFile)
class GsByteBuffer(object):
    r"""Raw (direct) memory block exposed from the native kernel."""
    # SWIG boilerplate: ownership flag for the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _gskernel.delete_GsByteBuffer

    def BufferSize(self) -> "unsigned int":
        r"""Length of the buffer."""
        return _gskernel.GsByteBuffer_BufferSize(self)

    def SetBufferValue(self, *args) -> "void":
        # Overloaded setter; resolved in the native layer.
        return _gskernel.GsByteBuffer_SetBufferValue(self, *args)

    def Append(self, *args) -> "void":
        # Overloaded append; resolved in the native layer.
        return _gskernel.GsByteBuffer_Append(self, *args)

    def Insert(self, nPos: 'int', val: 'signed char *', nLen: 'int') -> "void":
        return _gskernel.GsByteBuffer_Insert(self, nPos, val, nLen)

    def Allocate(self, nLen: 'unsigned int') -> "void":
        return _gskernel.GsByteBuffer_Allocate(self, nLen)

    def Clear(self) -> "void":
        r"""Clear the data held in the buffer."""
        return _gskernel.GsByteBuffer_Clear(self)

    def Reset(self) -> "void":
        r"""Release the buffer's memory."""
        return _gskernel.GsByteBuffer_Reset(self)

    def Copy(self, pBuff: 'signed char *', nLen: 'int') -> "void":
        return _gskernel.GsByteBuffer_Copy(self, pBuff, nLen)

    def CopyToArray(self, pBuff: 'signed char *', nLen: 'int') -> "bool":
        return _gskernel.GsByteBuffer_CopyToArray(self, pBuff, nLen)

    def ToBase64(self) -> "GsString":
        r"""Encode the binary memory block as a base64 string."""
        return _gskernel.GsByteBuffer_ToBase64(self)

    def FromBase64(self, strBase64: 'char const *') -> "bool":
        r"""Decode a base64 string into this memory block."""
        return _gskernel.GsByteBuffer_FromBase64(self, strBase64)

# Register GsByteBuffer in _gskernel:
_gskernel.GsByteBuffer_swigregister(GsByteBuffer)
class GsGrowByteBuffer(GsByteBuffer):
    r"""Growable raw memory block: its capacity only ever grows, never shrinks."""
    # SWIG boilerplate: ownership flag for the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""Constructor; allocates nLen characters of memory."""
        _gskernel.GsGrowByteBuffer_swiginit(self, _gskernel.new_GsGrowByteBuffer(*args))
    __swig_destroy__ = _gskernel.delete_GsGrowByteBuffer

# Register GsGrowByteBuffer in _gskernel:
_gskernel.GsGrowByteBuffer_swigregister(GsGrowByteBuffer)
class GsRefObject(object):
    r"""Reference-counted object; inherit from this class to give an object
    reference-counting support."""
    # SWIG boilerplate: ownership flag for the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _gskernel.delete_GsRefObject

    def AddRef(self) -> "int":
        r"""
        Increment the reference count.
        :rtype: int
        :return: the reference count after the increment
        """
        return _gskernel.GsRefObject_AddRef(self)

    def Release(self) -> "int":
        r"""
        Decrement the reference count.
        :rtype: int
        :return: the reference count after the decrement
        """
        return _gskernel.GsRefObject_Release(self)

    def RefCount(self) -> "int":
        r"""
        Get the current reference count.
        :rtype: int
        :return: the current value of the reference count
        """
        return _gskernel.GsRefObject_RefCount(self)

# Register GsRefObject in _gskernel:
_gskernel.GsRefObject_swigregister(GsRefObject)
class GsClassFactory(object):
    r"""Class factory for GsRefObjectPtr: registers class-creation functions
    and creates class instances by name."""
    # SWIG boilerplate: ownership flag for the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr

    @staticmethod
    def CreateInstance(className: 'char const *') -> "GsRefObject *":
        r"""Create an instance of the class named *className*."""
        return _gskernel.GsClassFactory_CreateInstance(className)

    def __init__(self):
        _gskernel.GsClassFactory_swiginit(self, _gskernel.new_GsClassFactory())
    __swig_destroy__ = _gskernel.delete_GsClassFactory

# Register GsClassFactory in _gskernel:
_gskernel.GsClassFactory_swigregister(GsClassFactory)
def GsClassFactory_CreateInstance(className: 'char const *') -> "GsRefObject *":
    r"""Create an instance of the class named *className*."""
    return _gskernel.GsClassFactory_CreateInstance(className)
# Field value-type constants re-exported from the native kernel module.
# The bare r-strings below are SWIG-generated attribute docstrings and are
# kept verbatim; English translations are given in the trailing comments.
eErrorType = _gskernel.eErrorType
eBoolType = _gskernel.eBoolType  # BOOL type
r""" BOOL类型"""
eIntType = _gskernel.eIntType  # 32-bit signed integer
r""" 32位的整型"""
eUIntType = _gskernel.eUIntType  # 32-bit unsigned integer
r""" 32位的无符号整型"""
eInt64Type = _gskernel.eInt64Type  # 64-bit signed integer
r""" 64位的整型"""
eUInt64Type = _gskernel.eUInt64Type  # 64-bit unsigned integer
r""" 64位的无符号整型"""
eStringType = _gskernel.eStringType  # string type
r""" 字符串类型"""
eBlobType = _gskernel.eBlobType  # binary (BLOB) type
r""" 二进制类型"""
eFloatType = _gskernel.eFloatType  # single-precision float
r""" 浮点型"""
eDoubleType = _gskernel.eDoubleType  # double-precision float
r""" 双精度浮点型"""
eGeometryType = _gskernel.eGeometryType  # geometry type
r""" 几何类型"""
eDateType = _gskernel.eDateType  # date type
r""" 日期类型"""
class GsColor(object):
r""" 颜色对象"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
r"""
*Overload 1:*
缺省构造
|
*Overload 2:*
根据32位整数构造
|
*Overload 3:*
根据32位整数构造
|
*Overload 4:*
根据R,G,B,A通道构造 :type r: unsigned char
:param r: 红色通道[0,255] :type g: unsigned char
:param g: 绿色通道[0,255] :type b: unsigned char
:param b: 蓝色通道[0,255] :type a: unsigned char
:param a: 透明通道[0,255]
|
*Overload 5:*
根据R,G,B,A通道构造 :type r: unsigned char
:param r: 红色通道[0,255] :type g: unsigned char
:param g: 绿色通道[0,255] :type b: unsigned char
:param b: 蓝色通道[0,255] :param a: 透明通道[0,255]
|
*Overload 6:*
拷贝构造
|
*Overload 7:*
改变透明度构造已有颜色
"""
_gskernel.GsColor_swiginit(self, _gskernel.new_GsColor(*args))
def FlipRGB(self) -> "void":
r""" 交换RGB中R和B的通道"""
return _gskernel.GsColor_FlipRGB(self)
@staticmethod
def FromCSS(csscolor: 'char const *') -> "GsColor":
r""" 从CSS颜色描述构造"""
return _gskernel.GsColor_FromCSS(csscolor)
@staticmethod
def FromCOLORREF(rgb: 'unsigned int') -> "GsColor":
r""" 从Win32的RGB颜色构造"""
return _gskernel.GsColor_FromCOLORREF(rgb)
@staticmethod
def FromARGB(r: 'unsigned char', g: 'unsigned char', b: 'unsigned char', a: 'unsigned char'=255) -> "GsColor":
r"""
根据R,G,B,A通道构造 :type r: unsigned char
:param r: 红色通道[0,255] :type g: unsigned char
:param g: 绿色通道[0,255] :type b: unsigned char
:param b: 蓝色通道[0,255] :type a: unsigned char
:param a: 透明通道[0,255]
"""
return _gskernel.GsColor_FromARGB(r, g, b, a)
@staticmethod
def FromARGBF(r: 'float', g: 'float', b: 'float', a: 'float'=1.0) -> "GsColor":
r"""
从浮点R,G,B,A通道构造 :type r: float
:param r: 红色通道[0,1] :type g: float
:param g: 绿色通道[0,1] :type b: float
:param b: 蓝色通道[0,1] :type a: float
:param a: 透明通道[0,1]
"""
return _gskernel.GsColor_FromARGBF(r, g, b, a)
@staticmethod
def FromColor(rhs: 'GsColor', a: 'unsigned char') -> "GsColor":
r""" 改变透明度构造已有颜色"""
return _gskernel.GsColor_FromColor(rhs, a)
@staticmethod
def FromColorF(rhs: 'GsColor', a: 'float') -> "GsColor":
r""" 改变透明度构造已有颜色"""
return _gskernel.GsColor_FromColorF(rhs, a)
@staticmethod
def FromName(strName: 'char const *') -> "GsColor":
r""" 从颜色名称构造"""
return _gskernel.GsColor_FromName(strName)
def Name(self) -> "GsString":
r""" 颜色的名称,如果存在的话"""
return _gskernel.GsColor_Name(self)
def IsKnownColor(self) -> "bool":
r""" 是否是已知的颜色。"""
return _gskernel.GsColor_IsKnownColor(self)
@staticmethod
def Random() -> "GsColor":
r""" 生成一个随机的颜色"""
return _gskernel.GsColor_Random()
@staticmethod
def FromHSV(h: 'float', s: 'float', v: 'float', a: 'unsigned char'=255) -> "GsColor":
r"""
从HSV颜色构造 :type h: float
:param h: Hue 色调 :type s: float
:param s: Saturation 饱和度 :type v: float
:param v: Value 亮度
"""
return _gskernel.GsColor_FromHSV(h, s, v, a)
def SetCOLORREF(self, rgb: 'unsigned int', a: 'unsigned char'=255) -> "void":
r""" 用Win32 RGB颜色设置颜色值"""
return _gskernel.GsColor_SetCOLORREF(self, rgb, a)
def ToCOLORREF(self) -> "unsigned int":
r""" 转换为win32 RGB颜色值"""
return _gskernel.GsColor_ToCOLORREF(self)
def ToHtml(self) -> "GsString":
r""" 以html的形式返回颜色值#RRGGBB"""
return _gskernel.GsColor_ToHtml(self)
def ToHtmlRGBA(self) -> "GsString":
r""" 以html的形式返回颜色值rgba(r,g,b,af);"""
return _gskernel.GsColor_ToHtmlRGBA(self)
def ToUInt(self) -> "unsigned int":
r""" 转换为32位无符号整数"""
return _gskernel.GsColor_ToUInt(self)
def ToHSV(self, hsv: 'float *') -> "bool":
r""" 转换HSV的颜色"""
return _gskernel.GsColor_ToHSV(self, hsv)
def ToInt(self) -> "int":
r""" 转换为32位符号整数"""
return _gskernel.GsColor_ToInt(self)
def SetARGBF(self, r: 'float', g: 'float', b: 'float', a: 'float'=1.0) -> "void":
r""" 根据R,G,B,A通道设置颜色值"""
return _gskernel.GsColor_SetARGBF(self, r, g, b, a)
def SetARGB(self, *args) -> "void":
r"""
*Overload 1:*
根据R,G,B,A通道设置颜色值
|
*Overload 2:*
根据ARGB值设置
"""
return _gskernel.GsColor_SetARGB(self, *args)
def SetHSV(self, h: 'float', s: 'float', v: 'float', a: 'unsigned char'=255) -> "void":
r"""
从HSV颜色设置 :type h: float
:param h: Hue 色调 :type s: float
:param s: Saturation 饱和度 :type v: float
:param v: Value 亮度
"""
return _gskernel.GsColor_SetHSV(self, h, s, v, a)
def RedF(self) -> "float":
r"""
浮点数R通道值 :rtype: float
:return: 返回R通道值[0,1]
"""
return _gskernel.GsColor_RedF(self)
def GreenF(self) -> "float":
r"""
浮点数G通道值 :rtype: float
:return: 返回G通道值[0,1]
"""
return _gskernel.GsColor_GreenF(self)
def AlphaF(self) -> "float":
r"""
浮点数A通道值 :rtype: float
:return: 返回A通道值[0,1]
"""
return _gskernel.GsColor_AlphaF(self)
def BlueF(self) -> "float":
r"""
浮点数B通道值 :rtype: float
:return: 返回B通道值[0,1]
"""
return _gskernel.GsColor_BlueF(self)
AliceBlue = _gskernel.GsColor_AliceBlue
AntiqueWhite = _gskernel.GsColor_AntiqueWhite
Aqua = _gskernel.GsColor_Aqua
Aquamarine = _gskernel.GsColor_Aquamarine
Azure = _gskernel.GsColor_Azure
Beige = _gskernel.GsColor_Beige
Bisque = _gskernel.GsColor_Bisque
Black = _gskernel.GsColor_Black
BlanchedAlmond = _gskernel.GsColor_BlanchedAlmond
Blue = _gskernel.GsColor_Blue
BlueViolet = _gskernel.GsColor_BlueViolet
Brown = _gskernel.GsColor_Brown
BurlyWood = _gskernel.GsColor_BurlyWood
CadetBlue = _gskernel.GsColor_CadetBlue
Chartreuse = _gskernel.GsColor_Chartreuse
Chocolate = _gskernel.GsColor_Chocolate
Coral = _gskernel.GsColor_Coral
CornflowerBlue = _gskernel.GsColor_CornflowerBlue
Cornsilk = _gskernel.GsColor_Cornsilk
Crimson = _gskernel.GsColor_Crimson
Cyan = _gskernel.GsColor_Cyan
DarkBlue = _gskernel.GsColor_DarkBlue
DarkCyan = _gskernel.GsColor_DarkCyan
DarkGoldenrod = _gskernel.GsColor_DarkGoldenrod
DarkGray = _gskernel.GsColor_DarkGray
DarkGreen = _gskernel.GsColor_DarkGreen
DarkKhaki = _gskernel.GsColor_DarkKhaki
DarkMagenta = _gskernel.GsColor_DarkMagenta
DarkOliveGreen = _gskernel.GsColor_DarkOliveGreen
DarkOrange = _gskernel.GsColor_DarkOrange
DarkOrchid = _gskernel.GsColor_DarkOrchid
DarkRed = _gskernel.GsColor_DarkRed
DarkSalmon = _gskernel.GsColor_DarkSalmon
DarkSeaGreen = _gskernel.GsColor_DarkSeaGreen
DarkSlateBlue = _gskernel.GsColor_DarkSlateBlue
DarkSlateGray = _gskernel.GsColor_DarkSlateGray
DarkTurquoise = _gskernel.GsColor_DarkTurquoise
DarkViolet = _gskernel.GsColor_DarkViolet
DeepPink = _gskernel.GsColor_DeepPink
DeepSkyBlue = _gskernel.GsColor_DeepSkyBlue
DimGray = _gskernel.GsColor_DimGray
DodgerBlue = _gskernel.GsColor_DodgerBlue
Feldspar = _gskernel.GsColor_Feldspar
Firebrick = _gskernel.GsColor_Firebrick
FloralWhite = _gskernel.GsColor_FloralWhite
ForestGreen = _gskernel.GsColor_ForestGreen
Fuchsia = _gskernel.GsColor_Fuchsia
Gainsboro = _gskernel.GsColor_Gainsboro
| |
is to record custom attributes that are
stored on individuals in certain kinds of experiments. Here's how you
would record the values of `ind.foo` and `ind.bar` for every individual
in the population. We write to a stream object this time to demonstrate
how to use the probe without a dataframe:
>>> import io
>>> stream = io.StringIO()
>>> probe = AttributesCSVProbe(attributes=['foo', 'bar'], stream=stream)
>>> context['leap']['generation'] = 100
>>> r = probe(test_population)
>>> print(stream.getvalue())
step,foo,bar
100,GREEN,Colorless
100,15,green
100,BLUE,ideas
100,72.81,sleep
<BLANKLINE>
"""
def __init__(self, attributes=(), stream=sys.stdout, do_dataframe=False,
             best_only=False, header=True, do_fitness=False,
             do_genome=False,
             notes=None, extra_metrics=None, job=None,
             context=context):
    """Set up the probe's column layout and, when a stream is given, its CSV writer.

    At least one of *stream* and *do_dataframe* must be enabled; otherwise the
    probe would collect data with nowhere to put it.
    """
    assert ((stream is None) or hasattr(stream, 'write'))
    self.context = context
    self.stream = stream
    self.attributes = attributes
    self.best_only = best_only
    self.do_fitness = do_fitness
    self.do_genome = do_genome
    self.notes = notes or {}
    self.extra_metrics = extra_metrics or {}
    self.job = job
    self.do_dataframe = do_dataframe

    if stream is None and not do_dataframe:
        raise ValueError(
            "Both 'stream'=None and 'do_dataframe'=False, but at least one must be enabled.")

    # Column order: job, notes, step, attributes, fitness, genome, extra metrics.
    self.fieldnames = (
        ([] if job is None else ['job'])
        + list(self.notes.keys())
        + ['step']
        + list(attributes)
        + (['fitness'] if do_fitness else [])
        + (['genome'] if do_genome else [])
        + list(self.extra_metrics.keys())
    )

    if self.do_dataframe:
        # Rows accumulate here as dicts until the `dataframe` property is read.
        self.data = []

    self.writer = None
    if stream is not None:
        # Rows are written to the stream as they are collected.
        self.writer = csv.DictWriter(
            stream, fieldnames=self.fieldnames, lineterminator='\n')
        if header:
            self.writer.writeheader()
@property
def dataframe(self):
    """Pandas DataFrame view of the collected data.

    :raises ValueError: if the probe was constructed with ``do_dataframe=False``.
    """
    if not self.do_dataframe:
        # BUG FIX: this used to say type(AttributesCSVProbe).__name__, which is
        # the *metaclass* name ("type"), not the class name the user expects.
        raise ValueError(
            'Tried to retrieve a dataframe of results, but this ' +
            f'{type(self).__name__} was initialized with dataframe=False.')
    # We create the DataFrame on demand because it's inefficient to append to
    # a DataFrame, so we only want to create it after we are done generating data.
    return pd.DataFrame(self.data, columns=self.fieldnames)
def __call__(self, population):
    """Record a row for each individual (or only the best one when
    ``best_only`` is set), then return the population unchanged so this
    probe can sit in an operator pipeline."""
    assert (population is not None)
    assert ('leap' in self.context)
    assert ('generation' in self.context['leap'])

    selected = [max(population)] if self.best_only else population
    for individual in selected:
        record = self.get_row_dict(individual)
        if self.do_dataframe:
            self.data.append(record)
        if self.writer is not None:
            self.writer.writerow(record)
    return population
def get_row_dict(self, ind):
    """Assemble one report row (a dict keyed by the probe's fieldnames) for *ind*."""
    row = {'step': self.context['leap']['generation']}

    # Custom attributes come straight off the individual's instance dict.
    for attr in self.attributes:
        try:
            row[attr] = ind.__dict__[attr]
        except KeyError:
            raise ValueError(
                'Attribute "{0}" not found in individual "{1}".'.format(
                    attr, ind.__repr__()))

    if self.job is not None:
        row['job'] = self.job
    for k, v in self.notes.items():
        row[k] = v
    if self.do_fitness:
        row['fitness'] = ind.fitness
    if self.do_genome:
        row['genome'] = str(ind.genome)

    # Extra metrics are computed from the otherwise-complete row itself.
    for name, metric in self.extra_metrics.items():
        row[name] = metric(row)
    return row
##############################
# Class PopulationMetricsPlotProbe
##############################
class PopulationMetricsPlotProbe:
    """Live matplotlib probe: plots one line per population metric.

    Each call samples every metric on the incoming population (every
    `modulo`-th generation), appends the values to the plotted lines,
    auto-expands the axes, and redraws the figure.

    :param ax: matplotlib Axes to draw on; a new figure is created when None.
    :param metrics: sequence of callables, each mapping a population to a scalar.
    :param xlim: initial horizontal axis bounds (expanded as data grows).
    :param ylim: initial vertical axis bounds (expanded as data grows).
    :param modulo: sample every `modulo`-th generation.
    :param title: figure title.
    :param x_axis_value: zero-arg callable supplying the x coordinate;
        defaults to the current generation read from *context*.
    :param context: context object queried for the current generation.
    """

    def __init__(self, ax=None,
                 metrics=None,
                 xlim=(0, 100), ylim=(0, 1), modulo=1, title='Population Metrics',
                 x_axis_value=None, context=context):
        if ax is None:
            _, ax = plt.subplots()
        self.metrics = metrics
        self.modulo = modulo

        # x-axis defaults to generation
        if x_axis_value is None:
            x_axis_value = lambda: context['leap']['generation']
        self.x_axis_value = x_axis_value

        self.context = context

        # Create an empty line for each metric
        self.x = np.array([])
        self.y = [ np.array([]) for _ in range(len(metrics)) ]
        for _ in range(len(metrics)):
            ax.plot([], [])

        # Set axis limits, and some variables we'll use for real-time scaling
        ax.set_ylim(ylim)
        ax.set_xlim(xlim)
        self.ax = ax
        self.left, self.right = xlim
        self.bottom, self.top = ylim
        plt.title(title)

    def __call__(self, population):
        """Sample the metrics and redraw; returns the population unchanged so
        the probe can sit in an operator pipeline."""
        assert (population is not None)
        assert ('leap' in self.context)
        assert ('generation' in self.context['leap'])
        step = self.context['leap']['generation']

        if step % self.modulo == 0:
            self.x = np.append(self.x, self.x_axis_value())
            for i, m in enumerate(self.metrics):
                self.y[i] = np.append(self.y[i], m(population))
                line = self.ax.lines[i]
                line.set_xdata(self.x)
                line.set_ydata(self.y[i])

            self.__rescale_ax()
            self.ax.figure.canvas.draw()
            plt.pause(0.000001)
            #plt.ion() # XXX Not sure this is needed
        return population

    def __rescale_ax(self):
        # Expand (never shrink) the axes to cover all recorded data.
        # NOTE(review): np.min/np.max over self.y treat the list of per-metric
        # arrays as one 2-D array; this assumes every metric has the same
        # number of samples — true here, since they are appended together.
        if np.min(self.x) < self.left:
            self.ax.set_xlim(left=np.min(self.x))
        if np.max(self.x) > self.right:
            self.ax.set_xlim(right=np.max(self.x))
        if np.min(self.y) < self.bottom:
            self.ax.set_ylim(bottom=np.min(self.y))
        if np.max(self.y) > self.top:
            self.ax.set_ylim(top=np.max(self.y))
##############################
# Function pairwise_distance_metric()
##############################
def pairwise_squared_distance_metric(population: list):
    """Computes the genetic diversity of a population by considering the
    sum of squared Euclidean distances between individual genomes.

    We compute this in :math:`O(n)` by writing the sum in terms of
    distance from the population centroid :math:`c`:

    .. math::

        \\mathcal{D}(\\text{population}) = \\sum_{i=1}^n \\sum_{j=1}^n \\| x_i - x_j \\|^2 = 2n \\sum_{i=1}^n \\| x_i - c \\|^2
    """
    # Stack all genomes into one (n, L) matrix and subtract the centroid.
    genomes_matrix = np.stack([ind.genome for ind in population])
    deviations = genomes_matrix - genomes_matrix.mean(axis=0)
    # ||x_i - c||^2 for each individual, then the closed-form total.
    squared_norms = np.linalg.norm(deviations, axis=1) ** 2
    return 2 * len(population) * squared_norms.sum()
##############################
# Function sum_of_variances_metric()
##############################
def sum_of_variances_metric(population: list):
    """Computes the genetic diversity of a population as the sum, over genome
    positions, of the population variance at that position.

    This is a so-called "column-wise" metric, in the sense that it considers
    each element of the solution vectors independently.
    """
    # (n, L) matrix of genomes; variance taken down each column.
    genomes_matrix = np.stack([ind.genome for ind in population])
    return np.var(genomes_matrix, axis=0).sum()
##############################
# Function num_fixated_metrics()
##############################
def num_fixated_metric(population: list):
    """Computes the genetic diversity of the population by counting the number
    of genome positions whose variance across the population is (numerically)
    zero — i.e. genes that have become fixated.

    This is a so-called "column-wise" metric, in the sense that it considers
    each element of the solution vectors independently.
    """
    genomes_matrix = np.stack([ind.genome for ind in population])
    per_position_variance = np.var(genomes_matrix, axis=0)
    return int(np.count_nonzero(np.isclose(per_position_variance, 0)))
##############################
# Class FitnessPlotProbe
##############################
class FitnessPlotProbe(PopulationMetricsPlotProbe):
"""
Measure and plot a population's fitness trajectory.
:param Axes ax: Matplotlib axes to plot to (if `None`, a new figure will
be created).
:param xlim: Bounds of the horizontal axis.
:type xlim: (float, float)
:param ylim: Bounds of the vertical axis.
:type ylim: (float, float)
:param int modulo: take and plot a measurement every `modulo` steps (
default 1).
:param title: title to print on the plot
:param x_axis_value: optional function to define what value gets plotted
on the x axis. Defaults to pulling the 'generation' value out of the
default `context` object.
:param context: set a context object to query for the current generation.
Defaults to the standard `leap_ec.context` object.
Attach this probe to matplotlib :class:`Axes` and then insert it into an
EA's operator pipeline.
>>> import matplotlib.pyplot as plt
>>> from leap_ec.probe import FitnessPlotProbe
>>> from leap_ec.representation import Representation
>>> f = plt.figure() # Setup a figure to plot to
>>> plot_probe = FitnessPlotProbe(ylim=(0, 70), ax=plt.gca())
>>> # Create an algorithm that contains the probe in the operator pipeline
>>> from leap_ec.individual import Individual
>>> from leap_ec.decoder import IdentityDecoder
>>> from leap_ec import ops
>>> from leap_ec.real_rep.problems import SpheroidProblem
>>> from leap_ec.real_rep.ops import mutate_gaussian
>>> from leap_ec.real_rep.initializers import create_real_vector
>>> from leap_ec.algorithm import generational_ea
>>> l = 10
>>> pop_size = 10
>>> ea = generational_ea(max_generations=100, pop_size=pop_size,
... problem=SpheroidProblem(maximize=False),
...
... representation=Representation(
... individual_cls=Individual,
... decoder=IdentityDecoder(),
... initialize=create_real_vector(bounds=[[-5.12, 5.12]] * l)
... ),
...
... pipeline=[
... plot_probe, # Insert the probe into the pipeline like so
... ops.tournament_selection,
... ops.clone,
... mutate_gaussian(std=0.2, expected_num_mutations='isotropic'),
... ops.evaluate,
... ops.pool(size=pop_size)
... ])
>>> result = list(ea);
.. plot::
import matplotlib.pyplot as plt
from leap_ec.probe import FitnessPlotProbe
from leap_ec.representation import Representation
f = plt.figure() # Setup a figure to plot to
plot_probe = FitnessPlotProbe(ylim=(0, 70), ax=plt.gca())
# Create an algorithm that contains the probe in the operator pipeline
from leap_ec.individual import Individual
from leap_ec.decoder import IdentityDecoder
from leap_ec import ops
from leap_ec.real_rep.problems import SpheroidProblem
from leap_ec.real_rep.ops import mutate_gaussian
from leap_ec.real_rep.initializers import create_real_vector
from leap_ec.algorithm import generational_ea
l = 10
pop_size = 10
ea = generational_ea(generations=100, pop_size=pop_size,
problem=SpheroidProblem(maximize=False),
representation=Representation(
individual_cls=Individual,
decoder=IdentityDecoder(),
initialize=create_real_vector(bounds=[[-5.12, 5.12]] * l)
),
pipeline=[
plot_probe, # Insert the probe into the pipeline like so
ops.tournament_selection,
ops.clone,
mutate_gaussian(std=0.2, expected_num_mutations='isotropic'),
ops.evaluate,
ops.pool(size=pop_size)
])
result = | |
import datetime
import io
import os
import shutil
import chardet
import regex
from chardet.universaldetector import UniversalDetector
from natsort import natsort_keygen, natsorted
nkey = natsort_keygen()
def execute_proper_function(function_number, path, error_file_path):
    """Dispatch to the maintenance task selected by *function_number* (1-25).

    :param function_number: integer choosing the task to run
    :param path: root folder the task operates on
    :param error_file_path: folder where tasks write their report/error files

    Unknown numbers are silently ignored, matching the original if/elif chain.
    """
    # Tasks that only need the root path.
    single_arg_tasks = {
        2: add_precinct_name_to_folder,
        4: delete_parenthesis_if_not_multipolygon,
        18: save_files_with_utf_8,
        19: copy_wkt_files_to_sketches,
    }
    # Tasks that also write to the error/report folder.
    two_arg_tasks = {
        1: move_wkt_files_to_their_folders,
        3: move_wkt_for_files_to_main_wkt,
        5: match_wkt_folders_to_operat_folder,
        6: check_if_all_operat_from_list_matched,
        7: find_duplicate_lines_in_cdc_file,
        8: remove_from_cdc_lines_moved_to_duplicated,
        9: copy_wkt_to_corresponding_operat_folder,
        10: delete_successfuly_moved_files,
        11: cleanup_more_than_one_fit,
        12: check_for_pdf_equivalent_for_wkt,
        13: check_error_wkt_creation_date,
        14: check_wkt_structure,
        15: merge_wkt_with_wrong_structure,
        16: check_file_encoding_chardet,
        17: check_file_encoding_universal_detector,
        20: copy_wkt_files_to_required_documents,
        21: check_for_wkt_to_wrong_files,
        22: check_wkt_files_for_required_documents,
        23: check_for_any_wkt_if_main_not_exists,
        24: check_if_there_is_main_wkt,
        25: move_modernization_wkt,
    }
    if function_number in single_arg_tasks:
        single_arg_tasks[function_number](path)
    elif function_number in two_arg_tasks:
        two_arg_tasks[function_number](path, error_file_path)
def txt_list_based_or_path_based():
    """Ask the user for a folder and return the list of paths to process.

    If the entered folder contains a ``paths.txt`` file, each (stripped) line
    of that file is returned as one path. Otherwise the entered path itself is
    returned as a single-element list.

    BUG FIX: the original returned ``list(file_or_single_path)``, which splits
    the path string into a list of individual characters; the intent is a
    one-element list containing the path.
    """
    print(
        "If you want to provide list of paths, enter the path to folder "
        "where paths.txt file is.\n"
        "Otherwise just paste in main folder path."
    )
    file_or_single_path = input("> ")
    paths_file = os.path.join(file_or_single_path, "paths.txt")
    if os.path.exists(paths_file):
        with open(paths_file, "r", encoding="utf-8") as read_paths:
            return [line.strip() for line in read_paths]
    return [file_or_single_path]
def move_wkt_files_to_their_folders(path, error_file_path):
    """Move files named ``<operat>__<document>.wkt`` found under *path* into
    newly created ``<operat>/<document>/`` subfolders next to them.

    Paths that cannot be moved are appended, one per line, to
    ``wkt_not_moved.txt`` in *error_file_path*.
    """
    count = 1
    for subdir, _, files in os.walk(path):
        for file in natsorted(files):
            # Robustness fix: the original crashed with IndexError on any file
            # without the "__" separator; skip such files instead.
            if "__" not in file:
                continue
            operat_part = file.split("__")[0]
            document_part = (file.split("__")[1]).split(".wkt")[0]
            source = os.path.join(subdir, file)
            folder = os.path.join(subdir, operat_part, document_part)
            destination = os.path.join(folder, file)
            # exist_ok avoids the racy exists()-then-makedirs of the original.
            os.makedirs(folder, exist_ok=True)
            try:
                shutil.move(source, destination)
                print(count)
                count += 1
            except OSError:  # was a bare except: — don't swallow KeyboardInterrupt
                errors_file = os.path.join(error_file_path, "wkt_not_moved.txt")
                with open(errors_file, "a", encoding="utf-8") as write_errors:
                    # "\n" added so each failed path lands on its own line,
                    # matching the other report files in this script.
                    write_errors.write(source + "\n")
def add_precinct_name_to_folder(path):
    """Rename precinct folders matching ``*2.00..`` by appending the precinct
    name looked up in a user-supplied ``precincts_dict.txt``.

    The dictionary file holds tab-separated lines ``<precinct name>\\t<folder name>``.

    FIX: the original prompted for the dictionary's location and re-read the
    whole file for every matching folder; the dictionary is now loaded once,
    lazily, on the first match (so no prompt appears when nothing matches).
    """
    precinct_by_folder = None  # loaded lazily on the first matching folder
    for subdir, dirs, _ in os.walk(path):
        dirs.sort(key=nkey)
        if not regex.match(r".*2\.00..$", subdir):
            continue
        if precinct_by_folder is None:
            precincts_dict = os.path.join(
                input("\nWhere is precincts_dict.txt?\n> "),
                "precincts_dict.txt",
            )
            precinct_by_folder = {}
            with open(precincts_dict, "r", encoding="utf-8") as dictionary:
                for line in dictionary:
                    parts = line.split("\t")
                    if len(parts) >= 2:
                        # key: folder name (second column), value: precinct name
                        precinct_by_folder[parts[1].split("\n")[0]] = parts[0]
        precinct = precinct_by_folder.get(os.path.basename(subdir))
        if precinct is not None:
            # NOTE: we rename a directory that os.walk is still traversing;
            # this level has already been listed, so iteration continues safely.
            os.rename(subdir, subdir + "_" + precinct)
def move_wkt_for_files_to_main_wkt(path, error_file_path):
    """For every wkt file in the operat tree, find document wkt files in a
    second tree whose normalized name matches, and move them into that
    operat folder.

    Failures are logged one path per line to ``documents_wkt_not_moved.txt``.

    FIX: the inner os.walk used to shadow the outer loop's ``subdir``/``file``
    variables; the bare ``except:`` is narrowed; the error log now gets a
    newline per entry like the script's other report files.
    """
    count = 1
    operats_path = input("Path to operat wkt folder:\n> ")
    documents_path = input("Path to documents wkt folder:\n> ")
    for operat_subdir, operat_dirs, operat_files in os.walk(operats_path):
        operat_dirs.sort(key=nkey)
        for operat_file in natsorted(operat_files):
            operat_path = operat_subdir
            document_path = operat_file.split(".wkt")[0]
            for doc_subdir, _, doc_files in os.walk(documents_path):
                for doc_file in natsorted(doc_files):
                    # Strip the "_N-..." suffix to recover the base document name.
                    new_name = regex.sub(
                        r"^(.+)?(?=(_.-|_..-)|_...-).+", "\\1", doc_file
                    )
                    if new_name.endswith(".wkt"):
                        new_name = new_name.split(".wkt")[0]
                    if new_name == document_path:
                        from_here = os.path.join(doc_subdir, doc_file)
                        move_there = os.path.join(operat_path, doc_file)
                        try:
                            shutil.move(from_here, move_there)
                            print(count)
                            count += 1
                        except OSError:  # was a bare except:
                            errors_file = os.path.join(
                                error_file_path, "documents_wkt_not_moved.txt"
                            )
                            with open(
                                errors_file, "a", encoding="utf-8"
                            ) as write_errors:
                                write_errors.write(from_here + "\n")
def delete_parenthesis_if_not_multipolygon(path):
    """Rewrite every ``.wkt`` file under *path*, stripping one level of
    redundant parentheses from POLYGON definitions.

    NOTE(review): only lines matching ``^POLYGON`` survive the rewrite — any
    other lines are dropped from the file. Presumably these files contain
    nothing but POLYGON rows; confirm before running on mixed-content files.
    """
    for subdir, dirs, files in os.walk(path):
        for file in files:
            if not file.endswith(".wkt"):
                continue
            wkt_file = os.path.join(subdir, file)
            kept_lines = []
            with open(wkt_file, "r", encoding="utf-8") as wkt:
                for line in wkt:
                    if regex.match(r"^POLYGON.+", line):
                        # POLYGON(((...))) -> POLYGON((...))
                        kept_lines.append(
                            regex.sub(r"(^.+)\((\(\(.+\)\))\)", "\\1\\2", line)
                        )
            if kept_lines:
                with open(wkt_file, "w", encoding="utf-8") as update_wkt_file:
                    update_wkt_file.write("".join(kept_lines))
def match_wkt_folders_to_operat_folder(path, error_file_path):
    """Pair paths from ``wkt_list.txt`` with paths from ``pdf_list.txt`` whose
    last two folder names plus file name coincide, appending each pair
    (tab-separated) to ``matched_wkt_to_pdf.txt`` in *error_file_path*.

    PERFORMANCE FIX: the pdf list is now read once up front; the original
    re-opened and re-read pdf_list.txt for every single wkt line.
    """
    def last_three_components(full_path):
        # grandparent folder + parent folder + file name, concatenated
        return (
            os.path.basename(os.path.dirname(os.path.dirname(full_path)))
            + os.path.basename(os.path.dirname(full_path))
            + os.path.basename(full_path)
        )

    pdf_files_list_path = os.path.join(error_file_path, "pdf_list.txt")
    with open(pdf_files_list_path, "r", encoding="utf-8") as pdf_list:
        pdf_entries = [
            (stripped, last_three_components(stripped))
            for stripped in (line.strip() for line in pdf_list)
        ]

    wkt_files_list_path = os.path.join(error_file_path, "wkt_list.txt")
    matched_files_list = os.path.join(error_file_path, "matched_wkt_to_pdf.txt")
    count = 1
    with open(wkt_files_list_path, "r", encoding="utf-8") as from_here:
        for line in from_here:
            print(count)
            count += 1
            full_line_wkt = line.strip()
            name_wkt = last_three_components(full_line_wkt)
            for full_line_pdf, name_pdf in pdf_entries:
                if name_pdf == name_wkt:
                    print(name_wkt)
                    print("\t" + name_pdf)
                    with open(matched_files_list, "a", encoding="utf-8") as what_to_what:
                        what_to_what.write(full_line_wkt + "\t" + full_line_pdf + "\n")
def check_if_all_operat_from_list_matched(path, error_file_path):
    """Write to ``unmatched_wkt.txt`` every path from ``wkt_list.txt`` that
    does not appear as the first (wkt) column of ``matched_wkt_to_pdf.txt``.

    Like the original, the report file is only created when there is at least
    one unmatched entry; unlike the original it is opened once, not per line.
    """
    matched_lines = set()
    with open(
        os.path.join(error_file_path, "matched_wkt_to_pdf.txt"),
        "r",
        encoding="utf-8",
    ) as cdc:
        for line in cdc:
            matched_lines.add(line.split("\t")[0])

    unmatched_lines = []
    with open(
        os.path.join(error_file_path, "wkt_list.txt"), "r", encoding="utf-8",
    ) as from_here:
        for line in from_here:
            if line.strip() not in matched_lines:  # idiomatic `not in`
                unmatched_lines.append(line.strip() + "\n")

    if unmatched_lines:
        with open(
            os.path.join(error_file_path, "unmatched_wkt.txt"),
            "a",
            encoding="utf-8",
        ) as unmatched:
            unmatched.writelines(unmatched_lines)
def find_duplicate_lines_in_cdc_file(path, error_file_path):
    """Scan ``matched_wkt_to_pdf.txt`` for runs of consecutive lines sharing
    the same first (wkt) column, and append every line of each run — including
    the first — to ``several_matches.txt``.

    FIXES: Polish/cryptic locals renamed; output written once with an explicit
    utf-8 encoding (the original re-opened the file per line with the platform
    default encoding). Like the original, no file is created when there are no
    duplicates, and an empty key always restarts a run.
    """
    duplicate_lines = []
    previous_key = ""
    first_line = ""
    first_pending = False
    with open(
        os.path.join(error_file_path, "matched_wkt_to_pdf.txt"),
        "r",
        encoding="utf-8",
    ) as cdc:
        for line in cdc:
            key = line.split("\t")[0]
            if previous_key == "" or key != previous_key:
                # start of a (potential) new run
                first_line = line
                first_pending = True
                previous_key = key
                continue
            # duplicate of the previous key: emit the run's first line once
            if first_pending:
                duplicate_lines.append(first_line)
                first_pending = False
            duplicate_lines.append(line)

    if duplicate_lines:
        with open(
            os.path.join(error_file_path, "several_matches.txt"),
            "a",
            encoding="utf-8",
        ) as duplicates_file:
            duplicates_file.writelines(duplicate_lines)
def remove_from_cdc_lines_moved_to_duplicated(path, error_file_path):
    """Copy ``matched_wkt_to_pdf.txt`` to ``new_matched_wkt_to_pdf.txt``,
    dropping every line listed in ``several_matches.txt``.

    BUG FIX: the output used to be written to "new_matche_wkt_to_pdf.txt"
    (missing the 'd'), while copy_wkt_to_corresponding_operat_folder reads
    "new_matched_wkt_to_pdf.txt" — so the consumer never saw the file.
    The output is also written once, with an explicit utf-8 encoding.
    """
    with open(
        os.path.join(error_file_path, "several_matches.txt"),
        "r",
        encoding="utf-8",
    ) as several_matches:
        remove_them = set(several_matches)

    kept_lines = []
    with open(
        os.path.join(error_file_path, "matched_wkt_to_pdf.txt"),
        "r",
        encoding="utf-8",
    ) as cdc:
        for line in cdc:
            if line not in remove_them:
                kept_lines.append(line)

    if kept_lines:
        with open(
            os.path.join(error_file_path, "new_matched_wkt_to_pdf.txt"),
            "a",
            encoding="utf-8",
        ) as ncdc:
            ncdc.writelines(kept_lines)
def copy_wkt_to_corresponding_operat_folder(path, error_file_path):
    """Copy each matched wkt file into its operat (PDF) folder.

    Reads '<wkt dir>\t<destination dir>' pairs from new_matched_wkt_to_pdf.txt.
    Successful copies are logged to can_be_removed.txt, failures to
    copying_errors.txt, and pre-existing destinations to wkt_already_exists.txt.
    ``path`` is unused; kept for signature parity with the sibling steps.
    """
    count = 1
    with io.open(
        os.path.join(error_file_path, "new_matched_wkt_to_pdf.txt"),
        "r",
        encoding="utf-8",
    ) as cdc:
        for line in cdc:
            print(count)
            count += 1
            from_here = line.split("\t")[0]
            move_there = (line.split("\t")[1]).strip()
            for root, _, files in os.walk(from_here):
                for file in natsorted(files):
                    if not file.upper().endswith(".WKT"):
                        continue
                    # BUGFIX: join against the walked root, not the top
                    # directory -- the old code built wrong paths for wkt
                    # files in nested subdirectories.
                    wkt_file = os.path.join(root, file)
                    destination = os.path.join(move_there, file)
                    if os.path.exists(destination):
                        with io.open(
                            os.path.join(
                                error_file_path, "wkt_already_exists.txt"
                            ),
                            "a",
                            encoding="utf-8",
                        ) as already_exists:
                            already_exists.write(wkt_file + "\n")
                        continue
                    try:
                        shutil.copy2(wkt_file, destination)
                    except OSError:
                        # Was a bare except; OSError covers copy failures
                        # (shutil.Error subclasses it) without swallowing
                        # KeyboardInterrupt/SystemExit.
                        with io.open(
                            os.path.join(
                                error_file_path, "copying_errors.txt"
                            ),
                            "a",
                            encoding="utf-8",
                        ) as errors:
                            errors.write(wkt_file + "\n")
                    else:
                        with io.open(
                            os.path.join(
                                error_file_path, "can_be_removed.txt"
                            ),
                            "a",
                            encoding="utf-8",
                        ) as remove_them:
                            remove_them.write(wkt_file + "\n")
def delete_successfuly_moved_files(path, error_file_path):
    """Delete every file listed in can_be_removed.txt.

    Paths that cannot be deleted are logged to cant_be_deleted.txt.
    ``path`` is unused; kept for signature parity with the sibling steps.
    """
    count = 1
    with open(
        os.path.join(error_file_path, "can_be_removed.txt"),
        "r",
        encoding="utf-8",
    ) as remove_them:
        for line in remove_them:
            print(count)
            count += 1
            try:
                os.remove(line.strip())
            except OSError:
                # Was a bare except; os.remove only raises OSError, so this
                # no longer hides KeyboardInterrupt/SystemExit.
                with open(
                    os.path.join(error_file_path, "cant_be_deleted.txt"),
                    "a",
                    encoding="utf-8",
                ) as errors:
                    errors.write(line)
def cleanup_more_than_one_fit(path, error_file_path):
    """Filter several_matches.txt down to lines whose extracted precinct name
    occurs in the matched PDF path, writing them to several_matches_cleared.txt.

    ``path`` is unused; kept for signature parity with the sibling steps.
    """
    src = os.path.join(error_file_path, "several_matches.txt")
    dst = os.path.join(error_file_path, "several_matches_cleared.txt")
    with open(src, "r", encoding="utf-8") as several_matches:
        for line in several_matches:
            # Pull the precinct fragment out of the wkt path (text between the
            # 4-digit year and the tab separator).
            precinct = regex.sub(
                r"^.+do_operatow.+\.[0-9][0-9][0-9][0-9](.+?)\t.+",
                "\\1",
                line,
            ).strip()
            pdf_part = line.split("\t")[1]
            if precinct.upper() in pdf_part.upper():
                with io.open(dst, "a", encoding="utf-8") as clean:
                    clean.write(line)
def check_for_pdf_equivalent_for_wkt(path, error_file_path):
    """Log every *.wkt file under ``path`` that lacks a sibling *.PDF and does
    not already sit in a folder named after itself.

    Hits are appended to wkt_without_pdf_equivalent.txt.
    """
    report = os.path.join(error_file_path, "wkt_without_pdf_equivalent.txt")
    counter = 1
    for subdir, dirs, files in os.walk(path):
        dirs.sort(key=nkey)
        for file in natsorted(files):
            if not file.endswith(".wkt"):
                continue
            wkt_file = os.path.join(subdir, file)
            print(str(counter))
            counter += 1
            stem = file.split(".wkt")[0]
            has_pdf = os.path.exists(wkt_file.split(".wkt")[0] + ".PDF")
            folder_named_after_file = os.path.basename(subdir) == stem
            if not has_pdf and not folder_named_after_file:
                with io.open(report, "a", encoding="utf-8") as missing_equivalent:
                    missing_equivalent.write(wkt_file + "\n")
def check_error_wkt_creation_date(path, error_file_path):
    """Record a 'path<TAB>YYYY-MM-DD' line per file listed in
    wkt_without_pdf_equivalent.txt into wkt_creation_date.txt.

    NOTE: despite the names, ``os.path.getmtime`` yields the *modification*
    time, not the creation time.
    ``path`` is unused; kept for signature parity with the sibling steps.
    """
    src = os.path.join(error_file_path, "wkt_without_pdf_equivalent.txt")
    with open(src, "r", encoding="utf-8") as missing_equivalent:
        for line in missing_equivalent:
            wkt_file_path = line.strip()
            # date.fromtimestamp(...).isoformat() replaces the old
            # str(datetime).split(" ")[0] round-trip; same output.
            mtime_date = datetime.date.fromtimestamp(
                os.path.getmtime(wkt_file_path)
            ).isoformat()
            with open(
                os.path.join(error_file_path, "wkt_creation_date.txt"),
                "a",
                encoding="utf-8",
            ) as wkt_date:
                wkt_date.write(wkt_file_path + "\t" + mtime_date + "\n")
def check_wkt_structure(path, error_file_path):
    """Log wkt files containing a line that starts with neither POLYGON nor
    MULTI*, appending the file path to wkt_wrong_structure.txt.

    Scanning of a file stops at its first offending line.
    """
    report = os.path.join(error_file_path, "wkt_wrong_structure.txt",)
    file_no = 1
    bad_count = 0
    for subdir, dirs, files in os.walk(path):
        dirs.sort(key=nkey)
        for file in natsorted(files):
            if not file.endswith(".wkt"):
                continue
            print(file_no)
            file_no += 1
            wkt_file = os.path.join(subdir, file)
            with open(wkt_file, "r", encoding="utf-8") as check_this:
                for line in check_this:
                    if regex.match(r"^POLYGON|MULTI.*", line.upper()):
                        continue
                    bad_count += 1
                    print("\t\t" + str(bad_count))
                    with open(report, "a") as errors:
                        errors.write(wkt_file + "\n")
                    break
def merge_wkt_with_wrong_structure(path, error_file_path):
    """Concatenate the files listed in wkt_wrong_structure.txt into
    merged_wkt_wrong_structure.txt, prefixing each with a separator header.

    Empty listed files produce no output at all (header included), matching
    the original behavior. ``path`` is unused; kept for signature parity.
    """
    merged_path = os.path.join(
        error_file_path, "merged_wkt_wrong_structure.txt"
    )
    with open(
        os.path.join(error_file_path, "wkt_wrong_structure.txt"),
        "r",
        encoding="utf-8",
    ) as wrong_structure:
        # `listed` used to be `line`, shadowed by the inner loop variable.
        for listed in wrong_structure:
            wkt_file_path = listed.strip()
            out = None
            try:
                with open(wkt_file_path, "r", encoding="utf-8") as wkt_file:
                    for content_line in wkt_file:
                        if out is None:
                            # Open the merged file once per source file
                            # (was re-opened for every single line).
                            out = open(merged_path, "a", encoding="utf-8")
                            out.write(
                                "\n~~~~~~~~~~~~~~~~~~~~~~~~~\n"
                                + wkt_file_path
                                + "\n\n"
                            )
                        out.write(content_line)
            finally:
                if out is not None:
                    out.close()
def check_file_encoding_chardet(path, error_file_path):
    """Detect each wkt file's encoding with chardet and append
    '<path>\t<encoding>' to files_encoding.txt.

    ``path`` is the tree to scan; ``error_file_path`` holds the report.
    """
    count = 1
    for subdir, dirs, files in os.walk(path):
        dirs.sort(key=nkey)
        for file in natsorted(files):
            if file.endswith(".wkt"):
                print(count)
                count += 1
                wkt_file_path = os.path.join(subdir, file)
                # Close the handle deterministically (was open(...).read(),
                # leaking the file object until GC).
                with open(wkt_file_path, "rb") as raw:
                    rawdata = raw.read()
                result = chardet.detect(rawdata)
                print(result)
                charenc = result["encoding"]
                # NOTE(review): chardet may report encoding=None for
                # undecidable input, which would make the concat below raise
                # TypeError -- confirm inputs are never empty/binary.
                with open(
                    os.path.join(error_file_path, "files_encoding.txt"),
                    "a",
                    encoding="utf-8",
                ) as encoding:
                    encoding.write(wkt_file_path + "\t" + charenc + "\n")
def check_file_encoding_universal_detector(path, error_file_path):
    """Flag files whose detected encoding is not utf-8.

    Feeds each file to chardet's UniversalDetector incrementally and appends
    '<path>\t<detector result>' to files_encoding.txt for non-utf-8 hits.
    Note: despite the local variable name, this scans ``.xml`` files.
    """
    counter = 1
    detector = UniversalDetector()
    report = os.path.join(error_file_path, "files_encoding.txt")
    for subdir, dirs, files in os.walk(path):
        dirs.sort(key=nkey)
        for file in natsorted(files):
            if not file.endswith(".xml"):
                continue
            print(counter)
            counter += 1
            wkt_file_path = os.path.join(subdir, file)
            detector.reset()
            with open(wkt_file_path, "rb") as check_it:
                for chunk in check_it:
                    detector.feed(chunk)
                    if detector.done:
                        break
            detector.close()
            if "utf-8" not in str(detector.result):
                with open(report, "a", encoding="utf-8") as encoding:
                    encoding.write(
                        wkt_file_path + "\t" + str(detector.result) + "\n"
                    )
def save_files_with_utf_8(path):
for subdir, dirs, files in os.walk(path):
dirs.sort(key=nkey)
for file in natsorted(files):
from_here = os.path.join(subdir, file)
with open(from_here, "r", encoding="utf-16") as | |
import os
import datetime
import hashlib
import hmac
import logging
import re
import uuid
from collections import namedtuple
from datetime import date, timedelta
from django.contrib.sites.models import Site
from django.core import validators
from django.core.urlresolvers import reverse
from django.conf import settings
from django.dispatch import receiver
from django.db import models, connection
from django.db.models import Q, Avg
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_init, post_save
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import UserManager, Permission
from django.contrib.contenttypes import fields as generic
from django.contrib.auth.signals import user_logged_in
from django.utils.timezone import now
from django_pgjson.fields import JsonBField
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.signals import social_account_added
from allauth.account.adapter import get_adapter
from model_utils.managers import QueryManager
from rest_framework.renderers import JSONRenderer
from djorm_pgarray.fields import ArrayField
from monthdelta import monthdelta
from model_utils import FieldTracker
from linkedin import linkedin
from linkedin.exceptions import LinkedInError
from notifications.signals import notify
from phonenumber_field.modelfields import PhoneNumberField
from med_social.utils import (get_current_tenant, this_month, this_week,
humanized_datetime, today, humanized_date)
from med_social.auth_avatar import copy_avatar
from med_social.constants import OnboardingConstantsMixin as ONB
from med_social.constants import UserKindConstantsMixin as KIND
from med_social.libs.mixins import BaseUser
from med_social.utils import track_event
from projects.models import Project, ProposedResourceStatus
from reviews.models import Review
from reviews.db.fields import ReviewsField
from vendors.models import ProcurementContact
from .tasks import first_time_login_task, signup_task
logger = logging.getLogger(__name__)
class ActiveUserManager(UserManager):
    """User manager whose default queryset hides soft-deleted users."""
    def get_queryset(self):
        # Filter out soft-deleted rows by default.
        base_qs = super(ActiveUserManager, self).get_queryset()
        return base_qs.filter(is_deleted=False)
    def all_with_deleted(self):
        """Escape hatch: every user, including soft-deleted ones."""
        return super(ActiveUserManager, self).get_queryset().all()
def _get_resume_upload_path(instance, filename):
    """Build the protected storage path for a user's resume upload.

    Layout: <PROTECTED_ROOT>/<tenant_id>/r/<user_id>/<username>-resume.<ext>
    """
    tenant = connection.tenant
    t = "{tenant_id}".format(tenant_id=tenant.id)
    u = "{user_id}".format(user_id=instance.id)
    _, ext = os.path.splitext(filename)
    # BUGFIX: splitext keeps the leading dot, so the old format produced
    # 'name-resume..pdf'; strip the dot before re-adding it.
    fname = "{username}-resume.{ext}".format(username=instance.username,
                                             ext=ext.lstrip('.'))
    return os.path.join(settings.PROTECTED_ROOT, t, 'r', u, fname)
class ReviewableMixin(models.Model):
    """Abstract mixin aggregating Review objects for a reviewable model.

    Reviews may target either the object itself or any of its proposed
    resources (``self.proposed``); both sources are combined here.
    """
    reviews = ReviewsField(Review)
    class Meta:
        abstract = True
    def get_all_reviews(self):
        """Return reviews attached to this object or any of its proposals."""
        proposed = self.proposed.values_list('id', flat=True)
        proposed_ct = ContentType.objects.get_for_model(self.proposed.model)
        user_ct = ContentType.objects.get_for_model(self)
        # Match reviews pointing at any proposal OR at the object itself.
        q = Q(content_type=proposed_ct, object_id__in=proposed)
        q = q | Q(content_type=user_ct, object_id=self.id)
        return Review.objects.filter(q)
    def __Reviews__denorm_reviews__(self, commit=True):
        # Denormalize average score and review count onto the model.
        # NOTE(review): the unusual name is presumably the hook expected by
        # the ReviewsField machinery -- confirm against reviews.db.fields.
        reviews = self.get_all_reviews()
        self.avg_score = reviews.aggregate(
            Avg('score'))['score__avg']
        self.reviews_count = reviews.count()
        if commit:
            self.save(update_fields=('avg_score', 'reviews_count'))
def _default_user_meta():
return {}
class User(BaseUser, ReviewableMixin, ONB, KIND):
search_template = 'users/search_result.html'
NONE = 0
KIND_CLIENT = 1
KIND_VENDOR = 2
KIND_CHOICES = (
(KIND_CLIENT, 'Client',),
(KIND_VENDOR, 'Vendor',),
)
ROLE_SOFTWARE_DEV = 1
ROLE_BUSINESS_ANALYSIS = 2
ROLE_ACCOUNT_MANAGEMENT = 3
ROLE_VENDOR_MANAGEMENT = 4
ROLE_PROJECT_MANAGEMENT = 4
ROLE_CHOICES = (
(ROLE_SOFTWARE_DEV, 'Software Development'),
(ROLE_BUSINESS_ANALYSIS, 'Business Analysis'),
(ROLE_ACCOUNT_MANAGEMENT, 'Account Management'),
(ROLE_VENDOR_MANAGEMENT, 'Vendor Management'),
(ROLE_PROJECT_MANAGEMENT, 'Project Management'),
)
PERMISSIONS = (
('invite_user', 'Can invite user', Permission.ALL, False),
('delete_user', 'Can delete user', None, True),
('manage_features', 'Can manage features', Permission.CLIENT, True),
)
# manage_features: should be on customers but customers table is only
# synched to public schema so we have put it here instead
_cached_following = []
communities = models.ManyToManyField('customers.Customer', related_name='users', blank=True)
username = models.CharField(
_('username'), max_length=30, unique=True,
help_text=_('30 characters or fewer. Letters, numbers, _ and . (dot) characters'),
validators=[
validators.RegexValidator(re.compile('^%s$' % '^\w+[\w+.-]*'), _('Enter a valid username.'), 'invalid')
], default=None)
email = models.EmailField(_('email address'), max_length=254, unique=True, blank=False, null=False)
bio = models.TextField(default='', max_length=140)
meta = JsonBField(default=_default_user_meta, blank=True)
invited_by = models.ManyToManyField(
'self', related_name='invited_users', through='UserInvitation', symmetrical=False)
# Relationships
vendor = models.ForeignKey('vendors.Vendor', blank=True, null=True, related_name='users')
categories = models.ManyToManyField('categories.Category', related_name='users')
divisions = models.ManyToManyField(
'divisions.Division', through='UserDivisionRel', related_name='users', blank=True)
# State fields
next_available = models.DateField(blank=True, null=True, editable=False, default=None)
is_deleted = models.BooleanField(_('delete'), default=False)
is_staffable = models.BooleanField(
'Staffable', default=True, help_text=_('Can this person be staffed on projects?'))
kind = models.PositiveSmallIntegerField(choices=KIND_CHOICES, default=KIND_VENDOR)
pending_setup_steps = ArrayField(verbose_name=_('Pending steps'), default='{1, 3}', dbtype="int", dimension=1)
# pending_setup_steps = ArrayField(
# verbose_name=_('Pending steps'), default=ONB.SETUP_STEPS_DEFAULT, dbtype="int", dimension=1)
first_login = models.DateTimeField(null=True, blank=True)
# Profile fields
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
headline = models.TextField(_('headline'), max_length=140, blank=True, default='')
summary = models.TextField(_('summary'), max_length=1023, blank=True, default='')
roles = models.ManyToManyField('roles.Role', related_name='users')
resume = models.FileField(null=True, upload_to=_get_resume_upload_path)
linkedin_profile_url = models.URLField(null=True)
location = models.ForeignKey('locations.Location', null=True)
skill_level = models.ForeignKey('categories.SkillLevel', null=True)
phone = PhoneNumberField(null=True, blank=True)
organization_name = models.CharField(_('organization'), max_length=124, blank=True)
tracker = FieldTracker(fields=['password'])
# generic relation reverse API enablers
notes = generic.GenericRelation('notes.Note')
metric = generic.GenericRelation('metrics.Metric')
objects = UserManager()
staffable = QueryManager(is_staffable=True)
# Notification Settings
text_rfp_new = models.BooleanField('TEXT: When a new RFP that fits my services is posted', default=True)
text_rfp_message = models.BooleanField('TEXT: When I get a question on my proposal', default=True)
text_bid_change = models.BooleanField('TEXT: When a vendor posts a bid', default=True)
text_bid_win = models.BooleanField('TEXT: When my bid wins the contract', default=True)
text_bid_lose = models.BooleanField('TEXT: When my bid loses the contract', default=True)
email_rfp_new = models.BooleanField('EMAIL: When a new RFP that fits my services is posted', default=True)
email_rfp_message = models.BooleanField('EMAIL: When I get a question on my proposal', default=True)
email_bid_change = models.BooleanField('EMAIL: When a vendor posts a bid', default=True)
email_bid_win = models.BooleanField('EMAIL: When my bid wins the contract', default=True)
email_bid_lose = models.BooleanField('EMAIL: When my bid loses the contract', default=True)
    def save(self, *args, **kwargs):
        """Normalize profile fields and reconcile onboarding state before persisting."""
        # Kind changed (client <-> vendor): reset onboarding to the new kind's steps.
        if self._orig_kind != self.kind:
            self.pending_setup_steps = self.SETUP_STEPS
        # Keep only pending steps still valid for the current kind/tenant, sorted.
        pending_set = set(self.pending_setup_steps)
        allowed_set = set(self.SETUP_STEPS)
        self.pending_setup_steps = list(allowed_set.intersection(pending_set))
        self.pending_setup_steps.sort()
        self.headline = self.headline.strip()
        self.summary = self.summary.strip()
        # Usernames are stored lowercase.
        self.username = self.username.lower()
        if self.has_joined:
            # Password changed from a non-empty previous value: the
            # password-setup step is complete.
            if (self.tracker.has_changed('password') and self.tracker.changed().get('password') and
                    self.SETUP_PASSWORD_SET in self.pending_setup_steps):
                self.pending_setup_steps.remove(self.SETUP_PASSWORD_SET)
        return super(User, self).save(*args, **kwargs)
    def __unicode__(self):
        """Display name (Python 2 string protocol)."""
        return self.get_name_display()
@staticmethod
def autocomplete_search_fields():
return ('id__iexact', 'username__icontains', 'email__icontains')
def get_name_display(self):
return self.get_full_name() or self.get_short_name() or self.email
    def get_company_display(self):
        """Company label: vendor name, own organization on the public tenant,
        otherwise the current tenant's name."""
        if self.vendor_id:
            return self.vendor.name
        else:
            if connection.tenant.is_public_instance:
                return self.organization_name
            return get_current_tenant().name
def get_suggested_clients(self):
suggested = self.meta.get('suggested_clients', [])
from vendor_profiles.models import Client
return Client.objects.filter(is_partner=True, id__in=suggested)
    def get_user_hash(self):
        """HMAC-SHA256 of the primary key, presumably for Intercom identity
        verification (judging by the constant name) -- confirm.

        SECURITY: the secret is hard-coded in source; it should live in
        settings/environment and this one should be rotated.
        NOTE(review): hmac.new with str arguments only works on Python 2;
        Python 3 requires bytes -- confirm target runtime.
        """
        INTERCOM_SECRET = 'xtsAmAxmU7kW0B9XTE1qrjmg5vJFxTJ3Fu4_9AOH'
        return hmac.new(INTERCOM_SECRET, str(self.pk), digestmod=hashlib.sha256).hexdigest()
def get_sender_line(self):
return '{} <{}>'.format(self, self.email)
    @classmethod
    def get_suggested_resources(cls, **kwargs):
        """Suggest staffable users matching the given filters and free in the
        requested date window.

        Recognized kwargs: location, skill_level, categories, role,
        start_date, end_date. With no recognized filters, returns no users.
        """
        # Map public kwarg names to ORM lookups.
        fields = {
            'location': 'location',
            'skill_level': 'skill_level',
            'categories': 'categories__in',
            'role': 'roles'
        }
        query_kwargs = {}
        for key, value in kwargs.items():
            if value and key in fields:
                query_kwargs[fields[key]] = value
        if query_kwargs:
            users = cls.objects.filter(**query_kwargs)
        else:
            users = cls.objects.none()
        # Exclude users whose existing proposals overlap the requested window:
        # proposals spanning the start, spanning the end, or lying inside it.
        start = kwargs.get('start_date')
        if start:
            users = users.exclude(proposed__start_date__lte=start, proposed__end_date__gte=start)
        end = kwargs.get('end_date')
        if end:
            users = users.exclude(proposed__start_date__lte=end, proposed__end_date__gte=end)
        if start and end:
            users = users.exclude(proposed__start_date__gte=start, proposed__start_date__lte=end)
            users = users.exclude(proposed__end_date__gte=start, proposed__end_date__lte=end)
        return users.distinct()
@classmethod
def get_management_user(cls):
return cls.objects.get(username='management')
    @property
    def SETUP_STEPS(self):
        """
        Onboarding steps applicable to this user.

        Currently the system has 3 on-boarding steps:
        SETUP_LINKEDIN_FETCH = 1
        - fetch data from the linkedin
        - this step gets completed in the post_save signal after
        the social accounts get created. (check signals below)

        The concrete list depends on the tenant schema, the user's kind
        (client/vendor) and whether this is the tenant's first user.
        """
        if connection.schema_name == 'public':
            # No onboarding on the public schema.
            return []
        if self.is_vendor:
            if self.is_first_user:
                return self.SETUP_STEPS_FIRST_VENDOR
            return self.SETUP_STEPS_VENDOR
        elif self.is_client:
            if self.is_first_user:
                return self.SETUP_STEPS_FIRST_CLIENT
            return self.SETUP_STEPS_CLIENT
        else:
            return self.SETUP_STEPS_DEFAULT
@property
def last_updated_availability(self):
return self.meta.get('last_updated_availability', None)
@last_updated_availability.setter
def last_updated_availability(self, value):
self.meta['last_updated_availability'] = value
@property
def natural_last_udpated_availability(self):
return humanized_datetime(self.last_updated_availability)
@property
def is_first_user(self):
return self.meta.get('is_first_user', False)
@property
def is_client(self):
return self.kind == self.KIND_CLIENT
@property
def is_allowed_change(self):
return self.is_client and self.is_superuser
@property
def is_vendor(self):
return self.kind == self.KIND_VENDOR
@property
def has_joined(self):
return bool(self.first_login)
    @property
    def next_setup_step(self):
        """First pending onboarding step, or None when done or steps unset."""
        try:
            return self.pending_setup_steps[0]
        except (IndexError, TypeError):
            # IndexError: no steps left; TypeError: pending_setup_steps is None.
            return None
@property
def next_step_display(self):
return self.SETUP_STEPS_DICT.get(self.next_setup_step, '').replace('_', ' ').capitalize()
@property
def has_completed_onboarding(self):
return False if len(self.pending_setup_steps) else True
    @property
    def setup_step_url(self):
        """URL of the next onboarding step view, or None when onboarding is done."""
        next_step = self.next_setup_step
        if next_step:
            args = ()
            # The vendor-profile step is parameterized by the vendor id.
            if self.is_vendor and self.vendor and next_step == self.SETUP_VENDOR_PROFILE:
                args = (self.vendor.id,)
            return reverse('user_setup:setup_step_%s' % self.SETUP_STEPS_DICT[next_step], args=args)
        else:
            return None
@property
def linkedin_account(self):
return self.socialaccount_set.filter(provider='linkedin').first()
    @property
    def cached_following(self):
        """Memoized list of followed-user ids for this instance."""
        # NOTE(review): the class-level default is an empty list, which is
        # falsy, so a user who follows nobody re-runs the query on every
        # access -- a None sentinel would cache the empty result too.
        if not self._cached_following:
            self._cached_following = self.following.values_list('id',
                                                                flat=True)
        return self._cached_following
def as_json(self):
from API.serializers.user_serializers import UserSerializer
return JSONRenderer().render(UserSerializer(self).data)
def get_avatar_url(self):
try:
return self.avatar_set.get(primary=True).avatar_url(45)
except:
return None
def get_initials(self):
return '{}{}'.format(self.first_name[0] if self.first_name else '',
self.last_name[0] if self.last_name else '')
@property
def next_available_in_future(self):
available = self.get_next_available_date
return available > today()
    @property
    def get_next_available_date(self):
        # Thin alias over the denormalized ``next_available`` field (may be None).
        return self.next_available
    @property
    def get_next_available_date_display(self):
        """Humanized availability date, or 'Unknown' when not set."""
        if self.get_next_available_date:
            return humanized_date(self.get_next_available_date)
        else:
            return 'Unknown'
@property
def get_next_available_css_class(self):
_today = today()
date = self.get_next_available_date
if date <= _today:
return 'text-danger'
elif date <= _today + timedelta(days=14):
return 'text-warning'
return ''
    @property
    def is_procurement(self):
        """True when this client user is registered as a procurement contact."""
        # Only client users can be procurement contacts.
        if self.is_client:
            return ProcurementContact.objects.filter(user=self).exists()
        return False
    def calculate_next_available_date(self):
        """Derive the first date this user has no active allocation.

        Returns the end date of the latest past allocation when the user is
        not currently allocated, or None when no allocations exist at all.
        """
        # FIXME: optimize by comparing ranges, not dates.
        # Fetch future allocations sorted by start_date
        _today = today()
        _range = self.proposed.filter(
            start_date__lte=_today, end_date__gte=_today, allocation__gt=0,
        ).order_by('start_date').values_list('start_date', 'end_date')
        if not _range:
            # Not currently allocated: fall back to the most recent past
            # allocation's end date, if any.
            last_allocation = self.proposed.filter(end_date__lte=_today, allocation__gt=0).order_by('-end_date').first()
            if last_allocation:
                return last_allocation.end_date
            else:
                return None
        # create sorted list of all unavailable dates
        dates = []
        for R in _range:
            date = R[0]
            while date <= R[1]:
                dates.append(date)
                date = date + timedelta(days=1)
        dates = sorted(set(dates))
        # find the first missing date in busy dates
        natural_date = dates[0]
        for date in dates:
            if date != natural_date:
                break
            natural_date += timedelta(days=1)
        return natural_date
def get_availability(self):
months = {}
_this_month = this_month()
fetch_upto = _this_month + monthdelta(4)
weeks = self.availability_weeks.filter(date__gte=_this_month,
date__lte=fetch_upto)
for week in weeks:
months[week.month] = months.get(week.month, [])
months[week.month].append(week)
return sorted(tuple(months.items()))
def get_availability_as_weeks(self):
_this_week = this_week()
fetch_upto = this_month() + monthdelta(2)
db_weeks = iter(self.availability_weeks.filter(date__gte=_this_week,
date__lte=fetch_upto))
weeks = []
# last_day_of_last_week = fetch_upto + datetime.timedelta(days=6)
last_day_of_last_week = fetch_upto
day = _this_week
| |
from onelang_core import *
import OneLang.One.Ast.Expressions as exprs
import OneLang.One.Ast.Statements as stats
import OneLang.One.Ast.Types as types
import OneLang.One.Ast.AstTypes as astTypes
import OneLang.One.Ast.References as refs
import OneLang.Generator.GeneratedFile as genFile
import OneLang.Generator.NameUtils as nameUtils
import OneLang.Generator.IGenerator as iGen
import OneLang.One.Ast.Interfaces as ints
import OneLang.Generator.IGeneratorPlugin as iGenPlug
import OneLang.One.ITransformer as iTrans
import OneLang.Generator.TemplateFileGeneratorPlugin as templFileGenPlug
import OneLang.VM.Values as vals
import json
import re
class PhpGenerator:
    def __init__(self):
        # Include registry, populated via add_include (ordered-set semantics).
        self.usings = None
        # Class currently being emitted; set by the generation driver.
        self.current_class = None
        # Identifiers that clash with PHP names; name_() appends '_' to them.
        self.reserved_words = ["Generator", "Array", "List", "Interface", "Class"]
        # Fields that must be emitted as method calls in PHP (e.g. length()).
        self.field_to_method_hack = ["length"]
        self.plugins = []
    def get_lang_name(self):
        """Human-readable target-language name (IGenerator interface)."""
        return "PHP"
    def get_extension(self):
        """File extension for generated sources (IGenerator interface)."""
        return "php"
    def get_transforms(self):
        """AST transformers required before generation; PHP needs none."""
        return []
    def add_include(self, include):
        # Record an include; the dict acts as an ordered set (value unused).
        # NOTE(review): __init__ leaves self.usings as None, so something must
        # assign a dict before the first add_include call -- confirm the
        # generation driver resets it per file.
        self.usings[include] = None
    def add_plugin(self, plugin):
        """Register a generator plugin; template plugins additionally get the
        escape/escapeBackslash helpers injected into their globals."""
        self.plugins.append(plugin)
        # TODO: hack?
        if isinstance(plugin, templFileGenPlug.TemplateFileGeneratorPlugin):
            plugin.model_globals["escape"] = templFileGenPlug.LambdaValue(lambda args: vals.StringValue(self.escape(args[0])))
            plugin.model_globals["escapeBackslash"] = templFileGenPlug.LambdaValue(lambda args: vals.StringValue(self.escape_backslash(args[0])))
    def escape(self, value):
        """JSON-quote a string/regex VM value for embedding in generated PHP.

        Regex literals additionally get '/' delimiters with inner slashes
        escaped. Raises for any other VM value kind.
        """
        if isinstance(value, templFileGenPlug.ExpressionValue) and isinstance(value.value, exprs.RegexLiteral):
            return json.dumps("/" + re.sub("/", "\\\\/", value.value.pattern) + "/", separators=(',', ':'))
        elif isinstance(value, templFileGenPlug.ExpressionValue) and isinstance(value.value, exprs.StringLiteral):
            return json.dumps(value.value.string_value, separators=(',', ':'))
        elif isinstance(value, vals.StringValue):
            return json.dumps(value.value, separators=(',', ':'))
        # NOTE(review): Error is not a Python builtin; presumably provided by
        # onelang_core's star import -- confirm, else this raises NameError.
        raise Error(f'''Not supported VMValue for escape()''')
    def escape_backslash(self, value):
        """JSON-quote a string literal with backslashes doubled (for PHP
        double-quoted contexts). Raises for any other VM value kind."""
        if isinstance(value, templFileGenPlug.ExpressionValue) and isinstance(value.value, exprs.StringLiteral):
            return json.dumps(re.sub("\\\\", "\\\\\\\\", value.value.string_value), separators=(',', ':'))
        # NOTE(review): Error presumably comes from onelang_core's star import.
        raise Error(f'''Not supported VMValue for escape()''')
def name_(self, name):
if name in self.reserved_words:
name += "_"
if name in self.field_to_method_hack:
name += "()"
name_parts = re.split("-", name)
i = 1
while i < len(name_parts):
name_parts[i] = name_parts[i][0].upper() + name_parts[i][1:]
i = i + 1
name = "".join(name_parts)
return name
    def leading(self, item):
        """Return the statement's leading trivia (comments/whitespace) verbatim."""
        result = ""
        if item.leading_trivia != None and len(item.leading_trivia) > 0:
            result += item.leading_trivia
        #if (item.attributes !== null)
        #    result += Object.keys(item.attributes).map(x => `# @${x} ${item.attributes[x]}\n`).join("");
        return result
    def pre_arr(self, prefix, value):
        """Prefix + comma-joined values, or '' when the list is empty."""
        return f'''{prefix}{", ".join(value)}''' if len(value) > 0 else ""
    def pre_if(self, prefix, condition):
        """Return prefix only when the condition holds."""
        return prefix if condition else ""
    def pre(self, prefix, value):
        """Prefix + value, or '' when value is None."""
        return f'''{prefix}{value}''' if value != None else ""
def type_args(self, args):
return f'''<{", ".join(args)}>''' if args != None and len(args) > 0 else ""
    def type_args2(self, args):
        """Like type_args, but renders each AST type through self.type first."""
        return self.type_args(list(map(lambda x: self.type(x), args)))
    def type(self, t, mutates = True):
        """Render an AST type as PHP type text.

        ``mutates`` selects the representation for TsArray: mutable arrays
        become 'List_', read-only ones 'T[]'. Unexported classes are
        qualified with the \\OneLang\\Core namespace.
        """
        if isinstance(t, astTypes.ClassType):
            #const typeArgs = this.typeArgs(t.typeArguments.map(x => this.type(x)));
            # Built-in OneLang classes map to PHP primitives/containers.
            if t.decl.name == "TsString":
                return "string"
            elif t.decl.name == "TsBoolean":
                return "bool"
            elif t.decl.name == "TsNumber":
                return "int"
            elif t.decl.name == "TsArray":
                if mutates:
                    return f'''List_'''
                else:
                    return f'''{self.type(t.type_arguments[0])}[]'''
            elif t.decl.name == "Promise":
                # PHP output is synchronous: unwrap the promised type.
                return self.type(t.type_arguments[0])
            elif t.decl.name == "Object":
                #this.usings.add("System");
                return f'''object'''
            elif t.decl.name == "TsMap":
                return f'''Dictionary'''
            if t.decl.parent_file.export_scope == None:
                # Not exported: qualify with the core namespace.
                return f'''\\OneLang\\Core\\{self.name_(t.decl.name)}'''
            else:
                return self.name_(t.decl.name)
        elif isinstance(t, astTypes.InterfaceType):
            return f'''{self.name_(t.decl.name)}{self.type_args(list(map(lambda x: self.type(x), t.type_arguments)))}'''
        elif isinstance(t, astTypes.VoidType):
            return "void"
        elif isinstance(t, astTypes.EnumType):
            return f'''{self.name_(t.decl.name)}'''
        elif isinstance(t, astTypes.AnyType):
            return f'''object'''
        elif isinstance(t, astTypes.NullType):
            return f'''null'''
        elif isinstance(t, astTypes.GenericsType):
            return f'''{t.type_var_name}'''
        elif isinstance(t, astTypes.LambdaType):
            # Func<params..., ret> when there is a return value, else Action<...>.
            is_func = not (isinstance(t.return_type, astTypes.VoidType))
            param_types = list(map(lambda x: self.type(x.type), t.parameters))
            if is_func:
                param_types.append(self.type(t.return_type))
            return f'''{("Func" if is_func else "Action")}<{", ".join(param_types)}>'''
        elif t == None:
            return "/* TODO */ object"
        else:
            return "/* MISSING */"
    def is_ts_array(self, type):
        """True when the AST type is the built-in TsArray class type."""
        return isinstance(type, astTypes.ClassType) and type.decl.name == "TsArray"
def vis(self, v, is_property):
return "private " if v == types.VISIBILITY.PRIVATE else "protected " if v == types.VISIBILITY.PROTECTED else ("public " if is_property else "") if v == types.VISIBILITY.PUBLIC else "/* TODO: not set */" + ("public " if is_property else "")
    def var_wo_init(self, v, attr):
        """Render a variable declaration without its initializer ('$name').

        The commented-out block below is the not-yet-ported typed variant
        from the TypeScript original; PHP output currently omits the type.
        """
        # let type: string;
        # if (attr !== null && attr.attributes !== null && "php-type" in attr.attributes)
        #     type = attr.attributes["php-type"];
        # else if (v.type instanceof ClassType && v.type.decl.name === "TsArray") {
        #     if (v.mutability.mutated) {
        #         type = `List<${this.type(v.type.typeArguments[0])}>`;
        #     } else {
        #         type = `${this.type(v.type.typeArguments[0])}[]`;
        #     }
        # } else {
        #     type = this.type(v.type);
        # }
        return f'''${self.name_(v.name)}'''
    def var(self, v, attrs):
        """Render a variable declaration with its initializer, if any."""
        return self.var_wo_init(v, attrs) + (f''' = {self.expr(v.initializer)}''' if v.initializer != None else "")
    def expr_call(self, type_args, args):
        """Render '<T>(arg1, arg2, ...)' for a call expression."""
        return self.type_args2(type_args) + f'''({", ".join(list(map(lambda x: self.expr(x), args)))})'''
    def mutate_arg(self, arg, should_be_mutable):
        """Render a call argument; mutability conversion is not yet ported.

        The commented-out block is the TypeScript original that converted
        arrays between mutable/immutable forms; the PHP backend currently
        just renders the expression unchanged.
        """
        # if (this.isTsArray(arg.actualType)) {
        #     if (arg instanceof ArrayLiteral && !shouldBeMutable) {
        #         return `Array(${arg.items.map(x => this.expr(x)).join(', ')})`;
        #     }
        #     let currentlyMutable = shouldBeMutable;
        #     if (arg instanceof VariableReference)
        #         currentlyMutable = arg.getVariable().mutability.mutated;
        #     else if (arg instanceof InstanceMethodCallExpression || arg instanceof StaticMethodCallExpression)
        #         currentlyMutable = false;
        #     if (currentlyMutable && !shouldBeMutable)
        #         return `${this.expr(arg)}.ToArray()`;
        #     else if (!currentlyMutable && shouldBeMutable) {
        #         return `${this.expr(arg)}.ToList()`;
        #     }
        # }
        return self.expr(arg)
    def mutated_expr(self, expr, to_where):
        """Render expr; when assigned to an array variable, route through
        mutate_arg with the target's mutability."""
        if isinstance(to_where, refs.VariableReference):
            v = to_where.get_variable()
            if self.is_ts_array(v.type):
                return self.mutate_arg(expr, v.mutability.mutated)
        return self.expr(expr)
def call_params(self, args, params):
arg_reprs = []
i = 0
while i < len(args):
arg_reprs.append(self.mutate_arg(args[i], params[i].mutability.mutated) if self.is_ts_array(params[i].type) else self.expr(args[i]))
i = i + 1
return f'''({", ".join(arg_reprs)})'''
    def method_call(self, expr):
        """Render 'name<T>(args...)' for a resolved method call."""
        return self.name_(expr.method.name) + self.type_args2(expr.type_args) + self.call_params(expr.args, expr.method.parameters)
    def infer_expr_name_for_type(self, type):
        """Derive a short suggested name for a class type whose type arguments
        are all class types (via NameUtils.short_name); otherwise None."""
        # NOTE(review): ArrayHelper presumably comes from onelang_core's
        # star import -- confirm.
        if isinstance(type, astTypes.ClassType) and ArrayHelper.every(lambda x, _: isinstance(x, astTypes.ClassType), type.type_arguments):
            full_name = "".join(list(map(lambda x: (x).decl.name, type.type_arguments))) + type.decl.name
            return nameUtils.NameUtils.short_name(full_name)
        return None
def expr(self, expr):
for plugin in self.plugins:
result = plugin.expr(expr)
if result != None:
return result
res = "UNKNOWN-EXPR"
if isinstance(expr, exprs.NewExpression):
res = f'''new {self.type(expr.cls_)}{self.call_params(expr.args, expr.cls_.decl.constructor_.parameters if expr.cls_.decl.constructor_ != None else [])}'''
elif isinstance(expr, exprs.UnresolvedNewExpression):
res = f'''/* TODO: UnresolvedNewExpression */ new {self.type(expr.cls_)}({", ".join(list(map(lambda x: self.expr(x), expr.args)))})'''
elif isinstance(expr, exprs.Identifier):
res = f'''/* TODO: Identifier */ {expr.text}'''
elif isinstance(expr, exprs.PropertyAccessExpression):
res = f'''/* TODO: PropertyAccessExpression */ {self.expr(expr.object)}.{expr.property_name}'''
elif isinstance(expr, exprs.UnresolvedCallExpression):
res = f'''/* TODO: UnresolvedCallExpression */ {self.expr(expr.func)}{self.expr_call(expr.type_args, expr.args)}'''
elif isinstance(expr, exprs.UnresolvedMethodCallExpression):
res = f'''/* TODO: UnresolvedMethodCallExpression */ {self.expr(expr.object)}->{expr.method_name}{self.expr_call(expr.type_args, expr.args)}'''
elif isinstance(expr, exprs.InstanceMethodCallExpression):
if isinstance(expr.object, refs.SuperReference):
res = f'''parent::{self.method_call(expr)}'''
elif isinstance(expr.object, exprs.NewExpression):
res = f'''({self.expr(expr.object)})->{self.method_call(expr)}'''
else:
res = f'''{self.expr(expr.object)}->{self.method_call(expr)}'''
elif isinstance(expr, exprs.StaticMethodCallExpression):
res = f'''{self.name_(expr.method.parent_interface.name)}::{self.method_call(expr)}'''
if expr.method.parent_interface.parent_file.export_scope == None:
res = f'''\\OneLang\\Core\\{res}'''
elif isinstance(expr, exprs.GlobalFunctionCallExpression):
res = f'''{self.name_(expr.func.name)}{self.expr_call([], expr.args)}'''
elif isinstance(expr, exprs.LambdaCallExpression):
res = f'''call_user_func({self.expr(expr.method)}, {", ".join(list(map(lambda x: self.expr(x), expr.args)))})'''
elif isinstance(expr, exprs.BooleanLiteral):
res = f'''{("true" if expr.bool_value else "false")}'''
elif isinstance(expr, exprs.StringLiteral):
res = re.sub("\\$", "\\\\$", json.dumps(expr.string_value, separators=(',', ':')))
elif isinstance(expr, exprs.NumericLiteral):
res = expr.value_as_text
elif isinstance(expr, exprs.CharacterLiteral):
res = f'''\'{expr.char_value}\''''
elif isinstance(expr, exprs.ElementAccessExpression):
res = f'''{self.expr(expr.object)}[{self.expr(expr.element_expr)}]'''
elif isinstance(expr, exprs.TemplateString):
parts = []
for part in expr.parts:
if part.is_literal:
lit = ""
i = 0
while i < len(part.literal_text):
chr = part.literal_text[i]
if chr == "\n":
lit += "\\n"
elif chr == "\r":
lit += "\\r"
elif chr == "\t":
lit += "\\t"
elif chr == "$":
lit += "\\$"
elif chr == "\\":
lit += "\\\\"
elif chr == "\"":
lit += "\\\""
else:
chr_code = ord(chr[0])
if 32 <= chr_code and chr_code <= 126:
lit += chr
else:
raise Error(f'''invalid char in template string (code={chr_code})''')
i = i + 1
parts.append(f'''"{lit}"''')
else:
repr = self.expr(part.expression)
is_complex = isinstance(part.expression, exprs.ConditionalExpression) or isinstance(part.expression, exprs.BinaryExpression) or isinstance(part.expression, exprs.NullCoalesceExpression)
parts.append(f'''({repr})''' if is_complex else repr)
res = " . ".join(parts)
elif isinstance(expr, exprs.BinaryExpression):
op = expr.operator
if op == "==":
op = "==="
elif op == "!=":
op = "!=="
if expr.left.actual_type != None and expr.left.actual_type.repr() == "C:TsString":
if op == "+":
op = "."
elif op == "+=":
op = ".="
# const useParen = expr.left instanceof BinaryExpression && expr.left.operator !== expr.operator;
# const leftExpr = this.expr(expr.left);
res = f'''{self.expr(expr.left)} {op} {self.mutated_expr(expr.right, expr.left if expr.operator == "=" else None)}'''
elif isinstance(expr, exprs.ArrayLiteral):
res = f'''array({", ".join(list(map(lambda x: self.expr(x), expr.items)))})'''
elif isinstance(expr, exprs.CastExpression):
res = f'''{self.expr(expr.expression)}'''
elif isinstance(expr, exprs.ConditionalExpression):
when_false_expr = self.expr(expr.when_false)
if isinstance(expr.when_false, exprs.ConditionalExpression):
when_false_expr = f'''({when_false_expr})'''
res = f'''{self.expr(expr.condition)} ? {self.expr(expr.when_true)} : {when_false_expr}'''
elif isinstance(expr, exprs.InstanceOfExpression):
res = f'''{self.expr(expr.expr)} instanceof {self.type(expr.check_type)}'''
elif isinstance(expr, exprs.ParenthesizedExpression):
res = f'''({self.expr(expr.expression)})'''
elif isinstance(expr, exprs.RegexLiteral):
res = f'''new \\OneLang\\Core\\RegExp({json.dumps(expr.pattern, separators=(',', ':'))})'''
elif isinstance(expr, types.Lambda):
params = list(map(lambda x: f'''${self.name_(x.name)}''', expr.parameters))
# TODO: captures should not be null
uses = f''' use ({", ".join(list(map(lambda x: f'${x.name}', expr.captures)))})''' if expr.captures != None and len(expr.captures) > 0 else ""
res = f'''function ({", ".join(params)}){uses} {{ {self.raw_block(expr.body)} }}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.PREFIX:
res = f'''{expr.operator}{self.expr(expr.operand)}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.POSTFIX:
res = f'''{self.expr(expr.operand)}{expr.operator}'''
elif isinstance(expr, exprs.MapLiteral):
repr = ",\n".join(list(map(lambda item: | |
# Copyright (c) 2021 <NAME>.
# Model Adaptations for FFNNs
# Presumption: Model consists only of series of dense layers
# This class provides several options to build a new FFNN consisting only of dense layers
# based on a provided base model
# base assumption: the model will have one input and one output layer, that are not to be modified
import numpy as np
from tensorflow import keras
import copy
class ModelAdapter:
def __init__(self, base_model, loss, optimizer, metrics, overwrite_base=False, index_shift=0):
self.parse_model(base_model)
self.overwrite_base = overwrite_base
self.compile_optimizer = optimizer
self.compile_loss = loss
self.compile_metrics = metrics
self.index_shift = index_shift
def parse_model(self, base_model):
self.base_model = base_model
self.base_config = base_model.get_config()
self.all_weights = self.base_model.get_weights()
self.base_weights = []
self.base_biases = []
last_was_weight = False #first will always be a weight term
for w in self.all_weights:
if not last_was_weight: #this is a weight term
self.base_weights.append(w)
last_was_weight = True
else:
if np.ndim(w) > 1:
# this is a weight term -> there was no bias term!
self.base_biases.append(None)
self.base_weights.append(w)
else:
last_was_weight = False
self.base_biases.append(w)
ln = []
for layer in self.base_model.layers:
if layer.name.startswith('dense'):
if len(layer.name) > 5:
ln.append(int(layer.name[6:]))
else:
ln.append(0)
self.layer_name = max(ln) + 1
self.layer_count = len(self.base_config['layers'])
def perform_checks(self, layer_number, neurons_to_remove=0, last_layer_allowed=False):
result = False
# handle backwards count - todo
#if layer_number < 0:
# layer_number = self.layer_count + layer_number # now -1 equals last_layer
# # todo.. need to change layer_number in caller as well - no by ref in python :(
# if layer_number < 0:
# print("Specified layer number outside of allowed range!")
# return result, layer_number
# input layer not allowed
if layer_number == 0:
print("Input layer cannot be changed")
return result, layer_number
# do not count output layer unless specified,
if (layer_number == self.layer_count - 1 and last_layer_allowed == False) or (layer_number >= self.layer_count):
print("Specified layer number %s outside of allowed range!" % layer_number)
return result, layer_number
# morde neurons to remove than exist
if neurons_to_remove > 0 and neurons_to_remove >= self.base_config['layers'][layer_number]['config']['units']:
print("Cannot remove specified number of neurons!")
return result, layer_number
result = True
return result, layer_number
def get_config(self):
return copy.deepcopy(self.base_config)
def return_compiled_model(self, model):
if self.overwrite_base:
self.parse_model(model)
model.compile(loss=self.compile_loss, optimizer=self.compile_optimizer, metrics=self.compile_metrics)
return model
def Identity(self, *args, **kwargs):
'''
Returns an unlinked copy of the original model
'''
new_model = keras.models.clone_model(self.base_model)
new_model.set_weights(self.all_weights)
return self.return_compiled_model(new_model)
    def AddNeuron(self, layer_number, neurons_to_add=1, overwrite_base=None):
        '''
        Adds a number of units to the specified layer (starting from 1, can't add to input layer)

        New units are created by splitting existing units: a split unit's
        incoming weights (and bias) are divided evenly among its copies, while
        its outgoing weights are duplicated unchanged.
        NOTE(review): dividing the incoming weights changes each copy's
        pre-activation, so the overall network function is only preserved
        exactly for linear activations -- confirm this is intended.

        Returns the new compiled model, or None if the checks fail.
        '''
        check_ok, layer_number = self.perform_checks(layer_number)
        if not check_ok:
            return None
        if overwrite_base is not None:
            self.overwrite_base = overwrite_base
        new_config = self.get_config()
        old_unit_count = new_config['layers'][layer_number]['config']['units']
        new_config['layers'][layer_number]['config']['units'] += neurons_to_add
        use_bias_this = new_config['layers'][layer_number]['config']['use_bias']
        use_bias_next = new_config['layers'][layer_number + 1]['config']['use_bias']
        new_model = keras.Sequential.from_config(new_config)
        # generate adapted weights (deep copies so the base model's arrays
        # are never mutated)
        w0 = copy.deepcopy(self.base_weights[layer_number - 1 - self.index_shift]) # input layer has no weights
        w1 = copy.deepcopy(self.base_weights[layer_number - self.index_shift])
        if use_bias_this:
            b0 = copy.deepcopy(self.base_biases[layer_number - 1 - self.index_shift])
        # Distribute the new units as evenly as possible over the existing
        # ones: the first `loop_remainder` units get one extra copy each.
        loop_all_times = neurons_to_add // old_unit_count
        loop_remainder = neurons_to_add % old_unit_count
        for idx, val in enumerate(loop_remainder*[loop_all_times + 1] + (old_unit_count-loop_remainder)*[loop_all_times]):
            if val > 0:
                # Unit idx is split into (val + 1) copies; share its incoming
                # weights (and bias) equally among them.
                w0[:, idx] = w0[:, idx]/(val+1)
                if use_bias_this:
                    b0[idx] = b0[idx]/(val+1)
                for _ in range(val):
                    # Append the copy's incoming column / outgoing row.
                    w0 = np.append(w0, np.transpose([w0[:, idx]]), axis=1)
                    w1 = np.append(w1, [w1[idx, :]], axis=0) # no need for division by value here
                    if use_bias_this:
                        b0 = np.append(b0, [b0[idx]])
        # concat, set weights
        new_weights = []
        # Layers before the modified one: copy kernel (and bias if present).
        for idx in range(layer_number - 1 - self.index_shift):
            new_weights += [self.base_weights[idx]]
            bias = self.base_biases[idx]
            if bias is not None:
                new_weights += [bias]
        new_weights += [w0]
        if use_bias_this:
            new_weights += [b0]
        new_weights += [w1]
        if use_bias_next:
            new_weights += [self.base_biases[layer_number - self.index_shift]]
        # Layers after the modified one are copied unchanged.
        for idx in range(layer_number + 1 - self.index_shift, len(self.base_weights)):
            new_weights += [self.base_weights[idx]]
            bias = self.base_biases[idx]
            if bias is not None:
                new_weights += [bias]
        new_model.set_weights(new_weights)
        return self.return_compiled_model(new_model)
def AddLayer(self, layer_number, activation=None, use_bias=True, overwrite_base=None):
'''
Adds a layer at the specified index. Output layer will always remain at last index
Number of units will be equal to number of units in the following layer
'''
check_ok, layer_number = self.perform_checks(layer_number)
if not check_ok:
return None
if overwrite_base is not None:
self.overwrite_base = overwrite_base
new_config = self.get_config()
config_to_copy = copy.deepcopy(new_config['layers'][layer_number])
unit_count = config_to_copy['config']['units']
config_to_copy['config']['activation'] = activation
config_to_copy['config']['kernel_initializer'] = {'class_name': 'Identity', 'config': {'gain': 1.0}}
config_to_copy['config']['use_bias'] = use_bias
config_to_copy['config']['bias_initializer'] = {'class_name': 'Zeros', 'config': {}}
config_to_copy['config']['name'] = 'dense_' + str(self.layer_name)
new_config['layers'].insert(layer_number + 1, config_to_copy)
new_model = keras.Sequential.from_config(new_config)
w0 = np.identity(unit_count)
b0 = np.zeros((unit_count,))
# concat, set weights
new_weights = []
for idx in range(layer_number - self.index_shift):
new_weights += [self.base_weights[idx]]
bias = self.base_biases[idx]
if bias is not None:
new_weights += [bias]
new_weights += [w0]
if use_bias:
new_weights += [b0]
for idx in range(layer_number - self.index_shift, len(self.base_weights)):
new_weights += [self.base_weights[idx]]
bias = self.base_biases[idx]
if bias is not None:
new_weights += [bias]
new_model.set_weights(new_weights)
return self.return_compiled_model(new_model)
    def RemoveLayer(self, layer_number, overwrite_base=None):
        '''
        Removes the specified layer (starting from 1, can't remove input layer)

        The removed layer's weights are merged into the following layer via a
        matrix product (w_new = w0 @ w1), with biases propagated accordingly.
        NOTE(review): this merge ignores the removed layer's activation
        function, so the network function is only preserved exactly when that
        activation is linear -- confirm this is intended.

        Returns the new compiled model, or None if the checks fail.
        '''
        check_ok, layer_number = self.perform_checks(layer_number)
        if not check_ok:
            return None
        if overwrite_base is not None:
            self.overwrite_base = overwrite_base
        new_config = self.get_config()
        # The merged layer needs a bias if either of the two layers had one.
        use_bias_this = new_config['layers'][layer_number]['config']['use_bias']
        use_bias_next = new_config['layers'][layer_number + 1]['config']['use_bias']
        use_bias = use_bias_this or use_bias_next
        new_config['layers'][layer_number + 1]['config']['use_bias'] = use_bias
        del new_config['layers'][layer_number] # do not count the input layer!
        new_model = keras.Sequential.from_config(new_config)
        # generate adapted weights: collapse the two consecutive linear maps
        # into a single matrix product
        w0 = self.base_weights[layer_number - 1 - self.index_shift]
        w1 = self.base_weights[layer_number - self.index_shift]
        w_new = w0 @ w1
        b_new = 0
        if use_bias_this:
            # The removed layer's bias propagates through the next kernel.
            b0 = self.base_biases[layer_number - 1 - self.index_shift]
            b_new += b0 @ w1
        if use_bias_next:
            b1 = self.base_biases[layer_number - self.index_shift]
            b_new += b1
        # concat, set weights
        new_weights = []
        # Layers before the removed one: copy kernel (and bias if present).
        for idx in range(layer_number-1 - self.index_shift):
            new_weights += [self.base_weights[idx]]
            bias = self.base_biases[idx]
            if bias is not None:
                new_weights += [bias]
        new_weights += [w_new]
        if use_bias:
            new_weights += [b_new]
        # Layers after the removed one are copied unchanged.
        for idx in range(layer_number + 1 - self.index_shift, len(self.base_weights)):
            new_weights += [self.base_weights[idx]]
            bias = self.base_biases[idx]
            if bias is not None:
                new_weights += [bias]
        new_model.set_weights(new_weights)
        return self.return_compiled_model(new_model)
def PerformSVD(self, layer_number, type='truncated', neurons_to_remove=1, overwrite_base=None, activation=None):
if type == 'truncated':
return self.PerformTruncatedSVD(layer_number, neurons_to_remove=neurons_to_remove, overwrite_base=overwrite_base, activation=activation)
elif type == 'oneLayer':
return self.PerformOneLayerSVD(layer_number, neurons_to_remove=neurons_to_remove, overwrite_base=overwrite_base)
else:
print("Method not found")
return None
def PerformTruncatedSVD(self, layer_number, neurons_to_remove=1, activation=None, use_bias=False, last_layer_allowed=False, overwrite_base=None):
'''
Builds a model with one more layer, but reduced connections
'''
check_ok, layer_number = self.perform_checks(layer_number)
if not check_ok:
return None
if overwrite_base is not None:
self.overwrite_base = overwrite_base
new_config = self.get_config()
config_to_copy = copy.deepcopy(new_config['layers'][layer_number])
desired_units = config_to_copy['config']['units'] - neurons_to_remove
use_bias_next = new_config['layers'][layer_number]['config']['use_bias']
# Generate new weights before generating model, as unit number can still vary
w = self.base_weights[layer_number - 1 - self.index_shift]
u, s, vh = np.linalg.svd(w, full_matrices=True, compute_uv=True)
# s will most likely have full length, even if the rank is smaller (due to machine precision)
# check for this in any case!
desired_units = np.minimum(desired_units, len(s))
s = s[:desired_units]
u = u[:,:desired_units]
vh = vh[:desired_units, :]
# build model with required number of units
config_to_copy['config']['units'] = desired_units
config_to_copy['config']['activation'] = activation
config_to_copy['config']['use_bias'] = use_bias
config_to_copy['config']['bias_initializer'] = None
config_to_copy['config']['name'] = 'dense_' + str(self.layer_name)
self.layer_name += 1
new_config['layers'].insert(layer_number, config_to_copy)
new_model = keras.Sequential.from_config(new_config)
# concat, set weights
new_weights = []
for idx in range(layer_number-1 - self.index_shift):
new_weights += [self.base_weights[idx]]
bias = self.base_biases[idx]
if bias is not None:
new_weights += [bias]
new_weights += [u * s]
if use_bias:
new_weights += [np.zeros(desired_units)]
new_weights += [vh]
if use_bias_next:
new_weights += [self.base_biases[layer_number - 1 - self.index_shift]]
for idx in range(layer_number - self.index_shift, len(self.base_weights)):
new_weights += [self.base_weights[idx]]
bias = self.base_biases[idx]
if bias is not None:
new_weights += [bias]
new_model.set_weights(new_weights)
return self.return_compiled_model(new_model)
def ChangeActivation(self, layer_number, activation, overwrite_base=None):
'''
Builds a model with the given activation function in the specified layer
Weights are not adapted
'''
check_ok, layer_number = self.perform_checks(layer_number)
if not check_ok:
return None
if overwrite_base is not None:
self.overwrite_base = overwrite_base
new_config = self.get_config()
new_config['layers'][layer_number]['config']['activation'] = activation
new_model = keras.Sequential.from_config(new_config)
new_model.set_weights(self.all_weights)
return self.return_compiled_model(new_model)
def PerformOneLayerSVD(self, layer_number, neurons_to_remove=1, overwrite_base=None):
'''
Builds a model with less units in the specified layer by using SVD to project to
a lower rank subspace. | |
you implement the `GRU` from scratch, we will give you the necessary methods from a built-in package. You can use the following packages when constructing the model:
#
#
# - `tl.Serial`: Combinator that applies layers serially (by function composition). [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.combinators.Serial) / [source code](https://github.com/google/trax/blob/e65d51fe584b10c0fa0fccadc1e70b6330aac67e/trax/layers/combinators.py#L26)
# - You can pass in the layers as arguments to `Serial`, separated by commas.
# - For example: `tl.Serial(tl.Embeddings(...), tl.Mean(...), tl.Dense(...), tl.LogSoftmax(...))`
#
# ___
#
# - `tl.ShiftRight`: Allows the model to go right in the feed forward. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.attention.ShiftRight) / [source code](https://github.com/google/trax/blob/e65d51fe584b10c0fa0fccadc1e70b6330aac67e/trax/layers/attention.py#L560)
# - `ShiftRight(n_shifts=1, mode='train')` layer to shift the tensor to the right n_shift times
# - Here in the exercise you only need to specify the mode and not worry about n_shifts
#
# ___
#
# - `tl.Embedding`: Initializes the embedding. In this case it is the size of the vocabulary by the dimension of the model. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Embedding) / [source code](https://github.com/google/trax/blob/e65d51fe584b10c0fa0fccadc1e70b6330aac67e/trax/layers/core.py#L130)
# - `tl.Embedding(vocab_size, d_feature)`.
# - `vocab_size` is the number of unique words in the given vocabulary.
# - `d_feature` is the number of elements in the word embedding (some choices for a word embedding size range from 150 to 300, for example).
# ___
#
# - `tl.GRU`: `Trax` GRU layer. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.rnn.GRU) / [source code](https://github.com/google/trax/blob/e65d51fe584b10c0fa0fccadc1e70b6330aac67e/trax/layers/rnn.py#L154)
# - `GRU(n_units)` Builds a traditional GRU of n_cells with dense internal transformations.
# - `GRU` paper: https://arxiv.org/abs/1412.3555
# ___
#
# - `tl.Dense`: A dense layer. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.Dense) / [source code](https://github.com/google/trax/blob/e65d51fe584b10c0fa0fccadc1e70b6330aac67e/trax/layers/core.py#L34)
# - `tl.Dense(n_units)`: The parameter `n_units` is the number of units chosen for this dense layer.
# ___
#
# - `tl.LogSoftmax`: Log of the output probabilities. [docs](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.core.LogSoftmax) / [source code](https://github.com/google/trax/blob/e65d51fe584b10c0fa0fccadc1e70b6330aac67e/trax/layers/core.py#L644)
# - Here, you don't need to set any parameters for `LogSoftMax()`.
# ___
#
# <a name='ex03'></a>
# ### Exercise 03
# **Instructions:** Implement the `GRULM` class below. You should be using all the methods explained above.
#
# In[17]:
# UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: GRULM
def GRULM(vocab_size=256, d_model=512, n_layers=2, mode='train'):
    """Returns a GRU language model.
    Args:
        vocab_size (int, optional): Size of the vocabulary. Defaults to 256.
        d_model (int, optional): Depth of embedding (n_units in the GRU cell). Defaults to 512.
        n_layers (int, optional): Number of GRU layers. Defaults to 2.
        mode (str, optional): 'train', 'eval' or 'predict', predict mode is for fast inference. Defaults to "train".
    Returns:
        trax.layers.combinators.Serial: A GRU language model as a layer that maps from a tensor of tokens to activations over a vocab set.
    """
    ### START CODE HERE ###
    # Stack of n_layers GRU cells, each with d_model units; tl.Serial
    # flattens the sub-list into the serial pipeline.
    gru_stack = [tl.GRU(n_units=d_model) for _ in range(n_layers)]
    model = tl.Serial(
        tl.ShiftRight(mode=mode),                                # shift input right by one position
        tl.Embedding(vocab_size=vocab_size, d_feature=d_model),  # token embedding layer
        gru_stack,
        tl.Dense(n_units=vocab_size),                            # project back to vocabulary logits
        tl.LogSoftmax()                                          # log-probabilities over the vocab
    )
    ### END CODE HERE ###
    return model
# In[18]:
# testing your model
# Instantiate with the defaults (vocab 256, d_model 512, 2 GRU layers) and
# print the resulting layer structure for a visual check.
model = GRULM()
print(model)
# ##### Expected output
#
# ```python
# Serial[
# Serial[
# ShiftRight(1)
# ]
# Embedding_256_512
# GRU_512
# GRU_512
# Dense_256
# LogSoftmax
# ]
# ```
# In[19]:
# Test your function
# Run the course-provided unit tests against the GRULM builder.
w2_unittest.test_GRULM(GRULM)
# <a name='3'></a>
# # Part 3: Training
#
# Now you are going to train your model. As usual, you have to define the cost function, the optimizer, and decide whether you will be training it on a `gpu` or `cpu`. You also have to feed in a built model. Before going into the training, we re-introduce the `TrainTask` and `EvalTask` abstractions from the last week's assignment.
#
# To train a model on a task, Trax defines an abstraction `trax.supervised.training.TrainTask` which packages the train data, loss and optimizer (among other things) together into an object.
#
# Similarly to evaluate a model, Trax defines an abstraction `trax.supervised.training.EvalTask` which packages the eval data and metrics (among other things) into another object.
#
# The final piece tying things together is the `trax.supervised.training.Loop` abstraction that is a very simple and flexible way to put everything together and train the model, all the while evaluating it and saving checkpoints.
# Using `training.Loop` will save you a lot of code compared to always writing the training loop by hand, like you did in courses 1 and 2. More importantly, you are less likely to have a bug in that code that would ruin your training.
# In[20]:
# Training hyperparameters (discussed in the markdown below).
batch_size = 32  # number of lines per training batch
max_length = 64  # lines longer than this are discarded from training
# An `epoch` is traditionally defined as one pass through the dataset.
#
# Since the dataset was divided in `batches` you need several `steps` (gradient evaluations) in order to complete an `epoch`. So, one `epoch` corresponds to the number of examples in a `batch` times the number of `steps`. In short, in each `epoch` you go over all the dataset.
#
# The `max_length` variable defines the maximum length of lines to be used in training our data, lines longer than that length are discarded.
#
# Below is a function and results that indicate how many lines conform to our criteria of maximum length of a sentence in the entire dataset and how many `steps` are required in order to cover the entire dataset which in turn corresponds to an `epoch`.
# In[21]:
def n_used_lines(lines, max_length):
    '''
    Count the lines that would actually be used for training.
    Args:
        lines: iterable of text lines
        max_length: maximum allowed line length (inclusive), an int
    Return:
        number of effective examples (lines with len <= max_length)
    '''
    return sum(1 for line in lines if len(line) <= max_length)
# NOTE(review): the count below uses a hard-coded max length of 32, while
# training uses max_length = 64 -- confirm which value is intended.
num_used_lines = n_used_lines(lines, 32)
print('Number of used lines from the dataset:', num_used_lines)
print('Batch size (a power of 2):', int(batch_size))
# One epoch = one full pass over all usable lines.
steps_per_epoch = int(num_used_lines/batch_size)
print('Number of steps to cover one epoch:', steps_per_epoch)
# **Expected output:**
#
# Number of used lines from the dataset: 25881
#
# Batch size (a power of 2): 32
#
# Number of steps to cover one epoch: 808
# <a name='3.1'></a>
# ### 3.1 Training the model
#
# You will now write a function that takes in your model and trains it. To train your model you have to decide how many times you want to iterate over the entire data set.
#
# <a name='ex04'></a>
# ### Exercise 04
#
# **Instructions:** Implement the `train_model` program below to train the neural network above. Here is a list of things you should do:
#
# - Create a `trax.supervised.training.TrainTask` object, this encapsulates the aspects of the dataset and the problem at hand:
# - labeled_data = the labeled data that we want to *train* on.
# - loss_fn = [tl.CrossEntropyLoss()](https://trax-ml.readthedocs.io/en/latest/trax.layers.html?highlight=CrossEntropyLoss#trax.layers.metrics.CrossEntropyLoss)
# - optimizer = [trax.optimizers.Adam()](https://trax-ml.readthedocs.io/en/latest/trax.optimizers.html?highlight=Adam#trax.optimizers.adam.Adam) with learning rate = 0.0005
#
# - Create a `trax.supervised.training.EvalTask` object, this encapsulates aspects of evaluating the model:
# - labeled_data = the labeled data that we want to *evaluate* on.
# - metrics = [tl.CrossEntropyLoss()](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.metrics.CrossEntropyLoss) and [tl.Accuracy()](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#trax.layers.metrics.Accuracy)
# - How frequently we want to evaluate and checkpoint the model.
#
# - Create a `trax.supervised.training.Loop` object, this encapsulates the following:
# - The previously created `TrainTask` and `EvalTask` objects.
# - the training model = [GRULM](#ex03)
# - optionally the evaluation model, if different from the training model. NOTE: in presence of Dropout etc we usually want the evaluation model to behave slightly differently than the training model.
#
# You will be using a cross entropy loss, with Adam optimizer. Please read the [trax](https://trax-ml.readthedocs.io/en/latest/index.html) documentation to get a full understanding. Make sure you use the number of steps provided as a parameter to train for the desired number of steps.
#
# **NOTE:** Don't forget to wrap the data generator in `itertools.cycle` to iterate on it for multiple epochs.
# In[24]:
from trax.supervised import training
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: train_model
def train_model(model, data_generator, lines, eval_lines, batch_size=32, max_length=64, n_steps=1, output_dir='model/'):
"""Function that trains the model
Args:
model (trax.layers.combinators.Serial): GRU model.
data_generator (function): Data generator function.
batch_size (int, optional): Number of lines per batch. Defaults to 32.
max_length (int, optional): Maximum length allowed for a line to be processed. Defaults to 64.
lines (list): List of lines to use for training. Defaults to lines.
eval_lines (list): List of lines to use for evaluation. Defaults to eval_lines.
n_steps (int, optional): Number of steps to train. Defaults to 1.
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position, too-many-locals
"""
Create time- and DOM-independent (TDI) whole-detector Cartesian-binned Retro
table.
The generated table is useful for computing the total charge expected to be
deposited by a hypothesis across the entire detector (i.e., independent of time
and DOM).
Define a Cartesian grid that covers all of the IceCube fiducial volume, then
tabulate for each voxel the survival probability for photons coming from any
DOM at any time to reach that voxel. Also, tabulate the "average surviving
photon," defined by its x, y, and z components (which differs from the original
time- and DOM-dependent retro tables, wherein length, theta, and deltaphi are
used to characterize the average surviving photon).
Note that the length of the average surviving photon vector can be interpreted
as a measure of the directionality required for a photon to reach a DOM. I.e.,
if its length is 1, then only photons going exactly opposite that direction
will make it to a DOM (to within statistical and bin-size uncertainties used to
arrive at the average photon). If the length is _less_ than 1, then other
directions besides the average photon direction will be accepted, with
increasing likelihood as that length decreases towards 0.
The new table is in (x, y, z)--independent of time and DOM--and can be used to
scale the photons expected to reach any DOM at any time due to a hypothesis
that generates some number of photons (with an average direction / length) in
any of the voxel(s) of this table.
"""
from __future__ import absolute_import, division, print_function
__all__ = [
'generate_tdi_table_meta',
'generate_tdi_table',
'parse_args'
]
__author__ = '<NAME>, <NAME>'
__license__ = '''Copyright 2017 <NAME> and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from argparse import ArgumentParser
from collections import OrderedDict
from copy import deepcopy
from os.path import abspath, dirname, isdir, isfile, join
import sys
import time
import numpy as np
from astropy.io import fits
if __name__ == '__main__' and __package__ is None:
PARENT_DIR = dirname(dirname(abspath(__file__)))
if PARENT_DIR not in sys.path:
sys.path.append(PARENT_DIR)
from retro.const import (
DC_DOM_QUANT_EFF, IC_DOM_QUANT_EFF, POL_TABLE_RMAX, POL_TABLE_RPWR,
POL_TABLE_NRBINS, POL_TABLE_NTHETABINS, POL_TABLE_NTBINS
)
from retro.tables.generate_binmap import generate_binmap
from retro.tables.shift_and_bin import shift_and_bin
from retro.tables.dom_time_polar_tables import load_t_r_theta_table
from retro.tables.tdi_cart_tables import TDI_TABLE_FNAME_PROTO
from retro.utils.geom import generate_geom_meta
from retro.utils.misc import (
generate_anisotropy_str, hash_obj, hrlist2list, list2hrlist
)
def generate_tdi_table_meta(
        binmap_hash, geom_hash, dom_tables_hash, times_str, x_min, x_max,
        y_min, y_max, z_min, z_max, binwidth, anisotropy, ic_dom_quant_eff,
        dc_dom_quant_eff, ic_exponent, dc_exponent
):
    """Generate a metadata dict for a time- and DOM-independent Cartesian
    (x,y,z)-binned table.

    Parameters
    ----------
    binmap_hash : string
    geom_hash : string
    dom_tables_hash : string
        May be None, in which case the string 'none' is recorded.
    times_str : string
    x_min, x_max, y_min, y_max, z_min, z_max : floats
    binwidth : float
    anisotropy : None or tuple
    ic_dom_quant_eff : float in [0, 1]
    dc_dom_quant_eff : float in [0, 1]
    ic_exponent : float >= 0
    dc_exponent : float >= 0

    Returns
    -------
    metadata : OrderedDict
        Contains keys 'fbasename' (string), 'hash' (string) and
        'kwargs' (OrderedDict).
    """
    kwargs = OrderedDict([
        ('geom_hash', geom_hash),
        ('binmap_hash', binmap_hash),
        ('dom_tables_hash', 'none' if dom_tables_hash is None else dom_tables_hash),
        ('times_str', times_str),
        ('x_min', x_min),
        ('x_max', x_max),
        ('y_min', y_min),
        ('y_max', y_max),
        ('z_min', z_min),
        ('z_max', z_max),
        ('binwidth', binwidth),
        ('anisotropy', anisotropy),
        ('ic_dom_quant_eff', ic_dom_quant_eff),
        ('dc_dom_quant_eff', dc_dom_quant_eff),
        ('ic_exponent', ic_exponent),
        ('dc_exponent', dc_exponent)
    ])
    hash_params = deepcopy(kwargs)
    # Round floats to fixed precision so the hash is stable across tiny
    # numerical differences: spatial limits at 1/100, efficiencies and
    # exponents at 1/10000.  The rounded value is also written back into
    # kwargs so the recorded metadata matches what was hashed.
    rounding = [
        (100, ('x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max')),
        (10000, ('ic_dom_quant_eff', 'dc_dom_quant_eff',
                 'ic_exponent', 'dc_exponent')),
    ]
    for scale, params in rounding:
        for param in params:
            as_int = int(np.round(hash_params[param] * scale))
            hash_params[param] = as_int
            kwargs[param] = float(as_int) / scale
    # binwidth is hashed at 1e-10 precision but left unrounded in kwargs.
    hash_params['binwidth'] = int(np.round(hash_params['binwidth'] * 1e10))
    tdi_hash = hash_obj(hash_params, fmt='hex')
    anisotropy_str = generate_anisotropy_str(anisotropy)
    fname = TDI_TABLE_FNAME_PROTO[-1].format(
        tdi_hash=tdi_hash,
        anisotropy_str=anisotropy_str,
        table_name='',
        **kwargs
    )
    # Strip the empty-table-name suffix to get the shared base filename.
    fbasename = fname.rsplit('_.fits')[0]
    metadata = OrderedDict([
        ('fbasename', fbasename),
        ('hash', tdi_hash),
        ('kwargs', kwargs)
    ])
    return metadata
def generate_tdi_table(tables_dir, geom_fpath, dom_tables_hash, n_phibins,
x_lims, y_lims, z_lims,
binwidth, oversample, antialias, anisotropy,
ic_dom_quant_eff, dc_dom_quant_eff,
ic_exponent, dc_exponent,
strings=slice(None),
depths=slice(None),
times=slice(None),
recompute_binmap=False,
recompute_table=False):
"""Create a time- and DOM-independent Cartesian (x,y,z)-binned Retro
table (if it doesn't already exist or if the user requests that it be
re-computed) and save the table to disk.
The intermediate step of computing a bin mapping from polar (r, theta)
coordinates for the source (t,r,theta)-binned DOM Retro tables is also
performed if it hasn't already been saved to disk or if the user forces
its recomputation; the result of this is stored to disk for future use.
Parameters
----------
tables_dir
geom_fpath
dom_tables_hash
n_phibins : int
x_lims, y_lims, z_lims : 2-tuples of floats
binwidth : float
oversample : int
antialias : int
anisotropy : None or tuple
ic_dom_quant_eff : float in [0, 1]
dc_dom_quant_eff : float in [0, 1]
ic_exponent : float >= 0
dc_exponent : float >= 0
strings : int, sequence, slice
Select only these strings by indexing into the geom array
depths : int, sequence, slice
Select only these depth indices by indexing into the geom array
times : int, sequence, slice
Sum over only these times
recompute_binmap : bool
Force recomputation of bin mapping even if it already exists; existing
file will be overwritten
recompute_table : bool
Force recomputation of table files even if the already exist; existing
files will be overwritten
Returns
-------
tdi_data : OrderedDict
Contains following items:
'binned_sp : shape (nx,ny,nz) numpy ndarray, dtype float32
Survival probability table
'binned_px' : shape (nx,ny,nz) numpy ndarray, dtype float32
'binned_py' : shape (nx,ny,nz) numpy ndarray, dtype float32
'binned_pz' : shape (nx,ny,nz) numpy ndarray, dtype float32
Tables with average photon directionality, one each for x, y,
and z components, respectively
'ind_arrays'
'vol_arrays'
'tdi_meta' : OrderedDict
Return value from `generate_tdi_table_meta`
'binmap_meta' : OrderedDict
Return value from `generate_binmap_meta`
"""
assert isdir(tables_dir)
if dom_tables_hash is None:
dom_tables_hash = 'none'
r_max = POL_TABLE_RMAX
r_power = POL_TABLE_RPWR
n_rbins = POL_TABLE_NRBINS
n_costhetabins = POL_TABLE_NTHETABINS
n_tbins = POL_TABLE_NTBINS
else:
raise ValueError('Cannot handle non-None `dom_tables_hash`')
nx = int(np.round((x_lims[1] - x_lims[0]) / binwidth))
ny = int(np.round((y_lims[1] - y_lims[0]) / binwidth))
nz = int(np.round((z_lims[1] - z_lims[0]) / binwidth))
assert np.abs(x_lims[0] + nx * binwidth - x_lims[1]) < 1e-6
assert np.abs(y_lims[0] + ny * binwidth - y_lims[1]) < 1e-6
assert np.abs(z_lims[0] + nz * binwidth - z_lims[1]) < 1e-6
xyz_shape = (nx, ny, nz)
print('Generated/loaded TDI Cart table will have shape:', xyz_shape)
print('')
geom = np.load(geom_fpath)
depth_indices = np.atleast_1d(np.arange(60)[depths])
string_indices = np.atleast_1d(np.arange(87)[strings]) - 1
string_indices = string_indices[string_indices >= 0]
subdet_doms = {'ic': [], 'dc': []}
dc_strings = list(range(79, 86))
for string_idx in string_indices:
dom_coords = geom[string_idx:string_idx+1, depths, :]
if string_idx in dc_strings:
subdet_doms['dc'].append(dom_coords)
else:
subdet_doms['ic'].append(dom_coords)
for subdet in subdet_doms:
dom_string_list = subdet_doms[subdet]
if not dom_string_list:
subdet_doms.pop(subdet)
else:
subdet_doms[subdet] = np.concatenate(dom_string_list, axis=0)
geom = geom[string_indices, :, :][:, depth_indices, :]
geom_meta = generate_geom_meta(geom)
print('Geom uses strings %s, depth indices %s for a total of %d DOMs'
% (list2hrlist([i+1 for i in string_indices]),
list2hrlist(depth_indices),
geom.shape[0] * geom.shape[1]))
print('')
ind_arrays, vol_arrays, binmap_meta = generate_binmap(
r_max=r_max, r_power=r_power,
n_rbins=n_rbins, n_costhetabins=n_costhetabins, n_phibins=n_phibins,
cart_binwidth=binwidth, oversample=oversample, antialias=antialias,
tables_dir=tables_dir, recompute=recompute_binmap
)
print('')
# Figure out which time bin(s) to use to reduce source (t,r,theta) tables
# along time axis (where reduction is one minus product of one minus
# survival probabilities and average photon directionality)
all_t_bins = list(range(n_tbins))
remaining_t_bins = np.array(all_t_bins)[times].tolist()
if all_t_bins == remaining_t_bins:
times_str = 'all'
else:
times_str = list2hrlist(remaining_t_bins)
print('Marginalizing over times in source (t,r,theta) DOM Retro tables:',
times_str)
print('')
tdi_meta = generate_tdi_table_meta(
binmap_hash=binmap_meta['hash'],
geom_hash=geom_meta['hash'],
dom_tables_hash=None, # TODO: hash for dom tables not yet implemented
times_str=times_str,
x_min=x_lims[0], x_max=x_lims[1],
y_min=y_lims[0], y_max=y_lims[1],
z_min=z_lims[0], z_max=z_lims[1],
binwidth=binwidth, anisotropy=anisotropy,
ic_dom_quant_eff=ic_dom_quant_eff,
dc_dom_quant_eff=dc_dom_quant_eff,
ic_exponent=ic_exponent, dc_exponent=dc_exponent
)
print('Generating Cartesian time- and DOM-independent (TDI) Retro table')
print('tdi_kw:', tdi_meta['kwargs'])
names = [
'survival_prob',
'avg_photon_x',
'avg_photon_y',
'avg_photon_z'
]
if not recompute_table:
for name in names:
fpath = join(tables_dir,
'%s_%s.fits' % (tdi_meta['fbasename'], name))
if not isfile(fpath):
print(' Could not find table, will (re)compute\n%s\n' % fpath)
recompute_table = True
break
if not recompute_table:
print(' Loading (x,y,z)-binned TDI Retro table from disk')
for name in names:
fpath = join(tables_dir,
tdi_meta['fbasename'] + '_' + name + '.fits')
with fits.open(fpath) as fits_file:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.